Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/omap-rng.c14
-rw-r--r--drivers/clk/keystone/syscon-clk.c28
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/timer-keystone.c48
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c53
-rw-r--r--drivers/clocksource/timer-ti-dm.c123
-rw-r--r--drivers/counter/104-quad-8.c1551
-rw-r--r--drivers/counter/Kconfig43
-rw-r--r--drivers/counter/Makefile4
-rw-r--r--drivers/counter/counter-chrdev.c577
-rw-r--r--drivers/counter/counter-chrdev.h14
-rw-r--r--drivers/counter/counter-core.c282
-rw-r--r--drivers/counter/counter-sysfs.c964
-rw-r--r--drivers/counter/counter-sysfs.h13
-rw-r--r--drivers/counter/counter.c50
-rw-r--r--drivers/counter/ftm-quaddec.c101
-rw-r--r--drivers/counter/intel-qep.c525
-rw-r--r--drivers/counter/interrupt-cnt.c244
-rw-r--r--drivers/counter/microchip-tcb-capture.c151
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c279
-rw-r--r--drivers/counter/stm32-timer-cnt.c247
-rw-r--r--drivers/counter/ti-ecap-capture.c677
-rw-r--r--drivers/counter/ti-eqep.c257
-rw-r--r--drivers/cpufreq/Kconfig.arm4
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/ti-cpufreq.c41
-rw-r--r--drivers/crypto/Kconfig3
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/omap-des.c8
-rw-r--r--drivers/crypto/sa2ul.c196
-rw-r--r--drivers/crypto/sa2ul.h9
-rw-r--r--drivers/crypto/ti/Kconfig8
-rw-r--r--drivers/crypto/ti/Makefile2
-rw-r--r--drivers/crypto/ti/mcrc.c371
-rw-r--r--drivers/dma-buf/dma-heap.c2
-rw-r--r--drivers/dma-buf/heaps/Kconfig9
-rw-r--r--drivers/dma-buf/heaps/Makefile1
-rw-r--r--drivers/dma-buf/heaps/carveout-heap.c411
-rw-r--r--drivers/dma/dmatest.c13
-rw-r--r--drivers/dma/of-dma.c10
-rw-r--r--drivers/dma/ti/Kconfig7
-rw-r--r--drivers/dma/ti/Makefile14
-rw-r--r--drivers/dma/ti/dma-crossbar.c6
-rw-r--r--drivers/dma/ti/k3-psil-am62.c196
-rw-r--r--drivers/dma/ti/k3-psil-am62a.c196
-rw-r--r--drivers/dma/ti/k3-psil-am64.c158
-rw-r--r--drivers/dma/ti/k3-psil-j7200.c67
-rw-r--r--drivers/dma/ti/k3-psil-j721e.c152
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c259
-rw-r--r--drivers/dma/ti/k3-psil-j784s4.c354
-rw-r--r--drivers/dma/ti/k3-psil-priv.h5
-rw-r--r--drivers/dma/ti/k3-psil.c7
-rw-r--r--drivers/dma/ti/k3-udma-glue.c387
-rw-r--r--drivers/dma/ti/k3-udma-private.c45
-rw-r--r--drivers/dma/ti/k3-udma.c2179
-rw-r--r--drivers/dma/ti/k3-udma.h28
-rw-r--r--drivers/firmware/ti_sci.c702
-rw-r--r--drivers/firmware/ti_sci.h164
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-davinci.c94
-rw-r--r--drivers/gpio/gpio-omap.c7
-rw-r--r--drivers/gpio/gpio-tps6594x.c142
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/bridge/Kconfig11
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig21
-rw-r--r--drivers/gpu/drm/bridge/cadence/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c (renamed from drivers/gpu/drm/bridge/cdns-dsi.c)509
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h471
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c51
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h18
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c37
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h1
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c46
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c203
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c177
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c16
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig11
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c157
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h12
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c133
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c236
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h59
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c333
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.h49
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c488
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_wb.c178
-rw-r--r--drivers/gpu/drm/omapdrm/omap_wb.h214
-rw-r--r--drivers/gpu/drm/omapdrm/omap_wb_cap.c1045
-rw-r--r--drivers/gpu/drm/omapdrm/omap_wb_m2m.c1198
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c93
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c245
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h9
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h9
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c4
-rw-r--r--drivers/i2c/Kconfig9
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/i2c-atr.c558
-rw-r--r--drivers/i2c/i2c-core-base.c18
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/tps65219-pwrbutton.c150
-rw-r--r--drivers/input/touchscreen/Kconfig1
-rw-r--r--drivers/input/touchscreen/goodix.c38
-rw-r--r--drivers/input/touchscreen/ili210x.c561
-rw-r--r--drivers/iommu/omap-iommu.c31
-rw-r--r--drivers/iommu/omap-iommu.h5
-rw-r--r--drivers/irqchip/Kconfig12
-rw-r--r--drivers/irqchip/irq-pruss-intc.c47
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c86
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c112
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/ti-msgmgr.c181
-rw-r--r--drivers/media/dvb-frontends/ascot2e.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r.h2
-rw-r--r--drivers/media/dvb-frontends/drxk.h2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h2
-rw-r--r--drivers/media/dvb-frontends/helene.h4
-rw-r--r--drivers/media/dvb-frontends/horus3a.h2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.h4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.h2
-rw-r--r--drivers/media/dvb-frontends/stb6000.h2
-rw-r--r--drivers/media/dvb-frontends/tda826x.h2
-rw-r--r--drivers/media/dvb-frontends/zl10036.h4
-rw-r--r--drivers/media/i2c/Kconfig60
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/adv7170.c6
-rw-r--r--drivers/media/i2c/adv7175.c6
-rw-r--r--drivers/media/i2c/adv7180.c18
-rw-r--r--drivers/media/i2c/adv7183.c8
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c13
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c14
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c13
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c10
-rw-r--r--drivers/media/i2c/adv7604.c12
-rw-r--r--drivers/media/i2c/adv7842.c12
-rw-r--r--drivers/media/i2c/ak881x.c6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/ds90ub953.c1169
-rw-r--r--drivers/media/i2c/ds90ub960.c2504
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c23
-rw-r--r--drivers/media/i2c/hi556.c15
-rw-r--r--drivers/media/i2c/imx214.c37
-rw-r--r--drivers/media/i2c/imx219.c87
-rw-r--r--drivers/media/i2c/imx258.c19
-rw-r--r--drivers/media/i2c/imx274.c32
-rw-r--r--drivers/media/i2c/imx290.c20
-rw-r--r--drivers/media/i2c/imx319.c18
-rw-r--r--drivers/media/i2c/imx355.c18
-rw-r--r--drivers/media/i2c/imx390.c900
-rw-r--r--drivers/media/i2c/imx390.h7158
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c21
-rw-r--r--drivers/media/i2c/max9286.c17
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/mt9m001.c18
-rw-r--r--drivers/media/i2c/mt9m032.c34
-rw-r--r--drivers/media/i2c/mt9m111.c18
-rw-r--r--drivers/media/i2c/mt9p031.c45
-rw-r--r--drivers/media/i2c/mt9t001.c44
-rw-r--r--drivers/media/i2c/mt9t112.c14
-rw-r--r--drivers/media/i2c/mt9v011.c6
-rw-r--r--drivers/media/i2c/mt9v032.c44
-rw-r--r--drivers/media/i2c/mt9v111.c23
-rw-r--r--drivers/media/i2c/noon010pc30.c19
-rw-r--r--drivers/media/i2c/ov1063x.c975
-rw-r--r--drivers/media/i2c/ov1063x_regs.h699
-rw-r--r--drivers/media/i2c/ov13858.c18
-rw-r--r--drivers/media/i2c/ov2312.c796
-rw-r--r--drivers/media/i2c/ov2312.h268
-rw-r--r--drivers/media/i2c/ov2640.c16
-rw-r--r--drivers/media/i2c/ov2659.c14
-rw-r--r--drivers/media/i2c/ov2680.c23
-rw-r--r--drivers/media/i2c/ov2685.c10
-rw-r--r--drivers/media/i2c/ov2740.c15
-rw-r--r--drivers/media/i2c/ov5640.c1549
-rw-r--r--drivers/media/i2c/ov5645.c38
-rw-r--r--drivers/media/i2c/ov5647.c1260
-rw-r--r--drivers/media/i2c/ov5670.c19
-rw-r--r--drivers/media/i2c/ov5675.c15
-rw-r--r--drivers/media/i2c/ov5695.c15
-rw-r--r--drivers/media/i2c/ov6650.c28
-rw-r--r--drivers/media/i2c/ov7251.c39
-rw-r--r--drivers/media/i2c/ov7670.c17
-rw-r--r--drivers/media/i2c/ov772x.c12
-rw-r--r--drivers/media/i2c/ov7740.c17
-rw-r--r--drivers/media/i2c/ov8856.c15
-rw-r--r--drivers/media/i2c/ov9640.c8
-rw-r--r--drivers/media/i2c/ov9650.c17
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c12
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c55
-rw-r--r--drivers/media/i2c/s5k4ecgx.c22
-rw-r--r--drivers/media/i2c/s5k5baf.c49
-rw-r--r--drivers/media/i2c/s5k6a3.c19
-rw-r--r--drivers/media/i2c/s5k6aa.c39
-rw-r--r--drivers/media/i2c/saa6752hs.c6
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c87
-rw-r--r--drivers/media/i2c/sr030pc30.c8
-rw-r--r--drivers/media/i2c/st-mipid02.c21
-rw-r--r--drivers/media/i2c/tc358743.c8
-rw-r--r--drivers/media/i2c/tda1997x.c14
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/tvp5150.c20
-rw-r--r--drivers/media/i2c/tvp7002.c11
-rw-r--r--drivers/media/i2c/tw9910.c10
-rw-r--r--drivers/media/i2c/vs6624.c8
-rw-r--r--drivers/media/mc/mc-device.c13
-rw-r--r--drivers/media/mc/mc-entity.c255
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c51
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/platform/Kconfig65
-rw-r--r--drivers/media/platform/Makefile6
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c19
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c19
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c467
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c14
-rw-r--r--drivers/media/platform/chips-media/Kconfig2
-rw-r--r--drivers/media/platform/chips-media/Makefile2
-rw-r--r--drivers/media/platform/chips-media/wave5/Kconfig12
-rw-r--r--drivers/media/platform/chips-media/wave5/Makefile10
-rw-r--r--drivers/media/platform/chips-media/wave5/TODO18
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c188
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h28
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c3372
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-regdefine.h743
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vdi.c245
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vdi.h67
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c1441
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c1758
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c428
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h72
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c1040
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.h1138
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h90
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuerror.h454
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5.h94
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c30
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c39
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c49
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c20
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c17
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c5
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c85
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c41
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c41
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c69
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c70
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c25
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h2
-rw-r--r--drivers/media/platform/pxa_camera.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c35
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c40
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c36
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c84
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c6
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c16
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c8
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c8
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c13
-rw-r--r--drivers/media/platform/renesas-ceu.c7
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c24
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c20
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c6
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c16
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c6
-rw-r--r--drivers/media/platform/ti-vpe/cal-video.c886
-rw-r--r--drivers/media/platform/ti/Makefile4
-rw-r--r--drivers/media/platform/ti/cal/Makefile3
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c (renamed from drivers/media/platform/ti-vpe/cal-camerarx.c)639
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c1156
-rw-r--r--drivers/media/platform/ti/cal/cal.c (renamed from drivers/media/platform/ti-vpe/cal.c)830
-rw-r--r--drivers/media/platform/ti/cal/cal.h (renamed from drivers/media/platform/ti-vpe/cal.h)173
-rw-r--r--drivers/media/platform/ti/cal/cal_regs.h (renamed from drivers/media/platform/ti-vpe/cal_regs.h)53
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/Makefile2
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c1626
-rw-r--r--drivers/media/platform/ti/vpe/Makefile (renamed from drivers/media/platform/ti-vpe/Makefile)6
-rw-r--r--drivers/media/platform/ti/vpe/csc.c (renamed from drivers/media/platform/ti-vpe/csc.c)0
-rw-r--r--drivers/media/platform/ti/vpe/csc.h (renamed from drivers/media/platform/ti-vpe/csc.h)0
-rw-r--r--drivers/media/platform/ti/vpe/sc.c (renamed from drivers/media/platform/ti-vpe/sc.c)0
-rw-r--r--drivers/media/platform/ti/vpe/sc.h (renamed from drivers/media/platform/ti-vpe/sc.h)0
-rw-r--r--drivers/media/platform/ti/vpe/sc_coeff.h (renamed from drivers/media/platform/ti-vpe/sc_coeff.h)0
-rw-r--r--drivers/media/platform/ti/vpe/vip.c3995
-rw-r--r--drivers/media/platform/ti/vpe/vip.h719
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.c (renamed from drivers/media/platform/ti-vpe/vpdma.c)0
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.h (renamed from drivers/media/platform/ti-vpe/vpdma.h)0
-rw-r--r--drivers/media/platform/ti/vpe/vpdma_priv.h (renamed from drivers/media/platform/ti-vpe/vpdma_priv.h)0
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c (renamed from drivers/media/platform/ti-vpe/vpe.c)8
-rw-r--r--drivers/media/platform/ti/vpe/vpe_regs.h (renamed from drivers/media/platform/ti-vpe/vpe_regs.h)0
-rw-r--r--drivers/media/platform/via-camera.c5
-rw-r--r--drivers/media/platform/video-mux.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c34
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c59
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h20
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c51
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c32
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/vxe-vxd/Makefile163
-rw-r--r--drivers/media/platform/vxe-vxd/common/addr_alloc.c499
-rw-r--r--drivers/media/platform/vxe-vxd/common/addr_alloc.h238
-rw-r--r--drivers/media/platform/vxe-vxd/common/dq.c248
-rw-r--r--drivers/media/platform/vxe-vxd/common/dq.h36
-rw-r--r--drivers/media/platform/vxe-vxd/common/hash.c481
-rw-r--r--drivers/media/platform/vxe-vxd/common/hash.h86
-rw-r--r--drivers/media/platform/vxe-vxd/common/idgen_api.c449
-rw-r--r--drivers/media/platform/vxe-vxd/common/idgen_api.h59
-rw-r--r--drivers/media/platform/vxe-vxd/common/img_errors.h112
-rw-r--r--drivers/media/platform/vxe-vxd/common/img_mem.h43
-rw-r--r--drivers/media/platform/vxe-vxd/common/img_mem_man.c1125
-rw-r--r--drivers/media/platform/vxe-vxd/common/img_mem_man.h231
-rw-r--r--drivers/media/platform/vxe-vxd/common/img_mem_unified.c276
-rw-r--r--drivers/media/platform/vxe-vxd/common/imgmmu.c782
-rw-r--r--drivers/media/platform/vxe-vxd/common/imgmmu.h180
-rw-r--r--drivers/media/platform/vxe-vxd/common/lst.c119
-rw-r--r--drivers/media/platform/vxe-vxd/common/lst.h37
-rw-r--r--drivers/media/platform/vxe-vxd/common/pool.c228
-rw-r--r--drivers/media/platform/vxe-vxd/common/pool.h66
-rw-r--r--drivers/media/platform/vxe-vxd/common/pool_api.c710
-rw-r--r--drivers/media/platform/vxe-vxd/common/pool_api.h113
-rw-r--r--drivers/media/platform/vxe-vxd/common/ra.c972
-rw-r--r--drivers/media/platform/vxe-vxd/common/ra.h200
-rw-r--r--drivers/media/platform/vxe-vxd/common/resource.c576
-rw-r--r--drivers/media/platform/vxe-vxd/common/resource.h66
-rw-r--r--drivers/media/platform/vxe-vxd/common/rman_api.c621
-rw-r--r--drivers/media/platform/vxe-vxd/common/rman_api.h66
-rw-r--r--drivers/media/platform/vxe-vxd/common/talmmu_api.c753
-rw-r--r--drivers/media/platform/vxe-vxd/common/talmmu_api.h246
-rw-r--r--drivers/media/platform/vxe-vxd/common/vid_buf.h42
-rw-r--r--drivers/media/platform/vxe-vxd/common/work_queue.c188
-rw-r--r--drivers/media/platform/vxe-vxd/common/work_queue.h66
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/bspp.c2483
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/bspp.h363
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/bspp_int.h514
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/core.c3719
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/core.h72
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/dec_resources.c554
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/dec_resources.h46
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/decoder.c4621
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/decoder.h375
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/fw_interface.h818
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264_idx.h60
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.c3051
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.h278
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264_vlc.h604
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264fw_data.h652
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/h264fw_data_shared.h759
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.c2895
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.h455
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hevcfw_data.h472
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hevcfw_data_shared.h767
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hw_control.c1232
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/hw_control.h144
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_dec_common.h278
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_msvdx_cmds.h279
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_msvdx_core_regs.h22
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_msvdx_vdmc_regs.h26
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_msvdx_vec_regs.h60
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_pixfmts.h195
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_profiles_levels.h33
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_pvdec_core_regs.h60
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_pvdec_pixel_regs.h35
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_pvdec_test_regs.h39
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_vdec_fw_msg.h192
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/img_video_bus4_mmu_regs.h120
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.c645
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.h37
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/jpegfw_data.h83
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/jpegfw_data_shared.h84
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/mem_io.h42
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/mmu_defs.h42
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/pixel_api.c895
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/pixel_api.h152
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/pvdec_entropy_regs.h33
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/pvdec_int.h82
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/pvdec_vec_be_regs.h35
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/reg_io2.h74
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/scaler_setup.h59
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/swsr.c1657
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/swsr.h278
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/translation_api.c1725
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/translation_api.h42
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdec_defs.h549
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.c829
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.h174
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecdd_defs.h446
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.c95
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.h93
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecdd_utils_buf.c897
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecfw_share.h36
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vdecfw_shared.h893
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_core.c1684
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_dec.c185
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_dec.h489
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_ext.h74
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_int.c1137
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_int.h128
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_mmu_defs.h30
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_props.h80
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_pvdec.c1745
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_priv.h126
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_regs.h779
-rw-r--r--drivers/media/platform/vxe-vxd/decoder/vxd_v4l2.c2232
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_binaries/ALL_CODECS_FW_ALL_pipes_2_contexts_8_hwconfig_1_bin.c29013
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_binaries/include_all_fw_variants.h47
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_headers/coreflags.h41
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_headers/defs.h42
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_headers/mtx_fwif.h205
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_headers/topazscfwif.h1104
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/fw_headers/vxe_common.h165
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/header_gen.c1751
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/header_gen.h111
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/mtx_fwif.c990
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/img_soc_dmac_regs.h62
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/mtx_regs.h72
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_coreext_regs.h183
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_db_regs.h22
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_vlc_regs.h46
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_core_regs.h232
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_multicore_regs_old.h22
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/target.h68
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/target_config.h56
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_api.c3887
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_api.h1047
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.c1487
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.h55
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_color_formats.h97
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_device.c1671
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topaz_device.h160
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topazmmu.c741
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/topazmmu.h199
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/vxe_enc.c472
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/vxe_enc.h237
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/vxe_public_regdefs.h926
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.c1949
-rw-r--r--drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.h39
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c20
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c20
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c25
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c6
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c20
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c36
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c16
-rw-r--r--drivers/media/usb/au0828/au0828-core.c8
-rw-r--r--drivers/media/usb/go7007/s2250-board.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c33
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c35
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c832
-rw-r--r--drivers/memory/Kconfig4
-rw-r--r--drivers/memory/omap-gpmc.c43
-rw-r--r--drivers/mfd/Kconfig31
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/tps65219.c435
-rw-r--r--drivers/mfd/tps6594x.c120
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/dma-buf-phys.c221
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/pci_endpoint_test.c29
-rw-r--r--drivers/misc/sram-dma-heap.c243
-rw-r--r--drivers/misc/sram.c20
-rw-r--r--drivers/misc/sram.h17
-rw-r--r--drivers/mmc/host/sdhci_am654.c190
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/omap2.c529
-rw-r--r--drivers/mtd/nand/spi/core.c366
-rw-r--r--drivers/mtd/nand/spi/winbond.c242
-rw-r--r--drivers/mtd/spi-nor/core.c742
-rw-r--r--drivers/mtd/spi-nor/core.h29
-rw-r--r--drivers/mtd/spi-nor/micron-st.c115
-rw-r--r--drivers/mtd/spi-nor/sfdp.c170
-rw-r--r--drivers/mtd/spi-nor/sfdp.h8
-rw-r--r--drivers/mtd/spi-nor/spansion.c327
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/io.c9
-rw-r--r--drivers/mux/core.c278
-rw-r--r--drivers/net/can/m_can/m_can.c11
-rw-r--r--drivers/net/can/m_can/m_can.h2
-rw-r--r--drivers/net/can/m_can/m_can_platform.c13
-rw-r--r--drivers/net/ethernet/ti/Kconfig54
-rw-r--r--drivers/net/ethernet/ti/Makefile13
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c177
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1739
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h81
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c884
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h49
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c552
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.h34
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c252
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h15
-rw-r--r--drivers/net/ethernet/ti/am65-debugfs.c198
-rw-r--r--drivers/net/ethernet/ti/cpsw.c48
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c126
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c27
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h18
-rw-r--r--drivers/net/ethernet/ti/cpsw_switch_ioctl.c271
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c258
-rw-r--r--drivers/net/ethernet/ti/icss_iep.c1166
-rw-r--r--drivers/net/ethernet/ti/icss_iep.h40
-rw-r--r--drivers/net/ethernet/ti/icss_lre_firmware.h136
-rw-r--r--drivers/net/ethernet/ti/icss_mii_rt.h154
-rw-r--r--drivers/net/ethernet/ti/icss_switch.h336
-rw-r--r--drivers/net/ethernet/ti/icss_vlan_mcast_filter_mmap.h100
-rw-r--r--drivers/net/ethernet/ti/icssg_classifier.c471
-rw-r--r--drivers/net/ethernet/ti/icssg_config.c863
-rw-r--r--drivers/net/ethernet/ti/icssg_config.h291
-rw-r--r--drivers/net/ethernet/ti/icssg_ethtool.c476
-rw-r--r--drivers/net/ethernet/ti/icssg_mii_cfg.c121
-rw-r--r--drivers/net/ethernet/ti/icssg_prueth.c3638
-rw-r--r--drivers/net/ethernet/ti/icssg_prueth.h398
-rw-r--r--drivers/net/ethernet/ti/icssg_qos.c476
-rw-r--r--drivers/net/ethernet/ti/icssg_qos.h136
-rw-r--r--drivers/net/ethernet/ti/icssg_queues.c50
-rw-r--r--drivers/net/ethernet/ti/icssg_switch_map.h183
-rw-r--r--drivers/net/ethernet/ti/icssg_switchdev.c494
-rw-r--r--drivers/net/ethernet/ti/icssg_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/j721e-cpsw-virt-mac.c1614
-rw-r--r--drivers/net/ethernet/ti/prueth.h491
-rw-r--r--drivers/net/ethernet/ti/prueth_core.c3526
-rw-r--r--drivers/net/ethernet/ti/prueth_fdb_tbl.h67
-rw-r--r--drivers/net/ethernet/ti/prueth_lre.c1320
-rw-r--r--drivers/net/ethernet/ti/prueth_lre.h201
-rw-r--r--drivers/net/ethernet/ti/prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/prueth_qos.c214
-rw-r--r--drivers/net/ethernet/ti/prueth_switch.c1341
-rw-r--r--drivers/net/ethernet/ti/prueth_switch.h58
-rw-r--r--drivers/net/mdio/mdio-bitbang.c6
-rw-r--r--drivers/net/mdio/of_mdio.c11
-rw-r--r--drivers/net/phy/dp83867.c15
-rw-r--r--drivers/net/phy/dp83869.c5
-rw-r--r--drivers/net/phy/mscc/mscc.h2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c20
-rw-r--r--drivers/net/phy/phy_device.c106
-rw-r--r--drivers/net/phy/phylink.c396
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/epf/Kconfig6
-rw-r--r--drivers/ntb/hw/epf/Makefile1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c753
-rw-r--r--drivers/of/irq.c1
-rw-r--r--drivers/pci/controller/cadence/Kconfig12
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c257
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c320
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c5
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c9
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h12
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c356
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c36
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c19
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c18
-rw-r--r--drivers/pci/endpoint/functions/Kconfig13
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c2145
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c83
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c200
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c206
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c249
-rw-r--r--drivers/pci/host-bridge.c1
-rw-r--r--drivers/phy/Kconfig10
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/cadence/Kconfig2
-rw-r--r--drivers/phy/cadence/cdns-dphy.c473
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c4
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c2117
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c4543
-rw-r--r--drivers/phy/phy-can-transceiver.c168
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c137
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c799
-rw-r--r--drivers/regulator/Kconfig9
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/palmas-regulator.c26
-rw-r--r--drivers/regulator/tps65219-regulator.c414
-rw-r--r--drivers/remoteproc/Kconfig26
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c5
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c675
-rw-r--r--drivers/remoteproc/omap_remoteproc.c16
-rw-r--r--drivers/remoteproc/omap_remoteproc.h4
-rw-r--r--drivers/remoteproc/pru_rproc.c1255
-rw-r--r--drivers/remoteproc/pru_rproc.h46
-rw-r--r--drivers/remoteproc/remoteproc_cdev.c28
-rw-r--r--drivers/remoteproc/remoteproc_core.c666
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c7
-rw-r--r--drivers/remoteproc/remoteproc_internal.h19
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c90
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c227
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c967
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c582
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c1
-rw-r--r--drivers/rpmsg-kdrv/Kconfig48
-rw-r--r--drivers/rpmsg-kdrv/Makefile5
-rw-r--r--drivers/rpmsg-kdrv/rpmsg_kdrv.c744
-rw-r--r--drivers/rpmsg-kdrv/rpmsg_kdrv_demo.c214
-rw-r--r--drivers/rpmsg-kdrv/rpmsg_kdrv_display.c473
-rw-r--r--drivers/rpmsg-kdrv/rpmsg_kdrv_internal.h48
-rw-r--r--drivers/rpmsg-kdrv/rpmsg_kdrv_switch.c977
-rw-r--r--drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-common.h80
-rw-r--r--drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-demo.h78
-rw-r--r--drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-display.h223
-rw-r--r--drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-switch.h664
-rw-r--r--drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport.h83
-rw-r--r--drivers/rpmsg/Kconfig13
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_native.c1
-rw-r--r--drivers/rpmsg/qcom_smd.c1
-rw-r--r--drivers/rpmsg/rpmsg_char.c10
-rw-r--r--drivers/rpmsg/rpmsg_core.c23
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rpmsg/rpmsg_pru.c351
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c82
-rw-r--r--drivers/rtc/Kconfig21
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ti-k3.c689
-rw-r--r--drivers/rtc/rtc-tps6594x.c181
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c1
-rw-r--r--drivers/soc/ti/Kconfig42
-rw-r--r--drivers/soc/ti/Makefile2
-rw-r--r--drivers/soc/ti/k3-ringacc.c460
-rw-r--r--drivers/soc/ti/k3-socinfo.c5
-rw-r--r--drivers/soc/ti/keystone_dsp_mem.c401
-rw-r--r--drivers/soc/ti/knav_dma.c3
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c65
-rw-r--r--drivers/soc/ti/pm33xx.c4
-rw-r--r--drivers/soc/ti/pruss.c359
-rw-r--r--drivers/soc/ti/ti-pat.c670
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c12
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c214
-rw-r--r--drivers/spi/spi-cadence-quadspi.c1301
-rw-r--r--drivers/spi/spi-mem.c130
-rw-r--r--drivers/spi/spi-omap2-mcspi.c156
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c12
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_file.c14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c19
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c31
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c82
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c12
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c24
-rw-r--r--drivers/staging/media/imx/imx-media.h2
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c12
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c33
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c35
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h8
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c32
-rw-r--r--drivers/staging/media/omap4iss/iss.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c37
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c37
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c47
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c39
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c38
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h2
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c4
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c110
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c95
-rw-r--r--drivers/staging/media/tegra-video/csi.c10
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c6
-rw-r--r--drivers/staging/media/tegra-video/vi.c24
-rw-r--r--drivers/staging/media/zoran/zoran.h7
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c682
-rw-r--r--drivers/tty/serial/8250/8250_omap.c84
-rw-r--r--drivers/tty/serial/8250/8250_pruss.c215
-rw-r--r--drivers/tty/serial/8250/Kconfig10
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig10
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/pru_swuart.c777
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c1
-rw-r--r--drivers/usb/dwc3/Kconfig9
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c371
-rw-r--r--drivers/usb/typec/tps6598x.c77
716 files changed, 213401 insertions, 10752 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dcecc9f6e33f..c9ac03631977 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -166,6 +166,8 @@ source "drivers/remoteproc/Kconfig"
source "drivers/rpmsg/Kconfig"
+source "drivers/rpmsg-kdrv/Kconfig"
+
source "drivers/soundwire/Kconfig"
source "drivers/soc/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 576228037718..e78252b59899 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
+obj-$(CONFIG_RPMSG_KDRV) += rpmsg-kdrv/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a7d9e4600d40..e3d4340ed0b6 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -165,7 +165,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 5cc5fc504968..cede9f159102 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define RNG_REG_STATUS_RDY (1 << 0)
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
int irq, err;
- match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
- if (!match) {
- dev_err(dev, "no compatible OF match\n");
- return -EINVAL;
- }
- priv->pdata = match->data;
+ priv->pdata = of_device_get_match_data(dev);
+ if (!priv->pdata)
+ return -ENODEV;
+
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
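
The omap-rng change above replaces the open-coded of_match_device() lookup with of_device_get_match_data(), which returns the .data field of the matching of_device_id entry (or NULL) in a single call. A minimal sketch of that pattern follows; the driver and pdata names are illustrative, not taken from the patch:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_pdata {
		unsigned int flags;	/* illustrative match data */
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_pdata *pdata;

		/* NULL means no match data was provided for this compatible */
		pdata = of_device_get_match_data(&pdev->dev);
		if (!pdata)
			return -ENODEV;

		/* ... configure the device based on pdata ... */
		return 0;
	}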
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index 5b3d36462174..19198325b909 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -149,11 +149,39 @@ static const struct ti_syscon_gate_clk_data am654_clk_data[] = {
{ /* Sentinel */ },
};
+static const struct ti_syscon_gate_clk_data am64_clk_data[] = {
+ TI_SYSCON_CLK_GATE("epwm_tbclk0", 0x0, 0),
+ TI_SYSCON_CLK_GATE("epwm_tbclk1", 0x0, 1),
+ TI_SYSCON_CLK_GATE("epwm_tbclk2", 0x0, 2),
+ TI_SYSCON_CLK_GATE("epwm_tbclk3", 0x0, 3),
+ TI_SYSCON_CLK_GATE("epwm_tbclk4", 0x0, 4),
+ TI_SYSCON_CLK_GATE("epwm_tbclk5", 0x0, 5),
+ TI_SYSCON_CLK_GATE("epwm_tbclk6", 0x0, 6),
+ TI_SYSCON_CLK_GATE("epwm_tbclk7", 0x0, 7),
+ TI_SYSCON_CLK_GATE("epwm_tbclk8", 0x0, 8),
+ { /* Sentinel */ },
+};
+
+static const struct ti_syscon_gate_clk_data am62_clk_data[] = {
+ TI_SYSCON_CLK_GATE("epwm_tbclk0", 0x0, 0),
+ TI_SYSCON_CLK_GATE("epwm_tbclk1", 0x0, 1),
+ TI_SYSCON_CLK_GATE("epwm_tbclk2", 0x0, 2),
+ { /* Sentinel */ },
+};
+
static const struct of_device_id ti_syscon_gate_clk_ids[] = {
{
.compatible = "ti,am654-ehrpwm-tbclk",
.data = &am654_clk_data,
},
+ {
+ .compatible = "ti,am64-epwm-tbclk",
+ .data = &am64_clk_data,
+ },
+ {
+ .compatible = "ti,am62-epwm-tbclk",
+ .data = &am62_clk_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a0c6e88bebe0..e8d9e905acc6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -22,7 +22,7 @@ config CLKEVT_I8253
config I8253_LOCK
bool
-config OMAP_DM_TIMER
+config OMAP_DM_SYSTIMER
bool
select TIMER_OF
@@ -56,6 +56,13 @@ config DIGICOLOR_TIMER
help
Enables the support for the digicolor timer driver.
+config OMAP_DM_TIMER
+ bool "OMAP dual-mode timer driver" if ARCH_K3 || COMPILE_TEST
+ default y if ARCH_K3
+ select TIMER_OF
+ help
+ Enables the support for the TI dual-mode timer driver.
+
config DW_APB_TIMER
bool "DW APB timer driver" if COMPILE_TEST
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1c444cc3bb44..17d2baa528e4 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
-obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm-systimer.o
+obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index fea8a4f85669..ed98a9e1bd7f 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#define TIMER_NAME "timer-keystone"
@@ -38,11 +39,13 @@
* @base: timer memory base address
* @hz_period: cycles per HZ period
* @event_dev: event device based on timer
+ * @registered: flag to keep track of registration status
*/
static struct keystone_timer {
void __iomem *base;
unsigned long hz_period;
struct clock_event_device event_dev;
+ bool registered;
} timer;
static inline u32 keystone_timer_readl(unsigned long rg)
@@ -140,13 +143,14 @@ static int keystone_set_periodic(struct clock_event_device *evt)
return 0;
}
-static int __init keystone_timer_init(struct device_node *np)
+static int keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
struct clk *clk;
int irq, error;
+ timer.registered = false;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
@@ -215,6 +219,7 @@ static int __init keystone_timer_init(struct device_node *np)
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
+ timer.registered = true;
return 0;
err:
clk_put(clk);
@@ -224,3 +229,44 @@ err:
TIMER_OF_DECLARE(keystone_timer, "ti,keystone-timer",
keystone_timer_init);
+
+static const struct of_device_id keystone_clocksource_of_match[] = {
+ {.compatible = "ti,k2g-timer", },
+ {},
+};
+
+static int keystone_clocksource_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ if (timer.registered)
+ return 0;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ clk_put(clk);
+ keystone_timer_init(pdev->dev.of_node);
+ if (!timer.registered)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct platform_driver keystone_clocksource_driver = {
+ .probe = keystone_clocksource_probe,
+ .driver = {
+ .name = "keystone_clocksource",
+ .of_match_table = keystone_clocksource_of_match,
+ },
+};
+
+static int __init keystone_clocksource_init_driver(void)
+{
+ return platform_driver_register(&keystone_clocksource_driver);
+}
+device_initcall(keystone_clocksource_init_driver);
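
The probe above open-codes the -EPROBE_DEFER check so that clock-acquisition failures are logged only when they are real errors. As an aside, on kernels that provide dev_err_probe() the same logic collapses to a single call; a sketch under that assumption, not part of the patch:

	/* assumes <linux/clk.h> and <linux/platform_device.h> */
	static int example_clk_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* dev_err_probe() logs at error level except for -EPROBE_DEFER */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");

		return 0;
	}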
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 632523c1232f..b9e9daf49502 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/clk/clk-conf.h>
@@ -29,6 +30,11 @@
static int counter_32k;
static u32 clocksource;
static u32 clockevent;
+static struct irq_chip *clkev_irq_chip;
+static struct irq_desc *clkev_irq_desc;
+
+#define AM43XX_GIC_CPU_BASE 0x48240100
+static void __iomem *gic_cpu_base;
/*
* Subset of the timer registers we use. Note that the register offsets
@@ -508,12 +514,52 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
return 0;
}
+static int omap_clockevent_late_ack_init(void)
+{
+ gic_cpu_base = ioremap(AM43XX_GIC_CPU_BASE, SZ_4K);
+
+ if (!gic_cpu_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void omap_clockevent_late_ack(void)
+{
+ if (!clkev_irq_chip)
+ return;
+
+ /*
+	 * For the GIC to properly clear an interrupt, it must be read
+	 * from the INTACK register.
+ */
+ if (gic_cpu_base)
+		readl_relaxed(gic_cpu_base + GIC_CPU_INTACK);
+ if (clkev_irq_chip->irq_ack)
+ clkev_irq_chip->irq_ack(&clkev_irq_desc->irq_data);
+ if (clkev_irq_chip->irq_eoi)
+ clkev_irq_chip->irq_eoi(&clkev_irq_desc->irq_data);
+
+ clkev_irq_chip->irq_unmask(&clkev_irq_desc->irq_data);
+}
+
static void omap_clockevent_idle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
dmtimer_systimer_disable(t);
+
+ /*
+	 * It is possible for a late interrupt to be generated, which would
+	 * cause a suspend failure. Ack it here both in the timer and the
+	 * interrupt controller to avoid this.
+ */
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
+ omap_clockevent_late_ack();
+
clk_disable(t->fck);
}
@@ -622,8 +668,15 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
of_machine_is_compatible("ti,am43")) {
clkevt->dev.suspend = omap_clockevent_idle;
clkevt->dev.resume = omap_clockevent_unidle;
+
+		clkev_irq_desc = irq_to_desc(clkevt->dev.irq);
+ if (clkev_irq_desc)
+ clkev_irq_chip = irq_desc_get_chip(clkev_irq_desc);
}
+ if (of_machine_is_compatible("ti,am43"))
+ omap_clockevent_late_ack_init();
+
return 0;
err_out_free:
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e5c631f1b5cb..9e6c2b6e15f0 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -44,6 +44,121 @@ enum {
REQUEST_BY_NODE,
};
+static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
+ int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ return readl_relaxed(timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
+ u32 reg, u32 val, int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ writel_relaxed(val, timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+{
+ u32 tidr;
+
+ /* Assume v1 ip if bits [31:16] are zero */
+ tidr = readl_relaxed(timer->io_base);
+ if (!(tidr >> 16)) {
+ timer->revision = 1;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ timer->func_base = timer->io_base;
+ } else {
+ timer->revision = 2;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->pend = timer->io_base +
+ _OMAP_TIMER_WRITE_PEND_OFFSET +
+ OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
+ }
+}
+
+/*
+ * __omap_dm_timer_enable_posted - enables write posted mode
+ * @timer: pointer to timer instance handle
+ *
+ * Enables write posted mode for the timer. When posted mode is enabled,
+ * writes to certain timer registers are immediately acknowledged by the
+ * internal bus, which avoids stalling the CPU while it waits for the
+ * write to complete. Enabling this feature can improve performance when
+ * writing to the timer registers.
+ */
+static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+{
+ if (timer->posted)
+ return;
+
+ if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
+ timer->posted = OMAP_TIMER_NONPOSTED;
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ return;
+ }
+
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
+ OMAP_TIMER_CTRL_POSTED, 0);
+ timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
+ timer->posted = OMAP_TIMER_POSTED;
+}
+
+static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
+ int posted, unsigned long rate)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ /* Readback to make sure write has completed */
+ __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ /*
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
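+		 * (e.g. with a 32768 Hz functional clock this works out
+		 * to udelay(3500000 / 32768 + 1) = udelay(107))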
+ */
+ udelay(3500000 / rate + 1);
+#endif
+ }
+
+ /* Ack possibly pending interrupt */
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+}
+
+static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_ena);
+ __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int
+__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+{
+ return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_stat);
+}
+
/**
* omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
* @timer: timer pointer over which read operation to perform
@@ -921,6 +1036,10 @@ static const struct dmtimer_platform_data omap3plus_pdata = {
.timer_ops = &dmtimer_ops,
};
+static const struct dmtimer_platform_data am6_pdata = {
+ .timer_ops = &dmtimer_ops,
+};
+
static const struct of_device_id omap_timer_match[] = {
{
.compatible = "ti,omap2420-timer",
@@ -949,6 +1068,10 @@ static const struct of_device_id omap_timer_match[] = {
.compatible = "ti,dm816-timer",
.data = &omap3plus_pdata,
},
+ {
+ .compatible = "ti,am654-timer",
+ .data = &am6_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 21bb2bb767a1..62c2b7ac4339 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -9,27 +9,63 @@
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/types.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/interrupt.h>
#include <linux/isa.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#define QUAD8_EXTENT 32
static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
static unsigned int num_quad8;
-module_param_array(base, uint, &num_quad8, 0);
+module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+static unsigned int irq[max_num_isa_dev(QUAD8_EXTENT)];
+module_param_hw_array(irq, uint, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
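+
+/*
+ * Note: module_param_hw_array() is the variant of module_param_array()
+ * for parameters that carry hardware resources; the extra argument
+ * names the resource type (ioport, iomem, irq, dma, other) so that
+ * tooling can identify such parameters.
+ */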
+
#define QUAD8_NUM_COUNTERS 8
/**
- * struct quad8_iio - IIO device private data structure
+ * struct channel_reg - channel register structure
+ * @data: Count data
+ * @control: Channel flags and control
+ */
+struct channel_reg {
+ u8 data;
+ u8 control;
+};
+
+/**
+ * struct quad8_reg - device register structure
+ * @channel: quadrature counter data and control
+ * @interrupt_status: channel interrupt status
+ * @channel_oper: enable/reset counters and interrupt functions
+ * @index_interrupt: enable channel interrupts
+ * @reserved: reserved for factory use
+ * @index_input_levels: index signal logical input level
+ * @cable_status: differential encoder cable status
+ */
+struct quad8_reg {
+ struct channel_reg channel[QUAD8_NUM_COUNTERS];
+ u8 interrupt_status;
+ u8 channel_oper;
+ u8 index_interrupt;
+ u8 reserved[3];
+ u8 index_input_levels;
+ u8 cable_status;
+};
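+
+/*
+ * Access sketch (illustrative, not from the patch): with the register
+ * block mapped as a struct quad8_reg __iomem pointer, registers are
+ * read and written through the MMIO accessors on its members, e.g.
+ *
+ *	iowrite8(QUAD8_CHAN_OP_RESET_COUNTERS, &priv->reg->channel_oper);
+ *	status = ioread8(&priv->reg->interrupt_status);
+ */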
+
+/**
+ * struct quad8 - device private data structure
+ * @lock: lock to prevent clobbering device states during R/W ops
* @counter: instance of the counter_device
* @fck_prescaler: array of filter clock prescaler configurations
* @preset: array of preset values
@@ -38,14 +74,14 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
* @quadrature_scale: array of quadrature mode scale configurations
* @ab_enable: array of A and B inputs enable configurations
* @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @irq_trigger: array of current IRQ trigger function configurations
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
- * @base: base port address of the IIO device
+ * @reg: I/O address offset for the device registers
*/
-struct quad8_iio {
- struct mutex lock;
- struct counter_device counter;
+struct quad8 {
+ spinlock_t lock;
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int count_mode[QUAD8_NUM_COUNTERS];
@@ -53,15 +89,13 @@ struct quad8_iio {
unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
unsigned int ab_enable[QUAD8_NUM_COUNTERS];
unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int irq_trigger[QUAD8_NUM_COUNTERS];
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
- unsigned int base;
+ struct quad8_reg __iomem *reg;
};
-#define QUAD8_REG_CHAN_OP 0x11
-#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
-#define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17
/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
@@ -92,684 +126,161 @@ struct quad8_iio {
#define QUAD8_RLD_CNTR_OUT 0x10
/* Transfer Preset Register LSB to FCK Prescaler */
#define QUAD8_RLD_PRESET_PSC 0x18
-#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
+#define QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC 0x04
#define QUAD8_CMR_QUADRATURE_X1 0x08
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
-
-static int quad8_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int *val, int *val2, long mask)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
- int i;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type == IIO_INDEX) {
- *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
- & BIT(chan->channel));
- return IIO_VAL_INT;
- }
-
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (borrow ^ carry) << 24;
-
- mutex_lock(&priv->lock);
-
- /* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
- base_offset + 1);
-
- for (i = 0; i < 3; i++)
- *val |= (unsigned int)inb(base_offset) << (8 * i);
-
- mutex_unlock(&priv->lock);
-
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_ENABLE:
- *val = priv->ab_enable[chan->channel];
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 1;
- *val2 = priv->quadrature_scale[chan->channel];
- return IIO_VAL_FRACTIONAL_LOG2;
- }
-
- return -EINVAL;
-}
-
-static int quad8_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long mask)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- int i;
- unsigned int ior_cfg;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (chan->type == IIO_INDEX)
- return -EINVAL;
-
- /* Only 24-bit values are supported */
- if ((unsigned int)val > 0xFFFFFF)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Counter can only be set via Preset Register */
- for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
-
- /* Transfer Preset Register to Counter */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Set Preset Register back to original value */
- val = priv->preset[chan->channel];
- for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
-
- /* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
- /* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
-
- mutex_unlock(&priv->lock);
-
- return 0;
- case IIO_CHAN_INFO_ENABLE:
- /* only boolean values accepted */
- if (val < 0 || val > 1)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- priv->ab_enable[chan->channel] = val;
-
- ior_cfg = val | priv->preset_enable[chan->channel] << 1;
-
- /* Load I/O control configuration */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
-
- mutex_unlock(&priv->lock);
-
- return 0;
- case IIO_CHAN_INFO_SCALE:
- mutex_lock(&priv->lock);
-
- /* Quadrature scaling only available in quadrature mode */
- if (!priv->quadrature_mode[chan->channel] &&
- (val2 || val != 1)) {
- mutex_unlock(&priv->lock);
- return -EINVAL;
- }
-
- /* Only three gain states (1, 0.5, 0.25) */
- if (val == 1 && !val2)
- priv->quadrature_scale[chan->channel] = 0;
- else if (!val)
- switch (val2) {
- case 500000:
- priv->quadrature_scale[chan->channel] = 1;
- break;
- case 250000:
- priv->quadrature_scale[chan->channel] = 2;
- break;
- default:
- mutex_unlock(&priv->lock);
- return -EINVAL;
- }
- else {
- mutex_unlock(&priv->lock);
- return -EINVAL;
- }
-
- mutex_unlock(&priv->lock);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info quad8_info = {
- .read_raw = quad8_read_raw,
- .write_raw = quad8_write_raw
-};
-
-static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
- const struct iio_chan_spec *chan, char *buf)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
-}
-
-static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
- const struct iio_chan_spec *chan, const char *buf, size_t len)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
- unsigned int preset;
- int ret;
- int i;
-
- ret = kstrtouint(buf, 0, &preset);
- if (ret)
- return ret;
-
- /* Only 24-bit values are supported */
- if (preset > 0xFFFFFF)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- priv->preset[chan->channel] = preset;
-
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
-
- /* Set Preset Register */
- for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
-
- mutex_unlock(&priv->lock);
-
- return len;
-}
-
-static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, char *buf)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- !priv->preset_enable[chan->channel]);
-}
-
-static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
- uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
- size_t len)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
- bool preset_enable;
- int ret;
- unsigned int ior_cfg;
-
- ret = kstrtobool(buf, &preset_enable);
- if (ret)
- return ret;
-
- /* Preset enable is active low in Input/Output Control register */
- preset_enable = !preset_enable;
-
- mutex_lock(&priv->lock);
-
- priv->preset_enable[chan->channel] = preset_enable;
-
- ior_cfg = priv->ab_enable[chan->channel] |
- (unsigned int)preset_enable << 1;
-
- /* Load I/O control configuration to Input / Output Control Register */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
-
- mutex_unlock(&priv->lock);
-
- return len;
-}
-
-static const char *const quad8_noise_error_states[] = {
- "No excessive noise is present at the count inputs",
- "Excessive noise is present at the count inputs"
-};
-
-static int quad8_get_noise_error(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- return !!(inb(base_offset) & QUAD8_FLAG_E);
-}
-
-static const struct iio_enum quad8_noise_error_enum = {
- .items = quad8_noise_error_states,
- .num_items = ARRAY_SIZE(quad8_noise_error_states),
- .get = quad8_get_noise_error
-};
-
-static const char *const quad8_count_direction_states[] = {
- "down",
- "up"
-};
-
-static int quad8_get_count_direction(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- return !!(inb(base_offset) & QUAD8_FLAG_UD);
-}
-
-static const struct iio_enum quad8_count_direction_enum = {
- .items = quad8_count_direction_states,
- .num_items = ARRAY_SIZE(quad8_count_direction_states),
- .get = quad8_get_count_direction
-};
-
-static const char *const quad8_count_modes[] = {
- "normal",
- "range limit",
- "non-recycle",
- "modulo-n"
-};
-
-static int quad8_set_count_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int cnt_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- unsigned int mode_cfg = cnt_mode << 1;
- const int base_offset = priv->base + 2 * chan->channel + 1;
-
- mutex_lock(&priv->lock);
-
- priv->count_mode[chan->channel] = cnt_mode;
-
- /* Add quadrature mode configuration */
- if (priv->quadrature_mode[chan->channel])
- mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
-
- /* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
-
- mutex_unlock(&priv->lock);
-
- return 0;
-}
-
-static int quad8_get_count_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->count_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_count_mode_enum = {
- .items = quad8_count_modes,
- .num_items = ARRAY_SIZE(quad8_count_modes),
- .set = quad8_set_count_mode,
- .get = quad8_get_count_mode
-};
-
-static const char *const quad8_synchronous_modes[] = {
- "non-synchronous",
- "synchronous"
-};
-
-static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int synchronous_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
- unsigned int idr_cfg = synchronous_mode;
-
- mutex_lock(&priv->lock);
-
- idr_cfg |= priv->index_polarity[chan->channel] << 1;
-
- /* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[chan->channel]) {
- mutex_unlock(&priv->lock);
- return -EINVAL;
- }
-
- priv->synchronous_mode[chan->channel] = synchronous_mode;
-
- /* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
-
- mutex_unlock(&priv->lock);
-
- return 0;
-}
-
-static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->synchronous_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_synchronous_mode_enum = {
- .items = quad8_synchronous_modes,
- .num_items = ARRAY_SIZE(quad8_synchronous_modes),
- .set = quad8_set_synchronous_mode,
- .get = quad8_get_synchronous_mode
-};
-
-static const char *const quad8_quadrature_modes[] = {
- "non-quadrature",
- "quadrature"
-};
-
-static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int quadrature_mode)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
- unsigned int mode_cfg;
-
- mutex_lock(&priv->lock);
-
- mode_cfg = priv->count_mode[chan->channel] << 1;
-
- if (quadrature_mode)
- mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
- else {
- /* Quadrature scaling only available in quadrature mode */
- priv->quadrature_scale[chan->channel] = 0;
-
- /* Synchronous function not supported in non-quadrature mode */
- if (priv->synchronous_mode[chan->channel])
- quad8_set_synchronous_mode(indio_dev, chan, 0);
- }
-
- priv->quadrature_mode[chan->channel] = quadrature_mode;
-
- /* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
-
- mutex_unlock(&priv->lock);
-
- return 0;
-}
-
-static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->quadrature_mode[chan->channel];
-}
-
-static const struct iio_enum quad8_quadrature_mode_enum = {
- .items = quad8_quadrature_modes,
- .num_items = ARRAY_SIZE(quad8_quadrature_modes),
- .set = quad8_set_quadrature_mode,
- .get = quad8_get_quadrature_mode
-};
-
-static const char *const quad8_index_polarity_modes[] = {
- "negative",
- "positive"
-};
-
-static int quad8_set_index_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int index_polarity)
-{
- struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel + 1;
- unsigned int idr_cfg = index_polarity << 1;
-
- mutex_lock(&priv->lock);
-
- idr_cfg |= priv->synchronous_mode[chan->channel];
-
- priv->index_polarity[chan->channel] = index_polarity;
-
- /* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
-
- mutex_unlock(&priv->lock);
-
- return 0;
-}
-
-static int quad8_get_index_polarity(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- const struct quad8_iio *const priv = iio_priv(indio_dev);
-
- return priv->index_polarity[chan->channel];
-}
-
-static const struct iio_enum quad8_index_polarity_enum = {
- .items = quad8_index_polarity_modes,
- .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
- .set = quad8_set_index_polarity,
- .get = quad8_get_index_polarity
-};
-
-static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
- {
- .name = "preset",
- .shared = IIO_SEPARATE,
- .read = quad8_read_preset,
- .write = quad8_write_preset
- },
- {
- .name = "set_to_preset_on_index",
- .shared = IIO_SEPARATE,
- .read = quad8_read_set_to_preset_on_index,
- .write = quad8_write_set_to_preset_on_index
- },
- IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
- IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
- IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
- IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
- IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
- IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
- IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
- IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
- {}
-};
-
-static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
- IIO_ENUM("synchronous_mode", IIO_SEPARATE,
- &quad8_synchronous_mode_enum),
- IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
- IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
- IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
- {}
-};
-
-#define QUAD8_COUNT_CHAN(_chan) { \
- .type = IIO_COUNT, \
- .channel = (_chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
- .ext_info = quad8_count_ext_info, \
- .indexed = 1 \
-}
-
-#define QUAD8_INDEX_CHAN(_chan) { \
- .type = IIO_INDEX, \
- .channel = (_chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .ext_info = quad8_index_ext_info, \
- .indexed = 1 \
-}
-
-static const struct iio_chan_spec quad8_channels[] = {
- QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
- QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
- QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
- QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
- QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
- QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
- QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
- QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
-};
-
static int quad8_signal_read(struct counter_device *counter,
- struct counter_signal *signal, enum counter_signal_value *val)
+ struct counter_signal *signal,
+ enum counter_signal_level *level)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int state;
/* Only Index signal levels can be read */
if (signal->id < 16)
return -EINVAL;
- state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
- & BIT(signal->id - 16);
+ state = ioread8(&priv->reg->index_input_levels) & BIT(signal->id - 16);
- *val = (state) ? COUNTER_SIGNAL_HIGH : COUNTER_SIGNAL_LOW;
+ *level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
static int quad8_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
- struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id;
+ struct quad8 *const priv = counter_priv(counter);
+ struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
+ unsigned long irqflags;
int i;
- flags = inb(base_offset + 1);
+ flags = ioread8(&chan->control);
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer; transfer Counter to Output Latch */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
- base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ &chan->control);
for (i = 0; i < 3; i++)
- *val |= (unsigned long)inb(base_offset) << (8 * i);
+ *val |= (unsigned long)ioread8(&chan->data) << (8 * i);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
static int quad8_count_write(struct counter_device *counter,
- struct counter_count *count, unsigned long val)
+ struct counter_count *count, u64 val)
{
- struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id;
+ struct quad8 *const priv = counter_priv(counter);
+ struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
+ unsigned long irqflags;
int i;
/* Only 24-bit values are supported */
if (val > 0xFFFFFF)
- return -EINVAL;
+ return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
+ iowrite8(val >> (8 * i), &chan->data);
/* Transfer Preset Register to Counter */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, &chan->control);
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set Preset Register back to original value */
val = priv->preset[count->id];
for (i = 0; i < 3; i++)
- outb(val >> (8 * i), base_offset);
+ iowrite8(val >> (8 * i), &chan->data);
/* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
/* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-enum quad8_count_function {
- QUAD8_COUNT_FUNCTION_PULSE_DIRECTION = 0,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X1,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X2,
- QUAD8_COUNT_FUNCTION_QUADRATURE_X4
+static const enum counter_function quad8_count_functions_list[] = {
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
-static enum counter_count_function quad8_count_functions_list[] = {
- [QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
- [QUAD8_COUNT_FUNCTION_QUADRATURE_X4] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4
-};
-
-static int quad8_function_get(struct counter_device *counter,
- struct counter_count *count, size_t *function)
+static int quad8_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
if (priv->quadrature_mode[id])
switch (priv->quadrature_scale[id]) {
case 0:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
+ *function = COUNTER_FUNCTION_QUADRATURE_X1_A;
break;
case 1:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X2;
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
break;
case 2:
- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X4;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
break;
}
else
- *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+ *function = COUNTER_FUNCTION_PULSE_DIRECTION;
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static int quad8_function_set(struct counter_device *counter,
- struct counter_count *count, size_t function)
+static int quad8_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
unsigned int *const synchronous_mode = priv->synchronous_mode + id;
- const int base_offset = priv->base + 2 * id + 1;
+ u8 __iomem *const control = &priv->reg->channel[id].control;
+ unsigned long irqflags;
unsigned int mode_cfg;
unsigned int idr_cfg;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
mode_cfg = priv->count_mode[id] << 1;
idr_cfg = priv->index_polarity[id] << 1;
- if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
+ if (function == COUNTER_FUNCTION_PULSE_DIRECTION) {
*quadrature_mode = 0;
/* Quadrature scaling only available in quadrature mode */
@@ -779,136 +290,234 @@ static int quad8_function_set(struct counter_device *counter,
if (*synchronous_mode) {
*synchronous_mode = 0;
/* Disable synchronous function mode */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
}
} else {
*quadrature_mode = 1;
switch (function) {
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ case COUNTER_FUNCTION_QUADRATURE_X1_A:
*scale = 0;
mode_cfg |= QUAD8_CMR_QUADRATURE_X1;
break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
*scale = 1;
mode_cfg |= QUAD8_CMR_QUADRATURE_X2;
break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
*scale = 2;
mode_cfg |= QUAD8_CMR_QUADRATURE_X4;
break;
+ default:
+ /* should never reach this path */
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return -EINVAL;
}
}
/* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static void quad8_direction_get(struct counter_device *counter,
- struct counter_count *count, enum counter_count_direction *direction)
+static int quad8_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int ud_flag;
- const unsigned int flag_addr = priv->base + 2 * count->id + 1;
+ u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
/* U/D flag: nonzero = up, zero = down */
- ud_flag = inb(flag_addr) & QUAD8_FLAG_UD;
+ ud_flag = ioread8(flag_addr) & QUAD8_FLAG_UD;
*direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
COUNTER_COUNT_DIRECTION_BACKWARD;
-}
-enum quad8_synapse_action {
- QUAD8_SYNAPSE_ACTION_NONE = 0,
- QUAD8_SYNAPSE_ACTION_RISING_EDGE,
- QUAD8_SYNAPSE_ACTION_FALLING_EDGE,
- QUAD8_SYNAPSE_ACTION_BOTH_EDGES
-};
+ return 0;
+}
-static enum counter_synapse_action quad8_index_actions_list[] = {
- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE
+static const enum counter_synapse_action quad8_index_actions_list[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
-static enum counter_synapse_action quad8_synapse_actions_list[] = {
- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [QUAD8_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+static const enum counter_synapse_action quad8_synapse_actions_list[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
-static int quad8_action_get(struct counter_device *counter,
- struct counter_count *count, struct counter_synapse *synapse,
- size_t *action)
+static int quad8_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
int err;
- size_t function = 0;
+ enum counter_function function;
const size_t signal_a_id = count->synapses[0].signal->id;
enum counter_count_direction direction;
/* Handle Index signals */
if (synapse->signal->id >= 16) {
if (priv->preset_enable[count->id])
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
- *action = QUAD8_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
- err = quad8_function_get(counter, count, &function);
+ err = quad8_function_read(counter, count, &function);
if (err)
return err;
/* Default action mode */
- *action = QUAD8_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
/* Determine action mode based on current count function mode */
switch (function) {
- case QUAD8_COUNT_FUNCTION_PULSE_DIRECTION:
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
if (synapse->signal->id == signal_a_id)
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
- break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ case COUNTER_FUNCTION_QUADRATURE_X1_A:
if (synapse->signal->id == signal_a_id) {
- quad8_direction_get(counter, count, &direction);
+ err = quad8_direction_read(counter, count, &direction);
+ if (err)
+ return err;
if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
- *action = QUAD8_SYNAPSE_ACTION_FALLING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
}
- break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ return 0;
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
if (synapse->signal->id == signal_a_id)
- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
- break;
- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
- break;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+}
+
+enum {
+ QUAD8_EVENT_CARRY = 0,
+ QUAD8_EVENT_COMPARE = 1,
+ QUAD8_EVENT_CARRY_BORROW = 2,
+ QUAD8_EVENT_INDEX = 3,
+};
+
+static int quad8_events_configure(struct counter_device *counter)
+{
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irq_enabled = 0;
+ unsigned long irqflags;
+ struct counter_event_node *event_node;
+ unsigned int next_irq_trigger;
+ unsigned long ior_cfg;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
+
+ list_for_each_entry(event_node, &counter->events_list, l) {
+ switch (event_node->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY;
+ break;
+ case COUNTER_EVENT_THRESHOLD:
+ next_irq_trigger = QUAD8_EVENT_COMPARE;
+ break;
+ case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY_BORROW;
+ break;
+ case COUNTER_EVENT_INDEX:
+ next_irq_trigger = QUAD8_EVENT_INDEX;
+ break;
+ default:
+ /* should never reach this path */
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return -EINVAL;
+ }
+
+ /* Enable IRQ line */
+ irq_enabled |= BIT(event_node->channel);
+
+ /* Skip configuration if it is the same as previously set */
+ if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
+ continue;
+
+ /* Save new IRQ function configuration */
+ priv->irq_trigger[event_node->channel] = next_irq_trigger;
+
+ /* Load configuration to I/O Control Register */
+ ior_cfg = priv->ab_enable[event_node->channel] |
+ priv->preset_enable[event_node->channel] << 1 |
+ priv->irq_trigger[event_node->channel] << 3;
+ iowrite8(QUAD8_CTR_IOR | ior_cfg,
+ &priv->reg->channel[event_node->channel].control);
}
+ iowrite8(irq_enabled, &priv->reg->index_interrupt);
+
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+
return 0;
}
+static int quad8_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ struct counter_event_node *event_node;
+
+ if (watch->channel > QUAD8_NUM_COUNTERS - 1)
+ return -EINVAL;
+
+ switch (watch->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ case COUNTER_EVENT_THRESHOLD:
+ case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
+ case COUNTER_EVENT_INDEX:
+ list_for_each_entry(event_node, &counter->next_events_list, l)
+ if (watch->channel == event_node->channel &&
+ watch->event != event_node->event)
+ return -EINVAL;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct counter_ops quad8_ops = {
.signal_read = quad8_signal_read,
.count_read = quad8_count_read,
.count_write = quad8_count_write,
- .function_get = quad8_function_get,
- .function_set = quad8_function_set,
- .action_get = quad8_action_get
+ .function_read = quad8_function_read,
+ .function_write = quad8_function_write,
+ .action_read = quad8_action_read,
+ .events_configure = quad8_events_configure,
+ .watch_validate = quad8_watch_validate,
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
};
static int quad8_index_polarity_get(struct counter_device *counter,
- struct counter_signal *signal, size_t *index_polarity)
+ struct counter_signal *signal,
+ u32 *index_polarity)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*index_polarity = priv->index_polarity[channel_id];
@@ -917,38 +526,39 @@ static int quad8_index_polarity_get(struct counter_device *counter,
}
static int quad8_index_polarity_set(struct counter_device *counter,
- struct counter_signal *signal, size_t index_polarity)
+ struct counter_signal *signal,
+ u32 index_polarity)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- const int base_offset = priv->base + 2 * channel_id + 1;
+ u8 __iomem *const control = &priv->reg->channel[channel_id].control;
+ unsigned long irqflags;
unsigned int idr_cfg = index_polarity << 1;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
idr_cfg |= priv->synchronous_mode[channel_id];
priv->index_polarity[channel_id] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_signal_enum_ext quad8_index_pol_enum = {
- .items = quad8_index_polarity_modes,
- .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
- .get = quad8_index_polarity_get,
- .set = quad8_index_polarity_set
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
};
static int quad8_synchronous_mode_get(struct counter_device *counter,
- struct counter_signal *signal, size_t *synchronous_mode)
+ struct counter_signal *signal,
+ u32 *synchronous_mode)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*synchronous_mode = priv->synchronous_mode[channel_id];
@@ -957,51 +567,49 @@ static int quad8_synchronous_mode_get(struct counter_device *counter,
}
static int quad8_synchronous_mode_set(struct counter_device *counter,
- struct counter_signal *signal, size_t synchronous_mode)
+ struct counter_signal *signal,
+ u32 synchronous_mode)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
- const int base_offset = priv->base + 2 * channel_id + 1;
+ u8 __iomem *const control = &priv->reg->channel[channel_id].control;
+ unsigned long irqflags;
unsigned int idr_cfg = synchronous_mode;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
idr_cfg |= priv->index_polarity[channel_id] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
priv->synchronous_mode[channel_id] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IDR | idr_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_signal_enum_ext quad8_syn_mode_enum = {
- .items = quad8_synchronous_modes,
- .num_items = ARRAY_SIZE(quad8_synchronous_modes),
- .get = quad8_synchronous_mode_get,
- .set = quad8_synchronous_mode_set
-};
-
-static ssize_t quad8_count_floor_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_floor_read(struct counter_device *counter,
+ struct counter_count *count, u64 *floor)
{
/* Only a floor of 0 is supported */
- return sprintf(buf, "0\n");
+ *floor = 0;
+
+ return 0;
}
-static int quad8_count_mode_get(struct counter_device *counter,
- struct counter_count *count, size_t *cnt_mode)
+static int quad8_count_mode_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_mode *cnt_mode)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
/* Map 104-QUAD-8 count mode to Generic Counter count mode */
switch (priv->count_mode[count->id]) {
@@ -1022,311 +630,289 @@ static int quad8_count_mode_get(struct counter_device *counter,
return 0;
}
-static int quad8_count_mode_set(struct counter_device *counter,
- struct counter_count *count, size_t cnt_mode)
+static int quad8_count_mode_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_mode cnt_mode)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned int count_mode;
unsigned int mode_cfg;
- const int base_offset = priv->base + 2 * count->id + 1;
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
+ unsigned long irqflags;
/* Map Generic Counter count mode to 104-QUAD-8 count mode */
switch (cnt_mode) {
case COUNTER_COUNT_MODE_NORMAL:
- cnt_mode = 0;
+ count_mode = 0;
break;
case COUNTER_COUNT_MODE_RANGE_LIMIT:
- cnt_mode = 1;
+ count_mode = 1;
break;
case COUNTER_COUNT_MODE_NON_RECYCLE:
- cnt_mode = 2;
+ count_mode = 2;
break;
case COUNTER_COUNT_MODE_MODULO_N:
- cnt_mode = 3;
+ count_mode = 3;
break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
- priv->count_mode[count->id] = cnt_mode;
+ priv->count_mode[count->id] = count_mode;
/* Set count mode configuration value */
- mode_cfg = cnt_mode << 1;
+ mode_cfg = count_mode << 1;
/* Add quadrature mode configuration */
if (priv->quadrature_mode[count->id])
mode_cfg |= (priv->quadrature_scale[count->id] + 1) << 3;
/* Load mode configuration to Counter Mode Register */
- outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ iowrite8(QUAD8_CTR_CMR | mode_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
-static struct counter_count_enum_ext quad8_cnt_mode_enum = {
- .items = counter_count_mode_str,
- .num_items = ARRAY_SIZE(counter_count_mode_str),
- .get = quad8_count_mode_get,
- .set = quad8_count_mode_set
-};
-
-static ssize_t quad8_count_direction_read(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf)
+static int quad8_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
- enum counter_count_direction dir;
-
- quad8_direction_get(counter, count, &dir);
+ const struct quad8 *const priv = counter_priv(counter);
- return sprintf(buf, "%s\n", counter_count_direction_str[dir]);
-}
-
-static ssize_t quad8_count_enable_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
-{
- const struct quad8_iio *const priv = counter->priv;
+ *enable = priv->ab_enable[count->id];
- return sprintf(buf, "%u\n", priv->ab_enable[count->id]);
+ return 0;
}
-static ssize_t quad8_count_enable_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
- struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id;
- int err;
- bool ab_enable;
+ struct quad8 *const priv = counter_priv(counter);
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
+ unsigned long irqflags;
unsigned int ior_cfg;
- err = kstrtobool(buf, &ab_enable);
- if (err)
- return err;
-
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
- priv->ab_enable[count->id] = ab_enable;
+ priv->ab_enable[count->id] = enable;
- ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
+ ior_cfg = enable | priv->preset_enable[count->id] << 1 |
+ priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
static int quad8_error_noise_get(struct counter_device *counter,
- struct counter_count *count, size_t *noise_error)
+ struct counter_count *count, u32 *noise_error)
{
- const struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id + 1;
+ const struct quad8 *const priv = counter_priv(counter);
+ u8 __iomem *const flag_addr = &priv->reg->channel[count->id].control;
- *noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
+ *noise_error = !!(ioread8(flag_addr) & QUAD8_FLAG_E);
return 0;
}
-static struct counter_count_enum_ext quad8_error_noise_enum = {
- .items = quad8_noise_error_states,
- .num_items = ARRAY_SIZE(quad8_noise_error_states),
- .get = quad8_error_noise_get
-};
-
-static ssize_t quad8_count_preset_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_preset_read(struct counter_device *counter,
+ struct counter_count *count, u64 *preset)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
+
+ *preset = priv->preset[count->id];
- return sprintf(buf, "%u\n", priv->preset[count->id]);
+ return 0;
}
-static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id,
- unsigned int preset)
+static void quad8_preset_register_set(struct quad8 *const priv, const int id,
+ const unsigned int preset)
{
- const unsigned int base_offset = quad8iio->base + 2 * id;
+ struct channel_reg __iomem *const chan = priv->reg->channel + id;
int i;
- quad8iio->preset[id] = preset;
+ priv->preset[id] = preset;
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set Preset Register */
for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
+ iowrite8(preset >> (8 * i), &chan->data);
}
-static ssize_t quad8_count_preset_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_preset_write(struct counter_device *counter,
+ struct counter_count *count, u64 preset)
{
- struct quad8_iio *const priv = counter->priv;
- unsigned int preset;
- int ret;
-
- ret = kstrtouint(buf, 0, &preset);
- if (ret)
- return ret;
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irqflags;
/* Only 24-bit values are supported */
if (preset > 0xFFFFFF)
- return -EINVAL;
+ return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
quad8_preset_register_set(priv, count->id, preset);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- mutex_unlock(&priv->lock);
- return sprintf(buf, "%u\n", priv->preset[count->id]);
+ *ceiling = priv->preset[count->id];
+ break;
+ default:
+ /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+ *ceiling = 0x1FFFFFF;
+ break;
}
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- return sprintf(buf, "33554431\n");
+ return 0;
}
-static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 ceiling)
{
- struct quad8_iio *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
-
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irqflags;
/* Only 24-bit values are supported */
if (ceiling > 0xFFFFFF)
- return -EINVAL;
+ return -ERANGE;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
quad8_preset_register_set(priv, count->id, ceiling);
- mutex_unlock(&priv->lock);
- return len;
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return 0;
}
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
-static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
- struct counter_count *count, void *private, char *buf)
+static int quad8_count_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *preset_enable)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
+
+ *preset_enable = !priv->preset_enable[count->id];
- return sprintf(buf, "%u\n", !priv->preset_enable[count->id]);
+ return 0;
}
-static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
- struct counter_count *count, void *private, const char *buf, size_t len)
+static int quad8_count_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ u8 preset_enable)
{
- struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id + 1;
- bool preset_enable;
- int ret;
+ struct quad8 *const priv = counter_priv(counter);
+ u8 __iomem *const control = &priv->reg->channel[count->id].control;
+ unsigned long irqflags;
unsigned int ior_cfg;
- ret = kstrtobool(buf, &preset_enable);
- if (ret)
- return ret;
-
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
priv->preset_enable[count->id] = preset_enable;
- ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
+ ior_cfg = priv->ab_enable[count->id] | preset_enable << 1 |
+ priv->irq_trigger[count->id] << 3;
/* Load I/O control configuration to Input / Output Control Register */
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ iowrite8(QUAD8_CTR_IOR | ior_cfg, control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *private, char *buf)
+static int quad8_signal_cable_fault_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *cable_fault)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
+ unsigned long irqflags;
bool disabled;
unsigned int status;
- unsigned int fault;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
disabled = !(priv->cable_fault_enable & BIT(channel_id));
if (disabled) {
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
/* Logic 0 = cable fault */
- status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ status = ioread8(&priv->reg->cable_status);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
/* Mask respective channel and invert logic */
- fault = !(status & BIT(channel_id));
+ *cable_fault = !(status & BIT(channel_id));
- return sprintf(buf, "%u\n", fault);
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_enable_read(
- struct counter_device *counter, struct counter_signal *signal,
- void *private, char *buf)
+static int quad8_signal_cable_fault_enable_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *enable)
{
- const struct quad8_iio *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
- const unsigned int enb = !!(priv->cable_fault_enable & BIT(channel_id));
- return sprintf(buf, "%u\n", enb);
+ *enable = !!(priv->cable_fault_enable & BIT(channel_id));
+
+ return 0;
}
-static ssize_t quad8_signal_cable_fault_enable_write(
- struct counter_device *counter, struct counter_signal *signal,
- void *private, const char *buf, size_t len)
+static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 enable)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
- bool enable;
- int ret;
+ unsigned long irqflags;
unsigned int cable_fault_enable;
- ret = kstrtobool(buf, &enable);
- if (ret)
- return ret;
-
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
if (enable)
priv->cable_fault_enable |= BIT(channel_id);
@@ -1336,75 +922,71 @@ static ssize_t quad8_signal_cable_fault_enable_write(
/* Enable is active low in Differential Encoder Cable Status register */
cable_fault_enable = ~priv->cable_fault_enable;
- outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ iowrite8(cable_fault_enable, &priv->reg->cable_status);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static ssize_t quad8_signal_fck_prescaler_read(struct counter_device *counter,
- struct counter_signal *signal, void *private, char *buf)
+static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 *prescaler)
{
- const struct quad8_iio *const priv = counter->priv;
- const size_t channel_id = signal->id / 2;
+ const struct quad8 *const priv = counter_priv(counter);
+
+ *prescaler = priv->fck_prescaler[signal->id / 2];
- return sprintf(buf, "%u\n", priv->fck_prescaler[channel_id]);
+ return 0;
}
-static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
- struct counter_signal *signal, void *private, const char *buf,
- size_t len)
+static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ u8 prescaler)
{
- struct quad8_iio *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
- const int base_offset = priv->base + 2 * channel_id;
- u8 prescaler;
- int ret;
-
- ret = kstrtou8(buf, 0, &prescaler);
- if (ret)
- return ret;
+ struct channel_reg __iomem *const chan = priv->reg->channel + channel_id;
+ unsigned long irqflags;
- mutex_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, irqflags);
priv->fck_prescaler[channel_id] = prescaler;
/* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
/* Set filter clock factor */
- outb(prescaler, base_offset);
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- base_offset + 1);
+ iowrite8(prescaler, &chan->data);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ &chan->control);
- mutex_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, irqflags);
- return len;
+ return 0;
}
-static const struct counter_signal_ext quad8_signal_ext[] = {
- {
- .name = "cable_fault",
- .read = quad8_signal_cable_fault_read
- },
- {
- .name = "cable_fault_enable",
- .read = quad8_signal_cable_fault_enable_read,
- .write = quad8_signal_cable_fault_enable_write
- },
- {
- .name = "filter_clock_prescaler",
- .read = quad8_signal_fck_prescaler_read,
- .write = quad8_signal_fck_prescaler_write
- }
+static struct counter_comp quad8_signal_ext[] = {
+ COUNTER_COMP_SIGNAL_BOOL("cable_fault", quad8_signal_cable_fault_read,
+ NULL),
+ COUNTER_COMP_SIGNAL_BOOL("cable_fault_enable",
+ quad8_signal_cable_fault_enable_read,
+ quad8_signal_cable_fault_enable_write),
+ COUNTER_COMP_SIGNAL_U8("filter_clock_prescaler",
+ quad8_signal_fck_prescaler_read,
+ quad8_signal_fck_prescaler_write)
};
-static const struct counter_signal_ext quad8_index_ext[] = {
- COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum),
- COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum),
- COUNTER_SIGNAL_ENUM("synchronous_mode", &quad8_syn_mode_enum),
- COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum)
+static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes);
+static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes);
+
+static struct counter_comp quad8_index_ext[] = {
+ COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get,
+ quad8_index_polarity_set,
+ quad8_index_pol_enum),
+ COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get,
+ quad8_synchronous_mode_set,
+ quad8_synch_mode_enum),
};
#define QUAD8_QUAD_SIGNAL(_id, _name) { \
@@ -1473,39 +1055,30 @@ static struct counter_synapse quad8_count_synapses[][3] = {
QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7)
};
-static const struct counter_count_ext quad8_count_ext[] = {
- {
- .name = "ceiling",
- .read = quad8_count_ceiling_read,
- .write = quad8_count_ceiling_write
- },
- {
- .name = "floor",
- .read = quad8_count_floor_read
- },
- COUNTER_COUNT_ENUM("count_mode", &quad8_cnt_mode_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &quad8_cnt_mode_enum),
- {
- .name = "direction",
- .read = quad8_count_direction_read
- },
- {
- .name = "enable",
- .read = quad8_count_enable_read,
- .write = quad8_count_enable_write
- },
- COUNTER_COUNT_ENUM("error_noise", &quad8_error_noise_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("error_noise", &quad8_error_noise_enum),
- {
- .name = "preset",
- .read = quad8_count_preset_read,
- .write = quad8_count_preset_write
- },
- {
- .name = "preset_enable",
- .read = quad8_count_preset_enable_read,
- .write = quad8_count_preset_enable_write
- }
+static const enum counter_count_mode quad8_cnt_modes[] = {
+ COUNTER_COUNT_MODE_NORMAL,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N,
+};
+
+static DEFINE_COUNTER_AVAILABLE(quad8_count_mode_available, quad8_cnt_modes);
+
+static DEFINE_COUNTER_ENUM(quad8_error_noise_enum, quad8_noise_error_states);
+
+static struct counter_comp quad8_count_ext[] = {
+ COUNTER_COMP_CEILING(quad8_count_ceiling_read,
+ quad8_count_ceiling_write),
+ COUNTER_COMP_FLOOR(quad8_count_floor_read, NULL),
+ COUNTER_COMP_COUNT_MODE(quad8_count_mode_read, quad8_count_mode_write,
+ quad8_count_mode_available),
+ COUNTER_COMP_DIRECTION(quad8_direction_read),
+ COUNTER_COMP_ENABLE(quad8_count_enable_read, quad8_count_enable_write),
+ COUNTER_COMP_COUNT_ENUM("error_noise", quad8_error_noise_get, NULL,
+ quad8_error_noise_enum),
+ COUNTER_COMP_PRESET(quad8_count_preset_read, quad8_count_preset_write),
+ COUNTER_COMP_PRESET_ENABLE(quad8_count_preset_enable_read,
+ quad8_count_preset_enable_write),
};
#define QUAD8_COUNT(_id, _cntname) { \
@@ -1530,12 +1103,80 @@ static struct counter_count quad8_counts[] = {
QUAD8_COUNT(7, "Channel 8 Count")
};
+static irqreturn_t quad8_irq_handler(int irq, void *private)
+{
+ struct counter_device *counter = private;
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irq_status;
+ unsigned long channel;
+ u8 event;
+
+ irq_status = ioread8(&priv->reg->interrupt_status);
+ if (!irq_status)
+ return IRQ_NONE;
+
+ for_each_set_bit(channel, &irq_status, QUAD8_NUM_COUNTERS) {
+ switch (priv->irq_trigger[channel]) {
+ case QUAD8_EVENT_CARRY:
+ event = COUNTER_EVENT_OVERFLOW;
+ break;
+ case QUAD8_EVENT_COMPARE:
+ event = COUNTER_EVENT_THRESHOLD;
+ break;
+ case QUAD8_EVENT_CARRY_BORROW:
+ event = COUNTER_EVENT_OVERFLOW_UNDERFLOW;
+ break;
+ case QUAD8_EVENT_INDEX:
+ event = COUNTER_EVENT_INDEX;
+ break;
+ default:
+ /* should never reach this path */
+ WARN_ONCE(true, "invalid interrupt trigger function %u configured for channel %lu\n",
+ priv->irq_trigger[channel], channel);
+ continue;
+ }
+
+ counter_push_event(counter, event, channel);
+ }
+
+ /* Clear pending interrupts on device */
+ iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
+
+ return IRQ_HANDLED;
+}
+
+static void quad8_init_counter(struct channel_reg __iomem *const chan)
+{
+ unsigned long i;
+
+ /* Reset Byte Pointer */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
+ /* Reset filter clock factor */
+ iowrite8(0, &chan->data);
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
+ &chan->control);
+ /* Reset Byte Pointer */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, &chan->control);
+ /* Reset Preset Register */
+ for (i = 0; i < 3; i++)
+ iowrite8(0x00, &chan->data);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, &chan->control);
+ /* Reset Error flag */
+ iowrite8(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, &chan->control);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ iowrite8(QUAD8_CTR_CMR, &chan->control);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ iowrite8(QUAD8_CTR_IOR, &chan->control);
+ /* Disable index function; negative index polarity */
+ iowrite8(QUAD8_CTR_IDR, &chan->control);
+}
+
static int quad8_probe(struct device *dev, unsigned int id)
{
- struct iio_dev *indio_dev;
- struct quad8_iio *quad8iio;
- int i, j;
- unsigned int base_offset;
+ struct counter_device *counter;
+ struct quad8 *priv;
+ unsigned long i;
int err;
if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
@@ -1544,72 +1185,48 @@ static int quad8_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- /* Allocate IIO device; this also allocates driver data structure */
- indio_dev = devm_iio_device_alloc(dev, sizeof(*quad8iio));
- if (!indio_dev)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
- /* Initialize IIO device */
- indio_dev->info = &quad8_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
- indio_dev->channels = quad8_channels;
- indio_dev->name = dev_name(dev);
+ priv->reg = devm_ioport_map(dev, base[id], QUAD8_EXTENT);
+ if (!priv->reg)
+ return -ENOMEM;
/* Initialize Counter device and driver data */
- quad8iio = iio_priv(indio_dev);
- quad8iio->counter.name = dev_name(dev);
- quad8iio->counter.parent = dev;
- quad8iio->counter.ops = &quad8_ops;
- quad8iio->counter.counts = quad8_counts;
- quad8iio->counter.num_counts = ARRAY_SIZE(quad8_counts);
- quad8iio->counter.signals = quad8_signals;
- quad8iio->counter.num_signals = ARRAY_SIZE(quad8_signals);
- quad8iio->counter.priv = quad8iio;
- quad8iio->base = base[id];
-
- /* Initialize mutex */
- mutex_init(&quad8iio->lock);
-
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &quad8_ops;
+ counter->counts = quad8_counts;
+ counter->num_counts = ARRAY_SIZE(quad8_counts);
+ counter->signals = quad8_signals;
+ counter->num_signals = ARRAY_SIZE(quad8_signals);
+
+ spin_lock_init(&priv->lock);
+
+ /* Reset Index/Interrupt Register */
+ iowrite8(0x00, &priv->reg->index_interrupt);
/* Reset all counters and disable interrupt function */
- outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ iowrite8(QUAD8_CHAN_OP_RESET_COUNTERS, &priv->reg->channel_oper);
/* Set initial configuration for all counters */
- for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
- base_offset = base[id] + 2 * i;
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
- /* Reset filter clock factor */
- outb(0, base_offset);
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
- base_offset + 1);
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
- /* Reset Preset Register */
- for (j = 0; j < 3; j++)
- outb(0x00, base_offset);
- /* Reset Borrow, Carry, Compare, and Sign flags */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
- /* Reset Error flag */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
- /* Binary encoding; Normal count; non-quadrature mode */
- outb(QUAD8_CTR_CMR, base_offset + 1);
- /* Disable A and B inputs; preset on index; FLG1 as Carry */
- outb(QUAD8_CTR_IOR, base_offset + 1);
- /* Disable index function; negative index polarity */
- outb(QUAD8_CTR_IDR, base_offset + 1);
- }
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++)
+ quad8_init_counter(priv->reg->channel + i);
/* Disable Differential Encoder Cable Status for all channels */
- outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
- /* Enable all counters */
- outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ iowrite8(0xFF, &priv->reg->cable_status);
+ /* Enable all counters and enable interrupt function */
+ iowrite8(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, &priv->reg->channel_oper);
- /* Register IIO device */
- err = devm_iio_device_register(dev, indio_dev);
+ err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
+ IRQF_SHARED, counter->name, counter);
if (err)
return err;
- /* Register Counter device */
- return devm_counter_register(dev, &quad8iio->counter);
+ err = devm_counter_add(dev, counter);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to add counter\n");
+
+ return 0;
}
static struct isa_driver quad8_driver = {
@@ -1622,5 +1239,5 @@ static struct isa_driver quad8_driver = {
module_isa_driver(quad8_driver, num_quad8);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
-MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index cbdf84200e27..d388bf26f4dc 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -14,7 +14,7 @@ if COUNTER
config 104_QUAD_8
tristate "ACCES 104-QUAD-8 driver"
- depends on PC104 && X86 && IIO
+ depends on (PC104 && X86) || COMPILE_TEST
select ISA_BUS_API
help
Say yes here to build support for the ACCES 104-QUAD-8 quadrature
@@ -23,11 +23,21 @@ config 104_QUAD_8
A counter's respective error flag may be cleared by performing a write
operation on the respective count value attribute. Although the
104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
- set, either directly or via the counter's preset attribute. Interrupts
- are not supported by this driver.
+ set, either directly or via the counter's preset attribute.
The base port addresses for the devices may be configured via the base
- array module parameter.
+ array module parameter. The interrupt line numbers for the devices may
+ be configured via the irq array module parameter.
+
+config INTERRUPT_CNT
+ tristate "Interrupt counter driver"
+ depends on GPIOLIB
+ help
+	  Select this option to enable the interrupt counter driver. Any
+	  interrupt source can be used by this driver as the event source.
+
+ To compile this driver as a module, choose M here: the
+ module will be called interrupt-cnt.
config STM32_TIMER_CNT
tristate "STM32 Timer encoder counter driver"
@@ -81,4 +91,29 @@ config MICROCHIP_TCB_CAPTURE
To compile this driver as a module, choose M here: the
module will be called microchip-tcb-capture.
+config INTEL_QEP
+ tristate "Intel Quadrature Encoder Peripheral driver"
+ depends on PCI
+ help
+ Select this option to enable the Intel Quadrature Encoder Peripheral
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-qep.
+
+config TI_ECAP_CAPTURE
+ tristate "TI eCAP capture driver"
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Capture
+ (eCAP) driver in input mode.
+
+	  It can be used to timestamp events (falling/rising edges) detected
+	  on the ECAP input signal.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-ecap-capture.
+
endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 0a393f71e481..b9a369e0d4fc 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -4,10 +4,14 @@
#
obj-$(CONFIG_COUNTER) += counter.o
+counter-y := counter-core.o counter-sysfs.o counter-chrdev.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
+obj-$(CONFIG_INTERRUPT_CNT) += interrupt-cnt.o
obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
obj-$(CONFIG_MICROCHIP_TCB_CAPTURE) += microchip-tcb-capture.o
+obj-$(CONFIG_INTEL_QEP) += intel-qep.o
+obj-$(CONFIG_TI_ECAP_CAPTURE) += ti-ecap-capture.o
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
new file mode 100644
index 000000000000..69d340be9c93
--- /dev/null
+++ b/drivers/counter/counter-chrdev.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter character device interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/cdev.h>
+#include <linux/counter.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "counter-chrdev.h"
+
+struct counter_comp_node {
+ struct list_head l;
+ struct counter_component component;
+ struct counter_comp comp;
+ void *parent;
+};
+
+#define counter_comp_read_is_equal(a, b) \
+ (a.action_read == b.action_read || \
+ a.device_u8_read == b.device_u8_read || \
+ a.count_u8_read == b.count_u8_read || \
+ a.signal_u8_read == b.signal_u8_read || \
+ a.device_u32_read == b.device_u32_read || \
+ a.count_u32_read == b.count_u32_read || \
+ a.signal_u32_read == b.signal_u32_read || \
+ a.device_u64_read == b.device_u64_read || \
+ a.count_u64_read == b.count_u64_read || \
+ a.signal_u64_read == b.signal_u64_read)
+
+#define counter_comp_read_is_set(comp) \
+ (comp.action_read || \
+ comp.device_u8_read || \
+ comp.count_u8_read || \
+ comp.signal_u8_read || \
+ comp.device_u32_read || \
+ comp.count_u32_read || \
+ comp.signal_u32_read || \
+ comp.device_u64_read || \
+ comp.count_u64_read || \
+ comp.signal_u64_read)
+
+static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *f_ps)
+{
+ struct counter_device *const counter = filp->private_data;
+ int err;
+ unsigned int copied;
+
+ if (!counter->ops)
+ return -ENODEV;
+
+ if (len < sizeof(struct counter_event))
+ return -EINVAL;
+
+ do {
+ if (kfifo_is_empty(&counter->events)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ err = wait_event_interruptible(counter->events_wait,
+ !kfifo_is_empty(&counter->events) ||
+ !counter->ops);
+ if (err < 0)
+ return err;
+ if (!counter->ops)
+ return -ENODEV;
+ }
+
+ if (mutex_lock_interruptible(&counter->events_out_lock))
+ return -ERESTARTSYS;
+ err = kfifo_to_user(&counter->events, buf, len, &copied);
+ mutex_unlock(&counter->events_out_lock);
+ if (err < 0)
+ return err;
+ } while (!copied);
+
+ return copied;
+}
+
+static __poll_t counter_chrdev_poll(struct file *filp,
+ struct poll_table_struct *pollt)
+{
+ struct counter_device *const counter = filp->private_data;
+ __poll_t events = 0;
+
+ if (!counter->ops)
+ return events;
+
+ poll_wait(filp, &counter->events_wait, pollt);
+
+ if (!kfifo_is_empty(&counter->events))
+ events = EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static void counter_events_list_free(struct list_head *const events_list)
+{
+ struct counter_event_node *p, *n;
+ struct counter_comp_node *q, *o;
+
+ list_for_each_entry_safe(p, n, events_list, l) {
+ /* Free associated component nodes */
+ list_for_each_entry_safe(q, o, &p->comp_list, l) {
+ list_del(&q->l);
+ kfree(q);
+ }
+
+ /* Free event node */
+ list_del(&p->l);
+ kfree(p);
+ }
+}
+
+static int counter_set_event_node(struct counter_device *const counter,
+ struct counter_watch *const watch,
+ const struct counter_comp_node *const cfg)
+{
+ struct counter_event_node *event_node;
+ int err = 0;
+ struct counter_comp_node *comp_node;
+
+ /* Search for event in the list */
+ list_for_each_entry(event_node, &counter->next_events_list, l)
+ if (event_node->event == watch->event &&
+ event_node->channel == watch->channel)
+ break;
+
+ /* If event is not already in the list */
+ if (&event_node->l == &counter->next_events_list) {
+ /* Allocate new event node */
+ event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node)
+ return -ENOMEM;
+
+ /* Configure event node and add to the list */
+ event_node->event = watch->event;
+ event_node->channel = watch->channel;
+ INIT_LIST_HEAD(&event_node->comp_list);
+ list_add(&event_node->l, &counter->next_events_list);
+ }
+
+ /* Check if component watch has already been set before */
+ list_for_each_entry(comp_node, &event_node->comp_list, l)
+ if (comp_node->parent == cfg->parent &&
+ counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
+ err = -EINVAL;
+ goto exit_free_event_node;
+ }
+
+ /* Allocate component node */
+ comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
+ if (!comp_node) {
+ err = -ENOMEM;
+ goto exit_free_event_node;
+ }
+ *comp_node = *cfg;
+
+ /* Add component node to event node */
+ list_add_tail(&comp_node->l, &event_node->comp_list);
+
+exit_free_event_node:
+ /* Free event node if no one else is watching */
+ if (list_empty(&event_node->comp_list)) {
+ list_del(&event_node->l);
+ kfree(event_node);
+ }
+
+ return err;
+}
+
+static int counter_enable_events(struct counter_device *const counter)
+{
+ unsigned long flags;
+ int err = 0;
+
+ mutex_lock(&counter->n_events_list_lock);
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ counter_events_list_free(&counter->events_list);
+ list_replace_init(&counter->next_events_list,
+ &counter->events_list);
+
+ if (counter->ops->events_configure)
+ err = counter->ops->events_configure(counter);
+
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static int counter_disable_events(struct counter_device *const counter)
+{
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ counter_events_list_free(&counter->events_list);
+
+ if (counter->ops->events_configure)
+ err = counter->ops->events_configure(counter);
+
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+
+ mutex_lock(&counter->n_events_list_lock);
+
+ counter_events_list_free(&counter->next_events_list);
+
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static int counter_add_watch(struct counter_device *const counter,
+ const unsigned long arg)
+{
+ void __user *const uwatch = (void __user *)arg;
+ struct counter_watch watch;
+ struct counter_comp_node comp_node = {};
+ size_t parent, id;
+ struct counter_comp *ext;
+ size_t num_ext;
+ int err = 0;
+
+ if (copy_from_user(&watch, uwatch, sizeof(watch)))
+ return -EFAULT;
+
+ if (watch.component.type == COUNTER_COMPONENT_NONE)
+ goto no_component;
+
+ parent = watch.component.parent;
+
+ /* Configure parent component info for comp node */
+ switch (watch.component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ext = counter->ext;
+ num_ext = counter->num_ext;
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ if (parent >= counter->num_signals)
+ return -EINVAL;
+ parent = array_index_nospec(parent, counter->num_signals);
+
+ comp_node.parent = counter->signals + parent;
+
+ ext = counter->signals[parent].ext;
+ num_ext = counter->signals[parent].num_ext;
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (parent >= counter->num_counts)
+ return -EINVAL;
+ parent = array_index_nospec(parent, counter->num_counts);
+
+ comp_node.parent = counter->counts + parent;
+
+ ext = counter->counts[parent].ext;
+ num_ext = counter->counts[parent].num_ext;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ id = watch.component.id;
+
+ /* Configure component info for comp node */
+ switch (watch.component.type) {
+ case COUNTER_COMPONENT_SIGNAL:
+ if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
+ comp_node.comp.signal_u32_read = counter->ops->signal_read;
+ break;
+ case COUNTER_COMPONENT_COUNT:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_U64;
+ comp_node.comp.count_u64_read = counter->ops->count_read;
+ break;
+ case COUNTER_COMPONENT_FUNCTION:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+
+ comp_node.comp.type = COUNTER_COMP_FUNCTION;
+ comp_node.comp.count_u32_read = counter->ops->function_read;
+ break;
+ case COUNTER_COMPONENT_SYNAPSE_ACTION:
+ if (watch.component.scope != COUNTER_SCOPE_COUNT)
+ return -EINVAL;
+ if (id >= counter->counts[parent].num_synapses)
+ return -EINVAL;
+ id = array_index_nospec(id, counter->counts[parent].num_synapses);
+
+ comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
+ comp_node.comp.action_read = counter->ops->action_read;
+ comp_node.comp.priv = counter->counts[parent].synapses + id;
+ break;
+ case COUNTER_COMPONENT_EXTENSION:
+ if (id >= num_ext)
+ return -EINVAL;
+ id = array_index_nospec(id, num_ext);
+
+ comp_node.comp = ext[id];
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!counter_comp_read_is_set(comp_node.comp))
+ return -EOPNOTSUPP;
+
+no_component:
+ mutex_lock(&counter->n_events_list_lock);
+
+ if (counter->ops->watch_validate) {
+ err = counter->ops->watch_validate(counter, &watch);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ comp_node.component = watch.component;
+
+ err = counter_set_event_node(counter, &watch, &comp_node);
+
+err_exit:
+ mutex_unlock(&counter->n_events_list_lock);
+
+ return err;
+}
+
+static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct counter_device *const counter = filp->private_data;
+ int ret = -ENODEV;
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ if (!counter->ops)
+ goto out_unlock;
+
+ switch (cmd) {
+ case COUNTER_ADD_WATCH_IOCTL:
+ ret = counter_add_watch(counter, arg);
+ break;
+ case COUNTER_ENABLE_EVENTS_IOCTL:
+ ret = counter_enable_events(counter);
+ break;
+ case COUNTER_DISABLE_EVENTS_IOCTL:
+ ret = counter_disable_events(counter);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&counter->ops_exist_lock);
+
+ return ret;
+}
+
+static int counter_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct counter_device *const counter = container_of(inode->i_cdev,
+ typeof(*counter),
+ chrdev);
+
+ get_device(&counter->dev);
+ filp->private_data = counter;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int counter_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct counter_device *const counter = filp->private_data;
+ int ret = 0;
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ if (!counter->ops) {
+		/* Free any memory still held by the events lists */
+ counter_events_list_free(&counter->events_list);
+ counter_events_list_free(&counter->next_events_list);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = counter_disable_events(counter);
+ if (ret < 0) {
+ mutex_unlock(&counter->ops_exist_lock);
+ return ret;
+ }
+
+out_unlock:
+ mutex_unlock(&counter->ops_exist_lock);
+
+ put_device(&counter->dev);
+
+ return ret;
+}
+
+static const struct file_operations counter_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = counter_chrdev_read,
+ .poll = counter_chrdev_poll,
+ .unlocked_ioctl = counter_chrdev_ioctl,
+ .open = counter_chrdev_open,
+ .release = counter_chrdev_release,
+};
+
+int counter_chrdev_add(struct counter_device *const counter)
+{
+ /* Initialize Counter events lists */
+ INIT_LIST_HEAD(&counter->events_list);
+ INIT_LIST_HEAD(&counter->next_events_list);
+ spin_lock_init(&counter->events_list_lock);
+ mutex_init(&counter->n_events_list_lock);
+ init_waitqueue_head(&counter->events_wait);
+ spin_lock_init(&counter->events_in_lock);
+ mutex_init(&counter->events_out_lock);
+
+ /* Initialize character device */
+ cdev_init(&counter->chrdev, &counter_fops);
+
+ /* Allocate Counter events queue */
+ return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
+}
+
+void counter_chrdev_remove(struct counter_device *const counter)
+{
+ kfifo_free(&counter->events);
+}
+
+static int counter_get_data(struct counter_device *const counter,
+ const struct counter_comp_node *const comp_node,
+ u64 *const value)
+{
+ const struct counter_comp *const comp = &comp_node->comp;
+ void *const parent = comp_node->parent;
+ u8 value_u8 = 0;
+ u32 value_u32 = 0;
+ int ret;
+
+ if (comp_node->component.type == COUNTER_COMPONENT_NONE)
+ return 0;
+
+ switch (comp->type) {
+ case COUNTER_COMP_U8:
+ case COUNTER_COMP_BOOL:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ret = comp->device_u8_read(counter, &value_u8);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ret = comp->signal_u8_read(counter, parent, &value_u8);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ret = comp->count_u8_read(counter, parent, &value_u8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *value = value_u8;
+ return ret;
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_DIRECTION:
+ case COUNTER_COMP_COUNT_MODE:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ret = comp->device_u32_read(counter, &value_u32);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ret = comp->signal_u32_read(counter, parent,
+ &value_u32);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ret = comp->count_u32_read(counter, parent, &value_u32);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *value = value_u32;
+ return ret;
+ case COUNTER_COMP_U64:
+ switch (comp_node->component.scope) {
+ case COUNTER_SCOPE_DEVICE:
+ return comp->device_u64_read(counter, value);
+ case COUNTER_SCOPE_SIGNAL:
+ return comp->signal_u64_read(counter, parent, value);
+ case COUNTER_SCOPE_COUNT:
+ return comp->count_u64_read(counter, parent, value);
+ default:
+ return -EINVAL;
+ }
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ ret = comp->action_read(counter, parent, comp->priv,
+ &value_u32);
+ *value = value_u32;
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * counter_push_event - queue event for userspace reading
+ * @counter: pointer to Counter structure
+ * @event: triggered event
+ * @channel: event channel
+ *
+ * Note: If no one is watching for the respective event, it is silently
+ * discarded.
+ */
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel)
+{
+ struct counter_event ev;
+ unsigned int copied = 0;
+ unsigned long flags;
+ struct counter_event_node *event_node;
+ struct counter_comp_node *comp_node;
+
+ ev.timestamp = ktime_get_ns();
+ ev.watch.event = event;
+ ev.watch.channel = channel;
+
+ /* Could be in an interrupt context, so use a spin lock */
+ spin_lock_irqsave(&counter->events_list_lock, flags);
+
+ /* Search for event in the list */
+ list_for_each_entry(event_node, &counter->events_list, l)
+ if (event_node->event == event &&
+ event_node->channel == channel)
+ break;
+
+ /* If event is not in the list */
+ if (&event_node->l == &counter->events_list)
+ goto exit_early;
+
+ /* Read and queue relevant comp for userspace */
+ list_for_each_entry(comp_node, &event_node->comp_list, l) {
+ ev.watch.component = comp_node->component;
+ ev.status = -counter_get_data(counter, comp_node, &ev.value);
+
+ copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
+ 1, &counter->events_in_lock);
+ }
+
+exit_early:
+ spin_unlock_irqrestore(&counter->events_list_lock, flags);
+
+ if (copied)
+ wake_up_poll(&counter->events_wait, EPOLLIN);
+}
+EXPORT_SYMBOL_GPL(counter_push_event);
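The character device implemented above is driven from userspace through the uapi in <linux/counter.h>: a watch is registered with COUNTER_ADD_WATCH_IOCTL, armed with COUNTER_ENABLE_EVENTS_IOCTL, and read() then drains whole struct counter_event records from the kfifo. Below is a minimal sketch of such a consumer, assuming a driver that exposes count 0 with an overflow event and a node at /dev/counter0 (both illustrative, not mandated by this patch):

/* Userspace sketch: watch count 0 for overflow events and print them. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/counter.h>

int main(void)
{
	struct counter_watch watch = {
		.component.type = COUNTER_COMPONENT_COUNT,
		.component.scope = COUNTER_SCOPE_COUNT,
		.component.parent = 0,			/* count0 */
		.event = COUNTER_EVENT_OVERFLOW,	/* assumed supported */
		.channel = 0,
	};
	struct counter_event event;
	int fd;

	fd = open("/dev/counter0", O_RDWR);	/* node name is illustrative */
	if (fd < 0)
		return 1;
	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
		return 1;
	/* Blocks until counter_push_event() queues a matching event */
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		printf("event %u channel %u value %llu status %u\n",
		       event.watch.event, event.watch.channel,
		       (unsigned long long)event.value, event.status);
	return 0;
}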
diff --git a/drivers/counter/counter-chrdev.h b/drivers/counter/counter-chrdev.h
new file mode 100644
index 000000000000..5529d16703c4
--- /dev/null
+++ b/drivers/counter/counter-chrdev.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter character device interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _COUNTER_CHRDEV_H_
+#define _COUNTER_CHRDEV_H_
+
+#include <linux/counter.h>
+
+int counter_chrdev_add(struct counter_device *const counter);
+void counter_chrdev_remove(struct counter_device *const counter);
+
+#endif /* _COUNTER_CHRDEV_H_ */
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
new file mode 100644
index 000000000000..938651f9e9e0
--- /dev/null
+++ b/drivers/counter/counter-core.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/cdev.h>
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "counter-chrdev.h"
+#include "counter-sysfs.h"
+
+#define COUNTER_NAME "counter"
+
+/* Provides a unique ID for each counter device */
+static DEFINE_IDA(counter_ida);
+
+struct counter_device_allochelper {
+ struct counter_device counter;
+
+ /*
+ * This is cache line aligned to ensure private data behaves like if it
+	 * This is cache line aligned to ensure private data behaves as if it
+ */
+ unsigned long privdata[] ____cacheline_aligned;
+};
+
+static void counter_device_release(struct device *dev)
+{
+ struct counter_device *const counter =
+ container_of(dev, struct counter_device, dev);
+
+ counter_chrdev_remove(counter);
+ ida_free(&counter_ida, dev->id);
+
+ kfree(container_of(counter, struct counter_device_allochelper, counter));
+}
+
+static struct device_type counter_device_type = {
+ .name = "counter_device",
+ .release = counter_device_release,
+};
+
+static struct bus_type counter_bus_type = {
+ .name = "counter",
+ .dev_name = "counter",
+};
+
+static dev_t counter_devt;
+
+/**
+ * counter_priv - access counter device private data
+ * @counter: counter device
+ *
+ * Get the counter device private data.
+ */
+void *counter_priv(const struct counter_device *const counter)
+{
+ struct counter_device_allochelper *ch =
+ container_of(counter, struct counter_device_allochelper, counter);
+
+ return &ch->privdata;
+}
+EXPORT_SYMBOL_GPL(counter_priv);
+
+/**
+ * counter_alloc - allocate a counter_device
+ * @sizeof_priv: size of the driver private data
+ *
+ * This is part one of counter registration. The structure is allocated
+ * dynamically to ensure the right lifetime for the embedded struct device.
+ *
+ * If this succeeds, call counter_put() to get rid of the counter_device again.
+ */
+struct counter_device *counter_alloc(size_t sizeof_priv)
+{
+ struct counter_device_allochelper *ch;
+ struct counter_device *counter;
+ struct device *dev;
+ int err;
+
+ ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL);
+ if (!ch)
+ return NULL;
+
+ counter = &ch->counter;
+ dev = &counter->dev;
+
+ /* Acquire unique ID */
+ err = ida_alloc(&counter_ida, GFP_KERNEL);
+ if (err < 0)
+ goto err_ida_alloc;
+ dev->id = err;
+
+ mutex_init(&counter->ops_exist_lock);
+ dev->type = &counter_device_type;
+ dev->bus = &counter_bus_type;
+ dev->devt = MKDEV(MAJOR(counter_devt), dev->id);
+
+ err = counter_chrdev_add(counter);
+ if (err < 0)
+ goto err_chrdev_add;
+
+ device_initialize(dev);
+
+ err = dev_set_name(dev, COUNTER_NAME "%d", dev->id);
+ if (err)
+ goto err_dev_set_name;
+
+ return counter;
+
+err_dev_set_name:
+	counter_chrdev_remove(counter);
+err_chrdev_add:
+	ida_free(&counter_ida, dev->id);
+err_ida_alloc:
+	kfree(ch);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(counter_alloc);
+
+void counter_put(struct counter_device *counter)
+{
+ put_device(&counter->dev);
+}
+EXPORT_SYMBOL_GPL(counter_put);
+
+/**
+ * counter_add - complete registration of a counter
+ * @counter: the counter to add
+ *
+ * This is part two of counter registration.
+ *
+ * If this succeeds, call counter_unregister() to get rid of the counter_device again.
+ */
+int counter_add(struct counter_device *counter)
+{
+ int err;
+ struct device *dev = &counter->dev;
+
+ if (counter->parent) {
+ dev->parent = counter->parent;
+ dev->of_node = counter->parent->of_node;
+ }
+
+ err = counter_sysfs_add(counter);
+ if (err < 0)
+ return err;
+
+ /* implies device_add(dev) */
+ return cdev_device_add(&counter->chrdev, dev);
+}
+EXPORT_SYMBOL_GPL(counter_add);
+
+/**
+ * counter_unregister - unregister Counter from the system
+ * @counter: pointer to Counter to unregister
+ *
+ * The Counter is unregistered from the system.
+ */
+void counter_unregister(struct counter_device *const counter)
+{
+ if (!counter)
+ return;
+
+ cdev_device_del(&counter->chrdev, &counter->dev);
+
+ mutex_lock(&counter->ops_exist_lock);
+
+ counter->ops = NULL;
+ wake_up(&counter->events_wait);
+
+ mutex_unlock(&counter->ops_exist_lock);
+}
+EXPORT_SYMBOL_GPL(counter_unregister);
+
+static void devm_counter_release(void *counter)
+{
+ counter_unregister(counter);
+}
+
+static void devm_counter_put(void *counter)
+{
+ counter_put(counter);
+}
+
+/**
+ * devm_counter_alloc - allocate a counter_device
+ * @dev: the device to register the release callback for
+ * @sizeof_priv: size of the driver private data
+ *
+ * This is the device managed version of counter_alloc(). It registers a
+ * cleanup callback that takes care of calling counter_put().
+ */
+struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv)
+{
+ struct counter_device *counter;
+ int err;
+
+ counter = counter_alloc(sizeof_priv);
+ if (!counter)
+ return NULL;
+
+ err = devm_add_action_or_reset(dev, devm_counter_put, counter);
+ if (err < 0)
+ return NULL;
+
+ return counter;
+}
+EXPORT_SYMBOL_GPL(devm_counter_alloc);
+
+/**
+ * devm_counter_add - complete registration of a counter
+ * @dev: the device to register the release callback for
+ * @counter: the counter to add
+ *
+ * This is the device managed version of counter_add(). It registers a cleanup
+ * callback that takes care of calling counter_unregister().
+ */
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter)
+{
+ int err;
+
+ err = counter_add(counter);
+ if (err < 0)
+ return err;
+
+ return devm_add_action_or_reset(dev, devm_counter_release, counter);
+}
+EXPORT_SYMBOL_GPL(devm_counter_add);
+
+#define COUNTER_DEV_MAX 256
+
+static int __init counter_init(void)
+{
+ int err;
+
+ err = bus_register(&counter_bus_type);
+ if (err < 0)
+ return err;
+
+ err = alloc_chrdev_region(&counter_devt, 0, COUNTER_DEV_MAX,
+ COUNTER_NAME);
+ if (err < 0)
+ goto err_unregister_bus;
+
+ return 0;
+
+err_unregister_bus:
+ bus_unregister(&counter_bus_type);
+ return err;
+}
+
+static void __exit counter_exit(void)
+{
+ unregister_chrdev_region(counter_devt, COUNTER_DEV_MAX);
+ bus_unregister(&counter_bus_type);
+}
+
+subsys_initcall(counter_init);
+module_exit(counter_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Generic Counter interface");
+MODULE_LICENSE("GPL v2");
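Driver-side registration is now two-phase: counter_alloc()/devm_counter_alloc() reserves the device together with its trailing private data, counter_priv() recovers that data, and counter_add()/devm_counter_add() publishes the device. A hedged probe sketch follows; foo_priv, foo_ops and the platform-device wiring are placeholder names invented for illustration:

/* Hypothetical driver using the managed two-phase registration. */
#include <linux/counter.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *regs;	/* hypothetical register base */
};

static const struct counter_ops foo_ops = {
	/* callbacks omitted in this sketch */
};

static int foo_probe(struct platform_device *pdev)
{
	struct counter_device *counter;
	struct foo_priv *priv;

	counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
	if (!counter)
		return -ENOMEM;
	priv = counter_priv(counter);	/* trailing, cacheline-aligned */
	priv->regs = NULL;		/* a real driver would map I/O here */

	counter->name = dev_name(&pdev->dev);
	counter->parent = &pdev->dev;
	counter->ops = &foo_ops;

	/* a devm action calls counter_unregister() on driver detach */
	return devm_counter_add(&pdev->dev, counter);
}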
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
new file mode 100644
index 000000000000..04eac41dad33
--- /dev/null
+++ b/drivers/counter/counter-sysfs.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter sysfs interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "counter-sysfs.h"
+
+static inline struct counter_device *counter_from_dev(struct device *dev)
+{
+ return container_of(dev, struct counter_device, dev);
+}
+
+/**
+ * struct counter_attribute - Counter sysfs attribute
+ * @dev_attr: device attribute for sysfs
+ * @l: node to add Counter attribute to attribute group list
+ * @comp: Counter component callbacks and data
+ * @scope: Counter scope of the attribute
+ * @parent: pointer to the parent component
+ */
+struct counter_attribute {
+ struct device_attribute dev_attr;
+ struct list_head l;
+
+ struct counter_comp comp;
+ enum counter_scope scope;
+ void *parent;
+};
+
+#define to_counter_attribute(_dev_attr) \
+ container_of(_dev_attr, struct counter_attribute, dev_attr)
+
+/**
+ * struct counter_attribute_group - container for attribute group
+ * @name: name of the attribute group
+ * @attr_list: list to keep track of created attributes
+ * @num_attr: number of attributes
+ */
+struct counter_attribute_group {
+ const char *name;
+ struct list_head attr_list;
+ size_t num_attr;
+};
+
+static const char *const counter_function_str[] = {
+ [COUNTER_FUNCTION_INCREASE] = "increase",
+ [COUNTER_FUNCTION_DECREASE] = "decrease",
+ [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
+ [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
+ [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
+ [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
+ [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
+ [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4"
+};
+
+static const char *const counter_signal_value_str[] = {
+ [COUNTER_SIGNAL_LEVEL_LOW] = "low",
+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
+};
+
+static const char *const counter_synapse_action_str[] = {
+ [COUNTER_SYNAPSE_ACTION_NONE] = "none",
+ [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
+ [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
+ [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
+};
+
+static const char *const counter_count_direction_str[] = {
+ [COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
+ [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
+};
+
+static const char *const counter_count_mode_str[] = {
+ [COUNTER_COUNT_MODE_NORMAL] = "normal",
+ [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
+ [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
+};
+
+static ssize_t counter_comp_u8_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ int err;
+ u8 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u8_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u8_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u8_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ if (a->comp.type == COUNTER_COMP_BOOL)
+		/* data should already be boolean, but normalize to be safe */
+ data = !!data;
+
+ return sysfs_emit(buf, "%u\n", (unsigned int)data);
+}
+
+static ssize_t counter_comp_u8_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ int err;
+	bool bool_data = false;
+ u8 data = 0;
+
+ if (a->comp.type == COUNTER_COMP_BOOL) {
+ err = kstrtobool(buf, &bool_data);
+ data = bool_data;
+	} else {
+		err = kstrtou8(buf, 0, &data);
+	}
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u8_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u8_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u8_write(counter, a->parent, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_u32_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_available *const avail = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u32_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u32_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
+ err = a->comp.action_read(counter, a->parent,
+ a->comp.priv, &data);
+ else
+ err = a->comp.count_u32_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ return sysfs_emit(buf, "%s\n", counter_function_str[data]);
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ return sysfs_emit(buf, "%s\n", counter_signal_value_str[data]);
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ return sysfs_emit(buf, "%s\n", counter_synapse_action_str[data]);
+ case COUNTER_COMP_ENUM:
+ return sysfs_emit(buf, "%s\n", avail->strs[data]);
+ case COUNTER_COMP_COUNT_DIRECTION:
+ return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]);
+ case COUNTER_COMP_COUNT_MODE:
+ return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]);
+ default:
+ return sysfs_emit(buf, "%u\n", (unsigned int)data);
+ }
+}
+
+static int counter_find_enum(u32 *const enum_item, const u32 *const enums,
+ const size_t num_enums, const char *const buf,
+ const char *const string_array[])
+{
+ size_t index;
+
+ for (index = 0; index < num_enums; index++) {
+ *enum_item = enums[index];
+ if (sysfs_streq(buf, string_array[*enum_item]))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t counter_comp_u32_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ struct counter_count *const count = a->parent;
+ struct counter_synapse *const synapse = a->comp.priv;
+ const struct counter_available *const avail = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ err = counter_find_enum(&data, count->functions_list,
+ count->num_functions, buf,
+ counter_function_str);
+ break;
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ err = counter_find_enum(&data, synapse->actions_list,
+ synapse->num_actions, buf,
+ counter_synapse_action_str);
+ break;
+ case COUNTER_COMP_ENUM:
+ err = __sysfs_match_string(avail->strs, avail->num_items, buf);
+ data = err;
+ break;
+ case COUNTER_COMP_COUNT_MODE:
+ err = counter_find_enum(&data, avail->enums, avail->num_items,
+ buf, counter_count_mode_str);
+ break;
+ default:
+ err = kstrtou32(buf, 0, &data);
+ break;
+ }
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u32_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u32_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
+ err = a->comp.action_write(counter, count, synapse,
+ data);
+ else
+ err = a->comp.count_u32_write(counter, count, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_u64_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ int err;
+ u64 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u64_read(counter, &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u64_read(counter, a->parent, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u64_read(counter, a->parent, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
+}
+
+static ssize_t counter_comp_u64_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ int err;
+ u64 data = 0;
+
+ err = kstrtou64(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_u64_write(counter, data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_u64_write(counter, a->parent, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_u64_write(counter, a->parent, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t enums_available_show(const u32 *const enums,
+ const size_t num_enums,
+ const char *const strs[], char *buf)
+{
+ size_t len = 0;
+ size_t index;
+
+ for (index = 0; index < num_enums; index++)
+ len += sysfs_emit_at(buf, len, "%s\n", strs[enums[index]]);
+
+ return len;
+}
+
+static ssize_t strs_available_show(const struct counter_available *const avail,
+ char *buf)
+{
+ size_t len = 0;
+ size_t index;
+
+ for (index = 0; index < avail->num_items; index++)
+ len += sysfs_emit_at(buf, len, "%s\n", avail->strs[index]);
+
+ return len;
+}
+
+static ssize_t counter_comp_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ const struct counter_count *const count = a->parent;
+ const struct counter_synapse *const synapse = a->comp.priv;
+ const struct counter_available *const avail = a->comp.priv;
+
+ switch (a->comp.type) {
+ case COUNTER_COMP_FUNCTION:
+ return enums_available_show(count->functions_list,
+ count->num_functions,
+ counter_function_str, buf);
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ return enums_available_show(synapse->actions_list,
+ synapse->num_actions,
+ counter_synapse_action_str, buf);
+ case COUNTER_COMP_ENUM:
+ return strs_available_show(avail, buf);
+ case COUNTER_COMP_COUNT_MODE:
+ return enums_available_show(avail->enums, avail->num_items,
+ counter_count_mode_str, buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int counter_avail_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp, void *const parent)
+{
+ struct counter_attribute *counter_attr;
+ struct device_attribute *dev_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.type = comp->type;
+ counter_attr->comp.priv = comp->priv;
+ counter_attr->parent = parent;
+
+ /* Initialize sysfs attribute */
+ dev_attr = &counter_attr->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
+
+ /* Configure device attribute */
+ dev_attr->attr.name = devm_kasprintf(dev, GFP_KERNEL, "%s_available",
+ comp->name);
+ if (!dev_attr->attr.name)
+ return -ENOMEM;
+ dev_attr->attr.mode = 0444;
+ dev_attr->show = counter_comp_available_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
+static int counter_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp,
+ const enum counter_scope scope,
+ void *const parent)
+{
+ struct counter_attribute *counter_attr;
+ struct device_attribute *dev_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp = *comp;
+ counter_attr->scope = scope;
+ counter_attr->parent = parent;
+
+ /* Configure device attribute */
+ dev_attr = &counter_attr->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = comp->name;
+ switch (comp->type) {
+ case COUNTER_COMP_U8:
+ case COUNTER_COMP_BOOL:
+ if (comp->device_u8_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u8_show;
+ }
+ if (comp->device_u8_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u8_store;
+ }
+ break;
+ case COUNTER_COMP_SIGNAL_LEVEL:
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_DIRECTION:
+ case COUNTER_COMP_COUNT_MODE:
+ if (comp->device_u32_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u32_show;
+ }
+ if (comp->device_u32_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u32_store;
+ }
+ break;
+ case COUNTER_COMP_U64:
+ if (comp->device_u64_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_u64_show;
+ }
+ if (comp->device_u64_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_u64_store;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ /* Create "*_available" attribute if needed */
+ switch (comp->type) {
+ case COUNTER_COMP_FUNCTION:
+ case COUNTER_COMP_SYNAPSE_ACTION:
+ case COUNTER_COMP_ENUM:
+ case COUNTER_COMP_COUNT_MODE:
+ return counter_avail_attr_create(dev, group, comp, parent);
+ default:
+ return 0;
+ }
+}
+
+static ssize_t counter_comp_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", to_counter_attribute(attr)->comp.name);
+}
+
+static int counter_name_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const char *const name)
+{
+ struct counter_attribute *counter_attr;
+
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.name = name;
+
+ /* Configure device attribute */
+ sysfs_attr_init(&counter_attr->dev_attr.attr);
+ counter_attr->dev_attr.attr.name = "name";
+ counter_attr->dev_attr.attr.mode = 0444;
+ counter_attr->dev_attr.show = counter_comp_name_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
+static ssize_t counter_comp_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const size_t id = (size_t)to_counter_attribute(attr)->comp.priv;
+
+ return sysfs_emit(buf, "%zu\n", id);
+}
+
+static int counter_comp_id_attr_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const char *name, const size_t id)
+{
+ struct counter_attribute *counter_attr;
+
+ /* Allocate Counter attribute */
+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+
+ /* Generate component ID name */
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s_component_id", name);
+ if (!name)
+ return -ENOMEM;
+
+ /* Configure Counter attribute */
+ counter_attr->comp.priv = (void *)id;
+
+ /* Configure device attribute */
+ sysfs_attr_init(&counter_attr->dev_attr.attr);
+ counter_attr->dev_attr.attr.name = name;
+ counter_attr->dev_attr.attr.mode = 0444;
+ counter_attr->dev_attr.show = counter_comp_id_show;
+
+ /* Store list node */
+ list_add(&counter_attr->l, &group->attr_list);
+ group->num_attr++;
+
+ return 0;
+}
+
+static struct counter_comp counter_signal_comp = {
+ .type = COUNTER_COMP_SIGNAL_LEVEL,
+ .name = "signal",
+};
+
+static int counter_signal_attrs_create(struct counter_device *const counter,
+ struct counter_attribute_group *const cattr_group,
+ struct counter_signal *const signal)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_SIGNAL;
+ struct device *const dev = &counter->dev;
+ int err;
+ struct counter_comp comp;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Create main Signal attribute */
+ comp = counter_signal_comp;
+ comp.signal_u32_read = counter->ops->signal_read;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, signal);
+ if (err < 0)
+ return err;
+
+ /* Create Signal name attribute */
+ err = counter_name_attr_create(dev, cattr_group, signal->name);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < signal->num_ext; i++) {
+ ext = &signal->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, signal);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_signals_add(struct counter_device *const counter,
+ struct counter_attribute_group *const groups)
+{
+ size_t i;
+ int err;
+
+ /* Add each Signal */
+ for (i = 0; i < counter->num_signals; i++) {
+ /* Generate Signal attribute directory name */
+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
+ "signal%zu", i);
+ if (!groups[i].name)
+ return -ENOMEM;
+
+ /* Create all attributes associated with Signal */
+ err = counter_signal_attrs_create(counter, groups + i,
+ counter->signals + i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_synapses_add(struct counter_device *const counter,
+ struct counter_attribute_group *const group,
+ struct counter_count *const count)
+{
+ size_t i;
+
+ /* Add each Synapse */
+ for (i = 0; i < count->num_synapses; i++) {
+ struct device *const dev = &counter->dev;
+ struct counter_synapse *synapse;
+ size_t id;
+ struct counter_comp comp;
+ int err;
+
+ synapse = count->synapses + i;
+
+ /* Generate Synapse action name */
+ id = synapse->signal - counter->signals;
+ comp.name = devm_kasprintf(dev, GFP_KERNEL, "signal%zu_action",
+ id);
+ if (!comp.name)
+ return -ENOMEM;
+
+ /* Create action attribute */
+ comp.type = COUNTER_COMP_SYNAPSE_ACTION;
+ comp.action_read = counter->ops->action_read;
+ comp.action_write = counter->ops->action_write;
+ comp.priv = synapse;
+ err = counter_attr_create(dev, group, &comp,
+ COUNTER_SCOPE_COUNT, count);
+ if (err < 0)
+ return err;
+
+ /* Create Synapse component ID attribute */
+ err = counter_comp_id_attr_create(dev, group, comp.name, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct counter_comp counter_count_comp =
+ COUNTER_COMP_COUNT_U64("count", NULL, NULL);
+
+static struct counter_comp counter_function_comp = {
+ .type = COUNTER_COMP_FUNCTION,
+ .name = "function",
+};
+
+static int counter_count_attrs_create(struct counter_device *const counter,
+ struct counter_attribute_group *const cattr_group,
+ struct counter_count *const count)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_COUNT;
+ struct device *const dev = &counter->dev;
+ int err;
+ struct counter_comp comp;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Create main Count attribute */
+ comp = counter_count_comp;
+ comp.count_u64_read = counter->ops->count_read;
+ comp.count_u64_write = counter->ops->count_write;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, count);
+ if (err < 0)
+ return err;
+
+ /* Create Count name attribute */
+ err = counter_name_attr_create(dev, cattr_group, count->name);
+ if (err < 0)
+ return err;
+
+ /* Create Count function attribute */
+ comp = counter_function_comp;
+ comp.count_u32_read = counter->ops->function_read;
+ comp.count_u32_write = counter->ops->function_write;
+ err = counter_attr_create(dev, cattr_group, &comp, scope, count);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < count->num_ext; i++) {
+ ext = &count->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, count);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_counts_add(struct counter_device *const counter,
+ struct counter_attribute_group *const groups)
+{
+ size_t i;
+ struct counter_count *count;
+ int err;
+
+ /* Add each Count */
+ for (i = 0; i < counter->num_counts; i++) {
+ count = counter->counts + i;
+
+ /* Generate Count attribute directory name */
+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
+ "count%zu", i);
+ if (!groups[i].name)
+ return -ENOMEM;
+
+ /* Add sysfs attributes of the Synapses */
+ err = counter_sysfs_synapses_add(counter, groups + i, count);
+ if (err < 0)
+ return err;
+
+ /* Create all attributes associated with Count */
+ err = counter_count_attrs_create(counter, groups + i, count);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_num_signals_read(struct counter_device *counter, u8 *val)
+{
+ *val = counter->num_signals;
+ return 0;
+}
+
+static int counter_num_counts_read(struct counter_device *counter, u8 *val)
+{
+ *val = counter->num_counts;
+ return 0;
+}
+
+static int counter_events_queue_size_read(struct counter_device *counter,
+ u64 *val)
+{
+ *val = kfifo_size(&counter->events);
+ return 0;
+}
+
+static int counter_events_queue_size_write(struct counter_device *counter,
+ u64 val)
+{
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ int err;
+ unsigned long flags;
+
+ /* Allocate new events queue */
+ err = kfifo_alloc(&events, val, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* Swap in new events queue */
+ mutex_lock(&counter->events_out_lock);
+ spin_lock_irqsave(&counter->events_in_lock, flags);
+ kfifo_free(&counter->events);
+ counter->events.kfifo = events.kfifo;
+ spin_unlock_irqrestore(&counter->events_in_lock, flags);
+ mutex_unlock(&counter->events_out_lock);
+
+ return 0;
+}
+
+static struct counter_comp counter_num_signals_comp =
+ COUNTER_COMP_DEVICE_U8("num_signals", counter_num_signals_read, NULL);
+
+static struct counter_comp counter_num_counts_comp =
+ COUNTER_COMP_DEVICE_U8("num_counts", counter_num_counts_read, NULL);
+
+static struct counter_comp counter_events_queue_size_comp =
+ COUNTER_COMP_DEVICE_U64("events_queue_size",
+ counter_events_queue_size_read,
+ counter_events_queue_size_write);
+
+static int counter_sysfs_attr_add(struct counter_device *const counter,
+ struct counter_attribute_group *cattr_group)
+{
+ const enum counter_scope scope = COUNTER_SCOPE_DEVICE;
+ struct device *const dev = &counter->dev;
+ int err;
+ size_t i;
+ struct counter_comp *ext;
+
+ /* Add Signals sysfs attributes */
+ err = counter_sysfs_signals_add(counter, cattr_group);
+ if (err < 0)
+ return err;
+ cattr_group += counter->num_signals;
+
+ /* Add Counts sysfs attributes */
+ err = counter_sysfs_counts_add(counter, cattr_group);
+ if (err < 0)
+ return err;
+ cattr_group += counter->num_counts;
+
+ /* Create name attribute */
+ err = counter_name_attr_create(dev, cattr_group, counter->name);
+ if (err < 0)
+ return err;
+
+ /* Create num_signals attribute */
+ err = counter_attr_create(dev, cattr_group, &counter_num_signals_comp,
+ scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create num_counts attribute */
+ err = counter_attr_create(dev, cattr_group, &counter_num_counts_comp,
+ scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create events_queue_size attribute */
+ err = counter_attr_create(dev, cattr_group,
+ &counter_events_queue_size_comp, scope, NULL);
+ if (err < 0)
+ return err;
+
+ /* Create an attribute for each extension */
+ for (i = 0; i < counter->num_ext; i++) {
+ ext = &counter->ext[i];
+
+ err = counter_attr_create(dev, cattr_group, ext, scope, NULL);
+ if (err < 0)
+ return err;
+
+ err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
+ i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * counter_sysfs_add - Adds Counter sysfs attributes to the device structure
+ * @counter: Pointer to the Counter device structure
+ *
+ * Counter sysfs attributes are created and added to the respective device
+ * structure for later registration to the system. Resource-managed memory
+ * allocation is performed by this function, and this memory should be freed
+ * when no longer needed (automatically by a device_unregister call, or
+ * manually by a devres_release_all call).
+ */
+int counter_sysfs_add(struct counter_device *const counter)
+{
+ struct device *const dev = &counter->dev;
+ const size_t num_groups = counter->num_signals + counter->num_counts + 1;
+ struct counter_attribute_group *cattr_groups;
+ size_t i, j;
+ int err;
+ struct attribute_group *groups;
+ struct counter_attribute *p;
+
+ /* Allocate space for attribute groups (signals, counts, and ext) */
+ cattr_groups = devm_kcalloc(dev, num_groups, sizeof(*cattr_groups),
+ GFP_KERNEL);
+ if (!cattr_groups)
+ return -ENOMEM;
+
+ /* Initialize attribute lists */
+ for (i = 0; i < num_groups; i++)
+ INIT_LIST_HEAD(&cattr_groups[i].attr_list);
+
+ /* Add Counter device sysfs attributes */
+ err = counter_sysfs_attr_add(counter, cattr_groups);
+ if (err < 0)
+ return err;
+
+ /* Allocate attribute group pointers for association with device */
+ dev->groups = devm_kcalloc(dev, num_groups + 1, sizeof(*dev->groups),
+ GFP_KERNEL);
+ if (!dev->groups)
+ return -ENOMEM;
+
+ /* Allocate space for attribute groups */
+ groups = devm_kcalloc(dev, num_groups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ /* Prepare each group of attributes for association */
+ for (i = 0; i < num_groups; i++) {
+ groups[i].name = cattr_groups[i].name;
+
+ /* Allocate space for attribute pointers */
+ groups[i].attrs = devm_kcalloc(dev,
+ cattr_groups[i].num_attr + 1,
+ sizeof(*groups[i].attrs),
+ GFP_KERNEL);
+ if (!groups[i].attrs)
+ return -ENOMEM;
+
+ /* Add attribute pointers to attribute group */
+ j = 0;
+ list_for_each_entry(p, &cattr_groups[i].attr_list, l)
+ groups[i].attrs[j++] = &p->dev_attr.attr;
+
+ /* Associate attribute group */
+ dev->groups[i] = &groups[i];
+ }
+
+ return 0;
+}
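Everything counter_sysfs_add() emits is derived from the counter_comp tables a driver supplies. As a hedged illustration, a device-level extension for the hypothetical foo driver sketched earlier (names and semantics invented, foo_priv reduced to the one field used here) would be wired up via counter->ext and counter->num_ext before devm_counter_add(), after which it surfaces as an "enable" attribute plus an "enable_component_id" in the counter's sysfs directory:

/* Hypothetical device-scope extension exposed through sysfs. */
#include <linux/counter.h>
#include <linux/types.h>

struct foo_priv {
	u8 enable;	/* hypothetical on/off state */
};

static int foo_enable_read(struct counter_device *counter, u8 *enable)
{
	struct foo_priv *const priv = counter_priv(counter);

	*enable = priv->enable;
	return 0;
}

static int foo_enable_write(struct counter_device *counter, u8 enable)
{
	struct foo_priv *const priv = counter_priv(counter);

	priv->enable = enable;
	return 0;
}

static struct counter_comp foo_ext[] = {
	COUNTER_COMP_DEVICE_BOOL("enable", foo_enable_read,
				 foo_enable_write),
};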
diff --git a/drivers/counter/counter-sysfs.h b/drivers/counter/counter-sysfs.h
new file mode 100644
index 000000000000..14fe566aca0e
--- /dev/null
+++ b/drivers/counter/counter-sysfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter sysfs interface
+ * Copyright (C) 2020 William Breathitt Gray
+ */
+#ifndef _COUNTER_SYSFS_H_
+#define _COUNTER_SYSFS_H_
+
+#include <linux/counter.h>
+
+int counter_sysfs_add(struct counter_device *const counter);
+
+#endif /* _COUNTER_SYSFS_H_ */
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
index 6a683d086008..de921e8a3f72 100644
--- a/drivers/counter/counter.c
+++ b/drivers/counter/counter.c
@@ -289,9 +289,9 @@ struct counter_signal_unit {
struct counter_signal *signal;
};
-static const char *const counter_signal_value_str[] = {
- [COUNTER_SIGNAL_LOW] = "low",
- [COUNTER_SIGNAL_HIGH] = "high"
+static const char *const counter_signal_level_str[] = {
+ [COUNTER_SIGNAL_LEVEL_LOW] = "low",
+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
};
static ssize_t counter_signal_show(struct device *dev,
@@ -302,13 +302,13 @@ static ssize_t counter_signal_show(struct device *dev,
const struct counter_signal_unit *const component = devattr->component;
struct counter_signal *const signal = component->signal;
int err;
- enum counter_signal_value val;
+ enum counter_signal_level level;
- err = counter->ops->signal_read(counter, signal, &val);
+ err = counter->ops->signal_read(counter, signal, &level);
if (err)
return err;
- return sprintf(buf, "%s\n", counter_signal_value_str[val]);
+ return sprintf(buf, "%s\n", counter_signal_level_str[level]);
}
struct counter_name_unit {
@@ -744,15 +744,15 @@ static ssize_t counter_count_store(struct device *dev,
return len;
}
-static const char *const counter_count_function_str[] = {
- [COUNTER_COUNT_FUNCTION_INCREASE] = "increase",
- [COUNTER_COUNT_FUNCTION_DECREASE] = "decrease",
- [COUNTER_COUNT_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
- [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
- [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
- [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
- [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
- [COUNTER_COUNT_FUNCTION_QUADRATURE_X4] = "quadrature x4"
+static const char *const counter_function_str[] = {
+ [COUNTER_FUNCTION_INCREASE] = "increase",
+ [COUNTER_FUNCTION_DECREASE] = "decrease",
+ [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
+ [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
+ [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
+ [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
+ [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
+ [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4"
};
static ssize_t counter_function_show(struct device *dev,
@@ -764,7 +764,7 @@ static ssize_t counter_function_show(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
size_t func_index;
- enum counter_count_function function;
+ enum counter_function function;
err = counter->ops->function_get(counter, count, &func_index);
if (err)
@@ -773,7 +773,7 @@ static ssize_t counter_function_show(struct device *dev,
count->function = func_index;
function = count->functions_list[func_index];
- return sprintf(buf, "%s\n", counter_count_function_str[function]);
+ return sprintf(buf, "%s\n", counter_function_str[function]);
}
static ssize_t counter_function_store(struct device *dev,
@@ -785,14 +785,14 @@ static ssize_t counter_function_store(struct device *dev,
struct counter_count *const count = component->count;
const size_t num_functions = count->num_functions;
size_t func_index;
- enum counter_count_function function;
+ enum counter_function function;
int err;
struct counter_device *const counter = dev_get_drvdata(dev);
/* Find requested Count function mode */
for (func_index = 0; func_index < num_functions; func_index++) {
function = count->functions_list[func_index];
- if (sysfs_streq(buf, counter_count_function_str[function]))
+ if (sysfs_streq(buf, counter_function_str[function]))
break;
}
/* Return error if requested Count function mode not found */
@@ -880,25 +880,25 @@ err_free_attr_list:
}
struct counter_func_avail_unit {
- const enum counter_count_function *functions_list;
+ const enum counter_function *functions_list;
size_t num_functions;
};
-static ssize_t counter_count_function_available_show(struct device *dev,
+static ssize_t counter_function_available_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_device_attr *const devattr = to_counter_attr(attr);
const struct counter_func_avail_unit *const component = devattr->component;
- const enum counter_count_function *const func_list = component->functions_list;
+ const enum counter_function *const func_list = component->functions_list;
const size_t num_functions = component->num_functions;
size_t i;
- enum counter_count_function function;
+ enum counter_function function;
ssize_t len = 0;
for (i = 0; i < num_functions; i++) {
function = func_list[i];
len += sprintf(buf + len, "%s\n",
- counter_count_function_str[function]);
+ counter_function_str[function]);
}
return len;
@@ -968,7 +968,7 @@ static int counter_count_attributes_create(
parm.group = group;
parm.prefix = "";
parm.name = "function_available";
- parm.show = counter_count_function_available_show;
+ parm.show = counter_function_available_show;
parm.store = NULL;
parm.component = avail_comp;
err = counter_attribute_create(&parm);
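
The hunks above are a mechanical rename — counter_count_function becomes counter_function and the COUNTER_COUNT_FUNCTION_* constants drop the extra COUNT_ — but under the reworked API the driver callbacks also traffic in the enum directly rather than in a size_t index into functions_list. A driver-side sketch of the new shape (foo_hw_set_mode() is a hypothetical helper, not from this patch):

static int foo_function_read(struct counter_device *counter,
			     struct counter_count *count,
			     enum counter_function *function)
{
	*function = COUNTER_FUNCTION_QUADRATURE_X4;
	return 0;
}

static int foo_function_write(struct counter_device *counter,
			      struct counter_count *count,
			      enum counter_function function)
{
	switch (function) {
	case COUNTER_FUNCTION_INCREASE:
		return foo_hw_set_mode(counter, false);
	case COUNTER_FUNCTION_QUADRATURE_X4:
		return foo_hw_set_mode(counter, true);
	default:
		/* the core only passes values from functions_list */
		return -EINVAL;
	}
}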
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index c2b3fdfd8b77..2a58582a9df4 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/counter.h>
#include <linux/bitfield.h>
+#include <linux/types.h>
#define FTM_FIELD_UPDATE(ftm, offset, mask, val) \
({ \
@@ -25,7 +26,6 @@
})
struct ftm_quaddec {
- struct counter_device counter;
struct platform_device *pdev;
void __iomem *ftm_base;
bool big_endian;
@@ -115,10 +115,9 @@ static void ftm_quaddec_disable(void *ftm)
}
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
- struct counter_count *count,
- size_t *cnt_mode)
+ struct counter_count *count, u32 *cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
uint32_t scflags;
ftm_read(ftm, FTM_SC, &scflags);
@@ -129,10 +128,9 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter,
}
static int ftm_quaddec_set_prescaler(struct counter_device *counter,
- struct counter_count *count,
- size_t cnt_mode)
+ struct counter_count *count, u32 cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
mutex_lock(&ftm->ftm_quaddec_mutex);
@@ -151,36 +149,19 @@ static const char * const ftm_quaddec_prescaler[] = {
"1", "2", "4", "8", "16", "32", "64", "128"
};
-static struct counter_count_enum_ext ftm_quaddec_prescaler_enum = {
- .items = ftm_quaddec_prescaler,
- .num_items = ARRAY_SIZE(ftm_quaddec_prescaler),
- .get = ftm_quaddec_get_prescaler,
- .set = ftm_quaddec_set_prescaler
-};
-
-enum ftm_quaddec_synapse_action {
- FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES,
-};
-
-static enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
- [FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] =
+static const enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
-enum ftm_quaddec_count_function {
- FTM_QUADDEC_COUNT_ENCODER_MODE_1,
-};
-
-static const enum counter_count_function ftm_quaddec_count_functions[] = {
- [FTM_QUADDEC_COUNT_ENCODER_MODE_1] =
- COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+static const enum counter_function ftm_quaddec_count_functions[] = {
+ COUNTER_FUNCTION_QUADRATURE_X4
};
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
- unsigned long *val)
+ u64 *val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
@@ -192,9 +173,9 @@ static int ftm_quaddec_count_read(struct counter_device *counter,
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
- const unsigned long val)
+ const u64 val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
@@ -206,21 +187,21 @@ static int ftm_quaddec_count_write(struct counter_device *counter,
return 0;
}
-static int ftm_quaddec_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int ftm_quaddec_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- *function = FTM_QUADDEC_COUNT_ENCODER_MODE_1;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
-static int ftm_quaddec_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int ftm_quaddec_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- *action = FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
@@ -228,8 +209,8 @@ static int ftm_quaddec_action_get(struct counter_device *counter,
static const struct counter_ops ftm_quaddec_cnt_ops = {
.count_read = ftm_quaddec_count_read,
.count_write = ftm_quaddec_count_write,
- .function_get = ftm_quaddec_count_function_get,
- .action_get = ftm_quaddec_action_get,
+ .function_read = ftm_quaddec_count_function_read,
+ .action_read = ftm_quaddec_action_read,
};
static struct counter_signal ftm_quaddec_signals[] = {
@@ -256,9 +237,12 @@ static struct counter_synapse ftm_quaddec_count_synapses[] = {
}
};
-static const struct counter_count_ext ftm_quaddec_count_ext[] = {
- COUNTER_COUNT_ENUM("prescaler", &ftm_quaddec_prescaler_enum),
- COUNTER_COUNT_ENUM_AVAILABLE("prescaler", &ftm_quaddec_prescaler_enum),
+static DEFINE_COUNTER_ENUM(ftm_quaddec_prescaler_enum, ftm_quaddec_prescaler);
+
+static struct counter_comp ftm_quaddec_count_ext[] = {
+ COUNTER_COMP_COUNT_ENUM("prescaler", ftm_quaddec_get_prescaler,
+ ftm_quaddec_set_prescaler,
+ ftm_quaddec_prescaler_enum),
};
static struct counter_count ftm_quaddec_counts = {
@@ -274,17 +258,17 @@ static struct counter_count ftm_quaddec_counts = {
static int ftm_quaddec_probe(struct platform_device *pdev)
{
+ struct counter_device *counter;
struct ftm_quaddec *ftm;
struct device_node *node = pdev->dev.of_node;
struct resource *io;
int ret;
- ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL);
- if (!ftm)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*ftm));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, ftm);
+ ftm = counter_priv(counter);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io) {
@@ -300,14 +284,13 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to map memory region\n");
return -EINVAL;
}
- ftm->counter.name = dev_name(&pdev->dev);
- ftm->counter.parent = &pdev->dev;
- ftm->counter.ops = &ftm_quaddec_cnt_ops;
- ftm->counter.counts = &ftm_quaddec_counts;
- ftm->counter.num_counts = 1;
- ftm->counter.signals = ftm_quaddec_signals;
- ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals);
- ftm->counter.priv = ftm;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &ftm_quaddec_cnt_ops;
+ counter->counts = &ftm_quaddec_counts;
+ counter->num_counts = 1;
+ counter->signals = ftm_quaddec_signals;
+ counter->num_signals = ARRAY_SIZE(ftm_quaddec_signals);
mutex_init(&ftm->ftm_quaddec_mutex);
@@ -317,9 +300,9 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_counter_register(&pdev->dev, &ftm->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
if (ret)
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
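
This probe conversion is the template the rest of the series follows: struct counter_device is no longer embedded in the driver's private structure; devm_counter_alloc() allocates device and private data together, counter_priv() recovers the typed private pointer, and devm_counter_add() replaces devm_counter_register(). Condensed sketch of the pattern (hypothetical driver, error paths trimmed):

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct counter_device *counter;
	struct foo_priv *priv;

	counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
	if (!counter)
		return -ENOMEM;
	priv = counter_priv(counter);	/* same allocation, typed access */

	counter->name = dev_name(&pdev->dev);
	counter->parent = &pdev->dev;
	/* ... ops, counts, signals as in ftm_quaddec_probe() above ... */

	return devm_counter_add(&pdev->dev, counter);
}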
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
new file mode 100644
index 000000000000..47a6a9dfc9e8
--- /dev/null
+++ b/drivers/counter/intel-qep.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Quadrature Encoder Peripheral driver
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define INTEL_QEPCON 0x00
+#define INTEL_QEPFLT 0x04
+#define INTEL_QEPCOUNT 0x08
+#define INTEL_QEPMAX 0x0c
+#define INTEL_QEPWDT 0x10
+#define INTEL_QEPCAPDIV 0x14
+#define INTEL_QEPCNTR 0x18
+#define INTEL_QEPCAPBUF 0x1c
+#define INTEL_QEPINT_STAT 0x20
+#define INTEL_QEPINT_MASK 0x24
+
+/* QEPCON */
+#define INTEL_QEPCON_EN BIT(0)
+#define INTEL_QEPCON_FLT_EN BIT(1)
+#define INTEL_QEPCON_EDGE_A BIT(2)
+#define INTEL_QEPCON_EDGE_B BIT(3)
+#define INTEL_QEPCON_EDGE_INDX BIT(4)
+#define INTEL_QEPCON_SWPAB BIT(5)
+#define INTEL_QEPCON_OP_MODE BIT(6)
+#define INTEL_QEPCON_PH_ERR BIT(7)
+#define INTEL_QEPCON_COUNT_RST_MODE BIT(8)
+#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9)
+#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9)
+#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0)
+#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1)
+#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2)
+#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3)
+#define INTEL_QEPCON_CAP_MODE BIT(11)
+#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12)
+#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12)
+#define INTEL_QEPCON_FIFO_EMPTY BIT(15)
+
+/* QEPFLT */
+#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff)
+
+/* QEPINT */
+#define INTEL_QEPINT_FIFOCRIT BIT(5)
+#define INTEL_QEPINT_FIFOENTRY BIT(4)
+#define INTEL_QEPINT_QEPDIR BIT(3)
+#define INTEL_QEPINT_QEPRST_UP BIT(2)
+#define INTEL_QEPINT_QEPRST_DOWN BIT(1)
+#define INTEL_QEPINT_WDT BIT(0)
+
+#define INTEL_QEPINT_MASK_ALL GENMASK(5, 0)
+
+#define INTEL_QEP_CLK_PERIOD_NS 10
+
+struct intel_qep {
+ struct mutex lock;
+ struct device *dev;
+ void __iomem *regs;
+ bool enabled;
+ /* Context save registers */
+ u32 qepcon;
+ u32 qepflt;
+ u32 qepmax;
+};
+
+static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
+{
+ return readl(qep->regs + offset);
+}
+
+static inline void intel_qep_writel(struct intel_qep *qep,
+ u32 offset, u32 value)
+{
+ writel(value, qep->regs + offset);
+}
+
+static void intel_qep_init(struct intel_qep *qep)
+{
+ u32 reg;
+
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ reg &= ~INTEL_QEPCON_EN;
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ qep->enabled = false;
+	/*
+	 * Make sure the peripheral is disabled by flushing the write
+	 * with a dummy read.
+	 */
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+
+ reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
+ reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
+ INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
+}
+
+static int intel_qep_count_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct intel_qep *const qep = counter_priv(counter);
+
+ pm_runtime_get_sync(qep->dev);
+ *val = intel_qep_readl(qep, INTEL_QEPCOUNT);
+ pm_runtime_put(qep->dev);
+
+ return 0;
+}
+
+static const enum counter_function intel_qep_count_functions[] = {
+ COUNTER_FUNCTION_QUADRATURE_X4,
+};
+
+static int intel_qep_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
+
+ return 0;
+}
+
+static const enum counter_synapse_action intel_qep_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+static int intel_qep_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+}
+
+static const struct counter_ops intel_qep_counter_ops = {
+ .count_read = intel_qep_count_read,
+ .function_read = intel_qep_function_read,
+ .action_read = intel_qep_action_read,
+};
+
+#define INTEL_QEP_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+}
+
+static struct counter_signal intel_qep_signals[] = {
+ INTEL_QEP_SIGNAL(0, "Phase A"),
+ INTEL_QEP_SIGNAL(1, "Phase B"),
+ INTEL_QEP_SIGNAL(2, "Index"),
+};
+
+#define INTEL_QEP_SYNAPSE(_signal_id) { \
+ .actions_list = intel_qep_synapse_actions, \
+ .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), \
+ .signal = &intel_qep_signals[(_signal_id)], \
+}
+
+static struct counter_synapse intel_qep_count_synapses[] = {
+ INTEL_QEP_SYNAPSE(0),
+ INTEL_QEP_SYNAPSE(1),
+ INTEL_QEP_SYNAPSE(2),
+};
+
+static int intel_qep_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
+{
+ struct intel_qep *qep = counter_priv(counter);
+
+ pm_runtime_get_sync(qep->dev);
+ *ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
+ pm_runtime_put(qep->dev);
+
+ return 0;
+}
+
+static int intel_qep_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 max)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ int ret = 0;
+
+ /* Intel QEP ceiling configuration only supports 32-bit values */
+ if (max != (u32)max)
+ return -ERANGE;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ intel_qep_writel(qep, INTEL_QEPMAX, max);
+ pm_runtime_put(qep->dev);
+
+out:
+ mutex_unlock(&qep->lock);
+ return ret;
+}
+
+static int intel_qep_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct intel_qep *qep = counter_priv(counter);
+
+ *enable = qep->enabled;
+
+ return 0;
+}
+
+static int intel_qep_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 val)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ u32 reg;
+ bool changed;
+
+ mutex_lock(&qep->lock);
+ changed = val ^ qep->enabled;
+ if (!changed)
+ goto out;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (val) {
+ /* Enable peripheral and keep runtime PM always on */
+ reg |= INTEL_QEPCON_EN;
+ pm_runtime_get_noresume(qep->dev);
+ } else {
+ /* Let runtime PM be idle and disable peripheral */
+ pm_runtime_put_noidle(qep->dev);
+ reg &= ~INTEL_QEPCON_EN;
+ }
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+ qep->enabled = val;
+
+out:
+ mutex_unlock(&qep->lock);
+ return 0;
+}
+
+static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *length)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ u32 reg;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (!(reg & INTEL_QEPCON_FLT_EN)) {
+ pm_runtime_put(qep->dev);
+ return 0;
+ }
+ reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
+ pm_runtime_put(qep->dev);
+
+ *length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;
+
+ return 0;
+}
+
+static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 length)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ u32 reg;
+ bool enable;
+ int ret = 0;
+
+	/*
+	 * The spike filter length is (MAX_COUNT + 2) clock periods.
+	 * Disable the filter when userspace writes 0, enable it for
+	 * valid nanosecond values, and error out otherwise.
+	 */
+ do_div(length, INTEL_QEP_CLK_PERIOD_NS);
+ if (length == 0) {
+ enable = false;
+ length = 0;
+ } else if (length >= 2) {
+ enable = true;
+ length -= 2;
+ } else {
+ return -EINVAL;
+ }
+
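+	/* Reject lengths that overflow the 21-bit QEPFLT max count field */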
+ if (length > INTEL_QEPFLT_MAX_COUNT(length))
+ return -ERANGE;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (enable)
+ reg |= INTEL_QEPCON_FLT_EN;
+ else
+ reg &= ~INTEL_QEPCON_FLT_EN;
+ intel_qep_writel(qep, INTEL_QEPFLT, length);
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+
+out:
+ mutex_unlock(&qep->lock);
+ return ret;
+}
+
+static int intel_qep_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *preset_enable)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ u32 reg;
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ pm_runtime_put(qep->dev);
+
+ *preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);
+
+ return 0;
+}
+
+static int intel_qep_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 val)
+{
+ struct intel_qep *qep = counter_priv(counter);
+ u32 reg;
+ int ret = 0;
+
+ mutex_lock(&qep->lock);
+ if (qep->enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(qep->dev);
+ reg = intel_qep_readl(qep, INTEL_QEPCON);
+ if (val)
+ reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
+ else
+ reg |= INTEL_QEPCON_COUNT_RST_MODE;
+
+ intel_qep_writel(qep, INTEL_QEPCON, reg);
+ pm_runtime_put(qep->dev);
+
+out:
+ mutex_unlock(&qep->lock);
+
+ return ret;
+}
+
+static struct counter_comp intel_qep_count_ext[] = {
+ COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
+ COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
+ COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
+ intel_qep_preset_enable_write),
+ COUNTER_COMP_COUNT_U64("spike_filter_ns",
+ intel_qep_spike_filter_ns_read,
+ intel_qep_spike_filter_ns_write),
+};
+
+static struct counter_count intel_qep_counter_count[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = intel_qep_count_functions,
+ .num_functions = ARRAY_SIZE(intel_qep_count_functions),
+ .synapses = intel_qep_count_synapses,
+ .num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
+ .ext = intel_qep_count_ext,
+ .num_ext = ARRAY_SIZE(intel_qep_count_ext),
+ },
+};
+
+static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct counter_device *counter;
+ struct intel_qep *qep;
+ struct device *dev = &pci->dev;
+ void __iomem *regs;
+ int ret;
+
+ counter = devm_counter_alloc(dev, sizeof(*qep));
+ if (!counter)
+ return -ENOMEM;
+ qep = counter_priv(counter);
+
+ ret = pcim_enable_device(pci);
+ if (ret)
+ return ret;
+
+ pci_set_master(pci);
+
+ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
+ if (ret)
+ return ret;
+
+ regs = pcim_iomap_table(pci)[0];
+ if (!regs)
+ return -ENOMEM;
+
+ qep->dev = dev;
+ qep->regs = regs;
+ mutex_init(&qep->lock);
+
+ intel_qep_init(qep);
+ pci_set_drvdata(pci, qep);
+
+ counter->name = pci_name(pci);
+ counter->parent = dev;
+ counter->ops = &intel_qep_counter_ops;
+ counter->counts = intel_qep_counter_count;
+ counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
+ counter->signals = intel_qep_signals;
+ counter->num_signals = ARRAY_SIZE(intel_qep_signals);
+ qep->enabled = false;
+
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
+ ret = devm_counter_add(&pci->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");
+
+ return 0;
+}
+
+static void intel_qep_remove(struct pci_dev *pci)
+{
+ struct intel_qep *qep = pci_get_drvdata(pci);
+ struct device *dev = &pci->dev;
+
+ pm_runtime_forbid(dev);
+ if (!qep->enabled)
+ pm_runtime_get(dev);
+
+ intel_qep_writel(qep, INTEL_QEPCON, 0);
+}
+
+static int __maybe_unused intel_qep_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_qep *qep = pci_get_drvdata(pdev);
+
+ qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
+ qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
+ qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);
+
+ return 0;
+}
+
+static int __maybe_unused intel_qep_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct intel_qep *qep = pci_get_drvdata(pdev);
+
+	/*
+	 * Make sure the peripheral is disabled while restoring registers,
+	 * since some control register bits are writable only when the
+	 * peripheral is disabled.
+	 */
+ intel_qep_writel(qep, INTEL_QEPCON, 0);
+ intel_qep_readl(qep, INTEL_QEPCON);
+
+ intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
+ intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
+ intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
+
+ /* Restore all other control register bits except enable status */
+ intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
+ intel_qep_readl(qep, INTEL_QEPCON);
+
+ /* Restore enable status */
+ intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
+ intel_qep_suspend, intel_qep_resume, NULL);
+
+static const struct pci_device_id intel_qep_id_table[] = {
+ /* EHL */
+ { PCI_VDEVICE(INTEL, 0x4bc3), },
+ { PCI_VDEVICE(INTEL, 0x4b81), },
+ { PCI_VDEVICE(INTEL, 0x4b82), },
+ { PCI_VDEVICE(INTEL, 0x4b83), },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
+
+static struct pci_driver intel_qep_driver = {
+ .name = "intel-qep",
+ .id_table = intel_qep_id_table,
+ .probe = intel_qep_probe,
+ .remove = intel_qep_remove,
+ .driver = {
+ .pm = &intel_qep_pm_ops,
+ }
+};
+
+module_pci_driver(intel_qep_driver);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
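
Since the filter arithmetic above is easy to misread by one, a worked example with the fixed 10 ns QEP clock:

/*
 * Worked example for intel_qep_spike_filter_ns_write():
 *   userspace writes 500 ns -> length = 500 / 10 = 50 periods
 *                              MAX_COUNT = 50 - 2 = 48
 *   read-back: (48 + 2) * 10 ns = 500 ns
 * A write of 10 ns (one period) is rejected with -EINVAL because the
 * filter cannot be shorter than two periods; a write of 0 disables it.
 */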
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
new file mode 100644
index 000000000000..3b13f56bbb11
--- /dev/null
+++ b/drivers/counter/interrupt-cnt.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+
+#include <linux/counter.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define INTERRUPT_CNT_NAME "interrupt-cnt"
+
+struct interrupt_cnt_priv {
+ atomic_t count;
+ struct gpio_desc *gpio;
+ int irq;
+ bool enabled;
+ struct counter_signal signals;
+ struct counter_synapse synapses;
+ struct counter_count cnts;
+};
+
+static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
+{
+ struct counter_device *counter = dev_id;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+
+ atomic_inc(&priv->count);
+
+ counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int interrupt_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+
+ *enable = priv->enabled;
+
+ return 0;
+}
+
+static int interrupt_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+{
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+
+ if (priv->enabled == enable)
+ return 0;
+
+ if (enable) {
+ priv->enabled = true;
+ enable_irq(priv->irq);
+ } else {
+ disable_irq(priv->irq);
+ priv->enabled = false;
+ }
+
+ return 0;
+}
+
+static struct counter_comp interrupt_cnt_ext[] = {
+ COUNTER_COMP_ENABLE(interrupt_cnt_enable_read,
+ interrupt_cnt_enable_write),
+};
+
+static const enum counter_synapse_action interrupt_cnt_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+};
+
+static int interrupt_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+
+ return 0;
+}
+
+static int interrupt_cnt_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+
+ *val = atomic_read(&priv->count);
+
+ return 0;
+}
+
+static int interrupt_cnt_write(struct counter_device *counter,
+ struct counter_count *count, const u64 val)
+{
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+
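+	/* Reject values that do not fit in the atomic_t count */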
+ if (val != (typeof(priv->count.counter))val)
+ return -ERANGE;
+
+ atomic_set(&priv->count, val);
+
+ return 0;
+}
+
+static const enum counter_function interrupt_cnt_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+};
+
+static int interrupt_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ *function = COUNTER_FUNCTION_INCREASE;
+
+ return 0;
+}
+
+static int interrupt_cnt_signal_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_level *level)
+{
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
+ int ret;
+
+ if (!priv->gpio)
+ return -EINVAL;
+
+ ret = gpiod_get_value(priv->gpio);
+ if (ret < 0)
+ return ret;
+
+ *level = ret ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
+
+ return 0;
+}
+
+static const struct counter_ops interrupt_cnt_ops = {
+ .action_read = interrupt_cnt_action_read,
+ .count_read = interrupt_cnt_read,
+ .count_write = interrupt_cnt_write,
+ .function_read = interrupt_cnt_function_read,
+ .signal_read = interrupt_cnt_signal_read,
+};
+
+static int interrupt_cnt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct counter_device *counter;
+ struct interrupt_cnt_priv *priv;
+ int ret;
+
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
+ return -ENOMEM;
+ priv = counter_priv(counter);
+
+ priv->irq = platform_get_irq_optional(pdev, 0);
+ if (priv->irq == -ENXIO)
+ priv->irq = 0;
+ else if (priv->irq < 0)
+ return dev_err_probe(dev, priv->irq, "failed to get IRQ\n");
+
+ priv->gpio = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
+ if (IS_ERR(priv->gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->gpio), "failed to get GPIO\n");
+
+ if (!priv->irq && !priv->gpio) {
+		dev_err(dev, "no IRQ or GPIO found; at least one count source must be provided\n");
+ return -ENODEV;
+ }
+
+ if (!priv->irq) {
+ int irq = gpiod_to_irq(priv->gpio);
+
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "failed to get IRQ from GPIO\n");
+
+ priv->irq = irq;
+ }
+
+ priv->signals.name = devm_kasprintf(dev, GFP_KERNEL, "IRQ %d",
+ priv->irq);
+ if (!priv->signals.name)
+ return -ENOMEM;
+
+ counter->signals = &priv->signals;
+ counter->num_signals = 1;
+
+ priv->synapses.actions_list = interrupt_cnt_synapse_actions;
+ priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions);
+ priv->synapses.signal = &priv->signals;
+
+ priv->cnts.name = "Channel 0 Count";
+ priv->cnts.functions_list = interrupt_cnt_functions;
+ priv->cnts.num_functions = ARRAY_SIZE(interrupt_cnt_functions);
+ priv->cnts.synapses = &priv->synapses;
+ priv->cnts.num_synapses = 1;
+ priv->cnts.ext = interrupt_cnt_ext;
+ priv->cnts.num_ext = ARRAY_SIZE(interrupt_cnt_ext);
+
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &interrupt_cnt_ops;
+ counter->counts = &priv->cnts;
+ counter->num_counts = 1;
+
+ irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
+ IRQF_TRIGGER_RISING | IRQF_NO_THREAD,
+ dev_name(dev), counter);
+ if (ret)
+ return ret;
+
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return 0;
+}
+
+static const struct of_device_id interrupt_cnt_of_match[] = {
+ { .compatible = "interrupt-counter", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, interrupt_cnt_of_match);
+
+static struct platform_driver interrupt_cnt_driver = {
+ .probe = interrupt_cnt_probe,
+ .driver = {
+ .name = INTERRUPT_CNT_NAME,
+ .of_match_table = interrupt_cnt_of_match,
+ },
+};
+module_platform_driver(interrupt_cnt_driver);
+
+MODULE_ALIAS("platform:interrupt-counter");
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Interrupt counter driver");
+MODULE_LICENSE("GPL v2");
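
interrupt-cnt is the simplest consumer of the new event interface: each IRQ increments an atomic_t and pushes COUNTER_EVENT_CHANGE_OF_STATE, so a character-device reader can watch the count without polling sysfs. A userspace sketch of what that enables (assumes the chrdev uAPI added earlier in this series; the device node name is an assumption and error handling is elided):

#include <fcntl.h>
#include <unistd.h>
#include <linux/counter.h>

int main(void)
{
	struct counter_event event;
	int fd = open("/dev/counter0", O_RDWR);

	/* COUNTER_ADD_WATCH_IOCTL / COUNTER_ENABLE_EVENTS_IOCTL elided */
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		;	/* one event per counted interrupt */

	return 0;
}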
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 85fbbac06d31..30c7813c8f43 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) 2020 Microchip
*
* Author: Kamel Bouhara <kamel.bouhara@bootlin.com>
@@ -24,35 +24,22 @@
struct mchp_tc_data {
const struct atmel_tcb_config *tc_cfg;
- struct counter_device counter;
struct regmap *regmap;
int qdec_mode;
int num_channels;
int channel[2];
};
-enum mchp_tc_count_function {
- MCHP_TC_FUNCTION_INCREASE,
- MCHP_TC_FUNCTION_QUADRATURE,
+static const enum counter_function mchp_tc_count_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
-static enum counter_count_function mchp_tc_count_functions[] = {
- [MCHP_TC_FUNCTION_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
- [MCHP_TC_FUNCTION_QUADRATURE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
-};
-
-enum mchp_tc_synapse_action {
- MCHP_TC_SYNAPSE_ACTION_NONE = 0,
- MCHP_TC_SYNAPSE_ACTION_RISING_EDGE,
- MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE,
- MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE
-};
-
-static enum counter_synapse_action mchp_tc_synapse_actions[] = {
- [MCHP_TC_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [MCHP_TC_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+static const enum counter_synapse_action mchp_tc_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
static struct counter_signal mchp_tc_count_signals[] = {
@@ -79,25 +66,25 @@ static struct counter_synapse mchp_tc_count_synapses[] = {
}
};
-static int mchp_tc_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int mchp_tc_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
if (priv->qdec_mode)
- *function = MCHP_TC_FUNCTION_QUADRATURE;
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
else
- *function = MCHP_TC_FUNCTION_INCREASE;
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
}
-static int mchp_tc_count_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int mchp_tc_count_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 bmr, cmr;
regmap_read(priv->regmap, ATMEL_TC_BMR, &bmr);
@@ -107,7 +94,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
cmr &= ~ATMEL_TC_WAVE;
switch (function) {
- case MCHP_TC_FUNCTION_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
priv->qdec_mode = 0;
/* Set highest rate based on whether soc has gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
@@ -119,7 +106,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
cmr |= ATMEL_TC_CMR_MASK;
cmr &= ~(ATMEL_TC_ABETRG | ATMEL_TC_XC0);
break;
- case MCHP_TC_FUNCTION_QUADRATURE:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
if (!priv->tc_cfg->has_qdec)
return -EINVAL;
/* In QDEC mode settings both channels 0 and 1 are required */
@@ -132,6 +119,9 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
bmr |= ATMEL_TC_QDEN | ATMEL_TC_POSEN;
cmr |= ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_ABETRG | ATMEL_TC_XC0;
break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
regmap_write(priv->regmap, ATMEL_TC_BMR, bmr);
@@ -154,9 +144,9 @@ static int mchp_tc_count_function_set(struct counter_device *counter,
static int mchp_tc_count_signal_read(struct counter_device *counter,
struct counter_signal *signal,
- enum counter_signal_value *val)
+ enum counter_signal_level *lvl)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
bool sigstatus;
u32 sr;
@@ -167,17 +157,17 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,
else
sigstatus = (sr & ATMEL_TC_MTIOA);
- *val = sigstatus ? COUNTER_SIGNAL_HIGH : COUNTER_SIGNAL_LOW;
+ *lvl = sigstatus ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
-static int mchp_tc_count_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int mchp_tc_count_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cmr;
if (priv->qdec_mode) {
@@ -195,28 +185,28 @@ static int mchp_tc_count_action_get(struct counter_device *counter,
switch (cmr & ATMEL_TC_ETRGEDG) {
default:
- *action = MCHP_TC_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
break;
case ATMEL_TC_ETRGEDG_RISING:
- *action = MCHP_TC_SYNAPSE_ACTION_RISING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
break;
case ATMEL_TC_ETRGEDG_FALLING:
- *action = MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
break;
case ATMEL_TC_ETRGEDG_BOTH:
- *action = MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
break;
}
return 0;
}
-static int mchp_tc_count_action_set(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t action)
+static int mchp_tc_count_action_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 edge = ATMEL_TC_ETRGEDG_NONE;
/* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */
@@ -224,18 +214,21 @@ static int mchp_tc_count_action_set(struct counter_device *counter,
return -EINVAL;
switch (action) {
- case MCHP_TC_SYNAPSE_ACTION_NONE:
+ case COUNTER_SYNAPSE_ACTION_NONE:
edge = ATMEL_TC_ETRGEDG_NONE;
break;
- case MCHP_TC_SYNAPSE_ACTION_RISING_EDGE:
+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
edge = ATMEL_TC_ETRGEDG_RISING;
break;
- case MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE:
+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
edge = ATMEL_TC_ETRGEDG_FALLING;
break;
- case MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE:
+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
edge = ATMEL_TC_ETRGEDG_BOTH;
break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
return regmap_write_bits(priv->regmap,
@@ -244,10 +237,9 @@ static int mchp_tc_count_action_set(struct counter_device *counter,
}
static int mchp_tc_count_read(struct counter_device *counter,
- struct counter_count *count,
- unsigned long *val)
+ struct counter_count *count, u64 *val)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CV), &cnt);
@@ -268,12 +260,12 @@ static struct counter_count mchp_tc_counts[] = {
};
static const struct counter_ops mchp_tc_ops = {
- .signal_read = mchp_tc_count_signal_read,
- .count_read = mchp_tc_count_read,
- .function_get = mchp_tc_count_function_get,
- .function_set = mchp_tc_count_function_set,
- .action_get = mchp_tc_count_action_get,
- .action_set = mchp_tc_count_action_set
+ .signal_read = mchp_tc_count_signal_read,
+ .count_read = mchp_tc_count_read,
+ .function_read = mchp_tc_count_function_read,
+ .function_write = mchp_tc_count_function_write,
+ .action_read = mchp_tc_count_action_read,
+ .action_write = mchp_tc_count_action_write
};
static const struct atmel_tcb_config tcb_rm9200_config = {
@@ -313,6 +305,7 @@ static int mchp_tc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct atmel_tcb_config *tcb_config;
const struct of_device_id *match;
+ struct counter_device *counter;
struct mchp_tc_data *priv;
char clk_name[7];
struct regmap *regmap;
@@ -320,11 +313,10 @@ static int mchp_tc_probe(struct platform_device *pdev)
int channel;
int ret, i;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
+ priv = counter_priv(counter);
match = of_match_node(atmel_tc_of_match, np->parent);
tcb_config = match->data;
@@ -379,16 +371,19 @@ static int mchp_tc_probe(struct platform_device *pdev)
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &mchp_tc_ops;
- priv->counter.num_counts = ARRAY_SIZE(mchp_tc_counts);
- priv->counter.counts = mchp_tc_counts;
- priv->counter.num_signals = ARRAY_SIZE(mchp_tc_count_signals);
- priv->counter.signals = mchp_tc_count_signals;
- priv->counter.priv = priv;
-
- return devm_counter_register(&pdev->dev, &priv->counter);
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &mchp_tc_ops;
+ counter->num_counts = ARRAY_SIZE(mchp_tc_counts);
+ counter->counts = mchp_tc_counts;
+ counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
+ counter->signals = mchp_tc_count_signals;
+
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static const struct of_device_id mchp_tc_dt_ids[] = {
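
One consequence of the typed enums shows up in every converted switch above: a size_t index was guaranteed by the core to be a valid index into the driver's functions_list or actions_list, whereas enum counter_function and enum counter_synapse_action span the whole subsystem, so each driver grows a default: arm it expects to be unreachable. Minimal sketch of the shape (hypothetical driver):

static int bar_action_write(struct counter_device *counter,
			    struct counter_count *count,
			    struct counter_synapse *synapse,
			    enum counter_synapse_action action)
{
	switch (action) {
	case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
		return 0;	/* program the hardware here */
	default:
		/* unreachable: the core validates against actions_list */
		return -EINVAL;
	}
}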
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index b084e971a493..aee3b1a8aaa7 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -17,9 +17,9 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
struct stm32_lptim_cnt {
- struct counter_device counter;
struct device *dev;
struct regmap *regmap;
struct clk *clk;
@@ -107,11 +107,7 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
}
-/**
- * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes
- * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
- * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
- *
+/*
* In non-quadrature mode, device counts up on active edge.
* In quadrature mode, encoder counting scenarios are as follows:
* +---------+----------+--------------------+--------------------+
@@ -129,35 +125,22 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
* | edges | Low -> | Up | Down | Down | Up |
* +---------+----------+----------+---------+----------+---------+
*/
-enum stm32_lptim_cnt_function {
- STM32_LPTIM_COUNTER_INCREASE,
- STM32_LPTIM_ENCODER_BOTH_EDGE,
-};
-
-static enum counter_count_function stm32_lptim_cnt_functions[] = {
- [STM32_LPTIM_COUNTER_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
- [STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+static const enum counter_function stm32_lptim_cnt_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
-enum stm32_lptim_synapse_action {
- STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE,
- STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE,
- STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES,
- STM32_LPTIM_SYNAPSE_ACTION_NONE,
-};
-
-static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
- /* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */
- [STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- [STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
- [STM32_LPTIM_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_NONE,
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
u32 cnt;
int ret;
@@ -170,74 +153,71 @@ static int stm32_lptim_cnt_read(struct counter_device *counter,
return 0;
}
-static int stm32_lptim_cnt_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int stm32_lptim_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (!priv->quadrature_mode) {
- *function = STM32_LPTIM_COUNTER_INCREASE;
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
}
- if (priv->polarity == STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES) {
- *function = STM32_LPTIM_ENCODER_BOTH_EDGE;
+ if (priv->polarity == STM32_LPTIM_CKPOL_BOTH_EDGES) {
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
return -EINVAL;
}
-static int stm32_lptim_cnt_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int stm32_lptim_cnt_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
switch (function) {
- case STM32_LPTIM_COUNTER_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
priv->quadrature_mode = 0;
return 0;
- case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
priv->quadrature_mode = 1;
- priv->polarity = STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES;
+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
-
- return -EINVAL;
}
-static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_lptim_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ u8 *enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
ret = stm32_lptim_is_enabled(priv);
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%u\n", ret);
+ *enable = ret;
+
+ return 0;
}
-static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_lptim_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ u8 enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
- bool enable;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
- ret = kstrtobool(buf, &enable);
- if (ret)
- return ret;
-
/* Check nobody uses the timer, or already disabled/enabled */
ret = stm32_lptim_is_enabled(priv);
if ((ret < 0) || (!ret && !enable))
@@ -253,121 +233,130 @@ static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
if (ret)
return ret;
- return len;
+ return 0;
}
-static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling);
+ *ceiling = priv->ceiling;
+
+ return 0;
}
-static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
-
if (ceiling > STM32_LPTIM_MAX_ARR)
- return -EINVAL;
+ return -ERANGE;
priv->ceiling = ceiling;
- return len;
+ return 0;
}
-static const struct counter_count_ext stm32_lptim_cnt_ext[] = {
- {
- .name = "enable",
- .read = stm32_lptim_cnt_enable_read,
- .write = stm32_lptim_cnt_enable_write
- },
- {
- .name = "ceiling",
- .read = stm32_lptim_cnt_ceiling_read,
- .write = stm32_lptim_cnt_ceiling_write
- },
+static struct counter_comp stm32_lptim_cnt_ext[] = {
+ COUNTER_COMP_ENABLE(stm32_lptim_cnt_enable_read,
+ stm32_lptim_cnt_enable_write),
+ COUNTER_COMP_CEILING(stm32_lptim_cnt_ceiling_read,
+ stm32_lptim_cnt_ceiling_write),
};
-static int stm32_lptim_cnt_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int stm32_lptim_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
- size_t function;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
+ enum counter_function function;
int err;
- err = stm32_lptim_cnt_function_get(counter, count, &function);
+ err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case STM32_LPTIM_COUNTER_INCREASE:
+ case COUNTER_FUNCTION_INCREASE:
/* LP Timer acts as up-counter on input 1 */
- if (synapse->signal->id == count->synapses[0].signal->id)
- *action = priv->polarity;
- else
- *action = STM32_LPTIM_SYNAPSE_ACTION_NONE;
- return 0;
- case STM32_LPTIM_ENCODER_BOTH_EDGE:
- *action = priv->polarity;
+ if (synapse->signal->id != count->synapses[0].signal->id) {
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
+ }
+
+ switch (priv->polarity) {
+ case STM32_LPTIM_CKPOL_RISING_EDGE:
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ case STM32_LPTIM_CKPOL_FALLING_EDGE:
+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
+ return 0;
+ case STM32_LPTIM_CKPOL_BOTH_EDGES:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
-
- return -EINVAL;
}
-static int stm32_lptim_cnt_action_set(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t action)
+static int stm32_lptim_cnt_action_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
- size_t function;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
+ enum counter_function function;
int err;
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
- err = stm32_lptim_cnt_function_get(counter, count, &function);
+ err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
/* only set polarity when in counter mode (on input 1) */
- if (function == STM32_LPTIM_COUNTER_INCREASE
- && synapse->signal->id == count->synapses[0].signal->id) {
- switch (action) {
- case STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE:
- case STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE:
- case STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES:
- priv->polarity = action;
- return 0;
- }
- }
+ if (function != COUNTER_FUNCTION_INCREASE
+ || synapse->signal->id != count->synapses[0].signal->id)
+ return -EINVAL;
- return -EINVAL;
+ switch (action) {
+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
+ priv->polarity = STM32_LPTIM_CKPOL_RISING_EDGE;
+ return 0;
+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
+ priv->polarity = STM32_LPTIM_CKPOL_FALLING_EDGE;
+ return 0;
+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static const struct counter_ops stm32_lptim_cnt_ops = {
.count_read = stm32_lptim_cnt_read,
- .function_get = stm32_lptim_cnt_function_get,
- .function_set = stm32_lptim_cnt_function_set,
- .action_get = stm32_lptim_cnt_action_get,
- .action_set = stm32_lptim_cnt_action_set,
+ .function_read = stm32_lptim_cnt_function_read,
+ .function_write = stm32_lptim_cnt_function_write,
+ .action_read = stm32_lptim_cnt_action_read,
+ .action_write = stm32_lptim_cnt_action_write,
};
static struct counter_signal stm32_lptim_cnt_signals[] = {
@@ -421,14 +410,17 @@ static struct counter_count stm32_lptim_in1_counts = {
static int stm32_lptim_cnt_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct counter_device *counter;
struct stm32_lptim_cnt *priv;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
priv->dev = &pdev->dev;
priv->regmap = ddata->regmap;
@@ -436,23 +428,26 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev)
priv->ceiling = STM32_LPTIM_MAX_ARR;
/* Initialize Counter device */
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &stm32_lptim_cnt_ops;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &stm32_lptim_cnt_ops;
if (ddata->has_encoder) {
- priv->counter.counts = &stm32_lptim_enc_counts;
- priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
+ counter->counts = &stm32_lptim_enc_counts;
+ counter->num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
} else {
- priv->counter.counts = &stm32_lptim_in1_counts;
- priv->counter.num_signals = 1;
+ counter->counts = &stm32_lptim_in1_counts;
+ counter->num_signals = 1;
}
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_lptim_cnt_signals;
- priv->counter.priv = priv;
+ counter->num_counts = 1;
+ counter->signals = stm32_lptim_cnt_signals;
platform_set_drvdata(pdev, priv);
- return devm_counter_register(&pdev->dev, &priv->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
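
The stm32-lptimer conversion above also shows the string parsing moving out of drivers: kstrtobool()/kstrtouint() disappear because the counter core now parses sysfs input and hands the callbacks typed u8/u64 values, and an out-of-range ceiling becomes -ERANGE rather than -EINVAL. Condensed sketch of the new ceiling shape (hypothetical driver and limit):

struct baz_priv {
	u64 ceiling;
};

#define BAZ_MAX_CEILING 0xffff	/* assumed hardware limit */

static int baz_ceiling_write(struct counter_device *counter,
			     struct counter_count *count, u64 ceiling)
{
	struct baz_priv *priv = counter_priv(counter);

	if (ceiling > BAZ_MAX_CEILING)
		return -ERANGE;	/* out of range, not invalid input */

	priv->ceiling = ceiling;

	return 0;
}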
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 75bc401fdd18..5779ae7c73cf 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
#define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \
@@ -28,7 +29,6 @@ struct stm32_timer_regs {
};
struct stm32_timer_cnt {
- struct counter_device counter;
struct regmap *regmap;
struct clk *clk;
u32 max_arr;
@@ -36,31 +36,17 @@ struct stm32_timer_cnt {
struct stm32_timer_regs bak;
};
-/**
- * enum stm32_count_function - enumerates stm32 timer counter encoder modes
- * @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
- * @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
- * @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
- * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
- */
-enum stm32_count_function {
- STM32_COUNT_SLAVE_MODE_DISABLED,
- STM32_COUNT_ENCODER_MODE_1,
- STM32_COUNT_ENCODER_MODE_2,
- STM32_COUNT_ENCODER_MODE_3,
-};
-
-static enum counter_count_function stm32_count_functions[] = {
- [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
- [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
- [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
- [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+static const enum counter_function stm32_count_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_FUNCTION_QUADRATURE_X4,
};
static int stm32_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
@@ -70,10 +56,9 @@ static int stm32_count_read(struct counter_device *counter,
}
static int stm32_count_write(struct counter_device *counter,
- struct counter_count *count,
- const unsigned long val)
+ struct counter_count *count, const u64 val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 ceiling;
regmap_read(priv->regmap, TIM_ARR, &ceiling);
@@ -83,52 +68,52 @@ static int stm32_count_write(struct counter_device *counter,
return regmap_write(priv->regmap, TIM_CNT, val);
}
-static int stm32_count_function_get(struct counter_device *counter,
- struct counter_count *count,
- size_t *function)
+static int stm32_count_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
switch (smcr & TIM_SMCR_SMS) {
- case 0:
- *function = STM32_COUNT_SLAVE_MODE_DISABLED;
+ case TIM_SMCR_SMS_SLAVE_MODE_DISABLED:
+ *function = COUNTER_FUNCTION_INCREASE;
return 0;
- case 1:
- *function = STM32_COUNT_ENCODER_MODE_1;
+ case TIM_SMCR_SMS_ENCODER_MODE_1:
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A;
return 0;
- case 2:
- *function = STM32_COUNT_ENCODER_MODE_2;
+ case TIM_SMCR_SMS_ENCODER_MODE_2:
+ *function = COUNTER_FUNCTION_QUADRATURE_X2_B;
return 0;
- case 3:
- *function = STM32_COUNT_ENCODER_MODE_3;
+ case TIM_SMCR_SMS_ENCODER_MODE_3:
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
default:
return -EINVAL;
}
}
-static int stm32_count_function_set(struct counter_device *counter,
- struct counter_count *count,
- size_t function)
+static int stm32_count_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1, sms;
switch (function) {
- case STM32_COUNT_SLAVE_MODE_DISABLED:
- sms = 0;
+ case COUNTER_FUNCTION_INCREASE:
+ sms = TIM_SMCR_SMS_SLAVE_MODE_DISABLED;
break;
- case STM32_COUNT_ENCODER_MODE_1:
- sms = 1;
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_1;
break;
- case STM32_COUNT_ENCODER_MODE_2:
- sms = 2;
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_2;
break;
- case STM32_COUNT_ENCODER_MODE_3:
- sms = 3;
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ sms = TIM_SMCR_SMS_ENCODER_MODE_3;
break;
default:
return -EINVAL;
@@ -150,44 +135,37 @@ static int stm32_count_function_set(struct counter_device *counter,
return 0;
}
-static ssize_t stm32_count_direction_read(struct counter_device *counter,
+static int stm32_count_direction_read(struct counter_device *counter,
struct counter_count *count,
- void *private, char *buf)
+ enum counter_count_direction *direction)
{
- struct stm32_timer_cnt *const priv = counter->priv;
- const char *direction;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
- direction = (cr1 & TIM_CR1_DIR) ? "backward" : "forward";
+ *direction = (cr1 & TIM_CR1_DIR) ? COUNTER_COUNT_DIRECTION_BACKWARD :
+ COUNTER_COUNT_DIRECTION_FORWARD;
- return scnprintf(buf, PAGE_SIZE, "%s\n", direction);
+ return 0;
}
-static ssize_t stm32_count_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
- return snprintf(buf, PAGE_SIZE, "%u\n", arr);
+ *ceiling = arr;
+
+ return 0;
}
-static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, u64 ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
- unsigned int ceiling;
- int ret;
-
- ret = kstrtouint(buf, 0, &ceiling);
- if (ret)
- return ret;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
if (ceiling > priv->max_arr)
return -ERANGE;
@@ -196,34 +174,27 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, ceiling);
- return len;
+ return 0;
}
-static ssize_t stm32_count_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *private, char *buf)
+static int stm32_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
- return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)(cr1 & TIM_CR1_CEN));
+ *enable = cr1 & TIM_CR1_CEN;
+
+ return 0;
}
-static ssize_t stm32_count_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *private,
- const char *buf, size_t len)
+static int stm32_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
- int err;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
- bool enable;
-
- err = kstrtobool(buf, &enable);
- if (err)
- return err;
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -242,70 +213,55 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
/* Keep enabled state to properly handle low power states */
priv->enabled = enable;
- return len;
+ return 0;
}
-static const struct counter_count_ext stm32_count_ext[] = {
- {
- .name = "direction",
- .read = stm32_count_direction_read,
- },
- {
- .name = "enable",
- .read = stm32_count_enable_read,
- .write = stm32_count_enable_write
- },
- {
- .name = "ceiling",
- .read = stm32_count_ceiling_read,
- .write = stm32_count_ceiling_write
- },
-};
-
-enum stm32_synapse_action {
- STM32_SYNAPSE_ACTION_NONE,
- STM32_SYNAPSE_ACTION_BOTH_EDGES
+static struct counter_comp stm32_count_ext[] = {
+ COUNTER_COMP_DIRECTION(stm32_count_direction_read),
+ COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write),
+ COUNTER_COMP_CEILING(stm32_count_ceiling_read,
+ stm32_count_ceiling_write),
};
-static enum counter_synapse_action stm32_synapse_actions[] = {
- [STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
- [STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+static const enum counter_synapse_action stm32_synapse_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
-static int stm32_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse,
- size_t *action)
+static int stm32_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- size_t function;
+ enum counter_function function;
int err;
- err = stm32_count_function_get(counter, count, &function);
+ err = stm32_count_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case STM32_COUNT_SLAVE_MODE_DISABLED:
+ case COUNTER_FUNCTION_INCREASE:
/* counts on internal clock when CEN=1 */
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_1:
+ case COUNTER_FUNCTION_QUADRATURE_X2_A:
/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
if (synapse->signal->id == count->synapses[0].signal->id)
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_2:
+ case COUNTER_FUNCTION_QUADRATURE_X2_B:
/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
if (synapse->signal->id == count->synapses[1].signal->id)
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = STM32_SYNAPSE_ACTION_NONE;
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
- case STM32_COUNT_ENCODER_MODE_3:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
/* counts up/down on both TI1FP1 and TI2FP2 edges */
- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
return -EINVAL;
@@ -315,9 +271,9 @@ static int stm32_action_get(struct counter_device *counter,
static const struct counter_ops stm32_timer_cnt_ops = {
.count_read = stm32_count_read,
.count_write = stm32_count_write,
- .function_get = stm32_count_function_get,
- .function_set = stm32_count_function_set,
- .action_get = stm32_action_get,
+ .function_read = stm32_count_function_read,
+ .function_write = stm32_count_function_write,
+ .action_read = stm32_action_read,
};
static struct counter_signal stm32_signals[] = {
@@ -360,31 +316,38 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct stm32_timer_cnt *priv;
+ struct counter_device *counter;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
+
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &stm32_timer_cnt_ops;
- priv->counter.counts = &stm32_counts;
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_signals;
- priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &stm32_timer_cnt_ops;
+ counter->counts = &stm32_counts;
+ counter->num_counts = 1;
+ counter->signals = stm32_signals;
+ counter->num_signals = ARRAY_SIZE(stm32_signals);
platform_set_drvdata(pdev, priv);
/* Register Counter device */
- return devm_counter_register(dev, &priv->counter);
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return ret;
}
static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev)
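
The stm32-timer-cnt conversion above swaps the old string-based sysfs callbacks for the typed counter_comp API, while the sysfs ABI seen by userspace keeps the same "count", "direction", "enable" and "ceiling" attributes. A minimal userspace sketch of reading the count through that ABI, assuming the device enumerates as counter0 with a single count0 (check /sys/bus/counter/devices on the target):

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Path per the documented counter sysfs ABI; indices are assumptions */
	FILE *f = fopen("/sys/bus/counter/devices/counter0/count0/count", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("count: %s", buf);
	fclose(f);
	return 0;
}
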
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
new file mode 100644
index 000000000000..2eb243cac091
--- /dev/null
+++ b/drivers/counter/ti-ecap-capture.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ECAP Capture driver
+ *
+ * Copyright (C) 2022 Julien Panis <jpanis@baylibre.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/counter.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define ECAP_DRV_NAME "ecap"
+
+/* ECAP event IDs */
+#define ECAP_CEVT1 0
+#define ECAP_CEVT2 1
+#define ECAP_CEVT3 2
+#define ECAP_CEVT4 3
+#define ECAP_CNTOVF 4
+
+#define ECAP_CEVT_LAST ECAP_CEVT4
+#define ECAP_NB_CEVT (ECAP_CEVT_LAST + 1)
+
+#define ECAP_EVT_LAST ECAP_CNTOVF
+#define ECAP_NB_EVT (ECAP_EVT_LAST + 1)
+
+/* Registers */
+#define ECAP_TSCNT_REG 0x00
+
+#define ECAP_CAP_REG(i) (((i) << 2) + 0x08)
+
+#define ECAP_ECCTL_REG 0x28
+#define ECAP_CAPPOL_BIT(i) BIT((i) << 1)
+#define ECAP_EV_MODE_MASK GENMASK(7, 0)
+#define ECAP_CAPLDEN_BIT BIT(8)
+#define ECAP_CONT_ONESHT_BIT BIT(16)
+#define ECAP_STOPVALUE_MASK GENMASK(18, 17)
+#define ECAP_TSCNTSTP_BIT BIT(20)
+#define ECAP_SYNCO_DIS_MASK GENMASK(23, 22)
+#define ECAP_CAP_APWM_BIT BIT(25)
+#define ECAP_ECCTL_EN_MASK (ECAP_CAPLDEN_BIT | ECAP_TSCNTSTP_BIT)
+#define ECAP_ECCTL_CFG_MASK (ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK \
+ | ECAP_ECCTL_EN_MASK | ECAP_CAP_APWM_BIT \
+ | ECAP_CONT_ONESHT_BIT)
+
+#define ECAP_ECINT_EN_FLG_REG 0x2c
+#define ECAP_EVT_EN_MASK GENMASK(ECAP_NB_EVT, ECAP_NB_CEVT)
+#define ECAP_EVT_FLG_BIT(i) BIT((i) + 17)
+
+#define ECAP_ECINT_CLR_FRC_REG 0x30
+#define ECAP_INT_CLR_BIT BIT(0)
+#define ECAP_EVT_CLR_BIT(i) BIT((i) + 1)
+#define ECAP_EVT_CLR_MASK GENMASK(ECAP_NB_EVT, 0)
+
+#define ECAP_PID_REG 0x5c
+
+/* ECAP signals */
+#define ECAP_CLOCK_SIG 0
+#define ECAP_INPUT_SIG 1
+
+static const struct regmap_config ecap_cnt_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ECAP_PID_REG,
+};
+
+/**
+ * struct ecap_cnt_dev - device private data structure
+ * @enabled: device state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @clk: device clock
+ * @regmap: device register map
+ * @nb_ovf: number of overflows since capture start
+ * @pm_ctx: device context for PM operations
+ * @pm_ctx.ev_mode: event mode bits
+ * @pm_ctx.time_cntr: timestamp counter value
+ */
+struct ecap_cnt_dev {
+ bool enabled;
+ struct mutex lock;
+ struct clk *clk;
+ struct regmap *regmap;
+ atomic_t nb_ovf;
+ struct {
+ u8 ev_mode;
+ u32 time_cntr;
+ } pm_ctx;
+};
+
+static u8 ecap_cnt_capture_get_evmode(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, ECAP_ECCTL_REG, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_capture_set_evmode(struct counter_device *counter, u8 ev_mode)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_EV_MODE_MASK, ev_mode);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static void ecap_cnt_capture_enable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+
+ /* Enable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG,
+ ECAP_EVT_EN_MASK, ECAP_EVT_EN_MASK);
+
+ /* Run counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_CFG_MASK,
+ ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK | ECAP_ECCTL_EN_MASK);
+}
+
+static void ecap_cnt_capture_disable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ /* Stop counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_EN_MASK, 0);
+
+ /* Disable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, ECAP_EVT_EN_MASK, 0);
+
+ pm_runtime_put_sync(counter->parent);
+}
+
+static u32 ecap_cnt_count_get_val(struct counter_device *counter, unsigned int reg)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, reg, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_count_set_val(struct counter_device *counter, unsigned int reg, u32 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_write(ecap_dev->regmap, reg, val);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static int ecap_cnt_count_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = ecap_cnt_count_get_val(counter, ECAP_TSCNT_REG);
+
+ return 0;
+}
+
+static int ecap_cnt_count_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_TSCNT_REG, val);
+
+ return 0;
+}
+
+static int ecap_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ *function = COUNTER_FUNCTION_INCREASE;
+
+ return 0;
+}
+
+static int ecap_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ *action = (synapse->signal->id == ECAP_CLOCK_SIG) ?
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE :
+ COUNTER_SYNAPSE_ACTION_NONE;
+
+ return 0;
+}
+
+static int ecap_cnt_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel > ECAP_CEVT_LAST)
+ return -EINVAL;
+
+ switch (watch->event) {
+ case COUNTER_EVENT_CAPTURE:
+ case COUNTER_EVENT_OVERFLOW:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ecap_cnt_clk_get_freq(struct counter_device *counter,
+ struct counter_signal *signal, u64 *freq)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *freq = clk_get_rate(ecap_dev->clk);
+
+ return 0;
+}
+
+static int ecap_cnt_pol_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ *pol = regmap_test_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ return 0;
+}
+
+static int ecap_cnt_pol_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ if (pol)
+ regmap_set_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ else
+ regmap_clear_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ return 0;
+}
+
+static int ecap_cnt_cap_read(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *cap)
+{
+ *cap = ecap_cnt_count_get_val(counter, ECAP_CAP_REG(idx));
+
+ return 0;
+}
+
+static int ecap_cnt_cap_write(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 cap)
+{
+ if (cap > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_CAP_REG(idx), cap);
+
+ return 0;
+}
+
+#define ECAP_POL_READ(i) int ecap_cnt_pol##i##_read(struct counter_device *counter, \
+ struct counter_signal *signal, \
+ u32 *pol) \
+{ \
+ return ecap_cnt_pol_read(counter, signal, i, pol); \
+}
+
+#define ECAP_POL_WRITE(i) int ecap_cnt_pol##i##_write(struct counter_device *counter, \
+ struct counter_signal *signal, \
+ u32 pol) \
+{ \
+ return ecap_cnt_pol_write(counter, signal, i, pol); \
+}
+
+#define ECAP_CAP_READ(i) int ecap_cnt_cap##i##_read(struct counter_device *counter, \
+ struct counter_count *count, \
+ u64 *cap) \
+{ \
+ return ecap_cnt_cap_read(counter, count, i, cap); \
+}
+
+#define ECAP_CAP_WRITE(i) int ecap_cnt_cap##i##_write(struct counter_device *counter, \
+ struct counter_count *count, \
+ u64 cap) \
+{ \
+ return ecap_cnt_cap_write(counter, count, i, cap); \
+}
+
+static inline ECAP_POL_READ(0)
+static inline ECAP_POL_READ(1)
+static inline ECAP_POL_READ(2)
+static inline ECAP_POL_READ(3)
+static inline ECAP_POL_WRITE(0)
+static inline ECAP_POL_WRITE(1)
+static inline ECAP_POL_WRITE(2)
+static inline ECAP_POL_WRITE(3)
+static inline ECAP_CAP_READ(0)
+static inline ECAP_CAP_READ(1)
+static inline ECAP_CAP_READ(2)
+static inline ECAP_CAP_READ(3)
+static inline ECAP_CAP_WRITE(0)
+static inline ECAP_CAP_WRITE(1)
+static inline ECAP_CAP_WRITE(2)
+static inline ECAP_CAP_WRITE(3)
+
+static int ecap_cnt_nb_ovf_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *val = atomic_read(&ecap_dev->nb_ovf);
+
+ return 0;
+}
+
+static int ecap_cnt_nb_ovf_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ atomic_set(&ecap_dev->nb_ovf, val);
+
+ return 0;
+}
+
+static int ecap_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = U32_MAX;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *enable = ecap_dev->enabled;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ mutex_lock(&ecap_dev->lock);
+
+ if (enable == ecap_dev->enabled)
+ goto out;
+
+ if (enable)
+ ecap_cnt_capture_enable(counter);
+ else
+ ecap_cnt_capture_disable(counter);
+ ecap_dev->enabled = enable;
+
+out:
+ mutex_unlock(&ecap_dev->lock);
+
+ return 0;
+}
+
+static const struct counter_ops ecap_cnt_ops = {
+ .count_read = ecap_cnt_count_read,
+ .count_write = ecap_cnt_count_write,
+ .function_read = ecap_cnt_function_read,
+ .action_read = ecap_cnt_action_read,
+ .watch_validate = ecap_cnt_watch_validate,
+};
+
+static const enum counter_function ecap_cnt_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+};
+
+static const enum counter_synapse_action ecap_cnt_clock_actions[] = {
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+};
+
+static const enum counter_synapse_action ecap_cnt_input_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_comp ecap_cnt_clock_ext[] = {
+ COUNTER_COMP_SIGNAL_U64("frequency", ecap_cnt_clk_get_freq, NULL),
+};
+
+static const char *const ecap_cnt_pol_names[] = {
+ "positive",
+ "negative",
+};
+
+static DEFINE_COUNTER_ENUM(ecap_cnt_pol_avail, ecap_cnt_pol_names);
+
+static struct counter_comp ecap_cnt_signal_ext[] = {
+ COUNTER_COMP_SIGNAL_ENUM("polarity0", ecap_cnt_pol0_read,
+ ecap_cnt_pol0_write, ecap_cnt_pol_avail),
+ COUNTER_COMP_SIGNAL_ENUM("polarity1", ecap_cnt_pol1_read,
+ ecap_cnt_pol1_write, ecap_cnt_pol_avail),
+ COUNTER_COMP_SIGNAL_ENUM("polarity2", ecap_cnt_pol2_read,
+ ecap_cnt_pol2_write, ecap_cnt_pol_avail),
+ COUNTER_COMP_SIGNAL_ENUM("polarity3", ecap_cnt_pol3_read,
+ ecap_cnt_pol3_write, ecap_cnt_pol_avail),
+};
+
+static struct counter_signal ecap_cnt_signals[] = {
+ {
+ .id = ECAP_CLOCK_SIG,
+ .name = "Clock Signal",
+ .ext = ecap_cnt_clock_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_clock_ext),
+ },
+ {
+ .id = ECAP_INPUT_SIG,
+ .name = "Input Signal",
+ .ext = ecap_cnt_signal_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_signal_ext),
+ },
+};
+
+static struct counter_synapse ecap_cnt_synapses[] = {
+ {
+ .actions_list = ecap_cnt_clock_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_clock_actions),
+ .signal = &ecap_cnt_signals[ECAP_CLOCK_SIG],
+ },
+ {
+ .actions_list = ecap_cnt_input_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_input_actions),
+ .signal = &ecap_cnt_signals[ECAP_INPUT_SIG],
+ },
+};
+
+static struct counter_comp ecap_cnt_count_ext[] = {
+ COUNTER_COMP_COUNT_U64("capture0", ecap_cnt_cap0_read, ecap_cnt_cap0_write),
+ COUNTER_COMP_COUNT_U64("capture1", ecap_cnt_cap1_read, ecap_cnt_cap1_write),
+ COUNTER_COMP_COUNT_U64("capture2", ecap_cnt_cap2_read, ecap_cnt_cap2_write),
+ COUNTER_COMP_COUNT_U64("capture3", ecap_cnt_cap3_read, ecap_cnt_cap3_write),
+ COUNTER_COMP_COUNT_U64("num_overflows", ecap_cnt_nb_ovf_read, ecap_cnt_nb_ovf_write),
+ COUNTER_COMP_CEILING(ecap_cnt_ceiling_read, NULL),
+ COUNTER_COMP_ENABLE(ecap_cnt_enable_read, ecap_cnt_enable_write),
+};
+
+static struct counter_count ecap_cnt_counts[] = {
+ {
+ .name = "Timestamp Counter",
+ .functions_list = ecap_cnt_functions,
+ .num_functions = ARRAY_SIZE(ecap_cnt_functions),
+ .synapses = ecap_cnt_synapses,
+ .num_synapses = ARRAY_SIZE(ecap_cnt_synapses),
+ .ext = ecap_cnt_count_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_count_ext),
+ },
+};
+
+static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
+{
+ struct counter_device *counter_dev = dev_id;
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+ unsigned int clr = 0;
+ unsigned int flg;
+ int i;
+
+ regmap_read(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, &flg);
+
+ /* Check capture events */
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++) {
+ if (flg & ECAP_EVT_FLG_BIT(i)) {
+ counter_push_event(counter_dev, COUNTER_EVENT_CAPTURE, i);
+ clr |= ECAP_EVT_CLR_BIT(i);
+ }
+ }
+
+ /* Check counter overflow */
+ if (flg & ECAP_EVT_FLG_BIT(ECAP_CNTOVF)) {
+ atomic_inc(&ecap_dev->nb_ovf);
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++)
+ counter_push_event(counter_dev, COUNTER_EVENT_OVERFLOW, i);
+ clr |= ECAP_EVT_CLR_BIT(ECAP_CNTOVF);
+ }
+
+ clr |= ECAP_INT_CLR_BIT;
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_CLR_FRC_REG, ECAP_EVT_CLR_MASK, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void ecap_cnt_clk_disable(void *clk)
+{
+ clk_disable_unprepare(clk);
+}
+
+static void ecap_cnt_pm_disable(void *dev)
+{
+ pm_runtime_disable(dev);
+}
+
+static int ecap_cnt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ecap_cnt_dev *ecap_dev;
+ struct counter_device *counter_dev;
+ void __iomem *mmio_base;
+ unsigned long clk_rate;
+ int ret;
+
+ counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
+ if (!counter_dev)
+ return -ENOMEM;
+
+ counter_dev->name = ECAP_DRV_NAME;
+ counter_dev->parent = dev;
+ counter_dev->ops = &ecap_cnt_ops;
+ counter_dev->signals = ecap_cnt_signals;
+ counter_dev->num_signals = ARRAY_SIZE(ecap_cnt_signals);
+ counter_dev->counts = ecap_cnt_counts;
+ counter_dev->num_counts = ARRAY_SIZE(ecap_cnt_counts);
+
+ ecap_dev = counter_priv(counter_dev);
+
+ mutex_init(&ecap_dev->lock);
+
+ ecap_dev->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(ecap_dev->clk))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->clk), "failed to get clock\n");
+
+ ret = clk_prepare_enable(ecap_dev->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clock\n");
+
+ /* Register a cleanup callback to care for disabling clock */
+ ret = devm_add_action_or_reset(dev, ecap_cnt_clk_disable, ecap_dev->clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add clk disable action\n");
+
+ clk_rate = clk_get_rate(ecap_dev->clk);
+ if (!clk_rate) {
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
+
+ ecap_dev->regmap = devm_regmap_init_mmio(dev, mmio_base, &ecap_cnt_regmap_config);
+ if (IS_ERR(ecap_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->regmap), "failed to init regmap\n");
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get irq\n");
+
+ ret = devm_request_irq(dev, ret, ecap_cnt_isr, 0, pdev->name, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+
+ platform_set_drvdata(pdev, counter_dev);
+
+ pm_runtime_enable(dev);
+
+ /* Register a cleanup callback to care for disabling PM */
+ ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+
+ ret = devm_counter_add(dev, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add counter\n");
+
+ return 0;
+}
+
+static int ecap_cnt_remove(struct platform_device *pdev)
+{
+ struct counter_device *counter_dev = platform_get_drvdata(pdev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ if (ecap_dev->enabled)
+ ecap_cnt_capture_disable(counter_dev);
+
+ return 0;
+}
+
+static __maybe_unused int ecap_cnt_suspend(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ /* If eCAP is running, stop capture then save timestamp counter */
+ if (ecap_dev->enabled) {
+ /*
+ * Disabling capture has the following effects:
+ * - interrupts are disabled
+ * - loading of capture registers is disabled
+ * - timebase counter is stopped
+ */
+ ecap_cnt_capture_disable(counter_dev);
+ ecap_dev->pm_ctx.time_cntr = ecap_cnt_count_get_val(counter_dev, ECAP_TSCNT_REG);
+ }
+
+ ecap_dev->pm_ctx.ev_mode = ecap_cnt_capture_get_evmode(counter_dev);
+
+ clk_disable(ecap_dev->clk);
+
+ return 0;
+}
+
+static __maybe_unused int ecap_cnt_resume(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ clk_enable(ecap_dev->clk);
+
+ ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
+
+ /* If eCAP was running, restore timestamp counter then run capture */
+ if (ecap_dev->enabled) {
+ ecap_cnt_count_set_val(counter_dev, ECAP_TSCNT_REG, ecap_dev->pm_ctx.time_cntr);
+ ecap_cnt_capture_enable(counter_dev);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ecap_cnt_pm_ops, ecap_cnt_suspend, ecap_cnt_resume);
+
+static const struct of_device_id ecap_cnt_of_match[] = {
+ { .compatible = "ti,am62-ecap-capture" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
+
+static struct platform_driver ecap_cnt_driver = {
+ .probe = ecap_cnt_probe,
+ .remove = ecap_cnt_remove,
+ .driver = {
+ .name = "ecap-capture",
+ .of_match_table = ecap_cnt_of_match,
+ .pm = &ecap_cnt_pm_ops,
+ },
+};
+module_platform_driver(ecap_cnt_driver);
+
+MODULE_DESCRIPTION("ECAP Capture driver");
+MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_LICENSE("GPL");
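
Capture and overflow events pushed by ecap_cnt_isr() above are consumed through the counter character device. A hedged userspace sketch, assuming the device node is /dev/counter0 and watching capture channel 0 (CEVT1) on Count 0; the watch and event structures come from the counter uapi header:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/counter.h>

int main(void)
{
	struct counter_watch watch = {
		.component.type = COUNTER_COMPONENT_COUNT,
		.component.scope = COUNTER_SCOPE_COUNT,
		.component.parent = 0,		/* Timestamp Counter */
		.event = COUNTER_EVENT_CAPTURE,
		.channel = 0,			/* CEVT1 */
	};
	struct counter_event event;
	int fd = open("/dev/counter0", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
		return 1;

	/* Each read returns one whole counter_event per capture */
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		printf("capture: count=%llu timestamp=%llu\n",
		       (unsigned long long)event.value,
		       (unsigned long long)event.timestamp);
	return 0;
}
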
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 65df9ef5b5bc..0489d26eb47c 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/types.h>
/* 32-bit registers */
#define QPOSCNT 0x0
@@ -73,29 +74,28 @@ enum {
};
/* Position Counter Input Modes */
-enum {
+enum ti_eqep_count_func {
TI_EQEP_COUNT_FUNC_QUAD_COUNT,
TI_EQEP_COUNT_FUNC_DIR_COUNT,
TI_EQEP_COUNT_FUNC_UP_COUNT,
TI_EQEP_COUNT_FUNC_DOWN_COUNT,
};
-enum {
- TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES,
- TI_EQEP_SYNAPSE_ACTION_RISING_EDGE,
- TI_EQEP_SYNAPSE_ACTION_NONE,
-};
-
struct ti_eqep_cnt {
struct counter_device counter;
struct regmap *regmap32;
struct regmap *regmap16;
};
+static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter)
+{
+ return counter_priv(counter);
+}
+
static int ti_eqep_count_read(struct counter_device *counter,
- struct counter_count *count, unsigned long *val)
+ struct counter_count *count, u64 *val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 cnt;
regmap_read(priv->regmap32, QPOSCNT, &cnt);
@@ -105,9 +105,9 @@ static int ti_eqep_count_read(struct counter_device *counter,
}
static int ti_eqep_count_write(struct counter_device *counter,
- struct counter_count *count, unsigned long val)
+ struct counter_count *count, u64 val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 max;
regmap_read(priv->regmap32, QPOSMAX, &max);
@@ -117,62 +117,100 @@ static int ti_eqep_count_write(struct counter_device *counter,
return regmap_write(priv->regmap32, QPOSCNT, val);
}
-static int ti_eqep_function_get(struct counter_device *counter,
- struct counter_count *count, size_t *function)
+static int ti_eqep_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
- *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
+
+ switch ((qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT) {
+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ *function = COUNTER_FUNCTION_QUADRATURE_X4;
+ break;
+ case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ *function = COUNTER_FUNCTION_PULSE_DIRECTION;
+ break;
+ case TI_EQEP_COUNT_FUNC_UP_COUNT:
+ *function = COUNTER_FUNCTION_INCREASE;
+ break;
+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ *function = COUNTER_FUNCTION_DECREASE;
+ break;
+ }
return 0;
}
-static int ti_eqep_function_set(struct counter_device *counter,
- struct counter_count *count, size_t function)
+static int ti_eqep_function_write(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ enum ti_eqep_count_func qsrc;
+
+ switch (function) {
+ case COUNTER_FUNCTION_QUADRATURE_X4:
+ qsrc = TI_EQEP_COUNT_FUNC_QUAD_COUNT;
+ break;
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
+ qsrc = TI_EQEP_COUNT_FUNC_DIR_COUNT;
+ break;
+ case COUNTER_FUNCTION_INCREASE:
+ qsrc = TI_EQEP_COUNT_FUNC_UP_COUNT;
+ break;
+ case COUNTER_FUNCTION_DECREASE:
+ qsrc = TI_EQEP_COUNT_FUNC_DOWN_COUNT;
+ break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
+ }
return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
- function << QDECCTL_QSRC_SHIFT);
+ qsrc << QDECCTL_QSRC_SHIFT);
}
-static int ti_eqep_action_get(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action)
+static int ti_eqep_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
{
- struct ti_eqep_cnt *priv = counter->priv;
- size_t function;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
+ enum counter_function function;
u32 qdecctl;
int err;
- err = ti_eqep_function_get(counter, count, &function);
+ err = ti_eqep_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
- case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ case COUNTER_FUNCTION_QUADRATURE_X4:
/* In quadrature mode, the rising and falling edge of both
* QEPA and QEPB trigger QCLK.
*/
- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
- break;
- case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ case COUNTER_FUNCTION_PULSE_DIRECTION:
/* In direction-count mode only rising edge of QEPA is counted
* and QEPB gives direction.
*/
switch (synapse->signal->id) {
case TI_EQEP_SIGNAL_QEPA:
- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
- break;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ case TI_EQEP_SIGNAL_QEPB:
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
default:
- *action = TI_EQEP_SYNAPSE_ACTION_NONE;
- break;
+ /* should never reach this path */
+ return -EINVAL;
}
- break;
- case TI_EQEP_COUNT_FUNC_UP_COUNT:
- case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ case COUNTER_FUNCTION_INCREASE:
+ case COUNTER_FUNCTION_DECREASE:
/* In up/down-count modes only QEPA is counted and QEPB is not
* used.
*/
@@ -183,99 +221,87 @@ static int ti_eqep_action_get(struct counter_device *counter,
return err;
if (qdecctl & QDECCTL_XCR)
- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
- break;
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ return 0;
+ case TI_EQEP_SIGNAL_QEPB:
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
default:
- *action = TI_EQEP_SYNAPSE_ACTION_NONE;
- break;
+ /* should never reach this path */
+ return -EINVAL;
}
- break;
+ default:
+ /* should never reach this path */
+ return -EINVAL;
}
-
- return 0;
}
static const struct counter_ops ti_eqep_counter_ops = {
.count_read = ti_eqep_count_read,
.count_write = ti_eqep_count_write,
- .function_get = ti_eqep_function_get,
- .function_set = ti_eqep_function_set,
- .action_get = ti_eqep_action_get,
+ .function_read = ti_eqep_function_read,
+ .function_write = ti_eqep_function_write,
+ .action_read = ti_eqep_action_read,
};
-static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
+static int ti_eqep_position_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ u64 *ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
- return sprintf(buf, "%u\n", qposmax);
+ *ceiling = qposmax;
+
+ return 0;
}
-static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
+static int ti_eqep_position_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ u64 ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
+ if (ceiling != (u32)ceiling)
+ return -ERANGE;
- regmap_write(priv->regmap32, QPOSMAX, res);
+ regmap_write(priv->regmap32, QPOSMAX, ceiling);
- return len;
+ return 0;
}
-static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
+static int ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
- return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN));
+ *enable = !!(qepctl & QEPCTL_PHEN);
+
+ return 0;
}
-static ssize_t ti_eqep_position_enable_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
+static int ti_eqep_position_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- bool res;
-
- err = kstrtobool(buf, &res);
- if (err < 0)
- return err;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
- regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0);
+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
- return len;
+ return 0;
}
-static struct counter_count_ext ti_eqep_position_ext[] = {
- {
- .name = "ceiling",
- .read = ti_eqep_position_ceiling_read,
- .write = ti_eqep_position_ceiling_write,
- },
- {
- .name = "enable",
- .read = ti_eqep_position_enable_read,
- .write = ti_eqep_position_enable_write,
- },
+static struct counter_comp ti_eqep_position_ext[] = {
+ COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
+ ti_eqep_position_ceiling_write),
+ COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
+ ti_eqep_position_enable_write),
};
static struct counter_signal ti_eqep_signals[] = {
@@ -289,17 +315,17 @@ static struct counter_signal ti_eqep_signals[] = {
},
};
-static const enum counter_count_function ti_eqep_position_functions[] = {
- [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
- [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
- [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_COUNT_FUNCTION_INCREASE,
- [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_COUNT_FUNCTION_DECREASE,
+static const enum counter_function ti_eqep_position_functions[] = {
+ COUNTER_FUNCTION_QUADRATURE_X4,
+ COUNTER_FUNCTION_PULSE_DIRECTION,
+ COUNTER_FUNCTION_INCREASE,
+ COUNTER_FUNCTION_DECREASE,
};
static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
- [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
- [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_NONE,
};
static struct counter_synapse ti_eqep_position_synapses[] = {
@@ -347,13 +373,15 @@ static const struct regmap_config ti_eqep_regmap16_config = {
static int ti_eqep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -369,16 +397,15 @@ static int ti_eqep_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap16))
return PTR_ERR(priv->regmap16);
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &ti_eqep_counter_ops;
- priv->counter.counts = ti_eqep_counts;
- priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
- priv->counter.signals = ti_eqep_signals;
- priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &ti_eqep_counter_ops;
+ counter->counts = ti_eqep_counts;
+ counter->num_counts = ARRAY_SIZE(ti_eqep_counts);
+ counter->signals = ti_eqep_signals;
+ counter->num_signals = ARRAY_SIZE(ti_eqep_signals);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, counter);
/*
* Need to make sure power is turned on. On AM33xx, this comes from the
@@ -388,7 +415,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = counter_register(&priv->counter);
+ err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -400,10 +427,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
static int ti_eqep_remove(struct platform_device *pdev)
{
- struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct counter_device *counter = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- counter_unregister(&priv->counter);
+ counter_unregister(counter);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
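
The ti-eqep ceiling write above guards the u64-to-u32 narrowing with "ceiling != (u32)ceiling", while ti-ecap-capture compares against U32_MAX; the two guards are equivalent. An illustrative standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* ti-ecap-capture style: explicit comparison against the type maximum */
static bool fits_u32_cmp(uint64_t val)
{
	return val <= UINT32_MAX;
}

/* ti-eqep style: round-trip through the narrower type */
static bool fits_u32_cast(uint64_t val)
{
	return val == (uint32_t)val;
}
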
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1f73fa75b1a0..65830ebe5285 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -323,8 +323,8 @@ config ARM_TEGRA194_CPUFREQ
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS
- default ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ default y
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index aea285651fba..9c327d14e724 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -149,6 +149,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
+ { .compatible = "ti,am625", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,apq8064", },
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 8f9fdd864391..48279bb00c18 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -39,6 +39,14 @@
#define OMAP34xx_ProdID_SKUID 0x4830A20C
#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+#define AM625_EFUSE_K_MPU_OPP 11
+#define AM625_EFUSE_S_MPU_OPP 19
+#define AM625_EFUSE_T_MPU_OPP 20
+
+#define AM625_SUPPORT_K_MPU_OPP BIT(0)
+#define AM625_SUPPORT_S_MPU_OPP BIT(1)
+#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -105,6 +113,25 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
+static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM625_SUPPORT_K_MPU_OPP;
+
+ switch (efuse) {
+ case AM625_EFUSE_T_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_T_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_S_MPU_OPP;
+ fallthrough;
+ case AM625_EFUSE_K_MPU_OPP:
+ calculated_efuse |= AM625_SUPPORT_K_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -199,6 +226,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.multi_regulator = false,
};
+static struct ti_cpufreq_soc_data am625_soc_data = {
+ .efuse_xlate = am625_efuse_xlate,
+ .efuse_offset = 0x0018,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .rev_offset = 0x0014,
+ .multi_regulator = false,
+};
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
@@ -302,6 +337,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ { .compatible = "ti,am625", .data = &am625_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
@@ -373,9 +409,10 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
version, VERSION_COUNT);
if (IS_ERR(ti_opp_table)) {
- dev_err(opp_data->cpu_dev,
- "Failed to set supported hardware\n");
ret = PTR_ERR(ti_opp_table);
+ if (ret != -EPROBE_DEFER)
+ dev_err(opp_data->cpu_dev,
+ "Failed to set supported hardware %d\n", ret);
goto fail_put_node;
}
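
The am625_efuse_xlate() fallthrough cascade above encodes that each higher AM625 speed grade implies every grade below it, and that unknown fuse values degrade to the base K OPP via the initializer. A standalone sketch of the mapping, with the bit values inlined from the definitions above:

#include <assert.h>

#define SUPPORT_K	0x1	/* AM625_SUPPORT_K_MPU_OPP */
#define SUPPORT_S	0x2	/* AM625_SUPPORT_S_MPU_OPP */
#define SUPPORT_T	0x4	/* AM625_SUPPORT_T_MPU_OPP */

static unsigned long xlate(unsigned long efuse)
{
	unsigned long v = SUPPORT_K;	/* base grade is always supported */

	switch (efuse) {
	case 20: v |= SUPPORT_T;	/* fall through */
	case 19: v |= SUPPORT_S;	/* fall through */
	case 11: v |= SUPPORT_K;
	}
	return v;
}

int main(void)
{
	assert(xlate(11) == 0x1);	/* K only */
	assert(xlate(19) == 0x3);	/* K and S */
	assert(xlate(20) == 0x7);	/* K, S and T */
	assert(xlate(0)  == 0x1);	/* unknown fuse: base OPP only */
	return 0;
}
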
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0a3dd0793f30..d96a79ab1e95 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -897,6 +897,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
+ select CRYPTO_DES
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -907,4 +908,6 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
+source "drivers/crypto/ti/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53fc115cf459..7abdc8edee33 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_ARCH_K3) += ti/
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 7fdf38e07adf..dc162771496f 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -736,7 +736,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -753,7 +753,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -771,7 +771,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -788,7 +788,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
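
The cra_priority bumps above (100 to 300) make the crypto API resolve "ecb(des)", "cbc(des)" and the des3_ede variants to the OMAP engine ahead of the generic software implementation, which registers at priority 100; allocation by name picks the highest-priority match. A minimal kernel-side sketch, illustrative rather than part of this patch set, that reports which driver won:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/printk.h>

static int report_cbc_des_driver(void)
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(des)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Expected to print "cbc-des-omap" once this change is applied */
	pr_info("cbc(des) driver: %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

	crypto_free_skcipher(tfm);
	return 0;
}
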
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f15fc1fb3707..b3e1337b4922 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -66,8 +66,23 @@
/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64
-#define PRIV_ID 0x1
-#define PRIV 0x1
+enum sa_algo_id {
+ SA_ALG_CBC_AES = 0,
+ SA_ALG_EBC_AES,
+ SA_ALG_CBC_DES3,
+ SA_ALG_ECB_DES3,
+ SA_ALG_SHA1,
+ SA_ALG_SHA256,
+ SA_ALG_SHA512,
+ SA_ALG_AUTHENC_SHA1_AES,
+ SA_ALG_AUTHENC_SHA256_AES,
+};
+
+struct sa_match_data {
+ u8 priv;
+ u8 priv_id;
+ u32 supported_algos;
+};
static struct device *sa_k3_dev;
@@ -691,8 +706,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
}
static
-int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
- u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
+ const u8 *enc_key, u16 enc_key_sz,
+ const u8 *auth_key, u16 auth_key_sz,
struct algo_data *ad, u8 enc, u32 *swinfo)
{
int enc_sc_offset = 0;
@@ -727,8 +743,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
memcpy(&sc_buf[2], &sc_id, 2);
sc_buf[4] = 0x0;
- sc_buf[5] = PRIV_ID;
- sc_buf[6] = PRIV;
+ sc_buf[5] = match_data->priv_id;
+ sc_buf[6] = match_data->priv;
sc_buf[7] = 0x0;
/* Prepare context for encryption engine */
@@ -884,8 +900,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
return ret;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 1, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -897,8 +913,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
- &ctx->dec.epib[1]))
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 0, &ctx->dec.epib[1]))
goto badkey;
cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1098,7 +1114,7 @@ static int sa_run(struct sa_req *req)
else
dma_rx = pdata->dma_rx1;
- ddev = dma_rx->device->dev;
+ ddev = dmaengine_get_dma_device(pdata->dma_tx);
rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1275,6 +1291,7 @@ static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
struct crypto_alg *alg = req->base.tfm->__crt_alg;
struct sa_req sa_req = { 0 };
int ret;
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
if (!req->cryptlen)
return 0;
@@ -1283,7 +1300,8 @@ static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
return -EINVAL;
/* Use SW fallback if the data size is not supported */
- if (req->cryptlen > SA_MAX_DATA_SZ ||
+ if (req->cryptlen <= data->fallback_sz ||
+ req->cryptlen > SA_MAX_DATA_SZ ||
(req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
@@ -1382,13 +1400,15 @@ static int sa_sha_run(struct ahash_request *req)
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
struct sa_req sa_req = { 0 };
size_t auth_len;
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
auth_len = req->nbytes;
if (!auth_len)
return zero_message_process(req);
- if (auth_len > SA_MAX_DATA_SZ ||
+ if (auth_len <= data->fallback_sz ||
+ auth_len > SA_MAX_DATA_SZ ||
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
@@ -1445,9 +1465,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
cfg.akey = NULL;
cfg.akey_len = 0;
+ ctx->dev_data = dev_get_drvdata(sa_k3_dev);
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
+ ad, 0, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -1715,6 +1736,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
int ret;
memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->shash)) {
@@ -1816,8 +1838,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
cfg.akey_len = keys.authkeylen;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 1, &ctx->enc.epib[1]))
return -EINVAL;
@@ -1830,8 +1852,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 0, &ctx->dec.epib[1]))
return -EINVAL;
@@ -1892,6 +1914,7 @@ static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
struct sa_req sa_req = { 0 };
size_t auth_size, enc_size;
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
enc_size = req->cryptlen;
auth_size = req->assoclen + req->cryptlen;
@@ -1901,7 +1924,7 @@ static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
auth_size -= crypto_aead_authsize(tfm);
}
- if (auth_size > SA_MAX_DATA_SZ ||
+ if (auth_size <= data->fallback_sz || auth_size > SA_MAX_DATA_SZ ||
(auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
struct aead_request *subreq = aead_request_ctx(req);
@@ -1949,7 +1972,7 @@ static int sa_aead_decrypt(struct aead_request *req)
}
static struct sa_alg_tmpl sa_algs[] = {
- {
+ [SA_ALG_CBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
@@ -1972,7 +1995,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_EBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
@@ -1994,7 +2017,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_CBC_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
@@ -2017,7 +2040,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_ECB_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
@@ -2039,7 +2062,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_SHA1] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2068,7 +2091,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA256] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2097,7 +2120,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA512] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2126,7 +2149,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_AUTHENC_SHA1_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2153,7 +2176,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_aead_decrypt,
},
},
- {
+ [SA_ALG_AUTHENC_SHA256_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2184,13 +2207,19 @@ static struct sa_alg_tmpl sa_algs[] = {
};
/* Register the algorithms in crypto framework */
-static void sa_register_algos(const struct device *dev)
+static void sa_register_algos(struct sa_crypto_data *dev_data)
{
+ const struct sa_match_data *match_data = dev_data->match_data;
+ struct device *dev = dev_data->dev;
char *alg_name;
u32 type;
int i, err;
for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ /* Skip unsupported algos */
+ if (!(match_data->supported_algos & BIT(i)))
+ continue;
+
type = sa_algs[i].type;
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2331,23 +2360,91 @@ static int sa_link_child(struct device *dev, void *data)
return 0;
}
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sa_crypto_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", data->fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct sa_crypto_data *data = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ data->fallback_sz = value;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *sa_ul_attrs[] = {
+ &dev_attr_fallback.attr,
+ NULL,
+};
+
+static struct attribute_group sa_ul_attr_group = {
+ .attrs = sa_ul_attrs,
+};
+
+static struct sa_match_data am654_match_data = {
+ .priv = 1,
+ .priv_id = 1,
+ .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+ .priv = 0,
+ .priv_id = 0,
+ .supported_algos = BIT(SA_ALG_CBC_AES) |
+ BIT(SA_ALG_EBC_AES) |
+ BIT(SA_ALG_SHA256) |
+ BIT(SA_ALG_SHA512) |
+ BIT(SA_ALG_AUTHENC_SHA256_AES),
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+ { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
static int sa_ul_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
- u32 val;
+ u32 status, val;
int ret;
dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
+ dev_data->match_data = of_device_get_match_data(dev);
+ if (!dev_data->match_data)
+ return -ENODEV;
+
+ saul_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(saul_base))
+ return PTR_ERR(saul_base);
+
sa_k3_dev = dev;
dev_data->dev = dev;
dev_data->pdev = pdev;
+ dev_data->base = saul_base;
platform_set_drvdata(pdev, dev_data);
dev_set_drvdata(sa_k3_dev, dev_data);
@@ -2366,26 +2463,34 @@ static int sa_ul_probe(struct platform_device *pdev)
goto destroy_dma_pool;
spin_lock_init(&dev_data->scid_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- saul_base = devm_ioremap_resource(dev, res);
- dev_data->base = saul_base;
val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
- SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
- SA_EEC_TRNG_EN;
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
+ status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
+	/* Enable engines only when some requested engines are still disabled */
+ if (val & ~status)
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
- writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ sa_register_algos(dev_data);
- sa_register_algos(dev);
+ ret = sysfs_create_group(&dev->kobj, &sa_ul_attr_group);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs attrs.\n");
+ goto release_dma;
+ }
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (ret)
- goto release_dma;
+ goto remove_sysfs;
device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
return 0;
+remove_sysfs:
+ sysfs_remove_group(&pdev->dev.kobj, &sa_ul_attr_group);
+
release_dma:
sa_unregister_algos(&pdev->dev);
@@ -2406,6 +2511,10 @@ static int sa_ul_remove(struct platform_device *pdev)
{
struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+ of_platform_depopulate(&pdev->dev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &sa_ul_attr_group);
+
sa_unregister_algos(&pdev->dev);
dma_release_channel(dev_data->dma_rx2);
@@ -2422,13 +2531,6 @@ static int sa_ul_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id of_match[] = {
- {.compatible = "ti,j721e-sa2ul",},
- {.compatible = "ti,am654-sa2ul",},
- {},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
.remove = sa_ul_remove,
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index 7f7e3fe60d11..0f0bae0971da 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -17,6 +17,7 @@
#include <linux/hw_random.h>
#include <crypto/aes.h>
+#define SA_ENGINE_STATUS 0x0008
#define SA_ENGINE_ENABLE_CONTROL 0x1000
struct sa_tfm_ctx;
@@ -170,11 +171,14 @@ struct sa_tfm_ctx;
* the following range, so avoid using it.
*/
#define SA_UNSAFE_DATA_SZ_MIN 240
-#define SA_UNSAFE_DATA_SZ_MAX 256
+#define SA_UNSAFE_DATA_SZ_MAX 255
+
+struct sa_match_data;
/**
* struct sa_crypto_data - Crypto driver instance data
* @base: Base address of the register space
+ * @match_data: Pointer to SoC-specific match data
* @pdev: Platform device pointer
* @sc_pool: security context pool
* @dev: Device pointer
@@ -187,9 +191,11 @@ struct sa_tfm_ctx;
* @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes
* @dma_rx2: Pointer to DMA rx channel for sizes > 256 Bytes
* @dma_tx: Pointer to DMA TX channel
+ * @fallback_sz: SW fallback limit for crypto algorithms
*/
struct sa_crypto_data {
void __iomem *base;
+ const struct sa_match_data *match_data;
struct platform_device *pdev;
struct dma_pool *sc_pool;
struct device *dev;
@@ -204,6 +210,7 @@ struct sa_crypto_data {
struct dma_chan *dma_rx1;
struct dma_chan *dma_rx2;
struct dma_chan *dma_tx;
+ int fallback_sz;
};
/**
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
new file mode 100644
index 000000000000..7be6b19269a5
--- /dev/null
+++ b/drivers/crypto/ti/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TI_MCRC64
+ tristate "Support for TI MCRC64 crc accelerators"
+ depends on ARCH_K3
+ select CRYPTO_HASH
+ help
+	  This enables support for the MCRC hardware accelerator found on
+	  TI K3 SoCs.
diff --git a/drivers/crypto/ti/Makefile b/drivers/crypto/ti/Makefile
new file mode 100644
index 000000000000..09013ba569e0
--- /dev/null
+++ b/drivers/crypto/ti/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_TI_MCRC64) += mcrc.o
diff --git a/drivers/crypto/ti/mcrc.c b/drivers/crypto/ti/mcrc.c
new file mode 100644
index 000000000000..4cc82779480c
--- /dev/null
+++ b/drivers/crypto/ti/mcrc.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) Texas Instruments 2023 - http://www.ti.com
+ * Author: Kamlesh Gurudasani <kamlesh@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "ti-mcrc"
+#define CHKSUM_DIGEST_SIZE 8
+#define CHKSUM_BLOCK_SIZE 1
+
+/* Registers */
+#define CRC_CTRL0 0x0000 /* CRC Global Control Register 0 */
+#define CH_PSA_SWRE(ch) BIT(((ch) - 1) << 3) /* PSA Software Reset */
+
+#define CRC_CTRL1 0x0008 /* CRC Global Control Register 1 */
+#define PWDN BIT(0) /* Power Down */
+
+#define CRC_CTRL2 0x0010 /* CRC Global Control Register 2 */
+#define CH_MODE(ch, m) ((m) << (((ch) - 1) << 3))
+
+#define PSA_SIGREGL(ch) ((0x6 + (4 * ((ch) - 1))) << 4) /* Signature register */
+
+#define MCRC_AUTOSUSPEND_DELAY 50
+
+static struct device *mcrc_k3_dev;
+
+static unsigned int burst_size;
+
+module_param(burst_size, uint, 0644);
+MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
+
+enum mcrc_mode {
+ MCRC_MODE_DATA_CAPTURE = 0,
+ MCRC_MODE_AUTO,
+ MCRC_MODE_SEMI_CPU,
+ MCRC_MODE_FULL_CPU,
+ MCRC_MODE_INVALID,
+};
+
+enum mcrc_pattern {
+ MCRC_PATTERN_8BIT = 0,
+ MCRC_PATTERN_16BIT,
+ MCRC_PATTERN_32BIT,
+ MCRC_PATTERN_64BIT,
+ MCRC_PATTERN_INVALID,
+};
+
+enum mcrc_channel {
+ MCRC_CHANNEL_1 = 1,
+ MCRC_CHANNEL_2,
+ MCRC_CHANNEL_3,
+ MCRC_CHANNEL_4,
+ MCRC_CHANNEL_INVALID,
+};
+
+struct mcrc_data {
+ struct device *dev;
+ void __iomem *regs;
+};
+
+struct mcrc_ctx {
+ u32 key;
+};
+
+struct mcrc_desc_ctx {
+ u64 signature;
+};
+
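+/*
+ * The helpers below drive the engine in full-CPU mode: the CPU writes
+ * data words straight into the PSA signature register and reads the
+ * running CRC back, so no DMA channels or interrupts are involved.
+ */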
+static int mcrc_set_mode(void __iomem *regs, u32 channel, u32 mode)
+{
+ u32 crc_ctrl2_reg, mode_set_val;
+
+	/* mode is unsigned, so only the upper bound needs checking */
+	if (mode >= MCRC_MODE_INVALID)
+ return -EINVAL;
+
+	if (channel < MCRC_CHANNEL_1 || channel >= MCRC_CHANNEL_INVALID)
+ return -EINVAL;
+
+ crc_ctrl2_reg = readl_relaxed(regs + CRC_CTRL2);
+
+ mode_set_val = crc_ctrl2_reg | CH_MODE(channel, mode);
+
+ /* Write CRC_CTRL2, set mode */
+ writel_relaxed(mode_set_val, regs + CRC_CTRL2);
+
+ return 0;
+}
+
+static int mcrc_reset_signature(void __iomem *regs, u32 channel)
+{
+ u32 crc_ctrl0_reg, reset_val, reset_undo_val;
+
+	if (channel < MCRC_CHANNEL_1 || channel >= MCRC_CHANNEL_INVALID)
+ return -EINVAL;
+
+ /* reset PSA */
+ crc_ctrl0_reg = readl_relaxed(regs + CRC_CTRL0);
+
+ reset_val = crc_ctrl0_reg | CH_PSA_SWRE(channel);
+ reset_undo_val = crc_ctrl0_reg & ~CH_PSA_SWRE(channel);
+
+ /* Write CRC_CTRL0 register, reset PSA register */
+ writel_relaxed(reset_val, regs + CRC_CTRL0);
+ writel_relaxed(reset_undo_val, regs + CRC_CTRL0);
+
+ return 0;
+}
+
+static int mcrc_calculate_crc(void __iomem *regs, u32 channel,
+ u32 pattern, const u8 *d8, size_t length)
+{
+ void __iomem *psa_reg;
+
+	if (channel < MCRC_CHANNEL_1 || channel >= MCRC_CHANNEL_INVALID)
+ return -EINVAL;
+
+ psa_reg = regs + PSA_SIGREGL(channel);
+
+	for (; length >= sizeof(u64); d8 += sizeof(u64), length -= sizeof(u64)) {
+		writeq_relaxed(*((u64 *)d8), psa_reg);
+		/* Read back the running signature to pace consecutive writes */
+		readq_relaxed(psa_reg);
+	}
+
+	if (length) {
+		u64 leftover = 0;
+		unsigned int shift = 0;
+
+		/* Pack trailing bytes little-endian, matching the aligned path */
+		while (length--) {
+			leftover |= (u64)(*d8++) << shift;
+			shift += 8;
+		}
+		writeq_relaxed(leftover, psa_reg);
+		readq_relaxed(psa_reg);
+	}
+
+ return 0;
+}
+
+static int mcrc_cra_init(struct crypto_tfm *tfm)
+{
+ struct mcrc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ struct mcrc_data *dev_data = dev_get_drvdata(mcrc_k3_dev);
+
+ pm_runtime_get_sync(dev_data->dev);
+
+ mctx->key = 0;
+
+ return 0;
+}
+
+static void mcrc_cra_exit(struct crypto_tfm *tfm)
+{
+ struct mcrc_data *dev_data = dev_get_drvdata(mcrc_k3_dev);
+
+ pm_runtime_mark_last_busy(dev_data->dev);
+ pm_runtime_put_autosuspend(dev_data->dev);
+}
+
+static int mcrc_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct mcrc_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (keylen != sizeof(u32))
+ return -EINVAL;
+
+ mctx->key = get_unaligned_le32(key);
+
+ return 0;
+}
+
+static int mcrc_init(struct shash_desc *desc)
+{
+ struct mcrc_data *dev_data = dev_get_drvdata(mcrc_k3_dev);
+
+ /* set full cpu mode */
+ int ret = mcrc_set_mode(dev_data->regs, MCRC_CHANNEL_1,
+ MCRC_MODE_FULL_CPU);
+ if (ret)
+ return ret;
+
+ /* reset PSA */
+ return mcrc_reset_signature(dev_data->regs, MCRC_CHANNEL_1);
+}
+
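+/*
+ * Push one contiguous chunk through channel 1 and snapshot the running
+ * signature so that a digest can be produced after any partial update.
+ */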
+static int burst_update(struct shash_desc *desc, const u8 *d8,
+ size_t length)
+{
+ struct mcrc_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct mcrc_data *dev_data = dev_get_drvdata(mcrc_k3_dev);
+
+ int ret = mcrc_calculate_crc(dev_data->regs, MCRC_CHANNEL_1,
+ MCRC_PATTERN_64BIT, d8, length);
+ if (ret)
+ return ret;
+
+ /* Store signature result */
+ ctx->signature = readq_relaxed(dev_data->regs +
+ PSA_SIGREGL(MCRC_CHANNEL_1));
+
+ return 0;
+}
+
+static int mcrc_update(struct shash_desc *desc, const u8 *d8,
+ unsigned int length)
+{
+ const unsigned int burst_sz = burst_size;
+ unsigned int rem_sz;
+ const u8 *cur;
+ size_t size;
+ int ret;
+
+ if (!burst_sz)
+ return burst_update(desc, d8, length);
+
+	/* The first pass digests the leading bytes up to 64-bit alignment */
+ size = min_t(size_t, length, burst_sz + (size_t)d8 -
+ ALIGN_DOWN((size_t)d8, sizeof(u64)));
+ for (rem_sz = length, cur = d8; rem_sz;
+ rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
+ ret = burst_update(desc, cur, size);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mcrc_final(struct shash_desc *desc, u8 *out)
+{
+ struct mcrc_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ /* Send computed CRC */
+ put_unaligned_le64(ctx->signature, out);
+ return 0;
+}
+
+static int mcrc_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ return mcrc_update(desc, data, length) ?:
+ mcrc_final(desc, out);
+}
+
+static int mcrc_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ return mcrc_init(desc) ?: mcrc_finup(desc, data, length, out);
+}
+
+static struct shash_alg algs[] = {
+ /* CRC-64 */
+ {
+ .setkey = mcrc_setkey,
+ .init = mcrc_init,
+ .update = mcrc_update,
+ .final = mcrc_final,
+ .finup = mcrc_finup,
+ .digest = mcrc_digest,
+ .descsize = sizeof(struct mcrc_desc_ctx),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "crc64",
+ .cra_driver_name = "mcrc",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 7,
+ .cra_ctxsize = sizeof(struct mcrc_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = mcrc_cra_init,
+ .cra_exit = mcrc_cra_exit,
+ }
+ }
+};
+
+static int mcrc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+	struct mcrc_data *dev_data;
+	int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ mcrc_k3_dev = dev;
+ dev_data->dev = dev;
+	dev_data->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(dev_data->regs))
+		return PTR_ERR(dev_data->regs);
+
+ platform_set_drvdata(pdev, dev_data);
+ dev_set_drvdata(mcrc_k3_dev, dev_data);
+
+	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+	if (ret)
+		return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, MCRC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int mcrc_remove(struct platform_device *pdev)
+{
+ struct mcrc_data *dev_data = platform_get_drvdata(pdev);
+
+ int ret = pm_runtime_get_sync(dev_data->dev);
+
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev_data->dev);
+ return ret;
+ }
+
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+
+ pm_runtime_disable(dev_data->dev);
+ pm_runtime_put_noidle(dev_data->dev);
+
+ return 0;
+}
+
+static int __maybe_unused mcrc_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused mcrc_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops mcrc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mcrc_suspend,
+ mcrc_resume)
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,mcrc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver mcrc_driver = {
+ .probe = mcrc_probe,
+ .remove = mcrc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &mcrc_pm_ops,
+ .of_match_table = of_match,
+ },
+};
+
+module_platform_driver(mcrc_driver);
+
+MODULE_AUTHOR("Kamlesh Gurudasani <kamlesh@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments MCRC hardware driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index dcbb023acc45..3ee485999c06 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -299,4 +299,4 @@ static int dma_heap_init(void)
return 0;
}
-subsys_initcall(dma_heap_init);
+core_initcall(dma_heap_init);
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..f9f7ac4ff9f1 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,12 @@ config DMABUF_HEAPS_CMA
Choose this option to enable dma-buf CMA heap. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
+
+config DMABUF_HEAPS_CARVEOUT
+ bool "DMA-BUF Carveout Heap"
+ depends on DMABUF_HEAPS && HAS_IOMEM
+ select GENERIC_ALLOCATOR
+ help
+	  Choose this option to enable the dma-buf Carveout heap. This heap
+	  is backed by a carved-out region of memory. If your system has
+	  such regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cdec3da0..3c9f5036ed40 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -2,3 +2,4 @@
obj-y += heap-helpers.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CARVEOUT) += carveout-heap.o
diff --git a/drivers/dma-buf/heaps/carveout-heap.c b/drivers/dma-buf/heaps/carveout-heap.c
new file mode 100644
index 000000000000..c41631f6017a
--- /dev/null
+++ b/drivers/dma-buf/heaps/carveout-heap.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Carveout DMA-Heap userspace exporter
+ *
+ * Copyright (C) 2019-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis <afd@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+
+struct carveout_dma_heap {
+ struct dma_heap *heap;
+ struct gen_pool *pool;
+ bool cached;
+};
+
+struct carveout_dma_heap_buffer {
+ struct gen_pool *pool;
+ struct list_head attachments;
+ struct mutex attachments_lock;
+ struct mutex vmap_lock;
+ int vmap_cnt;
+ unsigned long len;
+ void *vaddr;
+ phys_addr_t paddr;
+ bool cached;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ kfree(a);
+ return -ENOMEM;
+ }
+	if (sg_alloc_table(table, 1, GFP_KERNEL)) {
+		kfree(table);
+		kfree(a);
+		return -ENOMEM;
+	}
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(buffer->paddr)), buffer->len, 0);
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->attachments_lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->attachments_lock);
+
+ return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->attachments_lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->attachments_lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct carveout_dma_heap_buffer *buffer = attachment->dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+
+ unsigned long attrs = buffer->cached ? 0 : DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents,
+ direction, attrs))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct carveout_dma_heap_buffer *buffer = attachment->dmabuf->priv;
+ unsigned long attrs = buffer->cached ? 0 : DMA_ATTR_SKIP_CPU_SYNC;
+
+ dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
+ direction, attrs);
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ memunmap(buffer->vaddr);
+ }
+
+ gen_pool_free(buffer->pool, buffer->paddr, buffer->len);
+ kfree(buffer);
+}
+
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (!buffer->cached)
+ return 0;
+
+ mutex_lock(&buffer->vmap_lock);
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+ mutex_unlock(&buffer->vmap_lock);
+
+ mutex_lock(&buffer->attachments_lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+ mutex_unlock(&buffer->attachments_lock);
+
+ return 0;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (!buffer->cached)
+ return 0;
+
+ mutex_lock(&buffer->vmap_lock);
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+ mutex_unlock(&buffer->vmap_lock);
+
+ mutex_lock(&buffer->attachments_lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+ mutex_unlock(&buffer->attachments_lock);
+
+ return 0;
+}
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ int ret;
+
+ if (!buffer->cached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = vm_iomap_memory(vma, buffer->paddr, buffer->len);
+ if (ret)
+ pr_err("Could not map buffer to userspace\n");
+
+ return ret;
+}
+
+static void *dma_heap_vmap(struct dma_buf *dmabuf)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ mutex_lock(&buffer->vmap_lock);
+
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ vaddr = buffer->vaddr;
+ goto exit;
+ }
+ if (buffer->cached)
+ vaddr = memremap(buffer->paddr, buffer->len, MEMREMAP_WB);
+ else
+ vaddr = memremap(buffer->paddr, buffer->len, MEMREMAP_WC);
+	/* memremap() returns NULL on failure, never an ERR_PTR */
+	if (!vaddr) {
+		pr_err("Could not memremap buffer\n");
+		goto exit;
+	}
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+
+exit:
+ mutex_unlock(&buffer->vmap_lock);
+ return vaddr;
+}
+
+static void dma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->vmap_lock);
+ if (!--buffer->vmap_cnt) {
+ memunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->vmap_lock);
+}
+
+static const struct dma_buf_ops carveout_dma_heap_buf_ops = {
+ .attach = dma_heap_attach,
+	.detach = dma_heap_detach,
+ .map_dma_buf = dma_heap_map_dma_buf,
+ .unmap_dma_buf = dma_heap_unmap_dma_buf,
+ .release = dma_heap_dma_buf_release,
+ .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
+ .mmap = dma_heap_mmap,
+ .vmap = dma_heap_vmap,
+ .vunmap = dma_heap_vunmap,
+};
+
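+/*
+ * Carve a buffer out of this heap's gen_pool and wrap it in a DMA-BUF;
+ * the returned fd owns the buffer, and release() returns the memory to
+ * the pool.
+ */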
+static int carveout_dma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct carveout_dma_heap *carveout_dma_heap = dma_heap_get_drvdata(heap);
+ struct carveout_dma_heap_buffer *buffer;
+
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ buffer->pool = carveout_dma_heap->pool;
+ buffer->cached = carveout_dma_heap->cached;
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->attachments_lock);
+ mutex_init(&buffer->vmap_lock);
+ buffer->len = len;
+
+ buffer->paddr = gen_pool_alloc(buffer->pool, buffer->len);
+ if (!buffer->paddr) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ /* create the dmabuf */
+ exp_info.ops = &carveout_dma_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pool;
+ }
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pool:
+ gen_pool_free(buffer->pool, buffer->paddr, buffer->len);
+free_buffer:
+ kfree(buffer);
+
+ return ret;
+}
+
+static struct dma_heap_ops carveout_dma_heap_ops = {
+ .allocate = carveout_dma_heap_allocate,
+};
+
+int carveout_dma_heap_export(phys_addr_t base, size_t size, const char *name, bool cached)
+{
+ struct carveout_dma_heap *carveout_dma_heap;
+ struct dma_heap_export_info exp_info;
+ int ret;
+
+ carveout_dma_heap = kzalloc(sizeof(*carveout_dma_heap), GFP_KERNEL);
+ if (!carveout_dma_heap)
+ return -ENOMEM;
+
+	carveout_dma_heap->pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
+	if (!carveout_dma_heap->pool) {
+		/* gen_pool_create() returns NULL on failure, not an ERR_PTR */
+		pr_err("Carveout Heap: Could not create memory pool\n");
+		ret = -ENOMEM;
+		goto free_carveout_dma_heap;
+	}
+ ret = gen_pool_add(carveout_dma_heap->pool, base, size, NUMA_NO_NODE);
+ if (ret) {
+ pr_err("Carveout Heap: Could not add memory to pool\n");
+ goto free_pool;
+ }
+
+ carveout_dma_heap->cached = cached;
+
+ exp_info.name = name;
+ exp_info.ops = &carveout_dma_heap_ops;
+ exp_info.priv = carveout_dma_heap;
+ carveout_dma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(carveout_dma_heap->heap)) {
+ pr_err("Carveout Heap: Could not add DMA-Heap\n");
+ ret = PTR_ERR(carveout_dma_heap->heap);
+ goto free_pool;
+ }
+
+ pr_info("Carveout Heap: Exported %zu MiB at %pa\n", size / SZ_1M, &base);
+
+ return 0;
+
+free_pool:
+ gen_pool_destroy(carveout_dma_heap->pool);
+free_carveout_dma_heap:
+ kfree(carveout_dma_heap);
+ return ret;
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#define MAX_HEAP_AREAS 7
+static struct reserved_mem heap_areas[MAX_HEAP_AREAS];
+static size_t heap_area_count;
+
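+/*
+ * Regions recorded by rmem_dma_heap_carveout_setup() at early boot are
+ * exported as heaps here, once the slab allocator and the DMA-heap core
+ * (now a core_initcall) are available.
+ */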
+static int __init carveout_dma_heap_init_areas(void)
+{
+ int i;
+
+ for (i = 0; i < heap_area_count; i++) {
+ struct reserved_mem *rmem = &heap_areas[i];
+ bool cached = !of_get_flat_dt_prop(rmem->fdt_node, "no-map", NULL);
+ int ret = carveout_dma_heap_export(rmem->base, rmem->size, rmem->name, cached);
+ if (ret) {
+ pr_err("Carveout Heap: could not export as DMA-Heap\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+fs_initcall(carveout_dma_heap_init_areas);
+
+static int __init rmem_dma_heap_carveout_setup(struct reserved_mem *rmem)
+{
+ phys_addr_t align = PAGE_SIZE;
+ phys_addr_t mask = align - 1;
+
+ if ((rmem->base & mask) || (rmem->size & mask)) {
+ pr_err("Carveout Heap: incorrect alignment of region\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (heap_area_count == ARRAY_SIZE(heap_areas)) {
+ pr_err("Not enough slots for DMA-Heap reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ /*
+ * Each reserved area must be initialized later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ heap_areas[heap_area_count] = *rmem;
+ heap_area_count++;
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma_heap_carveout, "dma-heap-carveout", rmem_dma_heap_carveout_setup);
+
+#endif
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a3a172173e34..f696246f57fd 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -573,6 +573,7 @@ static int dmatest_func(void *data)
struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
+ struct device *dma_dev;
unsigned int error_count;
unsigned int failed_tests = 0;
unsigned int total_tests = 0;
@@ -606,6 +607,8 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
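+	/* Map test buffers against the device that actually performs the DMA */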
+ dma_dev = dmaengine_get_dma_device(chan);
+
src = &thread->src;
dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
@@ -730,7 +733,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
+ um = dmaengine_get_unmap_data(dma_dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -745,10 +748,10 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
+ um->addr[i] = dma_map_page(dma_dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
srcs[i] = um->addr[i] + src->off;
- ret = dma_mapping_error(dev->dev, um->addr[i]);
+ ret = dma_mapping_error(dma_dev, um->addr[i]);
if (ret) {
result("src mapping error", total_tests,
src->off, dst->off, len, ret);
@@ -763,9 +766,9 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
+ dsts[i] = dma_map_page(dma_dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev->dev, dsts[i]);
+ ret = dma_mapping_error(dma_dev, dsts[i]);
if (ret) {
result("dst mapping error", total_tests,
src->off, dst->off, len, ret);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 4be433482053..ac61ecda2926 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -79,8 +79,18 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
} else {
+ int ret = 0;
+
chan->router = ofdma->dma_router;
chan->route_data = route_data;
+
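+		/* Give the DMA provider a chance to program the crossbar route */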
+ if (chan->device->device_router_config)
+ ret = chan->device->device_router_config(chan);
+
+ if (ret) {
+ dma_release_channel(chan);
+ chan = ERR_PTR(ret);
+ }
}
err:
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 79618fac119a..2adc2cca10e9 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -35,7 +35,7 @@ config DMA_OMAP
DMA engine is found on OMAP and DRA7xx parts.
config TI_K3_UDMA
- bool "Texas Instruments UDMA support"
+ tristate "Texas Instruments UDMA support"
depends on ARCH_K3
depends on TI_SCI_PROTOCOL
depends on TI_SCI_INTA_IRQCHIP
@@ -48,7 +48,7 @@ config TI_K3_UDMA
DMA engine is used in AM65x and j721e.
config TI_K3_UDMA_GLUE_LAYER
- bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+ tristate "Texas Instruments UDMA Glue layer for non DMAengine users"
depends on ARCH_K3
depends on TI_K3_UDMA
help
@@ -56,7 +56,8 @@ config TI_K3_UDMA_GLUE_LAYER
If unsure, say N.
config TI_K3_PSIL
- bool
+ tristate
+ default TI_K3_UDMA
config TI_DMA_CROSSBAR
bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 0c67254caee6..8f0bb9e72617 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -4,8 +4,14 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
-obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
- k3-psil-am654.o \
- k3-psil-j721e.o \
- k3-psil-j7200.o
+k3-psil-lib-objs := k3-psil.o \
+ k3-psil-am654.o \
+ k3-psil-j721e.o \
+ k3-psil-j7200.o \
+ k3-psil-am64.o \
+ k3-psil-j721s2.o \
+ k3-psil-am62.o \
+ k3-psil-j784s4.o \
+ k3-psil-am62a.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 86ced7f2d771..f744ddbbbad7 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -122,7 +122,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
-static const struct of_device_id ti_am335x_master_match[] = {
+static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
{ .compatible = "ti,edma3-tpcc", },
{},
};
@@ -297,7 +297,7 @@ static const u32 ti_dma_offset[] = {
[TI_XBAR_SDMA_OFFSET] = 1,
};
-static const struct of_device_id ti_dra7_master_match[] = {
+static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
{
.compatible = "ti,omap4430-sdma",
.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
@@ -465,7 +465,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
static struct platform_driver ti_dma_xbar_driver = {
.driver = {
.name = "ti-dma-crossbar",
- .of_match_table = of_match_ptr(ti_dma_xbar_match),
+ .of_match_table = ti_dma_xbar_match,
},
.probe = ti_dma_xbar_probe,
};
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
new file mode 100644
index 000000000000..8c52b0cf7cd4
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
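+/*
+ * PSIL_ETHERNET(x, ch, flow_base, flow_cnt): PSI-L thread ID, mapped DMA
+ * channel, first RX flow, and the number of flows reserved for that thread.
+ */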
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4700),
+ PSIL_CSI2RX(0x4701),
+ PSIL_CSI2RX(0x4702),
+ PSIL_CSI2RX(0x4703),
+ PSIL_CSI2RX(0x4704),
+ PSIL_CSI2RX(0x4705),
+ PSIL_CSI2RX(0x4706),
+ PSIL_CSI2RX(0x4707),
+ PSIL_CSI2RX(0x4708),
+ PSIL_CSI2RX(0x4709),
+ PSIL_CSI2RX(0x470a),
+ PSIL_CSI2RX(0x470b),
+ PSIL_CSI2RX(0x470c),
+ PSIL_CSI2RX(0x470d),
+ PSIL_CSI2RX(0x470e),
+ PSIL_CSI2RX(0x470f),
+ PSIL_CSI2RX(0x4710),
+ PSIL_CSI2RX(0x4711),
+ PSIL_CSI2RX(0x4712),
+ PSIL_CSI2RX(0x4713),
+ PSIL_CSI2RX(0x4714),
+ PSIL_CSI2RX(0x4715),
+ PSIL_CSI2RX(0x4716),
+ PSIL_CSI2RX(0x4717),
+ PSIL_CSI2RX(0x4718),
+ PSIL_CSI2RX(0x4719),
+ PSIL_CSI2RX(0x471a),
+ PSIL_CSI2RX(0x471b),
+ PSIL_CSI2RX(0x471c),
+ PSIL_CSI2RX(0x471d),
+ PSIL_CSI2RX(0x471e),
+ PSIL_CSI2RX(0x471f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ PSIL_PDMA_XY_PKT(0xc30c),
+ PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62_ep_map = {
+ .name = "am62",
+ .src = am62_src_ep_map,
+ .src_count = ARRAY_SIZE(am62_src_ep_map),
+ .dst = am62_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
new file mode 100644
index 000000000000..ca9d71f91422
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62a.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62a_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62a_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ PSIL_PDMA_XY_PKT(0xc30c),
+ PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62a_ep_map = {
+ .name = "am62a",
+ .src = am62a_src_ep_map,
+ .src_count = ARRAY_SIZE(am62a_src_ep_map),
+ .dst = am62a_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62a_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-am64.c b/drivers/dma/ti/k3-psil-am64.c
new file mode 100644
index 000000000000..9fdeaa11a4fc
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am64.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am64_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x4000, 17, 32, 8, 32, 0),
+ PSIL_SAUL(0x4001, 18, 32, 8, 33, 0),
+ PSIL_SAUL(0x4002, 19, 40, 8, 40, 0),
+ PSIL_SAUL(0x4003, 20, 40, 8, 41, 0),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0x4100, 21, 48, 16),
+ PSIL_ETHERNET(0x4101, 22, 64, 16),
+ PSIL_ETHERNET(0x4102, 23, 80, 16),
+ PSIL_ETHERNET(0x4103, 24, 96, 16),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0x4200, 25, 112, 16),
+ PSIL_ETHERNET(0x4201, 26, 128, 16),
+ PSIL_ETHERNET(0x4202, 27, 144, 16),
+ PSIL_ETHERNET(0x4203, 28, 160, 16),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ PSIL_PDMA_XY_PKT(0x430e),
+ PSIL_PDMA_XY_PKT(0x430f),
+ /* PDMA_MAIN0 - USART0-1 */
+ PSIL_PDMA_XY_PKT(0x4310),
+ PSIL_PDMA_XY_PKT(0x4311),
+ /* PDMA_MAIN1 - SPI4 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ /* PDMA_MAIN1 - USART2-6 */
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ PSIL_PDMA_XY_PKT(0x4407),
+ PSIL_PDMA_XY_PKT(0x4408),
+ /* PDMA_MAIN1 - ADCs */
+ PSIL_PDMA_XY_TR(0x440f),
+ PSIL_PDMA_XY_TR(0x4410),
+ /* CPSW2 */
+ PSIL_ETHERNET(0x4500, 16, 16, 16),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am64_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xc000, 24, 80, 8, 80, 1),
+ PSIL_SAUL(0xc001, 25, 88, 8, 88, 1),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0xc100, 26, 96, 1),
+ PSIL_ETHERNET(0xc101, 27, 97, 1),
+ PSIL_ETHERNET(0xc102, 28, 98, 1),
+ PSIL_ETHERNET(0xc103, 29, 99, 1),
+ PSIL_ETHERNET(0xc104, 30, 100, 1),
+ PSIL_ETHERNET(0xc105, 31, 101, 1),
+ PSIL_ETHERNET(0xc106, 32, 102, 1),
+ PSIL_ETHERNET(0xc107, 33, 103, 1),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0xc200, 34, 104, 1),
+ PSIL_ETHERNET(0xc201, 35, 105, 1),
+ PSIL_ETHERNET(0xc202, 36, 106, 1),
+ PSIL_ETHERNET(0xc203, 37, 107, 1),
+ PSIL_ETHERNET(0xc204, 38, 108, 1),
+ PSIL_ETHERNET(0xc205, 39, 109, 1),
+ PSIL_ETHERNET(0xc206, 40, 110, 1),
+ PSIL_ETHERNET(0xc207, 41, 111, 1),
+ /* CPSW2 */
+ PSIL_ETHERNET(0xc500, 16, 16, 8),
+ PSIL_ETHERNET(0xc501, 17, 24, 8),
+ PSIL_ETHERNET(0xc502, 18, 32, 8),
+ PSIL_ETHERNET(0xc503, 19, 40, 8),
+ PSIL_ETHERNET(0xc504, 20, 48, 8),
+ PSIL_ETHERNET(0xc505, 21, 56, 8),
+ PSIL_ETHERNET(0xc506, 22, 64, 8),
+ PSIL_ETHERNET(0xc507, 23, 72, 8),
+};
+
+struct psil_ep_map am64_ep_map = {
+ .name = "am64",
+ .src = am64_src_ep_map,
+ .src_count = ARRAY_SIZE(am64_src_ep_map),
+ .dst = am64_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am64_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j7200.c b/drivers/dma/ti/k3-psil-j7200.c
index 5ea63ea74822..e3feff869991 100644
--- a/drivers/dma/ti/k3-psil-j7200.c
+++ b/drivers/dma/ti/k3-psil-j7200.c
@@ -143,6 +143,57 @@ static struct psil_ep j7200_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j7200_dst_ep_map[] = {
+ /* PDMA_MCASP - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ PSIL_PDMA_XY_PKT(0xc608),
+ PSIL_PDMA_XY_PKT(0xc609),
+ PSIL_PDMA_XY_PKT(0xc60a),
+ PSIL_PDMA_XY_PKT(0xc60b),
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ PSIL_PDMA_XY_PKT(0xc614),
+ PSIL_PDMA_XY_PKT(0xc615),
+ PSIL_PDMA_XY_PKT(0xc616),
+ PSIL_PDMA_XY_PKT(0xc617),
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW5 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -161,6 +212,22 @@ static struct psil_ep j7200_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA_MISC_G2 - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 7580870ed746..e7c83d668bb6 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -58,6 +58,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721e_src_ep_map[] = {
/* SA2UL */
@@ -138,6 +146,71 @@ static struct psil_ep j721e_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* CPSW9 */
PSIL_ETHERNET(0x4a00),
/* CPSW0 */
@@ -193,6 +266,69 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xc205),
PSIL_ETHERNET(0xc206),
PSIL_ETHERNET(0xc207),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ PSIL_PDMA_MCASP(0xc503),
+ PSIL_PDMA_MCASP(0xc504),
+ PSIL_PDMA_MCASP(0xc505),
+ PSIL_PDMA_MCASP(0xc506),
+ PSIL_PDMA_MCASP(0xc507),
+ PSIL_PDMA_MCASP(0xc508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0xc624),
+ PSIL_PDMA_XY_PKT(0xc625),
+ PSIL_PDMA_XY_PKT(0xc626),
+ PSIL_PDMA_XY_PKT(0xc627),
+ PSIL_PDMA_XY_PKT(0xc628),
+ PSIL_PDMA_XY_PKT(0xc629),
+ PSIL_PDMA_XY_PKT(0xc630),
+ PSIL_PDMA_XY_PKT(0xc63a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW9 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -211,6 +347,22 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
new file mode 100644
index 000000000000..6fd6c203fa6d
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721s2_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ PSIL_PDMA_XY_PKT(0x4614),
+ PSIL_PDMA_XY_PKT(0x4615),
+ PSIL_PDMA_XY_PKT(0x4616),
+ PSIL_PDMA_XY_PKT(0x4617),
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0x4640),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721s2_dst_ep_map[] = {
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0xc640),
+ PSIL_ETHERNET(0xc641),
+ PSIL_ETHERNET(0xc642),
+ PSIL_ETHERNET(0xc643),
+ PSIL_ETHERNET(0xc644),
+ PSIL_ETHERNET(0xc645),
+ PSIL_ETHERNET(0xc646),
+ PSIL_ETHERNET(0xc647),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j721s2_ep_map = {
+ .name = "j721s2",
+ .src = j721s2_src_ep_map,
+ .src_count = ARRAY_SIZE(j721s2_src_ep_map),
+ .dst = j721s2_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721s2_dst_ep_map),
+};
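For context, these per-SoC tables are consumed by psil_get_ep_config() in k3-psil.c, which resolves a PSI-L thread ID to its endpoint configuration with a linear scan. A minimal sketch of that lookup, assuming K3_PSIL_DST_THREAD_ID_OFFSET is the 0x8000 destination-thread marker bit used throughout this series (the helper name here is hypothetical):

	/* Hypothetical helper mirroring the psil_get_ep_config() scan */
	static struct psil_endpoint_config *
	ep_map_lookup(struct psil_ep_map *map, u32 thread_id)
	{
		int i;

		/* TX (destination) threads carry the marker bit */
		if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && map->dst) {
			for (i = 0; i < map->dst_count; i++)
				if (map->dst[i].thread_id == thread_id)
					return &map->dst[i].ep_config;
		}

		/* fall back to the source thread map */
		thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
		for (i = 0; i < map->src_count; i++)
			if (map->src[i].thread_id == thread_id)
				return &map->src[i].ep_config;

		return ERR_PTR(-ENOENT);
	}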
diff --git a/drivers/dma/ti/k3-psil-j784s4.c b/drivers/dma/ti/k3-psil-j784s4.c
new file mode 100644
index 000000000000..40efba193335
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j784s4.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j784s4_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4620),
+ PSIL_PDMA_XY_PKT(0x4621),
+ PSIL_PDMA_XY_PKT(0x4622),
+ PSIL_PDMA_XY_PKT(0x4623),
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x462a),
+ PSIL_PDMA_XY_PKT(0x462b),
+ PSIL_PDMA_XY_PKT(0x462c),
+ PSIL_PDMA_XY_PKT(0x462d),
+ PSIL_PDMA_XY_PKT(0x462e),
+ PSIL_PDMA_XY_PKT(0x462f),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0x4640),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4900),
+ PSIL_CSI2RX(0x4901),
+ PSIL_CSI2RX(0x4902),
+ PSIL_CSI2RX(0x4903),
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
+ PSIL_CSI2RX(0x4980),
+ PSIL_CSI2RX(0x4981),
+ PSIL_CSI2RX(0x4982),
+ PSIL_CSI2RX(0x4983),
+ PSIL_CSI2RX(0x4984),
+ PSIL_CSI2RX(0x4985),
+ PSIL_CSI2RX(0x4986),
+ PSIL_CSI2RX(0x4987),
+ PSIL_CSI2RX(0x4988),
+ PSIL_CSI2RX(0x4989),
+ PSIL_CSI2RX(0x498a),
+ PSIL_CSI2RX(0x498b),
+ PSIL_CSI2RX(0x498c),
+ PSIL_CSI2RX(0x498d),
+ PSIL_CSI2RX(0x498e),
+ PSIL_CSI2RX(0x498f),
+ PSIL_CSI2RX(0x4990),
+ PSIL_CSI2RX(0x4991),
+ PSIL_CSI2RX(0x4992),
+ PSIL_CSI2RX(0x4993),
+ PSIL_CSI2RX(0x4994),
+ PSIL_CSI2RX(0x4995),
+ PSIL_CSI2RX(0x4996),
+ PSIL_CSI2RX(0x4997),
+ PSIL_CSI2RX(0x4998),
+ PSIL_CSI2RX(0x4999),
+ PSIL_CSI2RX(0x499a),
+ PSIL_CSI2RX(0x499b),
+ PSIL_CSI2RX(0x499c),
+ PSIL_CSI2RX(0x499d),
+ PSIL_CSI2RX(0x499e),
+ PSIL_CSI2RX(0x499f),
+ /* MAIN_CPSW9G */
+ PSIL_ETHERNET(0x4a00),
+ /* MAIN-SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
+ /* MCU_CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j784s4_dst_ep_map[] = {
+ /* MCU_CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* MAIN_CPSW2G */
+ PSIL_ETHERNET(0xc640),
+ PSIL_ETHERNET(0xc641),
+ PSIL_ETHERNET(0xc642),
+ PSIL_ETHERNET(0xc643),
+ PSIL_ETHERNET(0xc644),
+ PSIL_ETHERNET(0xc645),
+ PSIL_ETHERNET(0xc646),
+ PSIL_ETHERNET(0xc647),
+ /* MAIN_CPSW9G */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* MAIN-SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ PSIL_PDMA_XY_PKT(0xc608),
+ PSIL_PDMA_XY_PKT(0xc609),
+ PSIL_PDMA_XY_PKT(0xc60a),
+ PSIL_PDMA_XY_PKT(0xc60b),
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0xc620),
+ PSIL_PDMA_XY_PKT(0xc621),
+ PSIL_PDMA_XY_PKT(0xc622),
+ PSIL_PDMA_XY_PKT(0xc623),
+ PSIL_PDMA_XY_PKT(0xc624),
+ PSIL_PDMA_XY_PKT(0xc625),
+ PSIL_PDMA_XY_PKT(0xc626),
+ PSIL_PDMA_XY_PKT(0xc627),
+ PSIL_PDMA_XY_PKT(0xc628),
+ PSIL_PDMA_XY_PKT(0xc629),
+ PSIL_PDMA_XY_PKT(0xc62a),
+ PSIL_PDMA_XY_PKT(0xc62b),
+ PSIL_PDMA_XY_PKT(0xc62c),
+ PSIL_PDMA_XY_PKT(0xc62d),
+ PSIL_PDMA_XY_PKT(0xc62e),
+ PSIL_PDMA_XY_PKT(0xc62f),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+};
+
+struct psil_ep_map j784s4_ep_map = {
+ .name = "j784s4",
+ .src = j784s4_src_ep_map,
+ .src_count = ARRAY_SIZE(j784s4_src_ep_map),
+ .dst = j784s4_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j784s4_dst_ep_map),
+};
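To make the table entries above concrete, each helper macro expands to a struct psil_ep initializer; for instance, PSIL_ETHERNET(0xf000) from the TX map expands to:

	/* Expansion of PSIL_ETHERNET(0xf000), per the macro defined above */
	{
		.thread_id = 0xf000,
		.ep_config = {
			.ep_type = PSIL_EP_NATIVE,
			.pkt_mode = 1,
			.needs_epib = 1,	/* descriptors carry an EPIB */
			.psd_size = 16,		/* protocol specific data, bytes */
		},
	},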
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index b4b0fb359eff..ec26db96e299 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -40,5 +40,10 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
+extern struct psil_ep_map am64_ep_map;
+extern struct psil_ep_map j721s2_ep_map;
+extern struct psil_ep_map am62_ep_map;
+extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62a_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 837853aab95a..6ea2df1bf689 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mutex.h>
@@ -20,6 +21,11 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_ep_map },
{ .family = "J721E", .data = &j721e_ep_map },
{ .family = "J7200", .data = &j7200_ep_map },
+ { .family = "AM64X", .data = &am64_ep_map },
+ { .family = "J721S2", .data = &j721s2_ep_map },
+ { .family = "AM62X", .data = &am62_ep_map },
+ { .family = "J784S4", .data = &j784s4_ep_map },
+ { .family = "AM62AX", .data = &am62a_ep_map },
{ /* sentinel */ }
};
@@ -98,3 +104,4 @@ int psil_set_new_ep_config(struct device *dev, const char *name,
return 0;
}
EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
+MODULE_LICENSE("GPL v2");
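The k3_soc_devices table above is matched against the runtime SoC identity, so one module serves every listed family. A simplified sketch of the selection path (function name hypothetical; the real driver resolves the map lazily under a mutex):

	#include <linux/sys_soc.h>

	static const struct psil_ep_map *soc_ep_map;

	static int psil_select_ep_map(void)
	{
		const struct soc_device_attribute *soc;

		soc = soc_device_match(k3_soc_devices);
		if (!soc)
			return -ENOTSUPP;	/* not a known K3 family */

		soc_ep_map = soc->data;	/* e.g. &j721s2_ep_map on J721S2 */
		return 0;
	}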
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index a367584f0d7b..b0c9572b0d02 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -22,6 +23,7 @@
struct k3_udma_glue_common {
struct device *dev;
+ struct device chan_dev;
struct udma_dev *udmax;
const struct udma_tisci_rm *tisci_rm;
struct k3_ringacc *ringacc;
@@ -32,7 +34,8 @@ struct k3_udma_glue_common {
bool epib;
u32 psdata_size;
u32 swdata_size;
- u32 atype;
+ u32 atype_asel;
+ struct psil_endpoint_config *ep_config;
};
struct k3_udma_glue_tx_channel {
@@ -53,6 +56,8 @@ struct k3_udma_glue_tx_channel {
bool tx_filt_einfo;
bool tx_filt_pswords;
bool tx_supr_tdpkt;
+
+ int udma_tflow_id;
};
struct k3_udma_glue_rx_flow {
@@ -81,20 +86,26 @@ struct k3_udma_glue_rx_channel {
u32 flows_ready;
};
+static void k3_udma_chan_dev_release(struct device *dev)
+{
+ /* The struct containing the device is devm managed */
+}
+
+static struct class k3_udma_glue_devclass = {
+ .name = "k3_udma_glue_chan",
+ .dev_release = k3_udma_chan_dev_release,
+};
+
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
static int of_k3_udma_glue_parse(struct device_node *udmax_np,
struct k3_udma_glue_common *common)
{
- common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
- "ti,ringacc");
- if (IS_ERR(common->ringacc))
- return PTR_ERR(common->ringacc);
-
common->udmax = of_xudma_dev_get(udmax_np, NULL);
if (IS_ERR(common->udmax))
return PTR_ERR(common->udmax);
+ common->ringacc = xudma_get_ringacc(common->udmax);
common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
return 0;
@@ -104,7 +115,6 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
{
- struct psil_endpoint_config *ep_config;
struct of_phandle_args dma_spec;
u32 thread_id;
int ret = 0;
@@ -121,15 +131,26 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
&dma_spec))
return -ENOENT;
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+ if (ret)
+ goto out_put_spec;
+
thread_id = dma_spec.args[0];
if (dma_spec.args_count == 2) {
- if (dma_spec.args[1] > 2) {
+ if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
dev_err(common->dev, "Invalid channel atype: %u\n",
dma_spec.args[1]);
ret = -EINVAL;
goto out_put_spec;
}
- common->atype = dma_spec.args[1];
+ if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
+ dev_err(common->dev, "Invalid channel asel: %u\n",
+ dma_spec.args[1]);
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ common->atype_asel = dma_spec.args[1];
}
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
@@ -143,25 +164,23 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
}
/* get psil endpoint config */
- ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(ep_config)) {
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
dev_err(common->dev,
"No configuration for psi-l thread 0x%04x\n",
thread_id);
- ret = PTR_ERR(ep_config);
+ ret = PTR_ERR(common->ep_config);
goto out_put_spec;
}
- common->epib = ep_config->needs_epib;
- common->psdata_size = ep_config->psd_size;
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
if (tx_chn)
common->dst_thread = thread_id;
else
common->src_thread = thread_id;
- ret = of_k3_udma_glue_parse(dma_spec.np, common);
-
out_put_spec:
of_node_put(dma_spec.np);
return ret;
@@ -227,7 +246,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
req.tx_supr_tdpkt = 1;
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
- req.tx_atype = tx_chn->common.atype;
+ req.tx_atype = tx_chn->common.atype_asel;
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
@@ -259,8 +278,14 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
+ else
+ tx_chn->udma_tchan_id = -1;
+
/* request and cfg UDMAP TX channel */
- tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
+ tx_chn->udma_tchan_id);
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
@@ -268,11 +293,34 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
+ dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
+ tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
+ ret = device_register(&tx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ tx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ /* prepare the channel device as coherent */
+ tx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
+ else
+ tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
- tx_chn->udma_tchan_id, -1,
+ tx_chn->udma_tflow_id, -1,
&tx_chn->ringtx,
&tx_chn->ringtxcq);
if (ret) {
@@ -280,6 +328,16 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
+ /* Set the dma_dev for the rings to be configured */
+ cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
+ cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ cfg->tx_cfg.asel = tx_chn->common.atype_asel;
+ cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
@@ -303,19 +361,6 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
- ret = xudma_navss_psil_pair(tx_chn->common.udmax,
- tx_chn->common.src_thread,
- tx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- tx_chn->psil_paired = true;
-
- /* reset TX RT registers */
- k3_udma_glue_disable_tx_chn(tx_chn);
-
k3_udma_glue_dump_tx_chn(tx_chn);
return tx_chn;
@@ -344,6 +389,11 @@ void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
if (tx_chn->ringtx)
k3_ringacc_ring_free(tx_chn->ringtx);
+
+ if (tx_chn->common.chan_dev.parent) {
+ device_unregister(&tx_chn->common.chan_dev);
+ tx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
@@ -378,6 +428,18 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
+ int ret;
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ tx_chn->psil_paired = true;
+
xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE);
@@ -398,6 +460,13 @@ void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
xudma_tchanrt_write(tx_chn->udma_tchanx,
UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
@@ -437,13 +506,10 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma))
{
+ struct device *dev = tx_chn->common.dev;
dma_addr_t desc_dma;
int occ_tx, i, ret;
- /* reset TXCQ as it is not input for udma - expected to be empty */
- if (tx_chn->ringtxcq)
- k3_ringacc_ring_reset(tx_chn->ringtxcq);
-
/*
* TXQ reset needs to be done in a special way as it is input for udma
* and its state is cached by udma, so:
@@ -452,17 +518,20 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
* 3) reset TXQ in a special way
*/
occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
- dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+ dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
for (i = 0; i < occ_tx; i++) {
ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
if (ret) {
- dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "TX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
+ /* reset TXCQ as it is not input for udma - expected to be empty */
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
@@ -481,12 +550,50 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
- tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
+ tx_chn->udma_tflow_id);
+ } else {
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ }
return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (xudma_is_pktdma(tx_chn->common.udmax) &&
+ (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
+ return &tx_chn->common.chan_dev;
+
+ return xudma_get_device(tx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
+
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
+
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
+
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
@@ -498,8 +605,6 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
@@ -511,13 +616,16 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
* req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
*/
req.rxcq_qnum = 0xFFFF;
- if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
+ rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
/* Default flow + extra ones */
+ req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
req.flowid_start = rx_chn->flow_id_base;
req.flowid_cnt = rx_chn->flow_num;
}
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
- req.rx_atype = rx_chn->common.atype;
+ req.rx_atype = rx_chn->common.atype_asel;
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
if (ret)
@@ -571,10 +679,18 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ rx_ringfdq_id = flow->udma_rflow_id +
+ xudma_get_rflow_ring_offset(rx_chn->common.udmax);
+ rx_ring_id = 0;
+ } else {
+ rx_ring_id = flow_cfg->ring_rxq_id;
+ rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
+ }
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
- flow_cfg->ring_rxfdq0_id,
- flow_cfg->ring_rxq_id,
+ rx_ringfdq_id, rx_ring_id,
&flow->ringrxfdq,
&flow->ringrx);
if (ret) {
@@ -582,6 +698,16 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ /* Set the dma_dev for the rings to be configured */
+ flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
+ flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
+ flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -740,6 +866,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
+ struct psil_endpoint_config *ep_cfg;
int ret, i;
if (cfg->flow_id_num <= 0)
@@ -767,8 +894,16 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
rx_chn->common.psdata_size,
rx_chn->common.swdata_size);
+ ep_cfg = rx_chn->common.ep_config;
+
+ if (xudma_is_pktdma(rx_chn->common.udmax))
+ rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
+ else
+ rx_chn->udma_rchan_id = -1;
+
/* request and cfg UDMAP RX channel */
- rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
+ rx_chn->udma_rchan_id);
if (IS_ERR(rx_chn->udma_rchanx)) {
ret = PTR_ERR(rx_chn->udma_rchanx);
dev_err(dev, "UDMAX rchanx get err %d\n", ret);
@@ -776,12 +911,48 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
}
rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
- rx_chn->flow_num = cfg->flow_id_num;
- rx_chn->flow_id_base = cfg->flow_id_base;
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
+ rx_chn->udma_rchan_id, rx_chn->common.src_thread);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
- /* Use RX channel id as flow id: target dev can't generate flow_id */
- if (cfg->flow_id_use_rxchan_id)
- rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ int flow_start = cfg->flow_id_base;
+ int flow_end;
+
+ if (flow_start == -1)
+ flow_start = ep_cfg->flow_start;
+
+ flow_end = flow_start + cfg->flow_id_num - 1;
+ if (flow_start < ep_cfg->flow_start ||
+ flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
+ dev_err(dev, "Invalid flow range requested\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ rx_chn->flow_id_base = flow_start;
+ } else {
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ }
+
+ rx_chn->flow_num = cfg->flow_id_num;
rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
sizeof(*rx_chn->flows), GFP_KERNEL);
@@ -815,19 +986,6 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
goto err;
}
- ret = xudma_navss_psil_pair(rx_chn->common.udmax,
- rx_chn->common.src_thread,
- rx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- rx_chn->psil_paired = true;
-
- /* reset RX RT registers */
- k3_udma_glue_disable_rx_chn(rx_chn);
-
k3_udma_glue_dump_rx_chn(rx_chn);
return rx_chn;
@@ -884,6 +1042,25 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
goto err;
}
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
+ rx_chn->common.src_thread);
+
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
if (ret)
goto err;
@@ -936,6 +1113,11 @@ void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
xudma_rchan_put(rx_chn->common.udmax,
rx_chn->udma_rchanx);
+
+ if (rx_chn->common.chan_dev.parent) {
+ device_unregister(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
@@ -1052,12 +1234,24 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
+ int ret;
+
if (rx_chn->remote)
return -EINVAL;
if (rx_chn->flows_ready < rx_chn->flow_num)
return -EINVAL;
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ rx_chn->psil_paired = true;
+
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
UDMA_CHAN_RT_CTL_EN);
@@ -1078,6 +1272,13 @@ void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
@@ -1128,12 +1329,10 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
/* reset RXCQ as it is not input for udma - expected to be empty */
occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
- if (flow->ringrx)
- k3_ringacc_ring_reset(flow->ringrx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
if (skip_fdq)
- return;
+ goto do_reset;
/*
* RX FDQ reset needs to be done in a special way as it is input for udma and its
@@ -1148,13 +1347,17 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
for (i = 0; i < occ_rx; i++) {
ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
if (ret) {
- dev_err(dev, "RX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "RX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+
+do_reset:
+ k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
@@ -1184,8 +1387,54 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow = &rx_chn->flows[flow_num];
- flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ } else {
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ }
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
+
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ if (xudma_is_pktdma(rx_chn->common.udmax) &&
+ (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
+ return &rx_chn->common.chan_dev;
+
+ return xudma_get_device(rx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
+
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
+
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
+
+static int __init k3_udma_glue_class_init(void)
+{
+ return class_register(&k3_udma_glue_devclass);
+}
+
+module_init(k3_udma_glue_class_init);
+MODULE_LICENSE("GPL v2");
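The new dma_to_cppi5_addr/cppi5_to_dma_addr helpers tag and strip an ASEL routing value in the top bits of a 48-bit DMA address before it enters or leaves a CPPI5 descriptor. A worked example, assuming K3_ADDRESS_ASEL_SHIFT is 48 as defined in linux/dma/k3-event-router.h:

	#include <linux/bits.h>

	u64 addr = 0x80000000ULL;	/* address handed out by the pool */
	u32 asel = 15;			/* coherent (ACP) routing, see above */

	/* to descriptor: 0x000f000080000000 */
	addr |= (u64)asel << 48;

	/* back to a CPU-usable address: 0x80000000 */
	addr &= GENMASK_ULL(47, 0);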
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index dadab2feca08..3257b2f5157c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -50,6 +50,18 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
EXPORT_SYMBOL(of_xudma_dev_get);
+struct device *xudma_get_device(struct udma_dev *ud)
+{
+ return ud->dev;
+}
+EXPORT_SYMBOL(xudma_get_device);
+
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud)
+{
+ return ud->ringacc;
+}
+EXPORT_SYMBOL(xudma_get_ringacc);
+
u32 xudma_dev_get_psil_base(struct udma_dev *ud)
{
return ud->psil_base;
@@ -76,6 +88,9 @@ EXPORT_SYMBOL(xudma_free_gp_rflow_range);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
{
+ if (!ud->rflow_gp_map)
+ return false;
+
return !test_bit(id, ud->rflow_gp_map);
}
EXPORT_SYMBOL(xudma_rflow_is_gp);
@@ -107,6 +122,12 @@ void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
}
EXPORT_SYMBOL(xudma_rflow_put);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud)
+{
+ return ud->tflow_cnt;
+}
+EXPORT_SYMBOL(xudma_get_rflow_ring_offset);
+
#define XUDMA_GET_RESOURCE_ID(res) \
int xudma_##res##_get_id(struct udma_##res *p) \
{ \
@@ -136,3 +157,27 @@ void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
EXPORT_SYMBOL(xudma_##res##rt_write)
XUDMA_RT_IO_FUNCTIONS(tchan);
XUDMA_RT_IO_FUNCTIONS(rchan);
+
+int xudma_is_pktdma(struct udma_dev *ud)
+{
+ return ud->match_data->type == DMA_TYPE_PKTDMA;
+}
+EXPORT_SYMBOL(xudma_is_pktdma);
+
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
+ oes->pktdma_tchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
+
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
+ oes->pktdma_rchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
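These accessors exist so the glue layer can branch on the DMA type without reaching into struct udma_dev directly; the RX flow setup in k3-udma-glue.c above uses them roughly as follows (sketch, variable names as in the glue driver):

	if (xudma_is_pktdma(common->udmax)) {
		/* PKTDMA: a flow's FDQ ring sits after the tflow rings */
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(common->udmax);
		rx_ring_id = 0;
		flow->virq = xudma_pktdma_rflow_get_irq(common->udmax,
							flow->udma_rflow_id);
	} else {
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
		rx_ring_id = flow_cfg->ring_rxq_id;
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}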
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index d3902784cae2..0d117c1c49fe 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -26,6 +27,7 @@
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>
#include "../virt-dma.h"
@@ -55,14 +57,26 @@ struct udma_static_tr {
struct udma_chan;
+enum k3_dma_type {
+ DMA_TYPE_UDMA = 0,
+ DMA_TYPE_BCDMA,
+ DMA_TYPE_PKTDMA,
+};
+
enum udma_mmr {
MMR_GCFG = 0,
+ MMR_BCHANRT,
MMR_RCHANRT,
MMR_TCHANRT,
MMR_LAST,
};
-static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+static const char * const mmr_names[] = {
+ [MMR_GCFG] = "gcfg",
+ [MMR_BCHANRT] = "bchanrt",
+ [MMR_RCHANRT] = "rchanrt",
+ [MMR_TCHANRT] = "tchanrt",
+};
struct udma_tchan {
void __iomem *reg_rt;
@@ -70,8 +84,12 @@ struct udma_tchan {
int id;
struct k3_ring *t_ring; /* Transmit ring */
struct k3_ring *tc_ring; /* Transmit Completion ring */
+ int tflow_id; /* applicable only for PKTDMA */
+
};
+#define udma_bchan udma_tchan
+
struct udma_rflow {
int id;
struct k3_ring *fd_ring; /* Free Descriptor ring */
@@ -84,18 +102,46 @@ struct udma_rchan {
int id;
};
+struct udma_oes_offsets {
+ /* K3 UDMA Output Event Offset */
+ u32 udma_rchan;
+
+ /* BCDMA Output Event Offsets */
+ u32 bcdma_bchan_data;
+ u32 bcdma_bchan_ring;
+ u32 bcdma_tchan_data;
+ u32 bcdma_tchan_ring;
+ u32 bcdma_rchan_data;
+ u32 bcdma_rchan_ring;
+
+ /* PKTDMA Output Event Offsets */
+ u32 pktdma_tchan_flow;
+ u32 pktdma_rchan_flow;
+};
+
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
+#define UDMA_FLAG_TDTYPE BIT(2)
+#define UDMA_FLAG_BURST_SIZE BIT(3)
+#define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
+ UDMA_FLAG_PDMA_BURST | \
+ UDMA_FLAG_TDTYPE | \
+ UDMA_FLAG_BURST_SIZE)
struct udma_match_data {
+ enum k3_dma_type type;
u32 psil_base;
bool enable_memcpy_support;
u32 flags;
u32 statictr_z_mask;
+ u8 burst_size[3];
+ struct udma_soc_data *soc_data;
+ u8 order_id;
};
struct udma_soc_data {
- u32 rchan_oes_offset;
+ struct udma_oes_offsets oes;
+ u32 bcdma_trigger_event_offset;
};
struct udma_hwdesc {
@@ -116,6 +162,11 @@ struct udma_rx_flush {
dma_addr_t buffer_paddr;
};
+struct udma_tpl {
+ u8 levels;
+ u32 start_idx[3];
+};
+
struct udma_dev {
struct dma_device ddev;
struct device *dev;
@@ -123,8 +174,9 @@ struct udma_dev {
const struct udma_match_data *match_data;
const struct udma_soc_data *soc_data;
- u8 tpl_levels;
- u32 tpl_start_idx[3];
+ struct udma_tpl bchan_tpl;
+ struct udma_tpl tchan_tpl;
+ struct udma_tpl rchan_tpl;
size_t desc_align; /* alignment to use for descriptors */
@@ -138,16 +190,22 @@ struct udma_dev {
struct udma_rx_flush rx_flush;
+ int bchan_cnt;
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
int rflow_cnt;
+ int tflow_cnt;
+ int ch_count;
+ unsigned long *bchan_map;
unsigned long *tchan_map;
unsigned long *rchan_map;
unsigned long *rflow_gp_map;
unsigned long *rflow_gp_map_allocated;
unsigned long *rflow_in_use;
+ unsigned long *tflow_map;
+ struct udma_bchan *bchans;
struct udma_tchan *tchans;
struct udma_rchan *rchans;
struct udma_rflow *rflows;
@@ -155,6 +213,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
u32 atype;
+ u32 asel;
};
struct udma_desc {
@@ -199,6 +258,7 @@ struct udma_chan_config {
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
u32 atype;
+ u32 asel;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -206,6 +266,14 @@ struct udma_chan_config {
bool enable_burst;
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+ u32 tr_trigger_type;
+ unsigned long tx_flags;
+
+ /* PKTDMA mapped channel */
+ int mapped_channel_id;
+ /* PKTDMA default tflow or rflow for mapped channel */
+ int default_flow_id;
+
enum dma_transfer_direction dir;
};
@@ -213,11 +281,13 @@ struct udma_chan {
struct virt_dma_chan vc;
struct dma_slave_config cfg;
struct udma_dev *ud;
+ struct device *dma_dev;
struct udma_desc *desc;
struct udma_desc *terminated_desc;
struct udma_static_tr static_tr;
char *name;
+ struct udma_bchan *bchan;
struct udma_tchan *tchan;
struct udma_rchan *rchan;
struct udma_rflow *rflow;
@@ -235,10 +305,10 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
- u32 bcnt; /* number of bytes completed since the start of the channel */
-
/* Channel configuration parameters */
struct udma_chan_config config;
+ /* Channel configuration parameters (backup) */
+ struct udma_chan_config backup_config;
/* dmapool for packet mode descriptors */
bool use_dma_pool;
@@ -353,10 +423,48 @@ static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
src_thread, dst_thread);
}
+static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
+{
+ struct device *chan_dev = &chan->dev->device;
+
+ if (asel == 0) {
+ /* No special handling for the channel */
+ chan->dev->chan_dma_dev = false;
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ } else if (asel == 14 || asel == 15) {
+ chan->dev->chan_dma_dev = true;
+
+ chan_dev->dma_coherent = true;
+ dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
+ chan_dev->dma_parms = chan_dev->parent->dma_parms;
+ } else {
+ dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ }
+}
+
+static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
+{
+ int i;
+
+ for (i = 0; i < tpl_map->levels; i++) {
+ if (chan_id >= tpl_map->start_idx[i])
+ return i;
+ }
+
+ return 0;
+}
+
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->state = UDMA_CHAN_IS_IDLE;
}
@@ -439,9 +547,7 @@ static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
d->hwdesc[i].cppi5_desc_vaddr = NULL;
}
} else if (d->hwdesc[0].cppi5_desc_vaddr) {
- struct udma_dev *ud = uc->ud;
-
- dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
d->hwdesc[0].cppi5_desc_vaddr,
d->hwdesc[0].cppi5_desc_paddr);
@@ -656,6 +762,21 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+{
+ if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ } else {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+}
+
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
@@ -670,8 +791,10 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
- val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
- udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (!uc->bchan) {
+ val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
}
if (uc->rchan) {
@@ -687,8 +810,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
-
- uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
@@ -746,10 +867,16 @@ static void udma_start_desc(struct udma_chan *uc)
{
struct udma_chan_config *ucc = &uc->config;
- if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
+ (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
int i;
- /* Push all descriptors to ring for packet mode cyclic or RX */
+ /*
+ * UDMA only: push all descriptors to the ring for packet mode
+ * cyclic or RX.
+ * PKTDMA supports pre-linked descriptors and does not support
+ * cyclic mode.
+ */
for (i = 0; i < uc->desc->sglen; i++)
udma_push_to_ring(uc, i);
} else {
@@ -936,9 +1063,14 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
u32 peer_bcnt, bcnt;
- /* Only TX towards PDMA is affected */
+ /*
+ * Only TX towards PDMA is affected.
+ * If DMA_PREP_INTERRUPT is not set by the consumer, skip the transfer
+ * completion calculation; in that case the consumer must ensure that
+ * there is no stale data left in the DMA fabric.
+ */
if (uc->config.ep_type == PSIL_EP_NATIVE ||
- uc->config.dir != DMA_MEM_TO_DEV)
+ uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
return true;
peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
@@ -1006,7 +1138,7 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
@@ -1020,13 +1152,12 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
dma_addr_t paddr = 0;
if (udma_pop_from_ring(uc, &paddr) || !paddr)
return IRQ_HANDLED;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
/* Teardown completion message */
if (cppi5_desc_is_tdcm(paddr)) {
@@ -1060,7 +1191,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
@@ -1077,7 +1208,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
}
}
out:
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1086,9 +1217,8 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
d = uc->desc;
if (d) {
d->tr_idx = (d->tr_idx + 1) % d->sglen;
@@ -1097,13 +1227,13 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
}
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1181,10 +1311,12 @@ static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
if (test_bit(id, ud->rflow_in_use))
return ERR_PTR(-ENOENT);
- /* GP rflow has to be allocated first */
- if (!test_bit(id, ud->rflow_gp_map) &&
- !test_bit(id, ud->rflow_gp_map_allocated))
- return ERR_PTR(-EINVAL);
+ if (ud->rflow_gp_map) {
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+ }
dev_dbg(ud->dev, "get rflow%d\n", id);
set_bit(id, ud->rflow_in_use);
@@ -1215,10 +1347,10 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
} else { \
int start; \
\
- if (tpl >= ud->tpl_levels) \
- tpl = ud->tpl_levels - 1; \
+ if (tpl >= ud->res##_tpl.levels) \
+ tpl = ud->res##_tpl.levels - 1; \
\
- start = ud->tpl_start_idx[tpl]; \
+ start = ud->res##_tpl.start_idx[tpl]; \
\
id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
start); \
@@ -1231,12 +1363,47 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
return &ud->res##s[id]; \
}
+UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
+static int bcdma_get_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ enum udma_tp_level tpl;
+ int ret;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
+ uc->id, uc->bchan->id);
+ return 0;
+ }
+
+ /*
+ * Use normal channels for peripherals, and the highest TPL channel
+ * for mem2mem
+ */
+ if (uc->config.tr_trigger_type)
+ tpl = 0;
+ else
+ tpl = ud->bchan_tpl.levels - 1;
+
+ uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
+ if (IS_ERR(uc->bchan)) {
+ ret = PTR_ERR(uc->bchan);
+ uc->bchan = NULL;
+ return ret;
+ }
+
+ uc->tchan = uc->bchan;
+
+ return 0;
+}
+
static int udma_get_tchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
+ int ret;
if (uc->tchan) {
dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
@@ -1244,14 +1411,48 @@ static int udma_get_tchan(struct udma_chan *uc)
return 0;
}
- uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->tchan)) {
+ ret = PTR_ERR(uc->tchan);
+ uc->tchan = NULL;
+ return ret;
+ }
+
+ if (ud->tflow_cnt) {
+ int tflow_id;
+
+ /* Only PKTDMA have support for tx flows */
+ if (uc->config.default_flow_id >= 0)
+ tflow_id = uc->config.default_flow_id;
+ else
+ tflow_id = uc->tchan->id;
+
+ if (test_bit(tflow_id, ud->tflow_map)) {
+ dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ return -ENOENT;
+ }
+
+ uc->tchan->tflow_id = tflow_id;
+ set_bit(tflow_id, ud->tflow_map);
+ } else {
+ uc->tchan->tflow_id = -1;
+ }
- return PTR_ERR_OR_ZERO(uc->tchan);
+ return 0;
}
static int udma_get_rchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
+ int ret;
if (uc->rchan) {
dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
@@ -1259,9 +1460,20 @@ static int udma_get_rchan(struct udma_chan *uc)
return 0;
}
- uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->rchan)) {
+ ret = PTR_ERR(uc->rchan);
+ uc->rchan = NULL;
+ return ret;
+ }
- return PTR_ERR_OR_ZERO(uc->rchan);
+ return 0;
}
static int udma_get_chan_pair(struct udma_chan *uc)
@@ -1287,8 +1499,11 @@ static int udma_get_chan_pair(struct udma_chan *uc)
/* Can be optimized, but let's have it like this for now */
end = min(ud->tchan_cnt, ud->rchan_cnt);
- /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
- chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
+ /*
+ * Try to use the highest TPL channel pair for MEM_TO_MEM channels
+ * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
+ */
+ chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
for (; chan_id < end; chan_id++) {
if (!test_bit(chan_id, ud->tchan_map) &&
!test_bit(chan_id, ud->rchan_map))
@@ -1303,12 +1518,16 @@ static int udma_get_chan_pair(struct udma_chan *uc)
uc->tchan = &ud->tchans[chan_id];
uc->rchan = &ud->rchans[chan_id];
+ /* UDMA does not use tx flows */
+ uc->tchan->tflow_id = -1;
+
return 0;
}
static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
struct udma_dev *ud = uc->ud;
+ int ret;
if (!uc->rchan) {
dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
@@ -1322,8 +1541,26 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
}
uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow)) {
+ ret = PTR_ERR(uc->rflow);
+ uc->rflow = NULL;
+ return ret;
+ }
+
+ return 0;
+}
- return PTR_ERR_OR_ZERO(uc->rflow);
+static void bcdma_put_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+ uc->bchan->id);
+ clear_bit(uc->bchan->id, ud->bchan_map);
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ }
}
static void udma_put_rchan(struct udma_chan *uc)
@@ -1346,6 +1583,10 @@ static void udma_put_tchan(struct udma_chan *uc)
dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
uc->tchan->id);
clear_bit(uc->tchan->id, ud->tchan_map);
+
+ if (uc->tchan->tflow_id >= 0)
+ clear_bit(uc->tchan->tflow_id, ud->tflow_map);
+
uc->tchan = NULL;
}
}
@@ -1362,6 +1603,65 @@ static void udma_put_rflow(struct udma_chan *uc)
}
}
+static void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+ if (!uc->bchan)
+ return;
+
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->tc_ring = NULL;
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+
+ bcdma_put_bchan(uc);
+}
+
+static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = bcdma_get_bchan(uc);
+ if (ret)
+ return ret;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
+ &uc->bchan->t_ring,
+ &uc->bchan->tc_ring);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
+ ring_cfg.asel = ud->asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+
+ ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ uc->bchan->tc_ring = NULL;
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+err_ring:
+ bcdma_put_bchan(uc);
+
+ return ret;
+}
+
static void udma_free_tx_resources(struct udma_chan *uc)
{
if (!uc->tchan)
@@ -1379,15 +1679,22 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
struct udma_dev *ud = uc->ud;
- int ret;
+ struct udma_tchan *tchan;
+ int ring_idx, ret;
ret = udma_get_tchan(uc);
if (ret)
return ret;
- ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
- &uc->tchan->t_ring,
- &uc->tchan->tc_ring);
+ tchan = uc->tchan;
+ if (tchan->tflow_id >= 0)
+ ring_idx = tchan->tflow_id;
+ else
+ ring_idx = ud->bchan_cnt + tchan->id;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
+ &tchan->t_ring,
+ &tchan->tc_ring);
if (ret) {
ret = -EBUSY;
goto err_ring;
@@ -1396,10 +1703,18 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
- ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
- ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
+
+ ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
if (ret)
goto err_ringcfg;
@@ -1452,14 +1767,23 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
if (uc->config.dir == DMA_MEM_TO_MEM)
return 0;
- ret = udma_get_rflow(uc, uc->rchan->id);
+ if (uc->config.default_flow_id >= 0)
+ ret = udma_get_rflow(uc, uc->config.default_flow_id);
+ else
+ ret = udma_get_rflow(uc, uc->rchan->id);
+
if (ret) {
ret = -EBUSY;
goto err_rflow;
}
rflow = uc->rflow;
- fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ if (ud->tflow_cnt)
+ fd_ring_id = ud->tflow_cnt + rflow->id;
+ else
+ fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+ uc->rchan->id;
+
ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
&rflow->fd_ring, &rflow->r_ring);
if (ret) {
@@ -1469,15 +1793,25 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
- if (uc->config.pkt_mode)
- ring_cfg.size = SG_MAX_SEGMENTS;
- else
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
- ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
@@ -1499,7 +1833,18 @@ err_rflow:
return ret;
}
-#define TISCI_TCHAN_VALID_PARAMS ( \
+#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
+
+#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
+
+#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
+
+#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
@@ -1509,7 +1854,7 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
-#define TISCI_RCHAN_VALID_PARAMS ( \
+#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
@@ -1527,20 +1872,32 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct udma_rchan *rchan = uc->rchan;
- int ret = 0;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
/* Non synchronized - mem to mem type of transfer */
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
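+ /* When supported, request a burst size matching the channel's throughput level (TPL). */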
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
+
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = ud->atype;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret) {
@@ -1548,13 +1905,17 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_rx.rxcq_qnum = tc_ring;
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_rx.rx_atype = ud->atype;
+ if (burst_size) {
+ req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_rx.rx_burst_size = burst_size;
+ }
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret)
@@ -1563,6 +1924,39 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct udma_bchan *bchan = uc->bchan;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
+
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
+
+ req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
+ req_tx.index = bchan->id;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
+
+ return ret;
+}
+
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1572,7 +1966,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -1583,7 +1977,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = mode;
@@ -1591,6 +1985,40 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
req_tx.tx_fetch_size = fetch_size >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = uc->config.atype;
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ int ret;
+
+ req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1599,6 +2027,8 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
return ret;
}
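+/* PKTDMA tx channels take the same reduced TISCI parameter set as BCDMA. */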
+#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
+
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1610,7 +2040,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -1621,7 +2051,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = fetch_size >> 2;
@@ -1680,6 +2110,78 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
return 0;
}
+static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ const struct udma_match_data *match_data = ud->match_data;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ int ret;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+
+ if (match_data->order_id) {
+ req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID;
+ req_rx.rx_orderid = match_data->order_id;
+ }
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ int ret;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = uc->rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = uc->rflow->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
+ ret);
+
+ return ret;
+}
+
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
@@ -1689,6 +2191,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
u32 irq_udma_idx;
int ret;
+ uc->dma_dev = ud->dev;
+
if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
uc->use_dma_pool = true;
/* in case of MEM_TO_MEM we have maximum of two TRs */
@@ -1784,7 +2288,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -1884,6 +2388,369 @@ err_cleanup:
return ret;
}
+static int bcdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_udma_idx, irq_ring_idx;
+ int ret;
+
+ /* Only TR mode is supported */
+ uc->config.pkt_mode = false;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = bcdma_alloc_bchan_resources(uc);
+ if (ret)
+ return ret;
+
+ irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
+ irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
+
+ ret = bcdma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
+ irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
+
+ ret = bcdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
+ irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
+
+ ret = bcdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+
+ uc->use_dma_pool = true;
+ } else if (uc->config.dir != DMA_MEM_TO_MEM) {
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev,
+ "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+ }
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from BCDMA (TR events) only needed for slave channels */
+ if (is_slave_direction(uc->config.dir)) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ if (uc->psil_paired)
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int bcdma_router_config(struct dma_chan *chan)
+{
+ struct k3_event_route_data *router_data = chan->route_data;
+ struct udma_chan *uc = to_udma_chan(chan);
+ u32 trigger_event;
+
+ if (!uc->bchan)
+ return -EINVAL;
+
+ if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
+ return -EINVAL;
+
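+ /* Each bchan owns two global trigger events; trigger type 1 or 2 picks one of the pair. */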
+ trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
+ trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
+
+ return router_data->set_event(router_data->priv, trigger_event);
+}
+
+static int pktdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_ring_idx;
+ int ret;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
+
+ ret = pktdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
+
+ ret = pktdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
+ uc->config.hdesc_size, ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ ret = -ENOMEM;
+ goto err_res_free;
+ }
+
+ uc->use_dma_pool = true;
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ uc->irq_num_udma = 0;
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+
+ if (uc->tchan)
+ dev_dbg(ud->dev,
+ "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->tchan->id, uc->tchan->tflow_id,
+ uc->config.remote_thread_id);
+ else if (uc->rchan)
+ dev_dbg(ud->dev,
+ "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->rchan->id, uc->rflow->id,
+ uc->config.remote_thread_id);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+
+ return ret;
+}
+
static int udma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -2028,6 +2895,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
size_t tr_size;
int num_tr = 0;
int tr_idx = 0;
+ u64 asel;
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
@@ -2045,6 +2913,11 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->sglen = sglen;
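+ /* BCDMA/PKTDMA carry the ASEL (coherency/route) bits in the upper address bits of each buffer pointer; plain UDMA does not. */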
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2063,6 +2936,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ sg_addr |= asel;
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
tr_req[tr_idx].icnt1 = tr0_cnt1;
@@ -2092,6 +2966,205 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
return d;
}
+static struct udma_desc *
+udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen,
+ enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_tr_type15_t *tr_req = NULL;
+ enum dma_slave_buswidth dev_width;
+ u16 tr_cnt0, tr_cnt1;
+ dma_addr_t dev_addr;
+ struct udma_desc *d;
+ unsigned int i;
+ size_t tr_size, sg_len;
+ int num_tr = 0;
+ int tr_idx = 0;
+ u32 burst, trigger_size, port_window;
+ u64 asel;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = uc->cfg.src_addr;
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ port_window = uc->cfg.src_port_window_size;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = uc->cfg.dst_addr;
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ port_window = uc->cfg.dst_port_window_size;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (port_window) {
+ if (port_window != burst) {
+ dev_err(uc->ud->dev,
+ "The burst must be equal to port_window\n");
+ return NULL;
+ }
+
+ tr_cnt0 = dev_width * port_window;
+ tr_cnt1 = 1;
+ } else {
+ tr_cnt0 = dev_width;
+ tr_cnt1 = burst;
+ }
+ trigger_size = tr_cnt0 * tr_cnt1;
+
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ sg_len = sg_dma_len(sgent);
+
+ if (sg_len % trigger_size) {
+ dev_err(uc->ud->dev,
+ "Not aligned SG entry (%zu for %u)\n", sg_len,
+ trigger_size);
+ return NULL;
+ }
+
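+ /* icnt2 is a 16-bit field: SG entries needing 64K or more trigger blocks are split into two TRs. */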
+ if (sg_len / trigger_size < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type15_t);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
+ asel = 0;
+ } else {
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+ dev_addr |= asel;
+ }
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ sg_len = sg_dma_len(sgent);
+ num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
+ &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ sg_len);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
+ true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
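+ /* Each external trigger event decrements icnt2, advancing the TR by one trigger_size block. */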
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
+
+ sg_addr |= asel;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+ tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+
+ tr_idx++;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
+ false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ 0, 0);
+
+ sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+ tr_idx++;
+ }
+
+ d->residue += sg_len;
+ }
+
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
enum dma_slave_buswidth dev_width,
u16 elcnt)
@@ -2156,6 +3229,7 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
struct udma_desc *d;
u32 ring_id;
unsigned int i;
+ u64 asel;
d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
if (!d)
@@ -2169,6 +3243,11 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for_each_sg(sgl, sgent, sglen, i) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2203,14 +3282,16 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
}
/* attach the sg buffer to the descriptor */
+ sg_addr |= asel;
cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
/* Attach link as host buffer descriptor */
if (h_desc)
cppi5_hdesc_link_hbdesc(h_desc,
- hwdesc->cppi5_desc_paddr);
+ hwdesc->cppi5_desc_paddr | asel);
- if (dir == DMA_MEM_TO_DEV)
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
+ dir == DMA_MEM_TO_DEV)
h_desc = desc;
}
@@ -2333,7 +3414,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct udma_desc *d;
u32 burst;
- if (dir != uc->config.dir) {
+ if (dir != uc->config.dir &&
+ (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
dev_err(chan->device->dev,
"%s: chan%d is for %s, not supporting %s\n",
__func__, uc->id,
@@ -2356,12 +3438,17 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!burst)
burst = 1;
+ uc->config.tx_flags = tx_flags;
+
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
- else
+ else if (is_slave_direction(uc->config.dir))
d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
context);
+ else
+ d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
+ tx_flags, context);
if (!d)
return NULL;
@@ -2415,7 +3502,12 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
- period_addr = buf_addr;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ period_addr = buf_addr;
+ else
+ period_addr = buf_addr |
+ ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -2480,6 +3572,9 @@ udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA)
+ buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for (i = 0; i < periods; i++) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t period_addr = buf_addr + (period_len * i);
@@ -2621,6 +3716,11 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
d->tr_idx = 0;
d->residue = len;
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
+ src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ }
+
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
@@ -2741,7 +3841,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
- bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
@@ -2978,6 +4077,7 @@ static void udma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&uc->vc);
tasklet_kill(&uc->vc.task);
+ bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
udma_reset_uchan(uc);
@@ -2989,10 +4089,14 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
static struct platform_driver udma_driver;
+static struct platform_driver bcdma_driver;
+static struct platform_driver pktdma_driver;
struct udma_filter_param {
int remote_thread_id;
u32 atype;
+ u32 asel;
+ u32 tr_trigger_type;
};
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
@@ -3003,7 +4107,9 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
struct udma_chan *uc;
struct udma_dev *ud;
- if (chan->device->dev->driver != &udma_driver.driver)
+ if (chan->device->dev->driver != &udma_driver.driver &&
+ chan->device->dev->driver != &bcdma_driver.driver &&
+ chan->device->dev->driver != &pktdma_driver.driver)
return false;
uc = to_udma_chan(chan);
@@ -3017,13 +4123,25 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
+ if (filter_param->asel > 15) {
+ dev_err(ud->dev, "Invalid channel asel: %u\n",
+ filter_param->asel);
+ return false;
+ }
+
ucc->remote_thread_id = filter_param->remote_thread_id;
ucc->atype = filter_param->atype;
+ ucc->asel = filter_param->asel;
+ ucc->tr_trigger_type = filter_param->tr_trigger_type;
- if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ if (ucc->tr_trigger_type) {
+ ucc->dir = DMA_MEM_TO_MEM;
+ goto triggered_bchan;
+ } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
ucc->dir = DMA_MEM_TO_DEV;
- else
+ } else {
ucc->dir = DMA_DEV_TO_MEM;
+ }
ep_config = psil_get_ep_config(ucc->remote_thread_id);
if (IS_ERR(ep_config)) {
@@ -3032,6 +4150,19 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
ucc->atype = 0;
+ ucc->asel = 0;
+ return false;
+ }
+
+ if (ud->match_data->type == DMA_TYPE_BCDMA &&
+ ep_config->pkt_mode) {
+ dev_err(ud->dev,
+ "Only TR mode is supported (psi-l thread 0x%04x)\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ ucc->atype = 0;
+ ucc->asel = 0;
return false;
}
@@ -3040,6 +4171,15 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->notdpkt = ep_config->notdpkt;
ucc->ep_type = ep_config->ep_type;
+ if (ud->match_data->type == DMA_TYPE_PKTDMA &&
+ ep_config->mapped_channel_id >= 0) {
+ ucc->mapped_channel_id = ep_config->mapped_channel_id;
+ ucc->default_flow_id = ep_config->default_flow_id;
+ } else {
+ ucc->mapped_channel_id = -1;
+ ucc->default_flow_id = -1;
+ }
+
if (ucc->ep_type != PSIL_EP_NATIVE) {
const struct udma_match_data *match_data = ud->match_data;
@@ -3063,6 +4203,13 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
return true;
+
+triggered_bchan:
+ dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
+ ucc->tr_trigger_type);
+
+ return true;
}
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
@@ -3073,14 +4220,33 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct udma_filter_param filter_param;
struct dma_chan *chan;
- if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
- return NULL;
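+ /* BCDMA uses three dma-cells (trigger type, PSI-L thread ID, ASEL); UDMA and PKTDMA keep the one- or two-cell binding. */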
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ if (dma_spec->args_count != 3)
+ return NULL;
- filter_param.remote_thread_id = dma_spec->args[0];
- if (dma_spec->args_count == 2)
- filter_param.atype = dma_spec->args[1];
- else
+ filter_param.tr_trigger_type = dma_spec->args[0];
+ filter_param.remote_thread_id = dma_spec->args[1];
+ filter_param.asel = dma_spec->args[2];
filter_param.atype = 0;
+ } else {
+ if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
+ return NULL;
+
+ filter_param.remote_thread_id = dma_spec->args[0];
+ filter_param.tr_trigger_type = 0;
+ if (dma_spec->args_count == 2) {
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ filter_param.atype = dma_spec->args[1];
+ filter_param.asel = 0;
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = dma_spec->args[1];
+ }
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = 0;
+ }
+ }
chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
@@ -3093,29 +4259,99 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
static struct udma_match_data am654_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am654_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data j721e_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
+ },
};
static struct udma_match_data j721e_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
+};
+
+static struct udma_soc_data am62a_dmss_csi_soc_data = {
+ .oes = {
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
+static struct udma_match_data am62a_bcdma_csirx_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &am62a_dmss_csi_soc_data,
+ .order_id = 8,
+};
+
+static struct udma_match_data am64_bcdma_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
+ .enable_memcpy_support = true, /* Supported via bchan */
+ .flags = UDMA_FLAGS_J7_CLASS,
+ .statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+};
+
+static struct udma_match_data am64_pktdma_data = {
+ .type = DMA_TYPE_PKTDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
+ .flags = UDMA_FLAGS_J7_CLASS,
+ .statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
};
static const struct of_device_id udma_of_match[] = {
@@ -3133,33 +4369,109 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721e-navss-mcu-udmap",
.data = &j721e_mcu_data,
},
+ {
+ .compatible = "ti,am64-dmss-bcdma",
+ .data = &am64_bcdma_data,
+ },
+ {
+ .compatible = "ti,am64-dmss-pktdma",
+ .data = &am64_pktdma_data,
+ },
+ {
+ .compatible = "ti,am62a-dmss-bcdma-csirx",
+ .data = &am62a_bcdma_csirx_data,
+ },
{ /* Sentinel */ },
};
static struct udma_soc_data am654_soc_data = {
- .rchan_oes_offset = 0x200,
+ .oes = {
+ .udma_rchan = 0x200,
+ },
};
static struct udma_soc_data j721e_soc_data = {
- .rchan_oes_offset = 0x400,
+ .oes = {
+ .udma_rchan = 0x400,
+ },
};
static struct udma_soc_data j7200_soc_data = {
- .rchan_oes_offset = 0x80,
+ .oes = {
+ .udma_rchan = 0x80,
+ },
+};
+
+static struct udma_soc_data am64_soc_data = {
+ .oes = {
+ .bcdma_bchan_data = 0x2200,
+ .bcdma_bchan_ring = 0x2400,
+ .bcdma_tchan_data = 0x2800,
+ .bcdma_tchan_ring = 0x2a00,
+ .bcdma_rchan_data = 0x2e00,
+ .bcdma_rchan_ring = 0x3000,
+ .pktdma_tchan_flow = 0x1200,
+ .pktdma_rchan_flow = 0x1600,
+ },
+ .bcdma_trigger_event_offset = 0xc400,
};
static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_soc_data },
{ .family = "J721E", .data = &j721e_soc_data },
{ .family = "J7200", .data = &j7200_soc_data },
+ { .family = "AM64X", .data = &am64_soc_data },
+ { .family = "J721S2", .data = &j721e_soc_data},
+ { .family = "AM62X", .data = &am64_soc_data },
+ { .family = "J784S4", .data = &j721e_soc_data },
+ { .family = "AM62AX", .data = &am64_soc_data },
{ /* sentinel */ }
};
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
+ u32 cap2, cap3, cap4;
int i;
- for (i = 0; i < MMR_LAST; i++) {
+ ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
+ if (IS_ERR(ud->mmrs[MMR_GCFG]))
+ return PTR_ERR(ud->mmrs[MMR_GCFG]);
+
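+ /* Channel counts come from the GCFG capability registers: CAP2 (0x28), CAP3 (0x2c) and, for PKTDMA, CAP4 (0x30). */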
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = ud->rchan_cnt;
+ break;
+ case DMA_TYPE_PKTDMA:
+ cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 1; i < MMR_LAST; i++) {
+ if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
+ continue;
+ if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
+ continue;
+ if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
+ continue;
+
ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
if (IS_ERR(ud->mmrs[i]))
return PTR_ERR(ud->mmrs[i]);
@@ -3168,47 +4480,58 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
return 0;
}
+static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
+ struct ti_sci_resource_desc *rm_desc,
+ char *name)
+{
+ bitmap_clear(map, rm_desc->start, rm_desc->num);
+ bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
+ dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
+ rm_desc->start, rm_desc->num, rm_desc->start_sec,
+ rm_desc->num_sec);
+}
+
+static const char * const range_names[] = {
+ [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
+ [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
+ [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
+ [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
+ [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
+};
+
static int udma_setup_resources(struct udma_dev *ud)
{
+ int ret, i, j;
struct device *dev = ud->dev;
- int ch_count, ret, i, j;
- u32 cap2, cap3;
- struct ti_sci_resource_desc *rm_desc;
struct ti_sci_resource *rm_res, irq_res;
struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
- static const char * const range_names[] = { "ti,sci-rm-range-tchan",
- "ti,sci-rm-range-rchan",
- "ti,sci-rm-range-rflow" };
-
- cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
- cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
-
- ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
- ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
- ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
- ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
- ch_count = ud->tchan_cnt + ud->rchan_cnt;
+ u32 cap3;
/* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-main-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 8;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 8;
} else if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-mcu-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 2;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 2;
} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
- ud->tpl_levels = 3;
- ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
- ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else {
- ud->tpl_levels = 1;
+ ud->tchan_tpl.levels = 1;
}
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
@@ -3246,11 +4569,15 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
/* Get resource ranges from tisci */
- for (i = 0; i < RM_RANGE_LAST; i++)
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
+ continue;
+
tisci_rm->rm_ranges[i] =
devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
tisci_rm->tisci_dev_id,
(char *)range_names[i]);
+ }
/* tchan ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
@@ -3258,13 +4585,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->tchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
}
irq_res.sets = rm_res->sets;
@@ -3274,13 +4597,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
}
irq_res.sets += rm_res->sets;
@@ -3289,12 +4608,21 @@ static int udma_setup_resources(struct udma_dev *ud)
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->rchan_oes_offset;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);
@@ -3310,31 +4638,392 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
ud->rflow_cnt - ud->rchan_cnt);
} else {
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_gp_map,
+ &rm_res->desc[i], "gp-rflow");
+ }
+
+ return 0;
+}
+
+static int bcdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap;
+
+ /* Set up the throughput level start indexes */
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 3;
+ ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 2;
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else {
+ ud->bchan_tpl.levels = 1;
+ }
+
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ if (BCDMA_CAP4_URCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 3;
+ ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 2;
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else {
+ ud->rchan_tpl.levels = 1;
+ }
+
+ if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
+ GFP_KERNEL);
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ /* BCDMA does not really have flows, but the driver expects them */
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
+ !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
+ !ud->rflows)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
+ continue;
+ if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ irq_res.sets = 0;
+
+ /* bchan ranges */
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ } else {
+ bitmap_fill(ud->bchan_map, ud->bchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->bchan_map,
+ &rm_res->desc[i],
+ "bchan");
+ }
+ irq_res.sets += rm_res->sets;
+ }
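+ /* bchans only raise ring events; tchans and rchans each need a data and a ring event, hence the doubled set counts below. */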
+
+ /* tchan ranges */
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i],
+ "tchan");
+ }
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ /* rchan ranges */
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i],
+ "rchan");
+ }
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rflow_gp_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
- rm_desc->start, rm_desc->num);
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
}
+ } else {
+ i = 0;
}
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
+ }
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
+ }
+
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pktdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap3;
+
+ /* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (UDMA_CAP3_UCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+ ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
+ !ud->rchans || !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
+ }
+
+ /* rchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
+ }
+
+ /* rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all rflows are assigned exclusively to Linux */
+ bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ } else {
+ bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_in_use,
+ &rm_res->desc[i], "rflow");
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* tflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all tflows are assigned exclusively to Linux */
+ bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ } else {
+ bitmap_fill(ud->tflow_map, ud->tflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tflow_map,
+ &rm_res->desc[i], "tflow");
+ }
+ irq_res.sets += rm_res->sets;
+
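+ /* PKTDMA events are per flow rather than per channel: tflow events first, then rflow events. */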
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret;
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ret = udma_setup_resources(ud);
+ break;
+ case DMA_TYPE_BCDMA:
+ ret = bcdma_setup_resources(ud);
+ break;
+ case DMA_TYPE_PKTDMA:
+ ret = pktdma_setup_resources(ud);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
+ if (ud->bchan_cnt)
+ ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
if (!ch_count)
return -ENODEV;
+ ud->ch_count = ch_count;
ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
GFP_KERNEL);
if (!ud->channels)
return -ENOMEM;
- dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
- ch_count,
- ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
- ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
- ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
- ud->rflow_cnt));
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+ break;
+ case DMA_TYPE_BCDMA:
+ dev_info(dev,
+ "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->bchan_cnt - bitmap_weight(ud->bchan_map,
+ ud->bchan_cnt),
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ case DMA_TYPE_PKTDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ default:
+ break;
+ }
return ch_count;
}
@@ -3443,20 +5132,33 @@ static void udma_dbg_summary_show_chan(struct seq_file *s,
seq_printf(s, " %-13s| %s", dma_chan_name(chan),
chan->dbg_client_name ?: "in-use");
- seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+ if (ucc->tr_trigger_type)
+ seq_puts(s, " (triggered, ");
+ else
+ seq_printf(s, " (%s, ",
+ dmaengine_get_direction_text(uc->config.dir));
switch (uc->config.dir) {
case DMA_MEM_TO_MEM:
+ if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ seq_printf(s, "bchan%d)\n", uc->bchan->id);
+ return;
+ }
+
seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
break;
case DMA_DEV_TO_MEM:
seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "rflow%d, ", uc->rflow->id);
break;
case DMA_MEM_TO_DEV:
seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
break;
default:
seq_printf(s, ")\n");
@@ -3494,6 +5196,34 @@ static void udma_dbg_summary_show(struct seq_file *s,
}
#endif /* CONFIG_DEBUG_FS */
+static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
+{
+ const struct udma_match_data *match_data = ud->match_data;
+ u8 tpl;
+
+ if (!match_data->enable_memcpy_support)
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ /* Get the highest TPL level the device supports for memcpy */
+ if (ud->bchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
+ else if (ud->tchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
+ else
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ switch (match_data->burst_size[tpl]) {
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
+ return DMAENGINE_ALIGN_256_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
+ return DMAENGINE_ALIGN_128_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
+ fallthrough;
+ default:
+ return DMAENGINE_ALIGN_64_BYTES;
+ }
+}
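udma_get_copy_align() folds the highest-throughput channel's burst size into the dmaengine copy_align encoding, which is a log2 value. A sketch of what a memcpy client would compute from it (the demo_* name is illustrative):

#include <linux/dmaengine.h>

/* Sketch: recovering the byte requirement from the log2-encoded field. */
static size_t demo_copy_align_bytes(struct dma_chan *chan)
{
	/* e.g. DMAENGINE_ALIGN_64_BYTES == 6 -> 64-byte aligned buffers */
	return 1UL << chan->device->copy_align;
}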
+
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3518,6 +5248,21 @@ static int udma_probe(struct platform_device *pdev)
if (!ud)
return -ENOMEM;
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ ud->match_data = match->data;
+
+ ud->soc_data = ud->match_data->soc_data;
+ if (!ud->soc_data) {
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc)
+ return -EPROBE_DEFER;
+ ud->soc_data = soc->data;
+ }
+
ret = udma_get_mmrs(pdev, ud);
if (ret)
return ret;
@@ -3541,47 +5286,63 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
- if (!ret && ud->atype > 2) {
- dev_err(dev, "Invalid atype: %u\n", ud->atype);
- return -EINVAL;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
+ &ud->atype);
+ if (!ret && ud->atype > 2) {
+ dev_err(dev, "Invalid atype: %u\n", ud->atype);
+ return -EINVAL;
+ }
+ } else {
+ ret = of_property_read_u32(dev->of_node, "ti,asel",
+ &ud->asel);
+ if (!ret && ud->asel > 15) {
+ dev_err(dev, "Invalid asel: %u\n", ud->asel);
+ return -EINVAL;
+ }
}
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
- ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ } else {
+ struct k3_ringacc_init_data ring_init_data;
+
+ ring_init_data.tisci = ud->tisci_rm.tisci;
+ ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ ring_init_data.num_rings = ud->bchan_cnt +
+ ud->tchan_cnt +
+ ud->rchan_cnt;
+ } else {
+ ring_init_data.num_rings = ud->rflow_cnt +
+ ud->tflow_cnt;
+ }
+
+ ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
+ }
+
if (IS_ERR(ud->ringacc))
return PTR_ERR(ud->ringacc);
dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi_domain) {
- dev_err(dev, "Failed to get MSI domain\n");
+ dev_dbg(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
- match = of_match_node(udma_of_match, dev->of_node);
- if (!match) {
- dev_err(dev, "No compatible match found\n");
- return -ENODEV;
- }
- ud->match_data = match->data;
-
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
- }
- ud->soc_data = soc->data;
-
dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
- dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ /* cyclic operation is not supported via PKTDMA */
+ if (ud->match_data->type != DMA_TYPE_PKTDMA) {
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ }
- ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
ud->ddev.device_config = udma_slave_config;
ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
- ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
ud->ddev.device_issue_pending = udma_issue_pending;
ud->ddev.device_tx_status = udma_tx_status;
ud->ddev.device_pause = udma_pause;
@@ -3592,15 +5353,33 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->ddev.device_alloc_chan_resources =
+ udma_alloc_chan_resources;
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->ddev.device_alloc_chan_resources =
+ bcdma_alloc_chan_resources;
+ ud->ddev.device_router_config = bcdma_router_config;
+ break;
+ case DMA_TYPE_PKTDMA:
+ ud->ddev.device_alloc_chan_resources =
+ pktdma_alloc_chan_resources;
+ break;
+ default:
+ return -EINVAL;
+ }
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
DESC_METADATA_ENGINE;
- if (ud->match_data->enable_memcpy_support) {
+ if (ud->match_data->enable_memcpy_support &&
+ !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
@@ -3613,7 +5392,7 @@ static int udma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ud->ddev.channels);
INIT_LIST_HEAD(&ud->desc_to_purge);
- ch_count = udma_setup_resources(ud);
+ ch_count = setup_resources(ud);
if (ch_count <= 0)
return ch_count;
@@ -3628,6 +5407,13 @@ static int udma_probe(struct platform_device *pdev)
if (ret)
return ret;
+ for (i = 0; i < ud->bchan_cnt; i++) {
+ struct udma_bchan *bchan = &ud->bchans[i];
+
+ bchan->id = i;
+ bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
+ }
+
for (i = 0; i < ud->tchan_cnt; i++) {
struct udma_tchan *tchan = &ud->tchans[i];
@@ -3654,9 +5440,12 @@ static int udma_probe(struct platform_device *pdev)
uc->ud = ud;
uc->vc.desc_free = udma_desc_free;
uc->id = i;
+ uc->bchan = NULL;
uc->tchan = NULL;
uc->rchan = NULL;
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
@@ -3668,6 +5457,9 @@ static int udma_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
+ /* Configure the copy_align to the maximum burst size the device supports */
+ ud->ddev.copy_align = udma_get_copy_align(ud);
+
ret = dma_async_device_register(&ud->ddev);
if (ret) {
dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
@@ -3685,15 +5477,68 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
+static int udma_pm_suspend(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_chan *chan;
+ int i;
+
+ for (i = 0; i < ud->ch_count; i++) {
+ chan = &ud->channels[i].vc.chan;
+ if (chan->client_count) {
+ /* backup the channel configuration */
+ memcpy(&ud->channels[i].backup_config,
+ &ud->channels[i].config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Suspending channel %s\n",
+ dma_chan_name(chan));
+ ud->ddev.device_free_chan_resources(chan);
+ }
+ }
+
+ return 0;
+}
+
+static int udma_pm_resume(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_chan *chan;
+ int ret, i;
+
+ for (i = 0; i < ud->ch_count; i++) {
+ chan = &ud->channels[i].vc.chan;
+ if (chan->client_count) {
+ /* restore the channel configuration */
+ memcpy(&ud->channels[i].config,
+ &ud->channels[i].backup_config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Resuming channel %s\n",
+ dma_chan_name(chan));
+ ret = ud->ddev.device_alloc_chan_resources(chan);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops udma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
+};
+
static struct platform_driver udma_driver = {
.driver = {
.name = "ti-udma",
.of_match_table = udma_of_match,
.suppress_bind_attrs = true,
+ .pm = &udma_pm_ops,
},
.probe = udma_probe,
};
-builtin_platform_driver(udma_driver);
+
+module_platform_driver(udma_driver);
+MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */
#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index 09c4529e013d..d349c6d482ae 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -18,7 +18,7 @@
#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
-/* TCHANRT/RCHANRT registers */
+/* BCHANRT/TCHANRT/RCHANRT registers */
#define UDMA_CHAN_RT_CTL_REG 0x0
#define UDMA_CHAN_RT_SWTRIG_REG 0x8
#define UDMA_CHAN_RT_STDATA_REG 0x80
@@ -45,6 +45,18 @@
#define UDMA_CAP3_HCHAN_CNT(val) (((val) >> 14) & 0x1ff)
#define UDMA_CAP3_UCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP2_BCHAN_CNT(val) ((val) & 0x1ff)
+#define BCDMA_CAP2_TCHAN_CNT(val) (((val) >> 9) & 0x1ff)
+#define BCDMA_CAP2_RCHAN_CNT(val) (((val) >> 18) & 0x1ff)
+#define BCDMA_CAP3_HBCHAN_CNT(val) (((val) >> 14) & 0x1ff)
+#define BCDMA_CAP3_UBCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP4_HRCHAN_CNT(val) ((val) & 0xff)
+#define BCDMA_CAP4_URCHAN_CNT(val) (((val) >> 8) & 0xff)
+#define BCDMA_CAP4_HTCHAN_CNT(val) (((val) >> 16) & 0xff)
+#define BCDMA_CAP4_UTCHAN_CNT(val) (((val) >> 24) & 0xff)
+
+#define PKTDMA_CAP4_TFLOW_CNT(val) ((val) & 0x3fff)
+
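These macros slice the channel and flow counts out of the BCDMA/PKTDMA capability registers. A sketch of how probe code might consume them, assuming the capability words have already been read from the controller (the function name is illustrative):

/* Sketch: decoding counts from already-read capability words. */
static void demo_decode_caps(struct udma_dev *ud, u32 cap2, u32 cap4)
{
	/* BCDMA packs its three channel counts into cap2 */
	ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
	ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
	ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);

	/* PKTDMA additionally reports its tflow count in cap4 */
	ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
}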
/* UDMA_CHAN_RT_CTL_REG */
#define UDMA_CHAN_RT_CTL_EN BIT(31)
#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
@@ -82,15 +94,20 @@
*/
#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+/* Address Space Select */
+#define K3_ADDRESS_ASEL_SHIFT 48
+
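The ASEL value rides in the otherwise unused top bits of the 64-bit bus address, so tagging an address is a single shift-and-or, as in this one-line sketch (function name illustrative):

/* Sketch: tag a buffer or ring address with its ASEL endpoint. */
static inline dma_addr_t demo_asel_address(dma_addr_t paddr, u32 asel)
{
	return paddr | ((u64)asel << K3_ADDRESS_ASEL_SHIFT);
}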
struct udma_dev;
struct udma_tchan;
struct udma_rchan;
struct udma_rflow;
enum udma_rm_range {
- RM_RANGE_TCHAN = 0,
+ RM_RANGE_BCHAN = 0,
+ RM_RANGE_TCHAN,
RM_RANGE_RCHAN,
RM_RANGE_RFLOW,
+ RM_RANGE_TFLOW,
RM_RANGE_LAST,
};
@@ -112,6 +129,8 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
u32 dst_thread);
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+struct device *xudma_get_device(struct udma_dev *ud);
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
@@ -136,5 +155,10 @@ void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud);
+
+int xudma_is_pktdma(struct udma_dev *ud);
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id);
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id);
#endif /* K3_UDMA_H_ */
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 896f53ec7857..7644fc5c8046 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -10,8 +10,11 @@
#include <linux/bitmap.h>
#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/firmware.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -20,10 +23,14 @@
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/suspend.h>
#include <linux/reboot.h>
#include "ti_sci.h"
+/* Low power mode memory context size */
+#define LPM_CTX_MEM_SIZE 0x80000
+
/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
@@ -84,10 +91,13 @@ struct ti_sci_desc {
* @dev: Device pointer
* @desc: SoC description for this instance
* @nb: Reboot Notifier block
+ * @pm_nb: PM notifier block
* @d: Debugfs file entry
* @debug_region: Memory region where the debug message are available
* @debug_region_size: Debug region size
* @debug_buffer: Buffer allocated to copy debug messages.
+ * @lpm_region: Memory region where the FS Stub LPM Firmware will be stored
+ * @lpm_region_size: LPM region size
* @handle: Instance of TI SCI handle to send to clients.
* @cl: Mailbox Client
* @chan_tx: Transmit mailbox channel
@@ -95,16 +105,25 @@ struct ti_sci_desc {
* @minfo: Message info
* @node: list head
* @host_id: Host ID
+ * @ctx_mem_addr: Low power context memory phys address
+ * @ctx_mem_buf: Low power context memory buffer
+ * @fw_caps: FW/SoC low power capabilities
* @users: Number of users of this instance
+ * @is_suspending: Flag set to indicate that the system is in the suspend path.
+ * @lpm_firmware_loaded: Flag to indicate if LPM firmware has been loaded
+ * @lpm_firmware_name: Name of firmware binary to load from fw search path
*/
struct ti_sci_info {
struct device *dev;
struct notifier_block nb;
+ struct notifier_block pm_nb;
const struct ti_sci_desc *desc;
struct dentry *d;
void __iomem *debug_region;
char *debug_buffer;
size_t debug_region_size;
+ void __iomem *lpm_region;
+ size_t lpm_region_size;
struct ti_sci_handle handle;
struct mbox_client cl;
struct mbox_chan *chan_tx;
@@ -112,14 +131,20 @@ struct ti_sci_info {
struct ti_sci_xfers_info minfo;
struct list_head node;
u8 host_id;
+ dma_addr_t ctx_mem_addr;
+ void *ctx_mem_buf;
+ u64 fw_caps;
/* protected by ti_sci_list_mutex */
int users;
-
+ bool is_suspending;
+ bool lpm_firmware_loaded;
+ const char *lpm_firmware_name;
};
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+#define pm_nb_to_ti_sci_info(n) container_of(n, struct ti_sci_info, pm_nb)
#ifdef CONFIG_DEBUG_FS
@@ -349,6 +374,8 @@ static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
xfer->tx_message.len = tx_message_size;
+ xfer->tx_message.chan_rx = info->chan_rx;
+ xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
xfer->rx_len = (u8)rx_message_size;
reinit_completion(&xfer->done);
@@ -406,6 +433,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
int ret;
int timeout;
struct device *dev = info->dev;
+ bool done_state = true;
ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
if (ret < 0)
@@ -413,13 +441,26 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- /* And we wait for the response. */
- timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
- if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ if (!info->is_suspending) {
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout))
+ ret = -ETIMEDOUT;
+ } else {
+ /*
+ * If we are suspending, we cannot use wait_for_completion_timeout
+ * during noirq phase, so we must manually poll the completion.
+ */
+ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+ done_state, 1,
+ info->desc->max_rx_timeout_ms * 1000,
+ false, &xfer->done);
+ }
+
+ if (ret == -ETIMEDOUT)
dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
- ret = -ETIMEDOUT;
- }
+
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
@@ -1648,6 +1689,232 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
+ * @handle: pointer to TI SCI handle
+ * @mode: Device identifier
+ * @ctx_lo: Low part of address for context save
+ * @ctx_hi: High part of address for context save
+ * @debug_flags: Debug flags to pass to firmware
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
+ u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_prepare_sleep *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
+ req->mode = mode;
+ req->ctx_lo = ctx_lo;
+ req->ctx_hi = ctx_hi;
+ req->debug_flags = debug_flags;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
+ * @handle: Pointer to TI SCI handle
+ * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
+ u64 *fw_caps)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_query_fw_caps *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (fw_caps)
+ *fw_caps = resp->fw_caps;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
+ * @handle: Pointer to TI SCI handle
+ * @source: The wakeup source that woke the SoC from LPM
+ * @timestamp: Timestamp of the wakeup event
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_lpm_wake_reason *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (source)
+ *source = resp->wake_source;
+ if (timestamp)
+ *timestamp = resp->wake_timestamp;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM
+ * @handle: Pointer to TI SCI handle
+ * @state: The desired state of the IO isolation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
+ u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_io_isolation *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
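All of the new LPM commands above reuse the same round trip: allocate an xfer slot, fill the request, send, check the ACK flag, recycle the slot. A condensed sketch of that pattern (struct some_req is a placeholder for any request type; error paths trimmed):

/* Sketch of the recurring TI SCI round trip, not a new API. */
xfer = ti_sci_get_one_xfer(info, msg_type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
			   sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer))
	return PTR_ERR(xfer);

req = (struct some_req *)xfer->xfer_buf;
/* ... fill request fields ... */

ret = ti_sci_do_xfer(info, xfer);		/* send, then wait or poll */
if (!ret) {
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
}

ti_sci_put_one_xfer(&info->minfo, xfer);	/* always recycle the slot */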
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -1674,6 +1941,7 @@ static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
return ret;
}
req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+ req->domain = 0;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -1703,14 +1971,14 @@ fail:
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
struct ti_sci_msg_resp_get_resource_range *resp;
struct ti_sci_msg_req_get_resource_range *req;
@@ -1721,7 +1989,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (!handle)
+ if (!handle || !desc)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
@@ -1751,12 +2019,15 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
- } else if (!resp->range_start && !resp->range_num) {
+ } else if (!resp->range_num && !resp->range_num_sec) {
+ /* Neither of the two resource ranges is valid */
ret = -ENODEV;
} else {
- *range_start = resp->range_start;
- *range_num = resp->range_num;
- };
+ desc->start = resp->range_start;
+ desc->num = resp->range_num;
+ desc->start_sec = resp->range_start_sec;
+ desc->num_sec = resp->range_num_sec;
+ }
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
@@ -1771,18 +2042,18 @@ fail:
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype,
TI_SCI_IRQ_SECONDARY_HOST_INVALID,
- range_start, range_num);
+ desc);
}
/**
@@ -1793,18 +2064,17 @@ static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
- return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
- range_start, range_num);
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
/**
@@ -2047,28 +2317,17 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
}
/**
- * ti_sci_cmd_ring_config() - configure RA ring
- * @handle: Pointer to TI SCI handle.
- * @valid_params: Bitfield defining validity of ring configuration
- * parameters
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: The ring base address lo 32 bits
- * @addr_hi: The ring base address hi 32 bits
- * @count: Number of ring elements
- * @mode: The mode of the ring
- * @size: The ring element size.
- * @order_id: Specifies the ring's bus order ID
+ * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
- * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
+ * more info.
*/
-static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count,
- u8 mode, u8 size, u8 order_id)
+static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params)
{
struct ti_sci_msg_rm_ring_cfg_req *req;
struct ti_sci_msg_hdr *resp;
@@ -2092,15 +2351,17 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
return ret;
}
req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
- req->valid_params = valid_params;
- req->nav_id = nav_id;
- req->index = index;
- req->addr_lo = addr_lo;
- req->addr_hi = addr_hi;
- req->count = count;
- req->mode = mode;
- req->size = size;
- req->order_id = order_id;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->addr_lo = params->addr_lo;
+ req->addr_hi = params->addr_hi;
+ req->count = params->count;
+ req->mode = params->mode;
+ req->size = params->size;
+ req->order_id = params->order_id;
+ req->virtid = params->virtid;
+ req->asel = params->asel;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2109,90 +2370,11 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
- ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
-
-fail:
- ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
- return ret;
-}
-
-/**
- * ti_sci_cmd_ring_get_config() - get RA ring configuration
- * @handle: Pointer to TI SCI handle.
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: Returns ring's base address lo 32 bits
- * @addr_hi: Returns ring's base address hi 32 bits
- * @count: Returns number of ring elements
- * @mode: Returns mode of the ring
- * @size: Returns ring element size
- * @order_id: Returns ring's bus order ID
- *
- * Return: 0 if all went well, else returns appropriate error value.
- *
- * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
- */
-static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi,
- u32 *count, u8 *size, u8 *order_id)
-{
- struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
- struct ti_sci_msg_rm_ring_get_cfg_req *req;
- struct ti_sci_xfer *xfer;
- struct ti_sci_info *info;
- struct device *dev;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(handle))
- return -EINVAL;
-
- info = handle_to_ti_sci_info(handle);
- dev = info->dev;
-
- xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
- sizeof(*req), sizeof(*resp));
- if (IS_ERR(xfer)) {
- ret = PTR_ERR(xfer);
- dev_err(dev,
- "RM_RA:Message get config failed(%d)\n", ret);
- return ret;
- }
- req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
- req->nav_id = nav_id;
- req->index = index;
-
- ret = ti_sci_do_xfer(info, xfer);
- if (ret) {
- dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
- goto fail;
- }
-
- resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- ret = -ENODEV;
- } else {
- if (mode)
- *mode = resp->mode;
- if (addr_lo)
- *addr_lo = resp->addr_lo;
- if (addr_hi)
- *addr_hi = resp->addr_hi;
- if (count)
- *count = resp->count;
- if (size)
- *size = resp->size;
- if (order_id)
- *order_id = resp->order_id;
- };
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
return ret;
}
@@ -2362,6 +2544,8 @@ static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
req->fdepth = params->fdepth;
req->tx_sched_priority = params->tx_sched_priority;
req->tx_burst_size = params->tx_burst_size;
+ req->tx_tdtype = params->tx_tdtype;
+ req->extended_ch_type = params->extended_ch_type;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2873,6 +3057,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_pm_ops *pmops = &ops->pm_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
@@ -2912,6 +3097,10 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+ pmops->prepare_sleep = ti_sci_cmd_prepare_sleep;
+ pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
+ pmops->set_io_isolation = ti_sci_cmd_set_io_isolation;
+
rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
rm_core_ops->get_range_from_shost =
ti_sci_cmd_get_resource_range_from_shost;
@@ -2921,8 +3110,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
- rops->config = ti_sci_cmd_ring_config;
- rops->get_config = ti_sci_cmd_ring_get_config;
+ rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
psilops->pair = ti_sci_cmd_rm_psil_pair;
psilops->unpair = ti_sci_cmd_rm_psil_unpair;
@@ -3157,12 +3345,18 @@ u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- free_bit = find_first_zero_bit(res->desc[set].res_map,
- res->desc[set].num);
- if (free_bit != res->desc[set].num) {
- set_bit(free_bit, res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+ int res_count = desc->num + desc->num_sec;
+
+ free_bit = find_first_zero_bit(desc->res_map, res_count);
+ if (free_bit != res_count) {
+ __set_bit(free_bit, desc->res_map);
raw_spin_unlock_irqrestore(&res->lock, flags);
- return res->desc[set].start + free_bit;
+
+ if (desc->num && free_bit < desc->num)
+ return desc->start + free_bit;
+ else
+ return desc->start_sec + free_bit;
}
}
raw_spin_unlock_irqrestore(&res->lock, flags);
@@ -3183,10 +3377,14 @@ void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- if (res->desc[set].start <= id &&
- (res->desc[set].num + res->desc[set].start) > id)
- clear_bit(id - res->desc[set].start,
- res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+
+ if (desc->num && desc->start <= id &&
+ (desc->start + desc->num) > id)
+ __clear_bit(id - desc->start, desc->res_map);
+ else if (desc->num_sec && desc->start_sec <= id &&
+ (desc->start_sec + desc->num_sec) > id)
+ __clear_bit(id - desc->start_sec, desc->res_map);
}
raw_spin_unlock_irqrestore(&res->lock, flags);
}
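Callers never see the dual-range bookkeeping; they request a free index and hand it back when done. A usage sketch, relying on TI_SCI_RESOURCE_NULL as the standard not-found value from the TI SCI headers (function name illustrative):

/* Sketch: typical allocate/release cycle against a ti_sci_resource. */
static int demo_use_resource(struct ti_sci_resource *res)
{
	u16 slot = ti_sci_get_free_resource(res);

	if (slot == TI_SCI_RESOURCE_NULL)
		return -ENODEV;

	/* ... program hardware with 'slot' ... */

	ti_sci_release_resource(res, slot);
	return 0;
}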
@@ -3203,7 +3401,7 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
u32 set, count = 0;
for (set = 0; set < res->sets; set++)
- count += res->desc[set].num;
+ count += res->desc[set].num + res->desc[set].num_sec;
return count;
}
@@ -3227,7 +3425,7 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
{
struct ti_sci_resource *res;
bool valid_set = false;
- int i, ret;
+ int i, ret, res_count;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
@@ -3242,24 +3440,23 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
for (i = 0; i < res->sets; i++) {
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
sub_types[i],
- &res->desc[i].start,
- &res->desc[i].num);
+ &res->desc[i]);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, sub_types[i]);
- res->desc[i].start = 0;
- res->desc[i].num = 0;
+ memset(&res->desc[i], 0, sizeof(res->desc[i]));
continue;
}
- dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
dev_id, sub_types[i], res->desc[i].start,
- res->desc[i].num);
+ res->desc[i].num, res->desc[i].start_sec,
+ res->desc[i].num_sec);
valid_set = true;
- res->desc[i].res_map =
- devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
- sizeof(*res->desc[i].res_map), GFP_KERNEL);
+ res_count = res->desc[i].num + res->desc[i].num_sec;
+ res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
+ GFP_KERNEL);
if (!res->desc[i].res_map)
return ERR_PTR(-ENOMEM);
}
@@ -3339,6 +3536,176 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
return NOTIFY_BAD;
}
+static int ti_sci_load_lpm_firmware(struct device *dev, struct ti_sci_info *info)
+{
+ const struct firmware *firmware;
+ int ret = 0;
+
+ /* If no firmware name is set, do not attempt to load. */
+ if (!info->lpm_firmware_name)
+ return -EINVAL;
+
+ ret = request_firmware_direct(&firmware, info->lpm_firmware_name, dev);
+ if (ret) {
+ dev_warn(dev, "Cannot load %s\n", info->lpm_firmware_name);
+ return ret;
+ }
+
+ if (firmware->size > info->lpm_region_size) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+
+ memcpy_toio(info->lpm_region, firmware->data, firmware->size);
+
+ release_firmware(firmware);
+
+ return ret;
+}
+
+static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+{
+ info->is_suspending = is_suspending;
+}
+
+static int __maybe_unused ti_sci_prepare_system_suspend(struct ti_sci_info *info)
+{
+#if IS_ENABLED(CONFIG_SUSPEND)
+ u8 mode;
+
+ /* Map and validate the target Linux suspend state to TISCI LPM. */
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_MEM:
+ /* S2MEM is not supported by the firmware. */
+ if (!(info->fw_caps & MSG_FLAG_CAPS_LPM_DEEP_SLEEP))
+ return 0;
+ /* S2MEM can't continue if the LPM firmware is not loaded. */
+ if (!info->lpm_firmware_loaded)
+ return -EINVAL;
+ mode = TISCI_MSG_VALUE_SLEEP_MODE_DEEP_SLEEP;
+ break;
+ default:
+ /*
+ * Do not fail if we don't have an action to take for a
+ * specific suspend mode.
+ */
+ return 0;
+ }
+
+ return ti_sci_cmd_prepare_sleep(&info->handle, mode,
+ (u32)(info->ctx_mem_addr & 0xffffffff),
+ (u32)((u64)info->ctx_mem_addr >> 32), 0);
+#else
+ return 0;
+#endif
+}
+
+static int __maybe_unused ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ti_sci_prepare_system_suspend(info);
+ if (ret)
+ return ret;
+ /*
+ * We must switch operation to polled mode now as drivers and the genpd
+ * layer may make late TI SCI calls to change clock and device states
+ * from the noirq phase of suspend.
+ */
+ ti_sci_set_is_suspending(info, true);
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_resume(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ ti_sci_set_is_suspending(info, false);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+
+static int tisci_pm_handler(struct notifier_block *nb, unsigned long pm_event,
+ void *unused)
+{
+ struct ti_sci_info *info = pm_nb_to_ti_sci_info(nb);
+ int ret;
+
+ /* Load the LPM firmware on PM_SUSPEND_PREPARE if not loaded yet */
+ if (pm_event != PM_SUSPEND_PREPARE || info->lpm_firmware_loaded)
+ return NOTIFY_DONE;
+
+ ret = ti_sci_load_lpm_firmware(info->dev, info);
+ if (ret) {
+ dev_err(info->dev, "Failed to load LPM firmware (%d)\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ info->lpm_firmware_loaded = true;
+
+ return NOTIFY_OK;
+}
+
+static int ti_sci_init_suspend(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ info->ctx_mem_buf = dma_alloc_coherent(info->dev, LPM_CTX_MEM_SIZE,
+ &info->ctx_mem_addr,
+ GFP_KERNEL);
+ if (!info->ctx_mem_buf) {
+ dev_err(info->dev, "Failed to allocate LPM context memory\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpm");
+ if (!res) {
+ dev_warn(dev,
+ "lpm region is required for suspend but not provided.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ info->lpm_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->lpm_region)) {
+ ret = PTR_ERR(info->lpm_region);
+ goto err;
+ }
+ info->lpm_region_size = resource_size(res);
+
+ if (of_property_read_string(dev->of_node, "firmware-name",
+ &info->lpm_firmware_name)) {
+ dev_warn(dev,
+ "firmware-name is required for suspend but not provided.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ info->pm_nb.notifier_call = tisci_pm_handler;
+ info->pm_nb.priority = 128;
+
+ ret = register_pm_notifier(&info->pm_nb);
+ if (ret) {
+ dev_err(dev, "pm_notifier registration fail (%d)\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ dma_free_coherent(info->dev, LPM_CTX_MEM_SIZE,
+ info->ctx_mem_buf,
+ info->ctx_mem_addr);
+ return ret;
+}
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3427,13 +3794,11 @@ static int ti_sci_probe(struct platform_device *pdev)
if (!minfo->xfer_block)
return -ENOMEM;
- minfo->xfer_alloc_table = devm_kcalloc(dev,
- BITS_TO_LONGS(desc->max_msgs),
- sizeof(unsigned long),
- GFP_KERNEL);
+ minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
+ desc->max_msgs,
+ GFP_KERNEL);
if (!minfo->xfer_alloc_table)
return -ENOMEM;
- bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
/* Pre-initialize the buffer pointer to pre-allocated buffers */
for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
@@ -3487,10 +3852,19 @@ static int ti_sci_probe(struct platform_device *pdev)
ret = register_restart_handler(&info->nb);
if (ret) {
dev_err(dev, "reboot registration fail(%d)\n", ret);
- return ret;
+ goto out;
}
}
+ /*
+ * Check if the firmware supports any optional low power modes
+ * and initialize them if present. Old revisions of TIFS (< 08.04)
+ * will NACK the request.
+ */
+ ret = ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
+ if (!ret && (info->fw_caps & MSG_MASK_CAPS_LPM))
+ ti_sci_init_suspend(pdev, info);
+
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
@@ -3520,6 +3894,9 @@ static int ti_sci_remove(struct platform_device *pdev)
info = platform_get_drvdata(pdev);
+ if (info->pm_nb.notifier_call)
+ unregister_pm_notifier(&info->pm_nb);
+
if (info->nb.notifier_call)
unregister_restart_handler(&info->nb);
@@ -3538,6 +3915,10 @@ static int ti_sci_remove(struct platform_device *pdev)
mbox_free_channel(info->chan_rx);
}
+ if (info->ctx_mem_buf)
+ dma_free_coherent(info->dev, LPM_CTX_MEM_SIZE,
+ info->ctx_mem_buf,
+ info->ctx_mem_addr);
return ret;
}
@@ -3547,6 +3928,7 @@ static struct platform_driver ti_sci_driver = {
.driver = {
.name = "ti-sci",
.of_match_table = of_match_ptr(ti_sci_of_match),
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 57cd04062994..9ecd2067094b 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: http://processors.wiki.ti.com/index.php/TISCI for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
@@ -19,6 +19,7 @@
#define TI_SCI_MSG_WAKE_REASON 0x0003
#define TI_SCI_MSG_GOODBYE 0x0004
#define TI_SCI_MSG_SYS_RESET 0x0005
+#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022
/* Device requests */
#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
@@ -35,6 +36,11 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Low Power Mode Requests */
+#define TI_SCI_MSG_PREPARE_SLEEP 0x0300
+#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306
+#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
+
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -49,7 +55,6 @@
#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
#define TI_SCI_MSG_RM_RING_RESET 0x1103
#define TI_SCI_MSG_RM_RING_CFG 0x1110
-#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
/* PSI-L requests */
#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
@@ -125,12 +130,39 @@ struct ti_sci_msg_resp_version {
/**
* struct ti_sci_msg_req_reboot - Reboot the SoC
* @hdr: Generic Header
+ * @domain: Domain to be reset, 0 for full SoC reboot
*
* Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
* ACK/NACK message.
*/
struct ti_sci_msg_req_reboot {
struct ti_sci_msg_hdr hdr;
+ u8 domain;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps
+ * @hdr: Generic header
+ * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability
+ * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
+ * MSG_FLAG_CAPS_LPM_DEEP_SLEEP: Deep Sleep LPM
+ * MSG_FLAG_CAPS_LPM_MCU_ONLY: MCU only LPM
+ * MSG_FLAG_CAPS_LPM_STANDBY: Standby LPM
+ * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
+ *
+ * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
+ * providing currently available SOC/firmware capabilities. SoC that don't
+ * support low power modes return only MSG_FLAG_CAPS_GENERIC capability.
+ */
+struct ti_sci_msg_resp_query_fw_caps {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_CAPS_LPM_DEEP_SLEEP TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_CAPS_LPM_MCU_ONLY TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_CAPS_LPM_STANDBY TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
+#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
+ u64 fw_caps;
} __packed;
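Consumers are expected to mask the returned word before trusting any individual LPM flag, as the probe path does with MSG_MASK_CAPS_LPM; a small sketch (function name illustrative):

/* Sketch: gate Deep Sleep support on the queried capability word. */
static bool demo_can_deep_sleep(u64 fw_caps)
{
	return (fw_caps & MSG_MASK_CAPS_LPM) &&
	       (fw_caps & MSG_FLAG_CAPS_LPM_DEEP_SLEEP);
}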
/**
@@ -546,6 +578,63 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+#define TISCI_MSG_VALUE_SLEEP_MODE_DEEP_SLEEP 0x0
+#define TISCI_MSG_VALUE_SLEEP_MODE_MCU_ONLY 0x1
+#define TISCI_MSG_VALUE_SLEEP_MODE_STANDBY 0x2
+
+/**
+ * struct ti_sci_msg_req_prepare_sleep - Request for TI_SCI_MSG_PREPARE_SLEEP.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @mode: Low power mode to enter.
+ * @ctx_lo: Low 32 bits of the physical address to use for context save.
+ * @ctx_hi: High 32 bits of the physical address to use for context save.
+ * @debug_flags: Flags that can be set to halt the sequence during suspend or
+ * resume to allow JTAG connection and debug.
+ *
+ * This message is used as the first step of entering a low power mode. It
+ * allows configurable information, including which state to enter, to be
+ * easily shared from the application, as this is a non-secure message and
+ */
+struct ti_sci_msg_req_prepare_sleep {
+ struct ti_sci_msg_hdr hdr;
+ u8 mode;
+ u32 ctx_lo;
+ u32 ctx_hi;
+ u32 debug_flags;
+} __packed;
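The context-save area is a 64-bit DMA address split across @ctx_lo and @ctx_hi; a sketch of a Deep Sleep request using the kernel's split helpers (function name illustrative):

/* Sketch: requesting Deep Sleep with the context area allocated at probe. */
static int demo_enter_deep_sleep(struct ti_sci_info *info)
{
	dma_addr_t ctx = info->ctx_mem_addr;

	return ti_sci_cmd_prepare_sleep(&info->handle,
					TISCI_MSG_VALUE_SLEEP_MODE_DEEP_SLEEP,
					lower_32_bits(ctx), upper_32_bits(ctx),
					0);
}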
+
+/**
+ * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON.
+ *
+ * @hdr: Generic header.
+ * @wake_source: The wakeup source that woke the SoC from LPM.
+ * @wake_timestamp: Timestamp at which the SoC woke.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON,
+ * used to query the wake up source from low power mode.
+ */
+struct ti_sci_msg_resp_lpm_wake_reason {
+ struct ti_sci_msg_hdr hdr;
+ u32 wake_source;
+ u64 wake_timestamp;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_io_isolation - Request for TI_SCI_MSG_SET_IO_ISOLATION.
+ *
+ * @hdr: Generic header
+ * @state: The desired state of the IO isolation.
+ *
+ * This message is used to enable/disable IO isolation for low power modes.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_io_isolation {
+ struct ti_sci_msg_hdr hdr;
+ u8 state;
+} __packed;
+
#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
/**
@@ -574,8 +663,10 @@ struct ti_sci_msg_req_get_resource_range {
/**
* struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
* @hdr: Generic Header
- * @range_start: Start index of the resource range.
- * @range_num: Number of resources in the range.
+ * @range_start: Start index of the first resource range.
+ * @range_num: Number of resources in the first range.
+ * @range_start_sec: Start index of the second resource range.
+ * @range_num_sec: Number of resources in the second range.
*
* Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
*/
@@ -583,6 +674,8 @@ struct ti_sci_msg_resp_get_resource_range {
struct ti_sci_msg_hdr hdr;
u16 range_start;
u16 range_num;
+ u16 range_start_sec;
+ u16 range_num_sec;
} __packed;
/**
@@ -656,6 +749,8 @@ struct ti_sci_msg_req_manage_irq {
* 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
* 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
* 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * 6 - Valid bit for @tisci_msg_rm_ring_cfg_req virtid
+ * 7 - Valid bit for @tisci_msg_rm_ring_cfg_req ASEL
* @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
* @index: ring index to be configured.
* @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
@@ -669,6 +764,9 @@ struct ti_sci_msg_req_manage_irq {
* the formula (log2(size_bytes) - 2), where size_bytes cannot be
* greater than 256.
* @order_id: Specifies the ring's bus order ID.
+ * @virtid: Ring virt ID value
+ * @asel: Ring ASEL (address select) value to be set into the ASEL field of the
+ * ring's RING_BA_HI register.
*/
struct ti_sci_msg_rm_ring_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -681,49 +779,8 @@ struct ti_sci_msg_rm_ring_cfg_req {
u8 mode;
u8 size;
u8 order_id;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
- *
- * Gets the configuration of the non-real-time register fields of a ring. The
- * host, or a supervisor of the host, who owns the ring must be the requesting
- * host. The values of the non-real-time registers are returned in
- * @ti_sci_msg_rm_ring_get_cfg_resp.
- *
- * @hdr: Generic Header
- * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
- * @index: ring index.
- */
-struct ti_sci_msg_rm_ring_get_cfg_req {
- struct ti_sci_msg_hdr hdr;
- u16 nav_id;
- u16 index;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
- *
- * Response received by host processor after RM has handled
- * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
- * non-real-time register values.
- *
- * @hdr: Generic Header
- * @addr_lo: Ring 32 LSBs of base address
- * @addr_hi: Ring 16 MSBs of base address.
- * @count: Ring number of elements.
- * @mode: Ring mode.
- * @size: encoded Ring element size
- * @order_id: ing order ID.
- */
-struct ti_sci_msg_rm_ring_get_cfg_resp {
- struct ti_sci_msg_hdr hdr;
- u32 addr_lo;
- u32 addr_hi;
- u32 count;
- u8 mode;
- u8 size;
- u8 order_id;
+ u16 virtid;
+ u8 asel;
} __packed;
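Only fields whose valid bits are set get applied, with the new @virtid and @asel fields claiming bits 6 and 7 per the list above. A sketch of a caller filling the abstracted ti_sci_msg_rm_ring_cfg and invoking the new set_cfg op (parameter values and function name illustrative):

/* Sketch: configure ring mode, element size and ASEL via the single op. */
static int demo_ring_cfg(const struct ti_sci_handle *handle, u16 nav_id,
			 u16 ring_id, u8 mode, u8 elsize_enc, u8 asel)
{
	struct ti_sci_msg_rm_ring_cfg params = {
		.valid_params = BIT(3) | BIT(4) | BIT(7), /* mode, size, asel */
		.nav_id = nav_id,
		.index = ring_id,
		.mode = mode,
		.size = elsize_enc,	/* (log2(size_bytes) - 2) encoding */
		.asel = asel,
	};

	return handle->ops.rm_ring_ops.set_cfg(handle, &params);
}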
/**
@@ -910,6 +967,8 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
* 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
* 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
* 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ * 15 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_tdtype
+ * 16 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::extended_ch_type
*
* @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
*
@@ -973,6 +1032,15 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
*
* @tx_burst_size: UDMAP transmit channel burst size configuration to be
* programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ *
+ * @tx_tdtype: UDMAP transmit channel teardown type configuration to be
+ * programmed into the tdtype field of the TCHAN_TCFG register:
+ * 0 - Return immediately
+ * 1 - Wait for completion message from remote peer
+ *
+ * @extended_ch_type: Valid for BCDMA.
+ * 0 - the channel is a split tx channel (tchan)
+ * 1 - the channel is a block copy channel (bchan)
*/
struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -994,6 +1062,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
} __packed;
/**
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d1300fc003ed..f6312ff11a0f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -218,7 +218,7 @@ config GPIO_CLPS711X
Say yes here to support GPIO on CLPS711X SoCs.
config GPIO_DAVINCI
- bool "TI Davinci/Keystone GPIO support"
+ tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
depends on (ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)
help
@@ -1319,6 +1319,13 @@ config GPIO_TPS65912
help
This driver supports TPS65912 gpio chip
+config GPIO_TPS6594X
+ tristate "TI TPS6594X GPIO driver"
+ depends on MFD_TPS6594X
+ help
+ Select this option to enable GPIO driver for the TPS6594X
+ PMIC chip family. There are 11 GPIOs that can be configured.
+
config GPIO_TPS68470
bool "TPS68470 GPIO"
depends on MFD_TPS68470
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 09dada80ac34..226039b7d6ae 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -151,6 +151,7 @@ obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TPS6594X) += gpio-tps6594x.o
obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o
obj-$(CONFIG_GPIO_TQMX86) += gpio-tqmx86.o
obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 6f2138503726..0dca22c8d283 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -22,6 +22,7 @@
#include <linux/platform_data/gpio-davinci.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#include <asm-generic/gpio.h>
@@ -62,6 +63,8 @@ struct davinci_gpio_controller {
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
int irqs[MAX_INT_PER_BANK];
+ struct davinci_gpio_regs context[MAX_REGS_BANKS];
+ u32 binten_context;
};
static inline u32 __gpio_mask(unsigned gpio)
@@ -624,6 +627,85 @@ done:
return 0;
}
+static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+ chips->binten_context = readl_relaxed(base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ context->dir = readl_relaxed(&g->dir);
+ context->set_data = readl_relaxed(&g->set_data);
+ context->set_rising = readl_relaxed(&g->set_rising);
+ context->set_falling = readl_relaxed(&g->set_falling);
+ }
+
+ /* Clear Bank interrupt enable bit */
+ writel_relaxed(0, base + BINTEN);
+
+ /* Clear all interrupt status registers */
+ writel_relaxed(GENMASK(31, 0), &g->intstat);
+}
+
+static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+
+ if (readl_relaxed(base + BINTEN) != chips->binten_context)
+ writel_relaxed(chips->binten_context, base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ if (readl_relaxed(&g->dir) != context->dir)
+ writel_relaxed(context->dir, &g->dir);
+ if (readl_relaxed(&g->set_data) != context->set_data)
+ writel_relaxed(context->set_data, &g->set_data);
+ if (readl_relaxed(&g->set_rising) != context->set_rising)
+ writel_relaxed(context->set_rising, &g->set_rising);
+ if (readl_relaxed(&g->set_falling) != context->set_falling)
+ writel_relaxed(context->set_falling, &g->set_falling);
+ }
+}
+
+static int __maybe_unused davinci_gpio_suspend(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_save_context(chips, nbank);
+
+ return 0;
+}
+
+static int __maybe_unused davinci_gpio_resume(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_restore_context(chips, nbank);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(davinci_gpio_dev_pm_ops, davinci_gpio_suspend,
+ davinci_gpio_resume);
+
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,am654-gpio", keystone_gpio_get_irq_chip},
@@ -636,6 +718,7 @@ static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
.name = "davinci_gpio",
+ .pm = &davinci_gpio_dev_pm_ops,
.of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
@@ -649,3 +732,14 @@ static int __init davinci_gpio_drv_reg(void)
return platform_driver_register(&davinci_gpio_driver);
}
postcore_initcall(davinci_gpio_drv_reg);
+
+static void __exit davinci_gpio_exit(void)
+{
+ platform_driver_unregister(&davinci_gpio_driver);
+}
+module_exit(davinci_gpio_exit);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("DAVINCI GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-davinci");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index a7e8ed5191a8..56152263ab38 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1050,11 +1050,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irq->first = irq_base;
ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
diff --git a/drivers/gpio/gpio-tps6594x.c b/drivers/gpio/gpio-tps6594x.c
new file mode 100644
index 000000000000..f530ac17f73f
--- /dev/null
+++ b/drivers/gpio/gpio-tps6594x.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPIO driver for TI TPS6594x PMICs
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/tps6594x.h>
+
+#define GPIO_CFG_MASK BIT(0)
+#define NGPIOS_PER_REG 8
+
+struct tps6594x_gpio {
+ struct gpio_chip gpio_chip;
+ struct tps6594x *tps;
+};
+
+static int tps6594x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps6594x_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ ret = regmap_read(gpio->tps->regmap, TPS6594X_GPIO1_CONF + offset, &val);
+ if (ret)
+ return ret;
+
+ if (val & GPIO_CFG_MASK)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int tps6594x_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps6594x_gpio *gpio = gpiochip_get_data(gc);
+
+ return regmap_update_bits(gpio->tps->regmap, TPS6594X_GPIO1_CONF + offset,
+ GPIO_CFG_MASK, 0);
+}
+
+static int tps6594x_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct tps6594x_gpio *gpio = gpiochip_get_data(gc);
+ unsigned int reg = TPS6594X_GPIO_OUT_1, shift = offset;
+
+ if (shift >= NGPIOS_PER_REG) {
+ reg = TPS6594X_GPIO_OUT_2;
+ shift -= NGPIOS_PER_REG;
+ }
+
+ regmap_update_bits(gpio->tps->regmap, reg, BIT(shift), value ? BIT(shift) : 0);
+
+ return regmap_update_bits(gpio->tps->regmap, TPS6594X_GPIO1_CONF + offset,
+ GPIO_CFG_MASK, GPIO_CFG_MASK);
+}
+
+static int tps6594x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps6594x_gpio *gpio = gpiochip_get_data(gc);
+ unsigned int reg = TPS6594X_GPIO_IN_1;
+ int ret, val;
+
+ if (offset >= NGPIOS_PER_REG) {
+ reg = TPS6594X_GPIO_IN_2;
+ offset -= NGPIOS_PER_REG;
+ }
+
+ ret = regmap_read(gpio->tps->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void tps6594x_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct tps6594x_gpio *gpio = gpiochip_get_data(gc);
+ unsigned int reg = TPS6594X_GPIO_OUT_1;
+
+ if (offset >= NGPIOS_PER_REG) {
+ reg = TPS6594X_GPIO_OUT_2;
+ offset -= NGPIOS_PER_REG;
+ }
+
+ regmap_update_bits(gpio->tps->regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+}
+
+static const struct gpio_chip template_chip = {
+ .label = "tps6594x-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps6594x_gpio_get_direction,
+ .direction_input = tps6594x_gpio_direction_input,
+ .direction_output = tps6594x_gpio_direction_output,
+ .get = tps6594x_gpio_get,
+ .set = tps6594x_gpio_set,
+ .base = -1,
+ .ngpio = 11,
+ .can_sleep = true,
+};
+
+static int tps6594x_gpio_probe(struct platform_device *pdev)
+{
+ struct tps6594x *tps = dev_get_drvdata(pdev->dev.parent);
+ struct tps6594x_gpio *gpio;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->tps = dev_get_drvdata(pdev->dev.parent);
+ gpio->gpio_chip = template_chip;
+ gpio->gpio_chip.parent = tps->dev;
+
+ return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
+}
+
+static const struct of_device_id of_tps6594x_gpio_match[] = {
+ { .compatible = "ti,tps6594x-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_tps6594x_gpio_match);
+
+static struct platform_driver tps6594x_gpio_driver = {
+ .driver = {
+ .name = "tps6594x-gpio",
+ .of_match_table = of_match_ptr(of_tps6594x_gpio_match),
+ },
+ .probe = tps6594x_gpio_probe,
+};
+module_platform_driver(tps6594x_gpio_driver);
+
+MODULE_ALIAS("platform:tps6594x-gpio");
+MODULE_AUTHOR("Matt Ranostay <mranostay@ti.com>");
+MODULE_DESCRIPTION("TPS6594X GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3e01a3ac652d..0cd940803962 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -783,9 +783,11 @@ err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
- gdev->base, gdev->base + gdev->ngpio - 1,
- gc->label ? : "generic", ret);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
+ gdev->base, gdev->base + gdev->ngpio - 1,
+ gc->label ? : "generic", ret);
+ }
kfree(gdev);
return ret;
}
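
The quieter gpiolib message reflects that -EPROBE_DEFER is a transient,
retried condition rather than a failure. A hedged consumer-side sketch,
with a hypothetical "foo" driver:

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset = devm_gpiod_get(&pdev->dev, "reset",
						 GPIOD_OUT_LOW);

	if (IS_ERR(reset))
		/* may be -EPROBE_DEFER; the driver core re-probes later */
		return PTR_ERR(reset);

	return 0;
}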
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 4e82647a621e..0f42f9e9904f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,17 +16,6 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
-config DRM_CDNS_DSI
- tristate "Cadence DPI/DSI bridge"
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
- select GENERIC_PHY_MIPI_DPHY
- depends on OF
- help
- Support Cadence DPI to DSI bridge. This is an internal
- bridge and is meant to be directly embedded in a SoC.
-
config DRM_CHRONTEL_CH7033
tristate "Chrontel CH7033 Video Encoder"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2b3aff104e46..7d50b4f34c20 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index ef8c230e0f62..b0d2c918c627 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -22,3 +22,24 @@ config DRM_CDNS_MHDP8546_J721E
initializes the J721E Display Port and sets up the
clock and data muxes.
endif
+
+config DRM_CDNS_DSI
+ tristate "Cadence DPI/DSI bridge"
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ depends on OF
+ help
+ Support Cadence DPI to DSI bridge. This is an internal
+ bridge and is meant to be directly embedded in a SoC.
+
+if DRM_CDNS_DSI
+
+config DRM_CDNS_DSI_J721E
+ bool "J721E Cadence DPI/DSI wrapper support"
+ default y
+ help
+	  Support the J721E Cadence DPI/DSI wrapper. The wrapper selects
+	  which DPI input is routed to the bridge.
+endif
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
index 8f647991b374..0e54764a7e63 100644
--- a/drivers/gpu/drm/bridge/cadence/Makefile
+++ b/drivers/gpu/drm/bridge/cadence/Makefile
@@ -2,3 +2,6 @@
obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
cdns-mhdp8546-y := cdns-mhdp8546-core.o
cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
+obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+cdns-dsi-y := cdns-dsi-core.o
+cdns-dsi-$(CONFIG_DRM_CDNS_DSI_J721E) += cdns-dsi-j721e.o
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index 0ced08d81d7a..aab961daf7c7 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -6,10 +6,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
@@ -18,452 +15,16 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-mipi-dphy.h>
-
-#define IP_CONF 0x0
-#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
-#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
-#define VRS_FIFO_DEPTH(x) (((x) & GENMASK(20, 16)) >> 16)
-#define DIRCMD_FIFO_DEPTH(x) (((x) & GENMASK(15, 13)) >> 13)
-#define SDI_IFACE_32 BIT(12)
-#define INTERNAL_DATAPATH_32 (0 << 10)
-#define INTERNAL_DATAPATH_16 (1 << 10)
-#define INTERNAL_DATAPATH_8 (3 << 10)
-#define INTERNAL_DATAPATH_SIZE ((x) & GENMASK(11, 10))
-#define NUM_IFACE(x) ((((x) & GENMASK(9, 8)) >> 8) + 1)
-#define MAX_LANE_NB(x) (((x) & GENMASK(7, 6)) >> 6)
-#define RX_FIFO_DEPTH(x) ((x) & GENMASK(5, 0))
-
-#define MCTL_MAIN_DATA_CTL 0x4
-#define TE_MIPI_POLLING_EN BIT(25)
-#define TE_HW_POLLING_EN BIT(24)
-#define DISP_EOT_GEN BIT(18)
-#define HOST_EOT_GEN BIT(17)
-#define DISP_GEN_CHECKSUM BIT(16)
-#define DISP_GEN_ECC BIT(15)
-#define BTA_EN BIT(14)
-#define READ_EN BIT(13)
-#define REG_TE_EN BIT(12)
-#define IF_TE_EN(x) BIT(8 + (x))
-#define TVG_SEL BIT(6)
-#define VID_EN BIT(5)
-#define IF_VID_SELECT(x) ((x) << 2)
-#define IF_VID_SELECT_MASK GENMASK(3, 2)
-#define IF_VID_MODE BIT(1)
-#define LINK_EN BIT(0)
-
-#define MCTL_MAIN_PHY_CTL 0x8
-#define HS_INVERT_DAT(x) BIT(19 + ((x) * 2))
-#define SWAP_PINS_DAT(x) BIT(18 + ((x) * 2))
-#define HS_INVERT_CLK BIT(17)
-#define SWAP_PINS_CLK BIT(16)
-#define HS_SKEWCAL_EN BIT(15)
-#define WAIT_BURST_TIME(x) ((x) << 10)
-#define DATA_ULPM_EN(x) BIT(6 + (x))
-#define CLK_ULPM_EN BIT(5)
-#define CLK_CONTINUOUS BIT(4)
-#define DATA_LANE_EN(x) BIT((x) - 1)
-
-#define MCTL_MAIN_EN 0xc
-#define DATA_FORCE_STOP BIT(17)
-#define CLK_FORCE_STOP BIT(16)
-#define IF_EN(x) BIT(13 + (x))
-#define DATA_LANE_ULPM_REQ(l) BIT(9 + (l))
-#define CLK_LANE_ULPM_REQ BIT(8)
-#define DATA_LANE_START(x) BIT(4 + (x))
-#define CLK_LANE_EN BIT(3)
-#define PLL_START BIT(0)
-
-#define MCTL_DPHY_CFG0 0x10
-#define DPHY_C_RSTB BIT(20)
-#define DPHY_D_RSTB(x) GENMASK(15 + (x), 16)
-#define DPHY_PLL_PDN BIT(10)
-#define DPHY_CMN_PDN BIT(9)
-#define DPHY_C_PDN BIT(8)
-#define DPHY_D_PDN(x) GENMASK(3 + (x), 4)
-#define DPHY_ALL_D_PDN GENMASK(7, 4)
-#define DPHY_PLL_PSO BIT(1)
-#define DPHY_CMN_PSO BIT(0)
-
-#define MCTL_DPHY_TIMEOUT1 0x14
-#define HSTX_TIMEOUT(x) ((x) << 4)
-#define HSTX_TIMEOUT_MAX GENMASK(17, 0)
-#define CLK_DIV(x) (x)
-#define CLK_DIV_MAX GENMASK(3, 0)
-
-#define MCTL_DPHY_TIMEOUT2 0x18
-#define LPRX_TIMEOUT(x) (x)
-
-#define MCTL_ULPOUT_TIME 0x1c
-#define DATA_LANE_ULPOUT_TIME(x) ((x) << 9)
-#define CLK_LANE_ULPOUT_TIME(x) (x)
-
-#define MCTL_3DVIDEO_CTL 0x20
-#define VID_VSYNC_3D_EN BIT(7)
-#define VID_VSYNC_3D_LR BIT(5)
-#define VID_VSYNC_3D_SECOND_EN BIT(4)
-#define VID_VSYNC_3DFORMAT_LINE (0 << 2)
-#define VID_VSYNC_3DFORMAT_FRAME (1 << 2)
-#define VID_VSYNC_3DFORMAT_PIXEL (2 << 2)
-#define VID_VSYNC_3DMODE_OFF 0
-#define VID_VSYNC_3DMODE_PORTRAIT 1
-#define VID_VSYNC_3DMODE_LANDSCAPE 2
-
-#define MCTL_MAIN_STS 0x24
-#define MCTL_MAIN_STS_CTL 0x130
-#define MCTL_MAIN_STS_CLR 0x150
-#define MCTL_MAIN_STS_FLAG 0x170
-#define HS_SKEWCAL_DONE BIT(11)
-#define IF_UNTERM_PKT_ERR(x) BIT(8 + (x))
-#define LPRX_TIMEOUT_ERR BIT(7)
-#define HSTX_TIMEOUT_ERR BIT(6)
-#define DATA_LANE_RDY(l) BIT(2 + (l))
-#define CLK_LANE_RDY BIT(1)
-#define PLL_LOCKED BIT(0)
-
-#define MCTL_DPHY_ERR 0x28
-#define MCTL_DPHY_ERR_CTL1 0x148
-#define MCTL_DPHY_ERR_CLR 0x168
-#define MCTL_DPHY_ERR_FLAG 0x188
-#define ERR_CONT_LP(x, l) BIT(18 + ((x) * 4) + (l))
-#define ERR_CONTROL(l) BIT(14 + (l))
-#define ERR_SYNESC(l) BIT(10 + (l))
-#define ERR_ESC(l) BIT(6 + (l))
-
-#define MCTL_DPHY_ERR_CTL2 0x14c
-#define ERR_CONT_LP_EDGE(x, l) BIT(12 + ((x) * 4) + (l))
-#define ERR_CONTROL_EDGE(l) BIT(8 + (l))
-#define ERR_SYN_ESC_EDGE(l) BIT(4 + (l))
-#define ERR_ESC_EDGE(l) BIT(0 + (l))
-
-#define MCTL_LANE_STS 0x2c
-#define PPI_C_TX_READY_HS BIT(18)
-#define DPHY_PLL_LOCK BIT(17)
-#define PPI_D_RX_ULPS_ESC(x) (((x) & GENMASK(15, 12)) >> 12)
-#define LANE_STATE_START 0
-#define LANE_STATE_IDLE 1
-#define LANE_STATE_WRITE 2
-#define LANE_STATE_ULPM 3
-#define LANE_STATE_READ 4
-#define DATA_LANE_STATE(l, val) \
- (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0))
-#define CLK_LANE_STATE_HS 2
-#define CLK_LANE_STATE(val) ((val) & GENMASK(1, 0))
-
-#define DSC_MODE_CTL 0x30
-#define DSC_MODE_EN BIT(0)
-
-#define DSC_CMD_SEND 0x34
-#define DSC_SEND_PPS BIT(0)
-#define DSC_EXECUTE_QUEUE BIT(1)
-
-#define DSC_PPS_WRDAT 0x38
-
-#define DSC_MODE_STS 0x3c
-#define DSC_PPS_DONE BIT(1)
-#define DSC_EXEC_DONE BIT(2)
-
-#define CMD_MODE_CTL 0x70
-#define IF_LP_EN(x) BIT(9 + (x))
-#define IF_VCHAN_ID(x, c) ((c) << ((x) * 2))
-
-#define CMD_MODE_CTL2 0x74
-#define TE_TIMEOUT(x) ((x) << 11)
-#define FILL_VALUE(x) ((x) << 3)
-#define ARB_IF_WITH_HIGHEST_PRIORITY(x) ((x) << 1)
-#define ARB_ROUND_ROBIN_MODE BIT(0)
-
-#define CMD_MODE_STS 0x78
-#define CMD_MODE_STS_CTL 0x134
-#define CMD_MODE_STS_CLR 0x154
-#define CMD_MODE_STS_FLAG 0x174
-#define ERR_IF_UNDERRUN(x) BIT(4 + (x))
-#define ERR_UNWANTED_READ BIT(3)
-#define ERR_TE_MISS BIT(2)
-#define ERR_NO_TE BIT(1)
-#define CSM_RUNNING BIT(0)
-
-#define DIRECT_CMD_SEND 0x80
-
-#define DIRECT_CMD_MAIN_SETTINGS 0x84
-#define TRIGGER_VAL(x) ((x) << 25)
-#define CMD_LP_EN BIT(24)
-#define CMD_SIZE(x) ((x) << 16)
-#define CMD_VCHAN_ID(x) ((x) << 14)
-#define CMD_DATATYPE(x) ((x) << 8)
-#define CMD_LONG BIT(3)
-#define WRITE_CMD 0
-#define READ_CMD 1
-#define TE_REQ 4
-#define TRIGGER_REQ 5
-#define BTA_REQ 6
-
-#define DIRECT_CMD_STS 0x88
-#define DIRECT_CMD_STS_CTL 0x138
-#define DIRECT_CMD_STS_CLR 0x158
-#define DIRECT_CMD_STS_FLAG 0x178
-#define RCVD_ACK_VAL(val) ((val) >> 16)
-#define RCVD_TRIGGER_VAL(val) (((val) & GENMASK(14, 11)) >> 11)
-#define READ_COMPLETED_WITH_ERR BIT(10)
-#define BTA_FINISHED BIT(9)
-#define BTA_COMPLETED BIT(8)
-#define TE_RCVD BIT(7)
-#define TRIGGER_RCVD BIT(6)
-#define ACK_WITH_ERR_RCVD BIT(5)
-#define ACK_RCVD BIT(4)
-#define READ_COMPLETED BIT(3)
-#define TRIGGER_COMPLETED BIT(2)
-#define WRITE_COMPLETED BIT(1)
-#define SENDING_CMD BIT(0)
-
-#define DIRECT_CMD_STOP_READ 0x8c
-
-#define DIRECT_CMD_WRDATA 0x90
-
-#define DIRECT_CMD_FIFO_RST 0x94
-
-#define DIRECT_CMD_RDDATA 0xa0
-
-#define DIRECT_CMD_RD_PROPS 0xa4
-#define RD_DCS BIT(18)
-#define RD_VCHAN_ID(val) (((val) >> 16) & GENMASK(1, 0))
-#define RD_SIZE(val) ((val) & GENMASK(15, 0))
-
-#define DIRECT_CMD_RD_STS 0xa8
-#define DIRECT_CMD_RD_STS_CTL 0x13c
-#define DIRECT_CMD_RD_STS_CLR 0x15c
-#define DIRECT_CMD_RD_STS_FLAG 0x17c
-#define ERR_EOT_WITH_ERR BIT(8)
-#define ERR_MISSING_EOT BIT(7)
-#define ERR_WRONG_LENGTH BIT(6)
-#define ERR_OVERSIZE BIT(5)
-#define ERR_RECEIVE BIT(4)
-#define ERR_UNDECODABLE BIT(3)
-#define ERR_CHECKSUM BIT(2)
-#define ERR_UNCORRECTABLE BIT(1)
-#define ERR_FIXED BIT(0)
-
-#define VID_MAIN_CTL 0xb0
-#define VID_IGNORE_MISS_VSYNC BIT(31)
-#define VID_FIELD_SW BIT(28)
-#define VID_INTERLACED_EN BIT(27)
-#define RECOVERY_MODE(x) ((x) << 25)
-#define RECOVERY_MODE_NEXT_HSYNC 0
-#define RECOVERY_MODE_NEXT_STOP_POINT 2
-#define RECOVERY_MODE_NEXT_VSYNC 3
-#define REG_BLKEOL_MODE(x) ((x) << 23)
-#define REG_BLKLINE_MODE(x) ((x) << 21)
-#define REG_BLK_MODE_NULL_PKT 0
-#define REG_BLK_MODE_BLANKING_PKT 1
-#define REG_BLK_MODE_LP 2
-#define SYNC_PULSE_HORIZONTAL BIT(20)
-#define SYNC_PULSE_ACTIVE BIT(19)
-#define BURST_MODE BIT(18)
-#define VID_PIXEL_MODE_MASK GENMASK(17, 14)
-#define VID_PIXEL_MODE_RGB565 (0 << 14)
-#define VID_PIXEL_MODE_RGB666_PACKED (1 << 14)
-#define VID_PIXEL_MODE_RGB666 (2 << 14)
-#define VID_PIXEL_MODE_RGB888 (3 << 14)
-#define VID_PIXEL_MODE_RGB101010 (4 << 14)
-#define VID_PIXEL_MODE_RGB121212 (5 << 14)
-#define VID_PIXEL_MODE_YUV420 (8 << 14)
-#define VID_PIXEL_MODE_YUV422_PACKED (9 << 14)
-#define VID_PIXEL_MODE_YUV422 (10 << 14)
-#define VID_PIXEL_MODE_YUV422_24B (11 << 14)
-#define VID_PIXEL_MODE_DSC_COMP (12 << 14)
-#define VID_DATATYPE(x) ((x) << 8)
-#define VID_VIRTCHAN_ID(iface, x) ((x) << (4 + (iface) * 2))
-#define STOP_MODE(x) ((x) << 2)
-#define START_MODE(x) (x)
-
-#define VID_VSIZE1 0xb4
-#define VFP_LEN(x) ((x) << 12)
-#define VBP_LEN(x) ((x) << 6)
-#define VSA_LEN(x) (x)
-
-#define VID_VSIZE2 0xb8
-#define VACT_LEN(x) (x)
-
-#define VID_HSIZE1 0xc0
-#define HBP_LEN(x) ((x) << 16)
-#define HSA_LEN(x) (x)
-
-#define VID_HSIZE2 0xc4
-#define HFP_LEN(x) ((x) << 16)
-#define HACT_LEN(x) (x)
-
-#define VID_BLKSIZE1 0xcc
-#define BLK_EOL_PKT_LEN(x) ((x) << 15)
-#define BLK_LINE_EVENT_PKT_LEN(x) (x)
-
-#define VID_BLKSIZE2 0xd0
-#define BLK_LINE_PULSE_PKT_LEN(x) (x)
-
-#define VID_PKT_TIME 0xd8
-#define BLK_EOL_DURATION(x) (x)
-
-#define VID_DPHY_TIME 0xdc
-#define REG_WAKEUP_TIME(x) ((x) << 17)
-#define REG_LINE_DURATION(x) (x)
-
-#define VID_ERR_COLOR1 0xe0
-#define COL_GREEN(x) ((x) << 12)
-#define COL_RED(x) (x)
-
-#define VID_ERR_COLOR2 0xe4
-#define PAD_VAL(x) ((x) << 12)
-#define COL_BLUE(x) (x)
-
-#define VID_VPOS 0xe8
-#define LINE_VAL(val) (((val) & GENMASK(14, 2)) >> 2)
-#define LINE_POS(val) ((val) & GENMASK(1, 0))
-
-#define VID_HPOS 0xec
-#define HORIZ_VAL(val) (((val) & GENMASK(17, 3)) >> 3)
-#define HORIZ_POS(val) ((val) & GENMASK(2, 0))
-
-#define VID_MODE_STS 0xf0
-#define VID_MODE_STS_CTL 0x140
-#define VID_MODE_STS_CLR 0x160
-#define VID_MODE_STS_FLAG 0x180
-#define VSG_RECOVERY BIT(10)
-#define ERR_VRS_WRONG_LEN BIT(9)
-#define ERR_LONG_READ BIT(8)
-#define ERR_LINE_WRITE BIT(7)
-#define ERR_BURST_WRITE BIT(6)
-#define ERR_SMALL_HEIGHT BIT(5)
-#define ERR_SMALL_LEN BIT(4)
-#define ERR_MISSING_VSYNC BIT(3)
-#define ERR_MISSING_HSYNC BIT(2)
-#define ERR_MISSING_DATA BIT(1)
-#define VSG_RUNNING BIT(0)
-
-#define VID_VCA_SETTING1 0xf4
-#define BURST_LP BIT(16)
-#define MAX_BURST_LIMIT(x) (x)
-
-#define VID_VCA_SETTING2 0xf8
-#define MAX_LINE_LIMIT(x) ((x) << 16)
-#define EXACT_BURST_LIMIT(x) (x)
-
-#define TVG_CTL 0xfc
-#define TVG_STRIPE_SIZE(x) ((x) << 5)
-#define TVG_MODE_MASK GENMASK(4, 3)
-#define TVG_MODE_SINGLE_COLOR (0 << 3)
-#define TVG_MODE_VSTRIPES (2 << 3)
-#define TVG_MODE_HSTRIPES (3 << 3)
-#define TVG_STOPMODE_MASK GENMASK(2, 1)
-#define TVG_STOPMODE_EOF (0 << 1)
-#define TVG_STOPMODE_EOL (1 << 1)
-#define TVG_STOPMODE_NOW (2 << 1)
-#define TVG_RUN BIT(0)
-
-#define TVG_IMG_SIZE 0x100
-#define TVG_NBLINES(x) ((x) << 16)
-#define TVG_LINE_SIZE(x) (x)
-
-#define TVG_COLOR1 0x104
-#define TVG_COL1_GREEN(x) ((x) << 12)
-#define TVG_COL1_RED(x) (x)
-
-#define TVG_COLOR1_BIS 0x108
-#define TVG_COL1_BLUE(x) (x)
-
-#define TVG_COLOR2 0x10c
-#define TVG_COL2_GREEN(x) ((x) << 12)
-#define TVG_COL2_RED(x) (x)
-
-#define TVG_COLOR2_BIS 0x110
-#define TVG_COL2_BLUE(x) (x)
-
-#define TVG_STS 0x114
-#define TVG_STS_CTL 0x144
-#define TVG_STS_CLR 0x164
-#define TVG_STS_FLAG 0x184
-#define TVG_STS_RUNNING BIT(0)
-
-#define STS_CTL_EDGE(e) ((e) << 16)
-
-#define DPHY_LANES_MAP 0x198
-#define DAT_REMAP_CFG(b, l) ((l) << ((b) * 8))
-
-#define DPI_IRQ_EN 0x1a0
-#define DPI_IRQ_CLR 0x1a4
-#define DPI_IRQ_STS 0x1a8
-#define PIXEL_BUF_OVERFLOW BIT(0)
-
-#define DPI_CFG 0x1ac
-#define DPI_CFG_FIFO_DEPTH(x) ((x) >> 16)
-#define DPI_CFG_FIFO_LEVEL(x) ((x) & GENMASK(15, 0))
-
-#define TEST_GENERIC 0x1f0
-#define TEST_STATUS(x) ((x) >> 16)
-#define TEST_CTRL(x) (x)
-
-#define ID_REG 0x1fc
-#define REV_VENDOR_ID(x) (((x) & GENMASK(31, 20)) >> 20)
-#define REV_PRODUCT_ID(x) (((x) & GENMASK(19, 12)) >> 12)
-#define REV_HW(x) (((x) & GENMASK(11, 8)) >> 8)
-#define REV_MAJOR(x) (((x) & GENMASK(7, 4)) >> 4)
-#define REV_MINOR(x) ((x) & GENMASK(3, 0))
-
-#define DSI_OUTPUT_PORT 0
-#define DSI_INPUT_PORT(inputid) (1 + (inputid))
-
-#define DSI_HBP_FRAME_OVERHEAD 12
-#define DSI_HSA_FRAME_OVERHEAD 14
-#define DSI_HFP_FRAME_OVERHEAD 6
-#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
-#define DSI_BLANKING_FRAME_OVERHEAD 6
-#define DSI_NULL_FRAME_OVERHEAD 6
-#define DSI_EOT_PKT_SIZE 4
-
-struct cdns_dsi_output {
- struct mipi_dsi_device *dev;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
- union phy_configure_opts phy_opts;
-};
-
-enum cdns_dsi_input_id {
- CDNS_SDI_INPUT,
- CDNS_DPI_INPUT,
- CDNS_DSC_INPUT,
-};
-
-struct cdns_dsi_cfg {
- unsigned int hfp;
- unsigned int hsa;
- unsigned int hbp;
- unsigned int hact;
- unsigned int htotal;
-};
-
-struct cdns_dsi_input {
- enum cdns_dsi_input_id id;
- struct drm_bridge bridge;
-};
-
-struct cdns_dsi {
- struct mipi_dsi_host base;
- void __iomem *regs;
- struct cdns_dsi_input input;
- struct cdns_dsi_output output;
- unsigned int direct_cmd_fifo_depth;
- unsigned int rx_fifo_depth;
- struct completion direct_cmd_comp;
- struct clk *dsi_p_clk;
- struct reset_control *dsi_p_rst;
- struct clk *dsi_sys_clk;
- bool link_initialized;
- struct phy *dphy;
-};
+#include "cdns-dsi-core.h"
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+#include "cdns-dsi-j721e.h"
+#endif
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
@@ -708,6 +269,18 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
writel(val, dsi->regs + MCTL_MAIN_EN);
+
+ if (dsi->platform_ops && dsi->platform_ops->disable)
+ dsi->platform_ops->disable(dsi);
+
+ pm_runtime_put(dsi->base.dev);
+}
+
+static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+
pm_runtime_put(dsi->base.dev);
}
@@ -716,6 +289,8 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
struct cdns_dsi_output *output = &dsi->output;
u32 status;
+ if (dsi->phy_initialized)
+ return;
/*
* Power all internal DPHY blocks down and maintain their reset line
* asserted before changing the DPHY config.
@@ -739,6 +314,7 @@ static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
DPHY_D_RSTB(output->dev->lanes) | DPHY_C_RSTB,
dsi->regs + MCTL_DPHY_CFG0);
+ dsi->phy_initialized = true;
}
static void cdns_dsi_init_link(struct cdns_dsi *dsi)
@@ -792,6 +368,9 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ if (dsi->platform_ops && dsi->platform_ops->enable)
+ dsi->platform_ops->enable(dsi);
+
mode = &bridge->encoder->crtc->state->adjusted_mode;
nlanes = output->dev->lanes;
@@ -914,11 +493,25 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
+static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+
+ if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+ return;
+
+ cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
.disable = cdns_dsi_bridge_disable,
+ .pre_enable = cdns_dsi_bridge_pre_enable,
.enable = cdns_dsi_bridge_enable,
+ .post_disable = cdns_dsi_bridge_post_disable,
};
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -1161,6 +754,7 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
dsi->link_initialized = false;
+ dsi->phy_initialized = false;
return 0;
}
@@ -1220,6 +814,8 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
goto err_disable_pclk;
}
+ dsi->platform_ops = of_device_get_match_data(&pdev->dev);
+
val = readl(dsi->regs + IP_CONF);
dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
@@ -1255,14 +851,27 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
dsi->base.dev = &pdev->dev;
dsi->base.ops = &cdns_dsi_ops;
+ if (dsi->platform_ops && dsi->platform_ops->init) {
+ ret = dsi->platform_ops->init(dsi);
+		if (ret) {
+ dev_err(&pdev->dev, "platform initialization failed: %d\n",
+ ret);
+ goto err_disable_runtime_pm;
+ }
+ }
+
ret = mipi_dsi_host_register(&dsi->base);
if (ret)
- goto err_disable_runtime_pm;
+ goto err_deinit_platform;
clk_disable_unprepare(dsi->dsi_p_clk);
return 0;
+err_deinit_platform:
+ if (dsi->platform_ops && dsi->platform_ops->exit)
+ dsi->platform_ops->exit(dsi);
+
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
@@ -1277,6 +886,10 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
struct cdns_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->base);
+
+ if (dsi->platform_ops && dsi->platform_ops->exit)
+ dsi->platform_ops->exit(dsi);
+
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1284,6 +897,11 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
static const struct of_device_id cdns_dsi_of_match[] = {
{ .compatible = "cdns,dsi" },
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ { .compatible = "ti,j721e-dsi",
+ .data = &dsi_ti_j721e_ops,
+ },
+#endif
{ },
};
MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
@@ -1303,4 +921,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
-
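
With the refactor above, SoC-specific behaviour hangs off the optional
dsi_platform_ops table matched through of_device_id .data. A hedged
sketch of how another SoC wrapper could plug in; the "acme" names are
hypothetical, and the real J721E implementation follows below.

static int cdns_dsi_acme_init(struct cdns_dsi *dsi)
{
	/* map wrapper registers, claim wrapper clocks, etc. */
	return 0;
}

static void cdns_dsi_acme_enable(struct cdns_dsi *dsi)
{
	/* route the desired DPI input to the DSI core */
}

static const struct dsi_platform_ops dsi_acme_ops = {
	.init	= cdns_dsi_acme_init,
	.enable	= cdns_dsi_acme_enable,
};

/*
 * ...and the match table would gain:
 * { .compatible = "acme,soc-dsi", .data = &dsi_acme_ops },
 */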
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
new file mode 100644
index 000000000000..37568b547fbe
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright: 2017 Cadence Design Systems, Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef CDNS_DSI_H
+#define CDNS_DSI_H
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
+#define IP_CONF 0x0
+#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
+#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
+#define VRS_FIFO_DEPTH(x) (((x) & GENMASK(20, 16)) >> 16)
+#define DIRCMD_FIFO_DEPTH(x) (((x) & GENMASK(15, 13)) >> 13)
+#define SDI_IFACE_32 BIT(12)
+#define INTERNAL_DATAPATH_32 (0 << 10)
+#define INTERNAL_DATAPATH_16 (1 << 10)
+#define INTERNAL_DATAPATH_8 (3 << 10)
+#define INTERNAL_DATAPATH_SIZE ((x) & GENMASK(11, 10))
+#define NUM_IFACE(x) ((((x) & GENMASK(9, 8)) >> 8) + 1)
+#define MAX_LANE_NB(x) (((x) & GENMASK(7, 6)) >> 6)
+#define RX_FIFO_DEPTH(x) ((x) & GENMASK(5, 0))
+
+#define MCTL_MAIN_DATA_CTL 0x4
+#define TE_MIPI_POLLING_EN BIT(25)
+#define TE_HW_POLLING_EN BIT(24)
+#define DISP_EOT_GEN BIT(18)
+#define HOST_EOT_GEN BIT(17)
+#define DISP_GEN_CHECKSUM BIT(16)
+#define DISP_GEN_ECC BIT(15)
+#define BTA_EN BIT(14)
+#define READ_EN BIT(13)
+#define REG_TE_EN BIT(12)
+#define IF_TE_EN(x) BIT(8 + (x))
+#define TVG_SEL BIT(6)
+#define VID_EN BIT(5)
+#define IF_VID_SELECT(x) ((x) << 2)
+#define IF_VID_SELECT_MASK GENMASK(3, 2)
+#define IF_VID_MODE BIT(1)
+#define LINK_EN BIT(0)
+
+#define MCTL_MAIN_PHY_CTL 0x8
+#define HS_INVERT_DAT(x) BIT(19 + ((x) * 2))
+#define SWAP_PINS_DAT(x) BIT(18 + ((x) * 2))
+#define HS_INVERT_CLK BIT(17)
+#define SWAP_PINS_CLK BIT(16)
+#define HS_SKEWCAL_EN BIT(15)
+#define WAIT_BURST_TIME(x) ((x) << 10)
+#define DATA_ULPM_EN(x) BIT(6 + (x))
+#define CLK_ULPM_EN BIT(5)
+#define CLK_CONTINUOUS BIT(4)
+#define DATA_LANE_EN(x) BIT((x) - 1)
+
+#define MCTL_MAIN_EN 0xc
+#define DATA_FORCE_STOP BIT(17)
+#define CLK_FORCE_STOP BIT(16)
+#define IF_EN(x) BIT(13 + (x))
+#define DATA_LANE_ULPM_REQ(l) BIT(9 + (l))
+#define CLK_LANE_ULPM_REQ BIT(8)
+#define DATA_LANE_START(x) BIT(4 + (x))
+#define CLK_LANE_EN BIT(3)
+#define PLL_START BIT(0)
+
+#define MCTL_DPHY_CFG0 0x10
+#define DPHY_C_RSTB BIT(20)
+#define DPHY_D_RSTB(x) GENMASK(15 + (x), 16)
+#define DPHY_PLL_PDN BIT(10)
+#define DPHY_CMN_PDN BIT(9)
+#define DPHY_C_PDN BIT(8)
+#define DPHY_D_PDN(x) GENMASK(3 + (x), 4)
+#define DPHY_ALL_D_PDN GENMASK(7, 4)
+#define DPHY_PLL_PSO BIT(1)
+#define DPHY_CMN_PSO BIT(0)
+
+#define MCTL_DPHY_TIMEOUT1 0x14
+#define HSTX_TIMEOUT(x) ((x) << 4)
+#define HSTX_TIMEOUT_MAX GENMASK(17, 0)
+#define CLK_DIV(x) (x)
+#define CLK_DIV_MAX GENMASK(3, 0)
+
+#define MCTL_DPHY_TIMEOUT2 0x18
+#define LPRX_TIMEOUT(x) (x)
+
+#define MCTL_ULPOUT_TIME 0x1c
+#define DATA_LANE_ULPOUT_TIME(x) ((x) << 9)
+#define CLK_LANE_ULPOUT_TIME(x) (x)
+
+#define MCTL_3DVIDEO_CTL 0x20
+#define VID_VSYNC_3D_EN BIT(7)
+#define VID_VSYNC_3D_LR BIT(5)
+#define VID_VSYNC_3D_SECOND_EN BIT(4)
+#define VID_VSYNC_3DFORMAT_LINE (0 << 2)
+#define VID_VSYNC_3DFORMAT_FRAME (1 << 2)
+#define VID_VSYNC_3DFORMAT_PIXEL (2 << 2)
+#define VID_VSYNC_3DMODE_OFF 0
+#define VID_VSYNC_3DMODE_PORTRAIT 1
+#define VID_VSYNC_3DMODE_LANDSCAPE 2
+
+#define MCTL_MAIN_STS 0x24
+#define MCTL_MAIN_STS_CTL 0x130
+#define MCTL_MAIN_STS_CLR 0x150
+#define MCTL_MAIN_STS_FLAG 0x170
+#define HS_SKEWCAL_DONE BIT(11)
+#define IF_UNTERM_PKT_ERR(x) BIT(8 + (x))
+#define LPRX_TIMEOUT_ERR BIT(7)
+#define HSTX_TIMEOUT_ERR BIT(6)
+#define DATA_LANE_RDY(l) BIT(2 + (l))
+#define CLK_LANE_RDY BIT(1)
+#define PLL_LOCKED BIT(0)
+
+#define MCTL_DPHY_ERR 0x28
+#define MCTL_DPHY_ERR_CTL1 0x148
+#define MCTL_DPHY_ERR_CLR 0x168
+#define MCTL_DPHY_ERR_FLAG 0x188
+#define ERR_CONT_LP(x, l) BIT(18 + ((x) * 4) + (l))
+#define ERR_CONTROL(l) BIT(14 + (l))
+#define ERR_SYNESC(l) BIT(10 + (l))
+#define ERR_ESC(l) BIT(6 + (l))
+
+#define MCTL_DPHY_ERR_CTL2 0x14c
+#define ERR_CONT_LP_EDGE(x, l) BIT(12 + ((x) * 4) + (l))
+#define ERR_CONTROL_EDGE(l) BIT(8 + (l))
+#define ERR_SYN_ESC_EDGE(l) BIT(4 + (l))
+#define ERR_ESC_EDGE(l) BIT(0 + (l))
+
+#define MCTL_LANE_STS 0x2c
+#define PPI_C_TX_READY_HS BIT(18)
+#define DPHY_PLL_LOCK BIT(17)
+#define PPI_D_RX_ULPS_ESC(x) (((x) & GENMASK(15, 12)) >> 12)
+#define LANE_STATE_START 0
+#define LANE_STATE_IDLE 1
+#define LANE_STATE_WRITE 2
+#define LANE_STATE_ULPM 3
+#define LANE_STATE_READ 4
+#define DATA_LANE_STATE(l, val) \
+ (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0))
+#define CLK_LANE_STATE_HS 2
+#define CLK_LANE_STATE(val) ((val) & GENMASK(1, 0))
+
+#define DSC_MODE_CTL 0x30
+#define DSC_MODE_EN BIT(0)
+
+#define DSC_CMD_SEND 0x34
+#define DSC_SEND_PPS BIT(0)
+#define DSC_EXECUTE_QUEUE BIT(1)
+
+#define DSC_PPS_WRDAT 0x38
+
+#define DSC_MODE_STS 0x3c
+#define DSC_PPS_DONE BIT(1)
+#define DSC_EXEC_DONE BIT(2)
+
+#define CMD_MODE_CTL 0x70
+#define IF_LP_EN(x) BIT(9 + (x))
+#define IF_VCHAN_ID(x, c) ((c) << ((x) * 2))
+
+#define CMD_MODE_CTL2 0x74
+#define TE_TIMEOUT(x) ((x) << 11)
+#define FILL_VALUE(x) ((x) << 3)
+#define ARB_IF_WITH_HIGHEST_PRIORITY(x) ((x) << 1)
+#define ARB_ROUND_ROBIN_MODE BIT(0)
+
+#define CMD_MODE_STS 0x78
+#define CMD_MODE_STS_CTL 0x134
+#define CMD_MODE_STS_CLR 0x154
+#define CMD_MODE_STS_FLAG 0x174
+#define ERR_IF_UNDERRUN(x) BIT(4 + (x))
+#define ERR_UNWANTED_READ BIT(3)
+#define ERR_TE_MISS BIT(2)
+#define ERR_NO_TE BIT(1)
+#define CSM_RUNNING BIT(0)
+
+#define DIRECT_CMD_SEND 0x80
+
+#define DIRECT_CMD_MAIN_SETTINGS 0x84
+#define TRIGGER_VAL(x) ((x) << 25)
+#define CMD_LP_EN BIT(24)
+#define CMD_SIZE(x) ((x) << 16)
+#define CMD_VCHAN_ID(x) ((x) << 14)
+#define CMD_DATATYPE(x) ((x) << 8)
+#define CMD_LONG BIT(3)
+#define WRITE_CMD 0
+#define READ_CMD 1
+#define TE_REQ 4
+#define TRIGGER_REQ 5
+#define BTA_REQ 6
+
+#define DIRECT_CMD_STS 0x88
+#define DIRECT_CMD_STS_CTL 0x138
+#define DIRECT_CMD_STS_CLR 0x158
+#define DIRECT_CMD_STS_FLAG 0x178
+#define RCVD_ACK_VAL(val) ((val) >> 16)
+#define RCVD_TRIGGER_VAL(val) (((val) & GENMASK(14, 11)) >> 11)
+#define READ_COMPLETED_WITH_ERR BIT(10)
+#define BTA_FINISHED BIT(9)
+#define BTA_COMPLETED BIT(8)
+#define TE_RCVD BIT(7)
+#define TRIGGER_RCVD BIT(6)
+#define ACK_WITH_ERR_RCVD BIT(5)
+#define ACK_RCVD BIT(4)
+#define READ_COMPLETED BIT(3)
+#define TRIGGER_COMPLETED BIT(2)
+#define WRITE_COMPLETED BIT(1)
+#define SENDING_CMD BIT(0)
+
+#define DIRECT_CMD_STOP_READ 0x8c
+
+#define DIRECT_CMD_WRDATA 0x90
+
+#define DIRECT_CMD_FIFO_RST 0x94
+
+#define DIRECT_CMD_RDDATA 0xa0
+
+#define DIRECT_CMD_RD_PROPS 0xa4
+#define RD_DCS BIT(18)
+#define RD_VCHAN_ID(val) (((val) >> 16) & GENMASK(1, 0))
+#define RD_SIZE(val) ((val) & GENMASK(15, 0))
+
+#define DIRECT_CMD_RD_STS 0xa8
+#define DIRECT_CMD_RD_STS_CTL 0x13c
+#define DIRECT_CMD_RD_STS_CLR 0x15c
+#define DIRECT_CMD_RD_STS_FLAG 0x17c
+#define ERR_EOT_WITH_ERR BIT(8)
+#define ERR_MISSING_EOT BIT(7)
+#define ERR_WRONG_LENGTH BIT(6)
+#define ERR_OVERSIZE BIT(5)
+#define ERR_RECEIVE BIT(4)
+#define ERR_UNDECODABLE BIT(3)
+#define ERR_CHECKSUM BIT(2)
+#define ERR_UNCORRECTABLE BIT(1)
+#define ERR_FIXED BIT(0)
+
+#define VID_MAIN_CTL 0xb0
+#define VID_IGNORE_MISS_VSYNC BIT(31)
+#define VID_FIELD_SW BIT(28)
+#define VID_INTERLACED_EN BIT(27)
+#define RECOVERY_MODE(x) ((x) << 25)
+#define RECOVERY_MODE_NEXT_HSYNC 0
+#define RECOVERY_MODE_NEXT_STOP_POINT 2
+#define RECOVERY_MODE_NEXT_VSYNC 3
+#define REG_BLKEOL_MODE(x) ((x) << 23)
+#define REG_BLKLINE_MODE(x) ((x) << 21)
+#define REG_BLK_MODE_NULL_PKT 0
+#define REG_BLK_MODE_BLANKING_PKT 1
+#define REG_BLK_MODE_LP 2
+#define SYNC_PULSE_HORIZONTAL BIT(20)
+#define SYNC_PULSE_ACTIVE BIT(19)
+#define BURST_MODE BIT(18)
+#define VID_PIXEL_MODE_MASK GENMASK(17, 14)
+#define VID_PIXEL_MODE_RGB565 (0 << 14)
+#define VID_PIXEL_MODE_RGB666_PACKED (1 << 14)
+#define VID_PIXEL_MODE_RGB666 (2 << 14)
+#define VID_PIXEL_MODE_RGB888 (3 << 14)
+#define VID_PIXEL_MODE_RGB101010 (4 << 14)
+#define VID_PIXEL_MODE_RGB121212 (5 << 14)
+#define VID_PIXEL_MODE_YUV420 (8 << 14)
+#define VID_PIXEL_MODE_YUV422_PACKED (9 << 14)
+#define VID_PIXEL_MODE_YUV422 (10 << 14)
+#define VID_PIXEL_MODE_YUV422_24B (11 << 14)
+#define VID_PIXEL_MODE_DSC_COMP (12 << 14)
+#define VID_DATATYPE(x) ((x) << 8)
+#define VID_VIRTCHAN_ID(iface, x) ((x) << (4 + (iface) * 2))
+#define STOP_MODE(x) ((x) << 2)
+#define START_MODE(x) (x)
+
+#define VID_VSIZE1 0xb4
+#define VFP_LEN(x) ((x) << 12)
+#define VBP_LEN(x) ((x) << 6)
+#define VSA_LEN(x) (x)
+
+#define VID_VSIZE2 0xb8
+#define VACT_LEN(x) (x)
+
+#define VID_HSIZE1 0xc0
+#define HBP_LEN(x) ((x) << 16)
+#define HSA_LEN(x) (x)
+
+#define VID_HSIZE2 0xc4
+#define HFP_LEN(x) ((x) << 16)
+#define HACT_LEN(x) (x)
+
+#define VID_BLKSIZE1 0xcc
+#define BLK_EOL_PKT_LEN(x) ((x) << 15)
+#define BLK_LINE_EVENT_PKT_LEN(x) (x)
+
+#define VID_BLKSIZE2 0xd0
+#define BLK_LINE_PULSE_PKT_LEN(x) (x)
+
+#define VID_PKT_TIME 0xd8
+#define BLK_EOL_DURATION(x) (x)
+
+#define VID_DPHY_TIME 0xdc
+#define REG_WAKEUP_TIME(x) ((x) << 17)
+#define REG_LINE_DURATION(x) (x)
+
+#define VID_ERR_COLOR1 0xe0
+#define COL_GREEN(x) ((x) << 12)
+#define COL_RED(x) (x)
+
+#define VID_ERR_COLOR2 0xe4
+#define PAD_VAL(x) ((x) << 12)
+#define COL_BLUE(x) (x)
+
+#define VID_VPOS 0xe8
+#define LINE_VAL(val) (((val) & GENMASK(14, 2)) >> 2)
+#define LINE_POS(val) ((val) & GENMASK(1, 0))
+
+#define VID_HPOS 0xec
+#define HORIZ_VAL(val) (((val) & GENMASK(17, 3)) >> 3)
+#define HORIZ_POS(val) ((val) & GENMASK(2, 0))
+
+#define VID_MODE_STS 0xf0
+#define VID_MODE_STS_CTL 0x140
+#define VID_MODE_STS_CLR 0x160
+#define VID_MODE_STS_FLAG 0x180
+#define VSG_RECOVERY BIT(10)
+#define ERR_VRS_WRONG_LEN BIT(9)
+#define ERR_LONG_READ BIT(8)
+#define ERR_LINE_WRITE BIT(7)
+#define ERR_BURST_WRITE BIT(6)
+#define ERR_SMALL_HEIGHT BIT(5)
+#define ERR_SMALL_LEN BIT(4)
+#define ERR_MISSING_VSYNC BIT(3)
+#define ERR_MISSING_HSYNC BIT(2)
+#define ERR_MISSING_DATA BIT(1)
+#define VSG_RUNNING BIT(0)
+
+#define VID_VCA_SETTING1 0xf4
+#define BURST_LP BIT(16)
+#define MAX_BURST_LIMIT(x) (x)
+
+#define VID_VCA_SETTING2 0xf8
+#define MAX_LINE_LIMIT(x) ((x) << 16)
+#define EXACT_BURST_LIMIT(x) (x)
+
+#define TVG_CTL 0xfc
+#define TVG_STRIPE_SIZE(x) ((x) << 5)
+#define TVG_MODE_MASK GENMASK(4, 3)
+#define TVG_MODE_SINGLE_COLOR (0 << 3)
+#define TVG_MODE_VSTRIPES (2 << 3)
+#define TVG_MODE_HSTRIPES (3 << 3)
+#define TVG_STOPMODE_MASK GENMASK(2, 1)
+#define TVG_STOPMODE_EOF (0 << 1)
+#define TVG_STOPMODE_EOL (1 << 1)
+#define TVG_STOPMODE_NOW (2 << 1)
+#define TVG_RUN BIT(0)
+
+#define TVG_IMG_SIZE 0x100
+#define TVG_NBLINES(x) ((x) << 16)
+#define TVG_LINE_SIZE(x) (x)
+
+#define TVG_COLOR1 0x104
+#define TVG_COL1_GREEN(x) ((x) << 12)
+#define TVG_COL1_RED(x) (x)
+
+#define TVG_COLOR1_BIS 0x108
+#define TVG_COL1_BLUE(x) (x)
+
+#define TVG_COLOR2 0x10c
+#define TVG_COL2_GREEN(x) ((x) << 12)
+#define TVG_COL2_RED(x) (x)
+
+#define TVG_COLOR2_BIS 0x110
+#define TVG_COL2_BLUE(x) (x)
+
+#define TVG_STS 0x114
+#define TVG_STS_CTL 0x144
+#define TVG_STS_CLR 0x164
+#define TVG_STS_FLAG 0x184
+#define TVG_STS_RUNNING BIT(0)
+
+#define STS_CTL_EDGE(e) ((e) << 16)
+
+#define DPHY_LANES_MAP 0x198
+#define DAT_REMAP_CFG(b, l) ((l) << ((b) * 8))
+
+#define DPI_IRQ_EN 0x1a0
+#define DPI_IRQ_CLR 0x1a4
+#define DPI_IRQ_STS 0x1a8
+#define PIXEL_BUF_OVERFLOW BIT(0)
+
+#define DPI_CFG 0x1ac
+#define DPI_CFG_FIFO_DEPTH(x) ((x) >> 16)
+#define DPI_CFG_FIFO_LEVEL(x) ((x) & GENMASK(15, 0))
+
+#define TEST_GENERIC 0x1f0
+#define TEST_STATUS(x) ((x) >> 16)
+#define TEST_CTRL(x) (x)
+
+#define ID_REG 0x1fc
+#define REV_VENDOR_ID(x) (((x) & GENMASK(31, 20)) >> 20)
+#define REV_PRODUCT_ID(x) (((x) & GENMASK(19, 12)) >> 12)
+#define REV_HW(x) (((x) & GENMASK(11, 8)) >> 8)
+#define REV_MAJOR(x) (((x) & GENMASK(7, 4)) >> 4)
+#define REV_MINOR(x) ((x) & GENMASK(3, 0))
+
+#define DSI_OUTPUT_PORT 0
+#define DSI_INPUT_PORT(inputid) (1 + (inputid))
+
+#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HSA_FRAME_OVERHEAD 14
+#define DSI_HFP_FRAME_OVERHEAD 6
+#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
+#define DSI_BLANKING_FRAME_OVERHEAD 6
+#define DSI_NULL_FRAME_OVERHEAD 6
+#define DSI_EOT_PKT_SIZE 4
+
+struct cdns_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ union phy_configure_opts phy_opts;
+};
+
+enum cdns_dsi_input_id {
+ CDNS_SDI_INPUT,
+ CDNS_DPI_INPUT,
+ CDNS_DSC_INPUT,
+};
+
+struct cdns_dsi_cfg {
+ unsigned int hfp;
+ unsigned int hsa;
+ unsigned int hbp;
+ unsigned int hact;
+ unsigned int htotal;
+};
+
+struct cdns_dsi_input {
+ enum cdns_dsi_input_id id;
+ struct drm_bridge bridge;
+};
+
+struct cdns_dsi;
+
+struct dsi_platform_ops {
+ int (*init)(struct cdns_dsi *dsi);
+ void (*exit)(struct cdns_dsi *dsi);
+ void (*enable)(struct cdns_dsi *dsi);
+ void (*disable)(struct cdns_dsi *dsi);
+};
+
+struct cdns_dsi {
+ struct mipi_dsi_host base;
+ void __iomem *regs;
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ void __iomem *j721e_regs;
+#endif
+ const struct dsi_platform_ops *platform_ops;
+ struct cdns_dsi_input input;
+ struct cdns_dsi_output output;
+ unsigned int direct_cmd_fifo_depth;
+ unsigned int rx_fifo_depth;
+ struct completion direct_cmd_comp;
+ struct clk *dsi_p_clk;
+ struct reset_control *dsi_p_rst;
+ struct clk *dsi_sys_clk;
+ bool link_initialized;
+ bool phy_initialized;
+ struct phy *dphy;
+};
+
+#endif /* !CDNS_DSI_H */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
new file mode 100644
index 000000000000..b5216acb333e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "cdns-dsi-j721e.h"
+
+#define DSI_WRAP_REVISION 0x0
+#define DSI_WRAP_DPI_CONTROL 0x4
+#define DSI_WRAP_DSC_CONTROL 0x8
+#define DSI_WRAP_DPI_SECURE 0xc
+#define DSI_WRAP_DSI_0_ASF_STATUS 0x10
+
+#define DSI_WRAP_DPI_0_EN BIT(0)
+#define DSI_WRAP_DSI2_MUX_SEL BIT(4)
+
+static int cdns_dsi_j721e_init(struct cdns_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->base.dev);
+
+ dsi->j721e_regs = devm_platform_ioremap_resource(pdev, 1);
+ return PTR_ERR_OR_ZERO(dsi->j721e_regs);
+}
+
+static void cdns_dsi_j721e_enable(struct cdns_dsi *dsi)
+{
+	/*
+	 * Enable DPI0 as the DSI input. On J721E, the DSS0 DPI2 output
+	 * is wired to the DSI DPI0 input; this is the only supported
+	 * configuration.
+	 */
+ writel(DSI_WRAP_DPI_0_EN, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+static void cdns_dsi_j721e_disable(struct cdns_dsi *dsi)
+{
+	/* Put the wrapper back to its default (disabled) state */
+ writel(0, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+const struct dsi_platform_ops dsi_ti_j721e_ops = {
+ .init = cdns_dsi_j721e_init,
+ .enable = cdns_dsi_j721e_enable,
+ .disable = cdns_dsi_j721e_disable,
+};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
new file mode 100644
index 000000000000..01f3dbd92db2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#ifndef CDNS_DSI_J721E_H
+#define CDNS_DSI_J721E_H
+
+#include "cdns-dsi-core.h"
+
+struct dsi_platform_ops;
+
+extern const struct dsi_platform_ops dsi_ti_j721e_ops;
+
+#endif /* !CDNS_DSI_J721E_H */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index f56ff97c9899..d4519cbd5167 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -52,6 +52,8 @@
#include "cdns-mhdp8546-j721e.h"
+static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp);
+
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
int ret, empty;
@@ -747,7 +749,7 @@ static int cdns_mhdp_fw_activate(const struct firmware *fw,
* MHDP_HW_STOPPED happens only due to driver removal when
* bridge should already be detached.
*/
- if (mhdp->bridge_attached)
+ if (mhdp->bridge_attached && !mhdp->no_hpd)
writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
mhdp->regs + CDNS_APB_INT_MASK);
@@ -843,7 +845,7 @@ static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
msg->buffer, msg->size);
if (ret) {
- dev_err(mhdp->dev,
+ dev_dbg(mhdp->dev,
"Failed to read DPCD addr %u\n",
msg->address);
@@ -1687,6 +1689,19 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
spin_unlock(&mhdp->start_lock);
+ if (mhdp->no_hpd) {
+ ret = wait_event_timeout(mhdp->fw_load_wq,
+ mhdp->hw_state == MHDP_HW_READY,
+ msecs_to_jiffies(100));
+ if (ret == 0) {
+ dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+
+ cdns_mhdp_update_link_status(mhdp);
+ return 0;
+ }
/* Enable SW event interrupts */
if (hw_ready)
writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
@@ -2188,7 +2203,16 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
mutex_lock(&mhdp->link_mutex);
- mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
+ if (mhdp->no_hpd) {
+ ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
+ hpd_pulse = false;
+ if (ret < 0)
+ mhdp->plugged = false;
+ else
+ mhdp->plugged = true;
+ } else {
+ mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
+ }
if (!mhdp->plugged) {
cdns_mhdp_link_down(mhdp);
@@ -2350,6 +2374,8 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->aux.dev = dev;
mhdp->aux.transfer = cdns_mhdp_transfer;
+ mhdp->no_hpd = of_property_read_bool(dev->of_node, "cdns,no-hpd");
+
mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mhdp->regs)) {
dev_err(dev, "Failed to get memory resource\n");
@@ -2416,8 +2442,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->bridge.of_node = pdev->dev.of_node;
mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
- mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
- DRM_BRIDGE_OP_HPD;
+ mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ if (!mhdp->no_hpd)
+ mhdp->bridge.ops |= DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
if (mhdp->info)
mhdp->bridge.timings = mhdp->info->timings;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index 5897a85e3159..fa17883498a5 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -375,6 +375,7 @@ struct cdns_mhdp_device {
bool link_up;
bool plugged;
+ bool no_hpd;
/*
* "start_lock" protects the access to bridge_attached and
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 544a47335cac..847a0dce7f1d 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -20,6 +21,8 @@ struct display_connector {
struct gpio_desc *hpd_gpio;
int hpd_irq;
+
+ struct regulator *dp_pwr;
};
static inline struct display_connector *
@@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev)
of_property_read_string(pdev->dev.of_node, "label", &label);
/*
- * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
* edge interrupts, register an interrupt handler.
*/
if (type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA) {
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
if (IS_ERR(conn->hpd_gpio)) {
@@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* Get the DP PWR for DP connector. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int ret;
+
+ conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
+
+ if (IS_ERR(conn->dp_pwr)) {
+ ret = PTR_ERR(conn->dp_pwr);
+
+ switch (ret) {
+ case -ENODEV:
+ conn->dp_pwr = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (conn->dp_pwr) {
+ ret = regulator_enable(conn->dp_pwr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->dp_pwr)
+ regulator_disable(conn->dp_pwr);
+
drm_bridge_remove(&conn->bridge);
if (!IS_ERR(conn->bridge.ddc))
@@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = {
}, {
.compatible = "vga-connector",
.data = (void *)DRM_MODE_CONNECTOR_VGA,
+ }, {
+ .compatible = "dp-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
},
{},
};
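
A condensed sketch of the optional-supply idiom used above: with
devm_regulator_get_optional(), -ENODEV means no supply is described in
DT and is not an error, while anything else (including -EPROBE_DEFER)
must be propagated. The helper name is hypothetical.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_and_enable_dp_pwr(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "dp-pwr");

	if (IS_ERR(reg)) {
		int err = PTR_ERR(reg);

		if (err == -ENODEV) {
			*out = NULL;	/* supply is genuinely optional */
			return 0;
		}
		return err;		/* includes -EPROBE_DEFER */
	}

	*out = reg;
	return regulator_enable(reg);
}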
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 89558e581530..b6c485cb2c89 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -166,10 +166,15 @@ struct sii902x {
struct i2c_client *i2c;
struct regmap *regmap;
struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
struct regulator_bulk_data supplies[2];
+ bool sink_is_hdmi;
+ unsigned int ctx_tpi;
+ unsigned int ctx_interrupt;
+
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@@ -245,10 +250,8 @@ static void sii902x_reset(struct sii902x *sii902x)
gpiod_set_value(sii902x->reset_gpio, 0);
}
-static enum drm_connector_status
-sii902x_connector_detect(struct drm_connector *connector, bool force)
+static enum drm_connector_status sii902x_detect(struct sii902x *sii902x)
{
- struct sii902x *sii902x = connector_to_sii902x(connector);
unsigned int status;
mutex_lock(&sii902x->mutex);
@@ -261,6 +264,14 @@ sii902x_connector_detect(struct drm_connector *connector, bool force)
connector_status_connected : connector_status_disconnected;
}
+static enum drm_connector_status
+sii902x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+
+ return sii902x_detect(sii902x);
+}
+
static const struct drm_connector_funcs sii902x_connector_funcs = {
.detect = sii902x_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -270,42 +281,40 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int sii902x_get_modes(struct drm_connector *connector)
+static struct edid *sii902x_get_edid(struct sii902x *sii902x,
+ struct drm_connector *connector)
{
- struct sii902x *sii902x = connector_to_sii902x(connector);
- u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct edid *edid;
- int num = 0, ret;
mutex_lock(&sii902x->mutex);
edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
- drm_connector_update_edid_property(connector, edid);
if (edid) {
if (drm_detect_hdmi_monitor(edid))
- output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
-
- num = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ sii902x->sink_is_hdmi = true;
+ else
+ sii902x->sink_is_hdmi = false;
}
- ret = drm_display_info_set_bus_formats(&connector->display_info,
- &bus_format, 1);
- if (ret)
- goto error_out;
+ mutex_unlock(&sii902x->mutex);
- ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
- if (ret)
- goto error_out;
+ return edid;
+}
- ret = num;
+static int sii902x_get_modes(struct drm_connector *connector)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ struct edid *edid;
+ int num = 0;
-error_out:
- mutex_unlock(&sii902x->mutex);
+ edid = sii902x_get_edid(sii902x, connector);
+ drm_connector_update_edid_property(connector, edid);
+ if (edid) {
+ num = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
- return ret;
+ return num;
}
static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
@@ -354,12 +363,16 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
+ if (sii902x->sink_is_hdmi)
+ output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
+
buf[0] = pixel_clock_10kHz & 0xff;
buf[1] = pixel_clock_10kHz >> 8;
buf[2] = drm_mode_vrefresh(adj);
@@ -375,6 +388,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&sii902x->mutex);
+ ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
+ if (ret)
+ goto out;
+
ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
if (ret)
goto out;
@@ -405,13 +423,13 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct drm_device *drm = bridge->dev;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ bridge, flags);
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
@@ -433,16 +451,38 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
else
sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+ ret = drm_display_info_set_bus_formats(&sii902x->connector.display_info,
+ &bus_format, 1);
+ if (ret)
+ return ret;
+
drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
return 0;
}
+static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ return sii902x_detect(sii902x);
+}
+
+static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ return sii902x_get_edid(sii902x, connector);
+}
+
static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.attach = sii902x_bridge_attach,
.mode_set = sii902x_bridge_mode_set,
.disable = sii902x_bridge_disable,
.enable = sii902x_bridge_enable,
+ .detect = sii902x_bridge_detect,
+ .get_edid = sii902x_bridge_get_edid,
};
static int sii902x_mute(struct sii902x *sii902x, bool mute)
@@ -829,8 +869,12 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
mutex_unlock(&sii902x->mutex);
- if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
+ if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev) {
drm_helper_hpd_irq_event(sii902x->bridge.dev);
+ drm_bridge_hpd_notify(&sii902x->bridge, (status & SII902X_PLUGGED_STATUS)
+ ? connector_status_connected
+ : connector_status_disconnected);
+ }
return IRQ_HANDLED;
}
@@ -956,6 +1000,76 @@ static const struct drm_bridge_timings default_sii902x_timings = {
| DRM_BUS_FLAG_DE_HIGH,
};
+static int __maybe_unused sii902x_resume(struct device *dev)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+ unsigned int tpi_reg, status;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
+ sii902x->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable supplies");
+ return ret;
+ }
+
+ ret = regmap_read(sii902x->regmap, SII902X_REG_TPI_RQB, &tpi_reg);
+ if (ret)
+ goto err_disable_supply;
+
+ if (tpi_reg != sii902x->ctx_tpi) {
+		/*
+		 * The TPI register context has changed: the SII902X's
+		 * power supply was cycled while suspended, so the device
+		 * must be reinitialized.
+		 */
+
+ sii902x_reset(sii902x);
+
+ /* Configure the device to enter TPI mode. */
+ ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
+ if (ret)
+ goto err_disable_supply;
+
+		/* Re-enable the interrupts */
+ regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
+ sii902x->ctx_interrupt);
+ }
+
+ /* Clear all pending interrupts */
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+ return 0;
+
+err_disable_supply:
+
+ regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
+ sii902x->supplies);
+
+ return ret;
+}
+
+static int __maybe_unused sii902x_suspend(struct device *dev)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+ int ret;
+
+ regmap_read(sii902x->regmap, SII902X_REG_TPI_RQB,
+ &sii902x->ctx_tpi);
+
+ regmap_read(sii902x->regmap, SII902X_INT_ENABLE,
+ &sii902x->ctx_interrupt);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
+ sii902x->supplies);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sii902x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sii902x_suspend, sii902x_resume)
+};
+
static int sii902x_init(struct sii902x *sii902x)
{
struct device *dev = &sii902x->i2c->dev;
@@ -1001,6 +1115,11 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
+ sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+
+ if (sii902x->i2c->irq > 0)
+ sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev);
@@ -1022,6 +1141,7 @@ static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct device_node *endpoint;
struct sii902x *sii902x;
int ret;
@@ -1049,6 +1169,28 @@ static int sii902x_probe(struct i2c_client *client,
return PTR_ERR(sii902x->reset_gpio);
}
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+ if (endpoint) {
+ struct device_node *remote = of_graph_get_remote_port_parent(endpoint);
+
+ of_node_put(endpoint);
+ if (!remote) {
+ dev_err(dev, "Endpoint in port@1 unconnected\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(remote)) {
+ dev_err(dev, "port@1 remote device is disabled\n");
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
+ sii902x->next_bridge = of_drm_find_bridge(remote);
+ of_node_put(remote);
+ if (!sii902x->next_bridge)
+ return -EPROBE_DEFER;
+ }
+
mutex_init(&sii902x->mutex);
sii902x->supplies[0].supply = "iovcc";
@@ -1104,6 +1246,7 @@ static struct i2c_driver sii902x_driver = {
.remove = sii902x_remove,
.driver = {
.name = "sii902x",
+ .pm = &sii902x_pm_ops,
.of_match_table = sii902x_dt_ids,
},
.id_table = sii902x_i2c_ids,
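
On the consumer side of the new drm_bridge_hpd_notify() call above, a
display driver (or drm_bridge_connector) can register a callback while
the bridge advertises DRM_BRIDGE_OP_HPD. A hedged sketch, with
hypothetical "foo" names:

#include <drm/drm_bridge.h>
#include <drm/drm_probe_helper.h>

static void foo_hpd_cb(void *data, enum drm_connector_status status)
{
	struct drm_device *drm = data;

	/* re-probe connectors on plug/unplug */
	drm_kms_helper_hotplug_event(drm);
}

static void foo_setup_hpd(struct drm_bridge *bridge, struct drm_device *drm)
{
	/* valid only while the bridge has DRM_BRIDGE_OP_HPD set */
	drm_bridge_hpd_enable(bridge, foo_hpd_cb, drm);
}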
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 77a447a3fb1d..32ddf80591fb 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -123,6 +123,7 @@
* @dp_lanes: Count of dp_lanes we're using.
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
+ * @plugged: True if the panel is currently connected.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -150,6 +151,7 @@ struct ti_sn_bridge {
int dp_lanes;
u8 ln_assign;
u8 ln_polrs;
+ bool plugged;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
@@ -192,7 +194,7 @@ static int __maybe_unused ti_sn_bridge_resume(struct device *dev)
return ret;
}
- gpiod_set_value(pdata->enable_gpio, 1);
+ gpiod_set_value_cansleep(pdata->enable_gpio, 1);
return ret;
}
@@ -202,7 +204,7 @@ static int __maybe_unused ti_sn_bridge_suspend(struct device *dev)
struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
int ret;
- gpiod_set_value(pdata->enable_gpio, 0);
+ gpiod_set_value_cansleep(pdata->enable_gpio, 0);
ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
if (ret)
@@ -286,12 +288,12 @@ static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
static enum drm_connector_status
ti_sn_bridge_connector_detect(struct drm_connector *connector, bool force)
{
- /**
- * TODO: Currently if drm_panel is present, then always
- * return the status as connected. Need to add support to detect
- * device state for hot pluggable scenarios.
- */
- return connector_status_connected;
+ struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
+
+ if (pdata->plugged)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
}
static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
@@ -325,14 +327,9 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
- int ret, val;
+ int ret;
+ u8 link_status[DP_LINK_STATUS_SIZE];
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- struct mipi_dsi_host *host;
- struct mipi_dsi_device *dsi;
- const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
- .channel = 0,
- .node = NULL,
- };
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
@@ -351,57 +348,13 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
&ti_sn_bridge_connector_helper_funcs);
drm_connector_attach_encoder(&pdata->connector, bridge->encoder);
- /*
- * TODO: ideally finding host resource and dsi dev registration needs
- * to be done in bridge probe. But some existing DSI host drivers will
- * wait for any of the drm_bridge/drm_panel to get added to the global
- * bridge/panel list, before completing their probe. So if we do the
- * dsi dev registration part in bridge probe, before populating in
- * the global bridge list, then it will cause deadlock as dsi host probe
- * will never complete, neither our bridge probe. So keeping it here
- * will satisfy most of the existing host drivers. Once the host driver
- * is fixed we can move the below code to bridge probe safely.
- */
- host = of_find_mipi_dsi_host_by_node(pdata->host_node);
- if (!host) {
- DRM_ERROR("failed to find dsi host\n");
- ret = -ENODEV;
- goto err_dsi_host;
- }
-
- dsi = mipi_dsi_device_register_full(host, &info);
- if (IS_ERR(dsi)) {
- DRM_ERROR("failed to create dsi device\n");
- ret = PTR_ERR(dsi);
- goto err_dsi_host;
- }
-
- /* TODO: setting to 4 MIPI lanes always for now */
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
-
- /* check if continuous dsi clock is required or not */
- pm_runtime_get_sync(pdata->dev);
- regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
- pm_runtime_put(pdata->dev);
- if (!(val & DPPLL_CLK_SRC_DSICLK))
- dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
-
- ret = mipi_dsi_attach(dsi);
- if (ret < 0) {
- DRM_ERROR("failed to attach dsi to host\n");
- goto err_dsi_attach;
- }
- pdata->dsi = dsi;
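+ /*
+ * Probe the DPCD link status once at attach time; a successful AUX
+ * read indicates that a sink is present.
+ */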
+ ret = drm_dp_dpcd_read_link_status(&pdata->aux, link_status);
+ if (ret < 0)
+ pdata->plugged = false;
+ else
+ pdata->plugged = true;
return 0;
-
-err_dsi_attach:
- mipi_dsi_device_unregister(dsi);
-err_dsi_host:
- drm_connector_cleanup(&pdata->connector);
- return ret;
}
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
@@ -721,6 +674,15 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
int ret = -EINVAL;
int max_dp_lanes;
+ /*
+ * Disable ASSR Display Authentication, since it is supported only on
+ * eDP and not supported on DP.
+ */
+ regmap_write(pdata->regmap, 0xFF, 0x7);
+ regmap_write(pdata->regmap, 0x16, 0x1);
+ regmap_write(pdata->regmap, 0xFF, 0x0);
+ regmap_update_bits(pdata->regmap, 0x5A, BIT(0), 0);
+
max_dp_lanes = ti_sn_get_max_lanes(pdata);
pdata->dp_lanes = min(pdata->dp_lanes, max_dp_lanes);
@@ -736,15 +698,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
/* set dsi clk frequency value */
ti_sn_bridge_set_dsi_rate(pdata);
- /**
- * The SN65DSI86 only supports ASSR Display Authentication method and
- * this method is enabled by default. An eDP panel must support this
- * authentication method. We need to enable this method in the eDP panel
- * at DisplayPort address 0x0010A prior to link training.
- */
- drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
- DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
-
/* Set the DP output format (18 bpp or 24 bpp) */
val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
@@ -860,6 +813,14 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
return -EINVAL;
}
+ ret = pm_runtime_get_sync(pdata->dev);
+ if (ret < 0) {
+ dev_err(pdata->dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(pdata->dev);
+ return ret;
+ }
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
+
regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG,
(msg->address >> 16) & 0xF);
regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG,
@@ -886,31 +847,38 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
!(val & AUX_CMD_SEND), 200,
50 * 1000);
if (ret)
- return ret;
+ goto put_pm_runtime;
ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
if (ret)
- return ret;
- else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL)
- || (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT)
- || (val & AUX_IRQ_STATUS_AUX_SHORT))
- return -ENXIO;
+ goto put_pm_runtime;
+ else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL) ||
+ (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) ||
+ (val & AUX_IRQ_STATUS_AUX_SHORT)) {
+ ret = -ENXIO;
+ goto put_pm_runtime;
+ }
- if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
- return msg->size;
+ if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) {
+ ret = msg->size;
+ goto put_pm_runtime;
+ }
for (i = 0; i < msg->size; i++) {
unsigned int val;
ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i),
&val);
if (ret)
- return ret;
+ goto put_pm_runtime;
WARN_ON(val & ~0xFF);
buf[i] = (u8)(val & 0xFF);
}
- return msg->size;
+ ret = msg->size;
+put_pm_runtime:
+ pm_runtime_put_sync(pdata->dev);
+ return ret;
}
static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
@@ -1161,7 +1129,13 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ti_sn_bridge *pdata;
- int ret;
+ int ret, val;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
+ .channel = 0,
+ .node = client->dev.of_node,
+ };
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
DRM_ERROR("device doesn't support I2C\n");
@@ -1220,6 +1194,12 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
if (ret)
return ret;
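+ /* The DSI host driver may not have probed yet; defer until it has. */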
+ host = of_find_mipi_dsi_host_by_node(pdata->host_node);
+ if (!host) {
+ DRM_ERROR("failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
pm_runtime_enable(pdata->dev);
ret = ti_sn_setup_gpio_controller(pdata);
@@ -1240,6 +1220,35 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
drm_bridge_add(&pdata->bridge);
+ /* Register the DSI device and attach it to the DSI host. */
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ DRM_ERROR("failed to create dsi device\n");
+ return PTR_ERR(dsi);
+ }
+
+ /* TODO: setting to 2 MIPI lanes always for now */
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->mode_flags |= MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ /* check if continuous dsi clock is required or not */
+ pm_runtime_get_sync(pdata->dev);
+ regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
+ pm_runtime_put(pdata->dev);
+ if (!(val & DPPLL_CLK_SRC_DSICLK))
+ dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_ERROR("failed to attach dsi to host\n");
+ return ret;
+ }
+ pdata->dsi = dsi;
+
ti_sn_debugfs_init(pdata);
return 0;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7fc8e7000046..44916acdfc64 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3170,6 +3170,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_connector_list_iter conn_iter;
struct drm_plane *plane;
struct drm_crtc *crtc;
+ struct drm_private_obj *privobj;
int err = 0;
state = drm_atomic_state_alloc(dev);
@@ -3199,6 +3200,16 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
}
}
+ drm_for_each_privobj(privobj, dev) {
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, privobj);
+ if (IS_ERR(priv_state)) {
+ err = PTR_ERR(priv_state);
+ goto free;
+ }
+ }
+
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
@@ -3306,12 +3317,17 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_connector_state *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
+ struct drm_private_obj *privobj;
+ struct drm_private_state *new_priv_state;
state->acquire_ctx = ctx;
for_each_new_plane_in_state(state, plane, new_plane_state, i)
state->planes[i].old_state = plane->state;
+ for_each_new_private_obj_in_state(state, privobj, new_priv_state, i)
+ state->private_objs[i].old_state = privobj->state;
+
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 5417e7a47072..5b8da663e739 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -11,6 +11,17 @@ config DRM_OMAP
if DRM_OMAP
+config DRM_OMAP_WB
+ bool "Enable writeback support for OMAP DRM driver"
+ depends on DRM_OMAP
+ depends on (VIDEO_V4L2 = y) || (VIDEO_V4L2 = m && DRM_OMAP = m)
+ depends on VIDEO_DEV && HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ help
+ Select this to enable memory-to-memory/capture writeback support.
+
source "drivers/gpu/drm/omapdrm/dss/Kconfig"
source "drivers/gpu/drm/omapdrm/displays/Kconfig"
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index f115253115c5..15139009e445 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -12,6 +12,7 @@ omapdrm-y := omap_drv.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
+ omap_overlay.o \
omap_encoder.o \
omap_connector.o \
omap_fb.o \
@@ -22,4 +23,6 @@ omapdrm-y := omap_drv.o \
omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
+omapdrm-$(CONFIG_DRM_OMAP_WB) += omap_wb.o omap_wb_cap.o omap_wb_m2m.o
+
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 48593932bddf..9fc900899bcd 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -92,6 +92,8 @@ struct dispc_features {
u8 mgr_height_start;
u16 mgr_width_max;
u16 mgr_height_max;
+ u16 ovl_width_max;
+ u16 ovl_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
unsigned int max_downscale;
@@ -892,32 +894,91 @@ static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
#undef CVAL
}
-static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
+ 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ true, /* full range */
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ false, /* limited range */
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
+ 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ true, /* full range */
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
+ 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ false, /* limited range */
+};
+
+/* RGB -> YUV, ITU-R BT.601, limited range */
+static const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
+ 66, 129, 25, /* yr, yg, yb | 0.257 0.504 0.098|*/
+ -38, -74, 112, /* cbr, cbg, cbb |-0.148 -0.291 0.439|*/
+ 112, -94, -18, /* crr, crg, crb | 0.439 -0.368 -0.071|*/
+ false, /* limited range */
+};
+
+/* RGB -> YUV, ITU-R BT.601, full range */
+static const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_full = {
+ 77, 150, 29, /* yr, yg, yb | 0.299 0.587 0.114|*/
+ -43, -85, 128, /* cbr, cbg, cbb |-0.173 -0.339 0.511|*/
+ 128, -107, -21, /* crr, crg, crb | 0.511 -0.428 -0.083|*/
+ true, /* full range */
+};
+
+/* RGB -> YUV, ITU-R BT.709, limited range */
+static const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt709_lim = {
+ 47, 157, 16, /* yr, yg, yb | 0.1826 0.6142 0.0620|*/
+ -26, -87, 112, /* cbr, cbg, cbb |-0.1006 -0.3386 0.4392|*/
+ 112, -102, -10, /* crr, crg, crb | 0.4392 -0.3989 -0.0403|*/
+ false, /* limited range */
+};
+
+static int dispc_ovl_set_csc(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
- int i;
- int num_ovl = dispc_get_num_ovls(dispc);
-
- /* YUV -> RGB, ITU-R BT.601, limited range */
- const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
- 298, 0, 409, /* ry, rcb, rcr */
- 298, -100, -208, /* gy, gcb, gcr */
- 298, 516, 0, /* by, bcb, bcr */
- false, /* limited range */
- };
+ const struct csc_coef_yuv2rgb *csc;
- /* RGB -> YUV, ITU-R BT.601, limited range */
- const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
- 66, 129, 25, /* yr, yg, yb */
- -38, -74, 112, /* cbr, cbg, cbb */
- 112, -94, -18, /* crr, crg, crb */
- false, /* limited range */
- };
+ switch (color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt601_full;
+ else
+ csc = &coefs_yuv2rgb_bt601_lim;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt709_full;
+ else
+ csc = &coefs_yuv2rgb_bt709_lim;
+ break;
+ default:
+ DSSERR("Unsupported CSC mode %d for plane %d\n",
+ color_encoding, plane);
+ return -EINVAL;
+ }
- for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
+ dispc_ovl_write_color_conv_coef(dispc, plane, csc);
- if (dispc->feat->has_writeback)
- dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
+ return 0;
}
static void dispc_ovl_set_ba0(struct dispc_device *dispc,
@@ -2475,6 +2536,12 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
return 0;
}
+static enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc,
+ enum omap_plane_id plane)
+{
+ return dispc->feat->overlay_caps[plane];
+}
+
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
@@ -2587,6 +2654,13 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
return 0;
}
+static void dispc_ovl_get_max_size(struct dispc_device *dispc,
+ u16 *width, u16 *height)
+{
+ *width = dispc->feat->ovl_width_max;
+ *height = dispc->feat->ovl_height_max;
+}
+
static int dispc_ovl_setup_common(struct dispc_device *dispc,
enum omap_plane_id plane,
enum omap_overlay_caps caps,
@@ -2598,7 +2672,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
u8 pre_mult_alpha, u8 global_alpha,
enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
- bool mem_to_mem)
+ bool mem_to_mem,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
bool five_taps = true;
bool fieldmode = false;
@@ -2747,6 +2823,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
fieldmode, fourcc, rotation);
dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
+
+ if (plane != OMAP_DSS_WB)
+ dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
}
dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
@@ -2783,7 +2862,8 @@ static int dispc_ovl_setup(struct dispc_device *dispc,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, vm, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem,
+ oi->color_encoding, oi->color_range);
return r;
}
@@ -2816,7 +2896,8 @@ static int dispc_wb_setup(struct dispc_device *dispc,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, vm, mem_to_mem);
+ replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
@@ -2976,7 +3057,7 @@ static void dispc_mgr_setup(struct dispc_device *dispc,
info->trans_key);
dispc_mgr_enable_trans_key(dispc, channel, info->trans_enabled);
dispc_mgr_enable_alpha_fixed_zorder(dispc, channel,
- info->partial_alpha_enabled);
+ info->alpha_blender_enabled);
if (dispc_has_feature(dispc, FEAT_CPR)) {
dispc_mgr_enable_cpr(dispc, channel, info->cpr_enable);
dispc_mgr_set_cpr_coef(dispc, channel, &info->cpr_coefs);
@@ -3927,7 +4008,8 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc)
dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef(dispc);
+ if (dispc->feat->has_writeback)
+ dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_full);
dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
@@ -4223,6 +4305,8 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 66500000,
.max_downscale = 2,
/*
@@ -4261,6 +4345,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4296,6 +4382,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4331,6 +4419,8 @@ static const struct dispc_features omap36xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4366,6 +4456,8 @@ static const struct dispc_features am43xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
.max_downscale = 4,
@@ -4401,6 +4493,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_start = 26,
.mgr_width_max = 2048,
.mgr_height_max = 2048,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
.max_downscale = 4,
@@ -4440,8 +4534,10 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_start = 27,
.mgr_width_max = 4096,
.mgr_height_max = 4096,
+ .ovl_width_max = 2048,
+ .ovl_height_max = 4096,
.max_lcd_pclk = 170000000,
- .max_tv_pclk = 186000000,
+ .max_tv_pclk = 192000000,
.max_downscale = 4,
.max_line_width = 2048,
.min_pcd = 1,
@@ -4571,7 +4667,7 @@ static const struct dispc_errata_i734_data {
.mgri = {
.default_color = 0,
.trans_enabled = false,
- .partial_alpha_enabled = false,
+ .alpha_blender_enabled = false,
.cpr_enable = false,
},
.lcd_conf = {
@@ -4714,6 +4810,9 @@ static const struct dispc_ops dispc_ops = {
.ovl_enable = dispc_ovl_enable,
.ovl_setup = dispc_ovl_setup,
.ovl_get_color_modes = dispc_ovl_get_color_modes,
+ .ovl_color_mode_supported = dispc_ovl_color_mode_supported,
+ .ovl_get_caps = dispc_ovl_get_caps,
+ .ovl_get_max_size = dispc_ovl_get_max_size,
.wb_get_framedone_irq = dispc_wb_get_framedone_irq,
.wb_setup = dispc_wb_setup,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index ab19d4af8de7..9652eb85dea1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -14,6 +14,7 @@
#include <linux/platform_data/omapdss.h>
#include <uapi/drm/drm_mode.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_color_mgmt.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -243,6 +244,9 @@ struct omap_overlay_info {
u8 global_alpha;
u8 pre_mult_alpha;
u8 zorder;
+
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
};
struct omap_overlay_manager_info {
@@ -252,7 +256,7 @@ struct omap_overlay_manager_info {
u32 trans_key;
bool trans_enabled;
- bool partial_alpha_enabled;
+ bool alpha_blender_enabled;
bool cpr_enable;
struct omap_dss_cpr_coefs cpr_coefs;
@@ -579,6 +583,12 @@ struct dispc_ops {
const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
enum omap_plane_id plane);
+ bool (*ovl_color_mode_supported)(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc);
+ enum omap_overlay_caps (*ovl_get_caps)(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+ void (*ovl_get_max_size)(struct dispc_device *dispc,
+ u16 *width, u16 *height);
u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
int (*wb_setup)(struct dispc_device *dispc,
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 328a4a74f534..acd45ae07090 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -24,6 +24,11 @@ struct omap_crtc_state {
unsigned int rotation;
unsigned int zpos;
bool manually_updated;
+
+ u32 default_color;
+ unsigned int trans_key_mode;
+ unsigned int trans_key;
+ bool alpha_blender_enabled;
};
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
@@ -391,18 +396,72 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
}
}
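+/*
+ * Convert a CTM coefficient from the DRM S31.32 signed fixed-point
+ * format to the s2.8 format used by the DISPC CPR registers: strip the
+ * sign bit, drop 24 of the 32 fractional bits, clamp the magnitude to
+ * 9 bits, then reapply the sign.
+ */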
+static s16 omap_crtc_S31_32_to_s2_8(s64 coef)
+{
+ uint64_t sign_bit = 1ULL << 63;
+ uint64_t cbits = (uint64_t) coef;
+ s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+ if (cbits & sign_bit)
+ ret = -ret;
+
+ return ret;
+}
+
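+/*
+ * The DRM CTM matrix is stored row-major, so matrix[0..2] are the red
+ * output coefficients, matrix[3..5] the green and matrix[6..8] the blue.
+ */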
+static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
+ struct omap_dss_cpr_coefs *cpr)
+{
+ cpr->rr = omap_crtc_S31_32_to_s2_8(ctm->matrix[0]);
+ cpr->rg = omap_crtc_S31_32_to_s2_8(ctm->matrix[1]);
+ cpr->rb = omap_crtc_S31_32_to_s2_8(ctm->matrix[2]);
+ cpr->gr = omap_crtc_S31_32_to_s2_8(ctm->matrix[3]);
+ cpr->gg = omap_crtc_S31_32_to_s2_8(ctm->matrix[4]);
+ cpr->gb = omap_crtc_S31_32_to_s2_8(ctm->matrix[5]);
+ cpr->br = omap_crtc_S31_32_to_s2_8(ctm->matrix[6]);
+ cpr->bg = omap_crtc_S31_32_to_s2_8(ctm->matrix[7]);
+ cpr->bb = omap_crtc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_overlay_manager_info info;
+ const struct omap_crtc_state *omap_state =
+ to_omap_crtc_state(crtc->state);
memset(&info, 0, sizeof(info));
- info.default_color = 0x000000;
- info.trans_enabled = false;
- info.partial_alpha_enabled = false;
- info.cpr_enable = false;
+ info.default_color = omap_state->default_color;
+
+ info.trans_key = omap_state->trans_key;
+
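+ /*
+ * Map the trans-key-mode property onto the DSS color-key type:
+ * 1 selects destination keying on the graphics layer, 2 selects
+ * source keying on the video layers, anything else disables keying.
+ */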
+ switch (omap_state->trans_key_mode) {
+ case 0:
+ default:
+ info.trans_enabled = false;
+ break;
+ case 1:
+ info.trans_enabled = true;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ break;
+ case 2:
+ info.trans_enabled = true;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ break;
+ }
+
+ info.alpha_blender_enabled = omap_state->alpha_blender_enabled;
+
+ if (crtc->state->ctm) {
+ struct drm_color_ctm *ctm =
+ (struct drm_color_ctm *) crtc->state->ctm->data;
+
+ info.cpr_enable = true;
+ omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
+ } else {
+ info.cpr_enable = false;
+ }
priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
@@ -571,6 +630,7 @@ static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ const struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
struct drm_plane_state *pri_state;
if (state->color_mgmt_changed && state->gamma_lut) {
@@ -581,6 +641,25 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (omap_state->trans_key_mode) {
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ u32 zpos_mask = 0;
+
+ drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+ plane_state = drm_atomic_get_plane_state(state->state,
+ plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ if (zpos_mask & BIT(plane_state->zpos))
+ return -EINVAL;
+
+ zpos_mask |= BIT(plane_state->zpos);
+ plane_state->normalized_zpos = plane_state->zpos;
+ }
+ }
+
pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary);
if (pri_state) {
struct omap_crtc_state *omap_crtc_state =
@@ -657,6 +736,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct drm_plane_state *plane_state;
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
/*
* Delegate property set to the primary plane. Get the plane state and
@@ -672,6 +752,14 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
plane_state->rotation = val;
else if (property == priv->zorder_prop)
plane_state->zpos = val;
+ else if (property == priv->background_color_prop)
+ omap_state->default_color = val;
+ else if (property == priv->trans_key_mode_prop)
+ omap_state->trans_key_mode = val;
+ else if (property == priv->trans_key_prop)
+ omap_state->trans_key = val;
+ else if (property == priv->alpha_blender_prop)
+ omap_state->alpha_blender_enabled = !!val;
else
return -EINVAL;
@@ -690,12 +778,28 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
*val = omap_state->rotation;
else if (property == priv->zorder_prop)
*val = omap_state->zpos;
+ else if (property == priv->background_color_prop)
+ *val = omap_state->default_color;
+ else if (property == priv->trans_key_mode_prop)
+ *val = omap_state->trans_key_mode;
+ else if (property == priv->trans_key_prop)
+ *val = omap_state->trans_key;
+ else if (property == priv->alpha_blender_prop)
+ *val = omap_state->alpha_blender_enabled;
else
return -EINVAL;
return 0;
}
+int omap_crtc_atomic_get_trans_key_mode(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state)
+{
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
+
+ return omap_state->trans_key_mode;
+}
+
static void omap_crtc_reset(struct drm_crtc *crtc)
{
struct omap_crtc_state *state;
@@ -730,6 +834,12 @@ omap_crtc_duplicate_state(struct drm_crtc *crtc)
state->rotation = current_state->rotation;
state->manually_updated = current_state->manually_updated;
+ state->default_color = current_state->default_color;
+
+ state->trans_key_mode = current_state->trans_key_mode;
+ state->trans_key = current_state->trans_key;
+ state->alpha_blender_enabled = current_state->alpha_blender_enabled;
+
return &state->base;
}
@@ -778,6 +888,18 @@ void omap_crtc_pre_uninit(struct omap_drm_private *priv)
dss_uninstall_mgr_ops(priv->dss);
}
+static void omap_crtc_install_properties(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_object *obj = &crtc->base;
+ struct omap_drm_private *priv = dev->dev_private;
+
+ drm_object_attach_property(obj, priv->background_color_prop, 0);
+ drm_object_attach_property(obj, priv->trans_key_mode_prop, 0);
+ drm_object_attach_property(obj, priv->trans_key_prop, 0);
+ drm_object_attach_property(obj, priv->alpha_blender_prop, 0);
+}
+
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
@@ -839,10 +961,11 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
- drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, true, gamma_lut_size);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
}
+ omap_crtc_install_properties(crtc);
omap_plane_install_properties(crtc->primary, &crtc->base);
return crtc;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index 2fd57751ae2b..6da186dc618c 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -32,5 +32,8 @@ void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
void omap_crtc_framedone_irq(struct drm_crtc *crtc, uint32_t irqstatus);
void omap_crtc_flush(struct drm_crtc *crtc);
+int omap_crtc_atomic_get_trans_key_mode(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state);
+
#endif /* __OMAPDRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 53d5e184ee77..43b6a0a48a0c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -116,6 +116,98 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
priv->dispc_ops->runtime_put(priv->dispc);
}
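+/*
+ * Compare two plane states by normalized_zpos, falling back to the
+ * plane object ID for a stable ordering when the zpos values match.
+ */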
+static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
+ if (sa->normalized_zpos != sb->normalized_zpos)
+ return sa->normalized_zpos - sb->normalized_zpos;
+ else
+ return sa->plane->base.id - sb->plane->base.id;
+}
+
+static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_state, *new_state;
+ struct drm_plane *plane;
+ int c, i, n, inc;
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+ int ret = 0;
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
+ if (old_state->plane_mask == new_state->plane_mask &&
+ !new_state->zpos_changed)
+ continue;
+
+ if (omap_crtc_atomic_get_trans_key_mode(crtc, new_state))
+ continue;
+
+ /* Reset plane increment and index value for every crtc */
+ n = 0;
+
+ /*
+ * The normalization process might create new states for planes
+ * whose normalized_zpos has to be recalculated.
+ */
+ drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(new_state->state,
+ plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+ states[n++] = plane_state;
+ }
+
+ sort(states, n, sizeof(*states),
+ drm_atomic_state_normalized_zpos_cmp, NULL);
+
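+ /*
+ * Reassign consecutive normalized zpos values; a dual-overlay
+ * plane consumes two hardware overlays, so leave a gap after it
+ * for its right-side overlay (main zorder + 1).
+ */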
+ for (i = 0, inc = 0; i < n; i++) {
+ plane = states[i]->plane;
+
+ states[i]->normalized_zpos = i + inc;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
+ plane->base.id, plane->name,
+ states[i]->normalized_zpos);
+
+ if (is_omap_plane_dual_overlay(states[i]))
+ inc++;
+ }
+ new_state->zpos_changed = true;
+ }
+
+done:
+ kfree(states);
+ return ret;
+}
+
+static int omap_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.normalize_zpos) {
+ ret = omap_atomic_update_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
.atomic_commit_tail = omap_atomic_commit_tail,
};
@@ -123,10 +215,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = omap_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct omap_global_state *
+omap_get_existing_global_state(struct omap_drm_private *priv)
+{
+ return to_omap_global_state(priv->glob_obj.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct omap_global_state *__must_check
+omap_get_global_state(struct drm_atomic_state *s)
+{
+ struct omap_drm_private *priv = s->dev->dev_private;
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_omap_global_state(priv_state);
+}
+
+static struct drm_private_state *
+omap_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct omap_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void omap_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct omap_global_state *omap_state = to_omap_global_state(state);
+
+ kfree(omap_state);
+}
+
+static const struct drm_private_state_funcs omap_global_state_funcs = {
+ .atomic_duplicate_state = omap_global_duplicate_state,
+ .atomic_destroy_state = omap_global_destroy_state,
+};
+
+static int omap_global_obj_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_global_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
+ &omap_global_state_funcs);
+ return 0;
+}
+
+static void omap_global_obj_fini(struct omap_drm_private *priv)
+{
+ drm_atomic_private_obj_fini(&priv->glob_obj);
+}
+
static void omap_disconnect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
@@ -194,11 +362,40 @@ static int omap_modeset_init_properties(struct drm_device *dev)
struct omap_drm_private *priv = dev->dev_private;
unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ static const struct drm_prop_enum_list trans_key_mode_list[] = {
+ { 0, "disable"},
+ { 1, "gfx-dst"},
+ { 2, "vid-src"},
+ };
+
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
if (!priv->zorder_prop)
return -ENOMEM;
+ /* crtc properties */
+
+ priv->background_color_prop = drm_property_create_range(dev, 0,
+ "background", 0, 0xffffff);
+ if (!priv->background_color_prop)
+ return -ENOMEM;
+
+ priv->trans_key_mode_prop = drm_property_create_enum(dev, 0,
+ "trans-key-mode",
+ trans_key_mode_list, ARRAY_SIZE(trans_key_mode_list));
+ if (!priv->trans_key_mode_prop)
+ return -ENOMEM;
+
+ priv->trans_key_prop = drm_property_create_range(dev, 0, "trans-key",
+ 0, 0xffffff);
+ if (!priv->trans_key_prop)
+ return -ENOMEM;
+
+ priv->alpha_blender_prop = drm_property_create_bool(dev, 0,
+ "alpha_blender");
+ if (!priv->alpha_blender_prop)
+ return -ENOMEM;
+
return 0;
}
@@ -237,8 +434,6 @@ static int omap_modeset_init(struct drm_device *dev)
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
- drm_mode_config_init(dev);
-
ret = omap_modeset_init_properties(dev);
if (ret < 0)
return ret;
@@ -611,10 +806,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_gem_init(ddev);
+ drm_mode_config_init(ddev);
+
+ ret = omap_global_obj_init(ddev);
+ if (ret)
+ goto err_gem_deinit;
+
+ ret = omap_hwoverlays_init(priv);
+ if (ret)
+ goto err_free_overlays;
+
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
- goto err_gem_deinit;
+ goto err_free_overlays;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
@@ -629,6 +834,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
drm_kms_helper_poll_init(ddev);
omap_modeset_enable_external_hpd(ddev);
+ if (priv->dispc_ops->has_writeback(priv->dispc)) {
+ ret = omap_wb_init(ddev);
+ if (ret)
+ dev_warn(priv->dev, "failed to initialize writeback\n");
+ else
+ priv->wb_initialized = true;
+ }
+
/*
* Register the DRM device with the core and the connectors with
* sysfs.
@@ -640,13 +853,22 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
return 0;
err_cleanup_helpers:
+ if (priv->wb_initialized)
+ omap_wb_cleanup(ddev);
+
omap_modeset_disable_external_hpd(ddev);
+
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
err_cleanup_modeset:
omap_modeset_fini(ddev);
+err_free_overlays:
+ omap_hwoverlays_destroy(priv);
+err_free_priv_obj:
+ omap_global_obj_fini(priv);
err_gem_deinit:
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
@@ -663,6 +885,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_dev_unregister(ddev);
+ if (priv->wb_initialized)
+ omap_wb_cleanup(ddev);
+
omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -671,6 +896,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
omap_modeset_fini(ddev);
+ omap_hwoverlays_destroy(priv);
+ omap_global_obj_fini(priv);
+ drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 8a1fac680138..67286b0c88ae 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -13,6 +13,7 @@
#include "dss/omapdss.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
@@ -24,6 +25,7 @@
#include "omap_gem.h"
#include "omap_irq.h"
#include "omap_plane.h"
+#include "omap_overlay.h"
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -40,6 +42,20 @@ struct omap_drm_pipeline {
unsigned int alias_id;
};
+/*
+ * Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_omap_global_state(x) container_of(x, struct omap_global_state, base)
+struct omap_global_state {
+ struct drm_private_state base;
+
+ struct drm_atomic_state *state;
+
+ /* global atomic state of assignment between overlays and planes */
+ struct drm_plane *hwoverlay_to_plane[8];
+};
+
struct omap_drm_private {
struct drm_device *ddev;
struct device *dev;
@@ -56,6 +72,16 @@ struct omap_drm_private {
unsigned int num_planes;
struct drm_plane *planes[8];
+ unsigned int num_ovls;
+ struct omap_hw_overlay *overlays[8];
+
+ /*
+ * Global private object state. Do not access directly; use
+ * omap_get_global_state().
+ */
+ struct drm_modeset_lock glob_obj_lock;
+ struct drm_private_obj glob_obj;
+
struct drm_fb_helper *fbdev;
struct workqueue_struct *wq;
@@ -72,6 +98,12 @@ struct omap_drm_private {
/* properties: */
struct drm_property *zorder_prop;
+ /* crtc properties */
+ struct drm_property *background_color_prop;
+ struct drm_property *trans_key_mode_prop;
+ struct drm_property *trans_key_prop;
+ struct drm_property *alpha_blender_prop;
+
/* irq handling: */
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
@@ -79,9 +111,36 @@ struct omap_drm_private {
/* memory bandwidth limit if it is needed on the platform */
unsigned int max_bandwidth;
+
+ void *wb_private; /* Write-back private data */
+ bool wb_initialized;
};
void omap_debugfs_init(struct drm_minor *minor);
+struct omap_global_state *__must_check
+omap_get_global_state(struct drm_atomic_state *s);
+struct omap_global_state *
+omap_get_existing_global_state(struct omap_drm_private *priv);
+
+#if IS_ENABLED(CONFIG_DRM_OMAP_WB)
+
+#define OMAP_WB_IRQ_MASK (DISPC_IRQ_FRAMEDONEWB | \
+ DISPC_IRQ_WBBUFFEROVERFLOW | \
+ DISPC_IRQ_WBUNCOMPLETEERROR)
+
+int omap_wb_init(struct drm_device *drmdev);
+void omap_wb_cleanup(struct drm_device *drmdev);
+void omap_wb_irq(void *priv, u32 irqstatus);
+
+#else
+
+#define OMAP_WB_IRQ_MASK (0)
+
+static inline int omap_wb_init(struct drm_device *drmdev) { return 0; }
+static inline void omap_wb_cleanup(struct drm_device *drmdev) { }
+static inline void omap_wb_irq(void *priv, u32 irqstatus) { }
+
+#endif
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 05f30e2618c9..239064fa9b1d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot)
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info)
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
@@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
} else {
info->p_uv_addr = 0;
}
+
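+ /*
+ * For a dual-overlay plane each overlay scans out half of the
+ * source and destination width. YUV formats need even widths, so
+ * if the halved width is odd, grow the left half and shrink the
+ * right half by one pixel to keep both halves even.
+ */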
+ if (r_info) {
+ info->width /= 2;
+ info->out_width /= 2;
+
+ *r_info = *info;
+
+ if (fb->format->is_yuv) {
+ if (info->width & 1) {
+ info->width++;
+ r_info->width--;
+ }
+
+ if (info->out_width & 1) {
+ info->out_width++;
+ r_info->out_width--;
+ }
+ }
+
+ r_info->pos_x = info->pos_x + info->out_width;
+
+ r_info->paddr = get_linear_addr(fb, format, 0,
+ x + info->width, y);
+ if (fb->format->format == DRM_FORMAT_NV12) {
+ r_info->p_uv_addr =
+ get_linear_addr(fb, format, 1,
+ x + info->width, y);
+ }
+ }
}
/* pin, prepare for scanout: */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index c0e19aed8220..b75f0b5ef1d8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info);
+ struct drm_plane_state *state,
+ struct omap_overlay_info *info,
+ struct omap_overlay_info *r_info);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 382bcdc72ac0..a02fce5a64b9 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -237,6 +237,7 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
omap_irq_ocp_error_handler(dev, irqstatus);
omap_irq_fifo_underflow(priv, irqstatus);
+ omap_wb_irq(priv->wb_private, irqstatus);
spin_lock_irqsave(&priv->wait_lock, flags);
list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
@@ -285,6 +286,9 @@ int omap_drm_irq_install(struct drm_device *dev)
for (i = 0; i < num_mgrs; ++i)
priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
+ if (priv->dispc_ops->has_writeback(priv->dispc))
+ priv->irq_mask |= OMAP_WB_IRQ_MASK;
+
priv->dispc_ops->runtime_get(priv->dispc);
priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
priv->dispc_ops->runtime_put(priv->dispc);
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
new file mode 100644
index 000000000000..2210827b4521
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_drv.h"
+
+/*
+ * overlay funcs
+ */
+static const char * const overlay_id_to_name[] = {
+ [OMAP_DSS_GFX] = "gfx",
+ [OMAP_DSS_VIDEO1] = "vid1",
+ [OMAP_DSS_VIDEO2] = "vid2",
+ [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static struct omap_hw_overlay *
+omap_plane_find_free_overlay(struct drm_device *dev,
+ struct drm_plane *hwoverlay_to_plane[],
+ u32 caps, u32 fourcc, u32 crtc_mask)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ const struct dispc_ops *ops = priv->dispc_ops;
+ int i;
+
+ DBG("caps: %x fourcc: %x crtc: %x", caps, fourcc, crtc_mask);
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ struct omap_hw_overlay *cur = priv->overlays[i];
+
+ DBG("%d: id: %d cur->caps: %x cur->crtc: %x",
+ cur->idx, cur->overlay_id, cur->caps, cur->possible_crtcs);
+
+ /* skip if already in-use */
+ if (hwoverlay_to_plane[cur->idx])
+ continue;
+
+ /* check if allowed on crtc */
+ if (!(cur->possible_crtcs & crtc_mask))
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /* check supported format */
+ if (!ops->ovl_color_mode_supported(priv->dispc,
+ cur->overlay_id,
+ fourcc))
+ continue;
+
+ return cur;
+ }
+
+ DBG("no match");
+ return NULL;
+}
+
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, u32 crtc_mask,
+ struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay)
+{
+ struct omap_drm_private *priv = s->dev->dev_private;
+ struct omap_global_state *new_global_state, *old_global_state;
+ struct drm_plane **overlay_map;
+ struct omap_hw_overlay *ovl, *r_ovl;
+ u32 save_possible_crtcs;
+
+ new_global_state = omap_get_global_state(s);
+ if (IS_ERR(new_global_state))
+ return PTR_ERR(new_global_state);
+
+ /*
+ * Grab the old state after omap_get_global_state(), since we
+ * now hold the lock:
+ */
+ old_global_state = omap_get_existing_global_state(priv);
+ DBG("new_global_state: %p old_global_state: %p",
+ new_global_state, old_global_state);
+
+ overlay_map = new_global_state->hwoverlay_to_plane;
+
+ if (!*overlay) {
+ ovl = omap_plane_find_free_overlay(s->dev, overlay_map,
+ caps, fourcc, crtc_mask);
+ if (!ovl)
+ return -ENOMEM;
+
+ /* in case we need to backtrack */
+ save_possible_crtcs = ovl->possible_crtcs;
+
+ ovl->possible_crtcs = crtc_mask;
+ overlay_map[ovl->idx] = plane;
+ *overlay = ovl;
+
+ if (r_overlay) {
+ r_ovl = omap_plane_find_free_overlay(s->dev,
+ overlay_map,
+ caps, fourcc,
+ crtc_mask);
+ if (!r_ovl) {
+ ovl->possible_crtcs = save_possible_crtcs;
+ overlay_map[ovl->idx] = NULL;
+ *overlay = NULL;
+ return -ENOMEM;
+ }
+
+ r_ovl->possible_crtcs = crtc_mask;
+ overlay_map[r_ovl->idx] = plane;
+ *r_overlay = r_ovl;
+ }
+
+ DBG("%s: assign to plane %s caps %x on crtc %x",
+ (*overlay)->name, plane->name, caps, crtc_mask);
+
+ if (r_overlay) {
+ DBG("%s: assign to right of plane %s caps %x on crtc %x",
+ (*r_overlay)->name, plane->name, caps, crtc_mask);
+ }
+ }
+
+ return 0;
+}
+
+void omap_overlay_release(struct drm_atomic_state *s,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay)
+{
+ struct omap_global_state *state = omap_get_global_state(s);
+ struct drm_plane **overlay_map = state->hwoverlay_to_plane;
+
+ if (!overlay)
+ return;
+
+ if (WARN_ON(!overlay_map[overlay->idx]))
+ return;
+ /*
+ * Check that the overlay we are releasing is actually
+ * assigned to the plane we are trying to release it from.
+ */
+ if (overlay_map[overlay->idx] == plane) {
+ DBG("%s: release from plane %s", overlay->name, plane->name);
+
+ overlay_map[overlay->idx] = NULL;
+ }
+}
+
+void omap_overlay_disable(struct drm_atomic_state *s,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay)
+{
+ struct omap_drm_private *priv = s->dev->dev_private;
+ struct drm_plane **overlay_map;
+ struct omap_global_state *old_state;
+
+ old_state = omap_get_existing_global_state(priv);
+ overlay_map = old_state->hwoverlay_to_plane;
+
+ if (!overlay)
+ return;
+
+ /*
+ * Check that the overlay we are trying to disable has not
+ * been re-assigned to another plane already
+ */
+ if (!overlay_map[overlay->idx]) {
+ DBG("%s: on %s disabled", overlay->name, plane->name);
+
+ /* disable the overlay */
+ priv->dispc_ops->ovl_enable(priv->dispc,
+ overlay->overlay_id, false);
+
+ /*
+ * Since we are disabling this overlay in this atomic cycle,
+ * we can reset the mask of CRTCs it can be used on.
+ */
+ overlay->possible_crtcs = (1 << priv->num_pipes) - 1;
+ }
+
+ /*
+ * Otherwise the overlay is still in use, so leave it alone.
+ */
+}
+
+int omap_overlay_assign_wb(struct omap_drm_private *priv,
+ struct drm_plane *plane,
+ u32 caps, u32 fourcc, u32 crtc_mask,
+ struct omap_hw_overlay **overlay)
+{
+ struct omap_global_state *old_global_state;
+ struct drm_plane **overlay_map;
+ struct omap_hw_overlay *ovl;
+
+ /*
+ * As there is no atomic state here we can't really grab the
+ * global object lock. This might cause issues!
+ */
+ old_global_state = omap_get_existing_global_state(priv);
+ DBG("old_global_state: %p", old_global_state);
+
+ overlay_map = old_global_state->hwoverlay_to_plane;
+
+ if (!*overlay) {
+ ovl = omap_plane_find_free_overlay(plane->dev, overlay_map,
+ caps, fourcc, crtc_mask);
+ if (!ovl)
+ return -ENOMEM;
+
+ overlay_map[ovl->idx] = plane;
+ *overlay = ovl;
+
+ DBG("%s: assign to WB plane %s for caps %x",
+ (*overlay)->name, plane->name, caps);
+ }
+
+ return 0;
+}
+
+void omap_overlay_release_wb(struct omap_drm_private *priv,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay)
+{
+ struct omap_global_state *old_global_state;
+ struct drm_plane **overlay_map;
+
+ if (!overlay)
+ return;
+
+ /*
+ * As there is no atomic state here we can't really grab the
+ * global object lock. This might cause issues!
+ */
+ old_global_state = omap_get_existing_global_state(priv);
+ DBG("old_global_state: %p", old_global_state);
+
+ overlay_map = old_global_state->hwoverlay_to_plane;
+
+ if (WARN_ON(!overlay_map[overlay->idx]))
+ return;
+ /*
+ * Check that the overlay we are releasing is actually
+ * assigned to the plane we are trying to release it from.
+ */
+ if (overlay_map[overlay->idx] == plane) {
+ DBG("%s: release from WB plane %s", overlay->name, plane->name);
+
+ /*
+ * As this might get called without having done any other
+ * actual h/w access, make sure the module is enabled before
+ * trying to access it.
+ */
+ priv->dispc_ops->runtime_get(priv->dispc);
+ priv->dispc_ops->ovl_enable(priv->dispc, overlay->overlay_id,
+ false);
+ priv->dispc_ops->runtime_put(priv->dispc);
+ overlay->possible_crtcs = (1 << priv->num_pipes) - 1;
+ overlay_map[overlay->idx] = NULL;
+ }
+}
+
+static void omap_overlay_destroy(struct omap_hw_overlay *overlay)
+{
+ kfree(overlay);
+}
+
+static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id,
+ enum omap_overlay_caps caps)
+{
+ struct omap_hw_overlay *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+
+ overlay->name = overlay_id_to_name[overlay_id];
+ overlay->overlay_id = overlay_id;
+ overlay->caps = caps;
+ /*
+ * When this is called, priv->num_crtcs is not yet known.
+ * Use a safe mask value to start with; it will get updated to
+ * the proper value after the first use.
+ */
+ overlay->possible_crtcs = 0xff;
+
+ return overlay;
+}
+
+int omap_hwoverlays_init(struct omap_drm_private *priv)
+{
+ static const enum omap_plane_id hw_plane_ids[] = {
+ OMAP_DSS_GFX, OMAP_DSS_VIDEO1,
+ OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3,
+ };
+ u32 num_overlays = priv->dispc_ops->get_num_ovls(priv->dispc);
+ enum omap_overlay_caps caps;
+ int i, ret;
+
+ for (i = 0; i < num_overlays; i++) {
+ struct omap_hw_overlay *overlay;
+
+ caps = priv->dispc_ops->ovl_get_caps(priv->dispc, hw_plane_ids[i]);
+ overlay = omap_overlay_init(hw_plane_ids[i], caps);
+ if (IS_ERR(overlay)) {
+ ret = PTR_ERR(overlay);
+ dev_err(priv->dev, "failed to construct overlay for %s (%d)\n",
+ overlay_id_to_name[i], ret);
+ return ret;
+ }
+ overlay->idx = priv->num_ovls;
+ priv->overlays[priv->num_ovls++] = overlay;
+ }
+
+ return 0;
+}
+
+void omap_hwoverlays_destroy(struct omap_drm_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_ovls; i++) {
+ omap_overlay_destroy(priv->overlays[i]);
+ priv->overlays[i] = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.h b/drivers/gpu/drm/omapdrm/omap_overlay.h
new file mode 100644
index 000000000000..d3d41f53d68d
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#ifndef __OMAPDRM_OVERLAY_H__
+#define __OMAPDRM_OVERLAY_H__
+
+#include <linux/types.h>
+
+enum drm_plane_type;
+
+struct drm_device;
+struct drm_mode_object;
+struct drm_plane;
+
+/* Used to associate a HW overlay/plane with a DRM plane */
+struct omap_hw_overlay {
+ int idx;
+
+ const char *name;
+ enum omap_plane_id overlay_id;
+
+ enum omap_overlay_caps caps;
+ u32 possible_crtcs;
+};
+
+int omap_hwoverlays_init(struct omap_drm_private *priv);
+void omap_hwoverlays_destroy(struct omap_drm_private *priv);
+int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ u32 caps, u32 fourcc, u32 crtc_mask,
+ struct omap_hw_overlay **overlay,
+ struct omap_hw_overlay **r_overlay);
+void omap_overlay_release(struct drm_atomic_state *s,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay);
+void omap_overlay_disable(struct drm_atomic_state *s,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay);
+int omap_overlay_assign_wb(struct omap_drm_private *priv,
+ struct drm_plane *plane,
+ u32 caps, u32 fourcc, u32 crtc_mask,
+ struct omap_hw_overlay **overlay);
+void omap_overlay_release_wb(struct omap_drm_private *priv,
+ struct drm_plane *plane,
+ struct omap_hw_overlay *overlay);
+
+#endif /* __OMAPDRM_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 73ec99819a3d..4f4458d38ffb 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -15,14 +16,37 @@
* plane funcs
*/
+#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base)
+
+struct omap_plane_state {
+ /* Must be first. */
+ struct drm_plane_state base;
+
+ struct omap_hw_overlay *overlay;
+ struct omap_hw_overlay *r_overlay; /* right overlay */
+};
+
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
enum omap_plane_id id;
const char *name;
+
+ /*
+ * WB has no notion of atomic state, so we need to keep a
+ * reference to the allocated overlay here.
+ */
+ struct omap_hw_overlay *reserved_wb_overlay;
};
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ return !!omap_state->r_overlay;
+}
+
static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -45,9 +69,32 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *state = plane->state;
- struct omap_overlay_info info;
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+ struct omap_overlay_info info, r_info;
+ enum omap_plane_id ovl_id, r_ovl_id;
int ret;
+ bool dual_ovl;
+
+ new_omap_state = to_omap_plane_state(state);
+ old_omap_state = to_omap_plane_state(old_state);
+
+ dual_ovl = is_omap_plane_dual_overlay(state);
+ /* Cleanup previously held overlay if needed */
+ omap_overlay_disable(old_state->state, plane, old_omap_state->overlay);
+ omap_overlay_disable(old_state->state, plane,
+ old_omap_state->r_overlay);
+
+ if (!new_omap_state->overlay) {
+ DBG("[PLANE:%d:%s] overlay_id: ??? (%p)", plane->base.id, plane->name,
+ new_omap_state->overlay);
+ return;
+ }
+
+ ovl_id = new_omap_state->overlay->overlay_id;
+ DBG("[PLANE:%d:%s] overlay_id: %d", plane->base.id, plane->name,
+ ovl_id);
DBG("%s, crtc=%p fb=%p", omap_plane->name, state->crtc, state->fb);
memset(&info, 0, sizeof(info));
@@ -59,75 +106,273 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
+ info.color_encoding = state->color_encoding;
+ info.color_range = state->color_range;
+
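+	/* start the right-side overlay from the same settings as the left */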
+ r_info = info;
/* update scanout: */
- omap_framebuffer_update_scanout(state->fb, state, &info);
+ omap_framebuffer_update_scanout(state->fb, state, &info,
+ dual_ovl ? &r_info : NULL);
- DBG("%dx%d -> %dx%d (%d)", info.width, info.height,
- info.out_width, info.out_height,
- info.screen_width);
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->overlay->name, info.width, info.height,
+ info.out_width, info.out_height, info.screen_width);
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
- &info.paddr, &info.p_uv_addr);
+ &info.paddr, &info.p_uv_addr);
+
+ if (dual_ovl) {
+ r_ovl_id = new_omap_state->r_overlay->overlay_id;
+ /*
+		 * If the current plane uses two hw planes, the r_overlay
+		 * takes the very next zorder, so we just use the main
+		 * overlay zorder + 1.
+ */
+ r_info.zorder = info.zorder + 1;
+
+ DBG("%s: %dx%d -> %dx%d (%d)",
+ new_omap_state->r_overlay->name,
+ r_info.width, r_info.height,
+ r_info.out_width, r_info.out_height, r_info.screen_width);
+ DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y,
+ &r_info.paddr, &r_info.p_uv_addr);
+ }
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = priv->dispc_ops->ovl_setup(priv->dispc, ovl_id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
- dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ dev_err(plane->dev->dev, "Failed to setup plane1 %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, ovl_id, false);
return;
}
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
+ priv->dispc_ops->ovl_enable(priv->dispc, ovl_id, true);
+
+ if (dual_ovl) {
+ ret = priv->dispc_ops->ovl_setup(priv->dispc, r_ovl_id, &r_info,
+ omap_crtc_timings(state->crtc), false,
+ omap_crtc_channel(state->crtc));
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane2 %s\n",
+ omap_plane->name);
+ priv->dispc_ops->ovl_enable(priv->dispc, r_ovl_id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, ovl_id, false);
+ return;
+ }
+
+ priv->dispc_ops->ovl_enable(priv->dispc, r_ovl_id, true);
+ }
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct omap_plane_state *new_omap_state;
+ struct omap_plane_state *old_omap_state;
+
+ new_omap_state = to_omap_plane_state(state);
+ old_omap_state = to_omap_plane_state(old_state);
+
+ if (!old_omap_state->overlay)
+ return;
plane->state->rotation = DRM_MODE_ROTATE_0;
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
- ? 0 : omap_plane->id;
-
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ ? 0 : old_omap_state->overlay->overlay_id;
+
+ omap_overlay_disable(old_state->state, plane, old_omap_state->overlay);
+ new_omap_state->overlay = NULL;
+ if (is_omap_plane_dual_overlay(old_state)) {
+ omap_overlay_disable(old_state->state, plane,
+ old_omap_state->r_overlay);
+ new_omap_state->r_overlay = NULL;
+ }
}
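+
+/*
+ * 16.16 fixed point, matching the DRM atomic src coordinate format:
+ * e.g. FRAC_16_16(1, 8) == 0x2000 (0.125) and FRAC_16_16(8, 1) == 0x80000.
+ */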
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int omap_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ u16 width, height;
+ u32 width_fp, height_fp;
+ struct drm_plane_state *old_state = plane->state;
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+ struct omap_global_state *omap_overlay_global_state;
+ u32 crtc_mask;
+ u32 fourcc;
+ u32 caps = 0;
+ bool new_hw_overlay = false;
+ bool new_r_hw_overlay = false;
+ bool is_fourcc_yuv = false;
+ int min_scale, max_scale;
+ int ret;
- if (!state->fb)
- return 0;
+ if (omap_plane->reserved_wb_overlay)
+ return -EBUSY;
+
+ omap_overlay_global_state = omap_get_global_state(state->state);
+ if (IS_ERR(omap_overlay_global_state))
+ return PTR_ERR(omap_overlay_global_state);
+ DBG("%s: omap_overlay_global_state: %p", plane->name,
+ omap_overlay_global_state);
+
+ priv->dispc_ops->ovl_get_max_size(priv->dispc, &width, &height);
+ width_fp = width << 16;
+ height_fp = height << 16;
- /* crtc should only be NULL when disabling (i.e., !state->fb) */
- if (WARN_ON(!state->crtc))
+ crtc = state->crtc ? state->crtc : plane->state->crtc;
+ if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
- if (!crtc_state->enable)
- return 0;
-
- if (state->crtc_x < 0 || state->crtc_y < 0)
+ /* Make sure dimensions are within bounds. */
+ if (state->src_h > height_fp || state->crtc_h > height)
return -EINVAL;
- if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
- return -EINVAL;
+ if (state->fb)
+ is_fourcc_yuv = state->fb->format->is_yuv;
+
+ if (state->src_w > width_fp || state->crtc_w > width) {
+ /*
+ * We cannot have dual plane/overlay and trans_key_mode
+ * enabled concurrently, hence rejecting this configuration
+ */
+ if (omap_crtc_atomic_get_trans_key_mode(crtc, crtc_state))
+ return -EINVAL;
+
+ if (is_fourcc_yuv &&
+ (((state->src_w >> 16) / 2 & 1) ||
+ state->crtc_w / 2 & 1)) {
+ /*
+			 * If calculating the split overlay width yields an
+			 * odd value we will need to adjust the individual
+			 * widths by +/- 1, so make sure the result fits.
+ */
+ if (state->src_w <= ((2 * width - 1) << 16) &&
+ state->crtc_w <= (2 * width - 1))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ } else {
+ if (state->src_w <= (2 * width_fp) &&
+ state->crtc_w <= (2 * width))
+ new_r_hw_overlay = true;
+ else
+ return -EINVAL;
+ }
+ }
- if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
- return -EINVAL;
+ /*
+ * Note: these are just sanity checks to filter out totally bad scaling
+ * factors. The real limits must be calculated case by case, and
+ * unfortunately we currently do those checks only at the commit
+ * phase in dispc.
+ */
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ DBG("%s: check (%d -> %d)", plane->name,
+ old_state->visible, state->visible);
+
+ if (state->visible) {
+ if (state->rotation != DRM_MODE_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
+
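+		/* scaling is needed whenever the source and CRTC sizes differ */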
+ if ((state->src_w >> 16) != state->crtc_w ||
+ (state->src_h >> 16) != state->crtc_h)
+ caps |= OMAP_DSS_OVL_CAP_SCALE;
+
+ fourcc = state->fb->format->format;
+ crtc_mask = drm_crtc_mask(state->crtc);
+
+ /*
+ * (re)allocate hw overlay if we don't have one or
+ * there is a caps mismatch
+ */
+ if (!omap_state->overlay ||
+ (caps & ~omap_state->overlay->caps)) {
+ new_hw_overlay = true;
+ } else {
+ /* check if allowed on crtc */
+ if (!(omap_state->overlay->possible_crtcs & crtc_mask))
+ new_hw_overlay = true;
+
+ /* check supported format */
+ if (!priv->dispc_ops->ovl_color_mode_supported(priv->dispc,
+ omap_state->overlay->overlay_id,
+ fourcc))
+ new_hw_overlay = true;
+ }
+ /*
+		 * check if we need two overlays but only have one, or
+		 * if we had two overlays but will only need one
+ */
+ if ((new_r_hw_overlay && !omap_state->r_overlay) ||
+ (!new_r_hw_overlay && omap_state->r_overlay))
+ new_hw_overlay = true;
+
+ if (new_hw_overlay) {
+ struct omap_hw_overlay *old_ovl =
+ omap_state->overlay;
+ struct omap_hw_overlay *old_r_ovl =
+ omap_state->r_overlay;
+ struct omap_hw_overlay *new_ovl = NULL;
+ struct omap_hw_overlay *new_r_ovl = NULL;
+
+ omap_overlay_release(state->state, plane, old_ovl);
+ omap_overlay_release(state->state, plane, old_r_ovl);
+
+ ret = omap_overlay_assign(state->state, plane, caps,
+ fourcc, crtc_mask, &new_ovl,
+ new_r_hw_overlay ?
+ &new_r_ovl : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hw_overlay(s)!",
+ plane->name);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
+ return ret;
+ }
+
+ omap_state->overlay = new_ovl;
+ if (new_r_hw_overlay)
+ omap_state->r_overlay = new_r_ovl;
+ else
+ omap_state->r_overlay = NULL;
+ }
+ } else {
+ omap_overlay_release(state->state, plane, omap_state->overlay);
+ omap_overlay_release(state->state, plane,
+ omap_state->r_overlay);
+ omap_state->overlay = NULL;
+ omap_state->r_overlay = NULL;
+ }
- if (state->rotation != DRM_MODE_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
+ if (omap_state->overlay)
+ DBG("plane: %s overlay_id: %d", plane->name,
+ omap_state->overlay->overlay_id);
+ if (omap_state->r_overlay)
+ DBG("plane: %s r_overlay_id: %d", plane->name,
+ omap_state->r_overlay->overlay_id);
return 0;
}
@@ -178,17 +423,73 @@ void omap_plane_install_properties(struct drm_plane *plane,
static void omap_plane_reset(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_plane_state *omap_state;
- drm_atomic_helper_plane_reset(plane);
- if (!plane->state)
+ if (plane->state)
+ drm_atomic_helper_plane_destroy_state(plane, plane->state);
+
+ omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
+ if (!omap_state)
return;
+ __drm_atomic_helper_plane_reset(plane, &omap_state->base);
+
/*
* Set the zpos default depending on whether we are a primary or overlay
* plane.
*/
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
+ plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+ plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
+}
+
+static struct drm_plane_state *
+omap_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct omap_plane_state *state;
+ struct omap_plane_state *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = to_omap_plane_state(plane->state);
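+	/* kmemdup also copies the overlay/r_overlay pointers into the copy */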
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+
+ return &copy->base;
+}
+
+static void omap_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct omap_plane_state *omap_state = to_omap_plane_state(state);
+
+ drm_printf(p, "\toverlay=%s\n", omap_state->overlay ?
+ omap_state->overlay->name : "(null)");
+ if (omap_state->overlay) {
+ drm_printf(p, "\t\tidx=%d\n", omap_state->overlay->idx);
+ drm_printf(p, "\t\toverlay_id=%d\n",
+ omap_state->overlay->overlay_id);
+ drm_printf(p, "\t\tcaps=0x%x\n", omap_state->overlay->caps);
+ drm_printf(p, "\t\tpossible_crtcs=0x%x\n",
+ omap_state->overlay->possible_crtcs);
+ }
+
+ drm_printf(p, "\tr_overlay=%s\n", omap_state->r_overlay ?
+ omap_state->r_overlay->name :
+ "(null)");
+ if (omap_state->r_overlay) {
+ drm_printf(p, "\t\tidx=%d\n", omap_state->r_overlay->idx);
+ drm_printf(p, "\t\toverlay_id=%d\n",
+ omap_state->r_overlay->overlay_id);
+ drm_printf(p, "\t\tcaps=0x%x\n", omap_state->r_overlay->caps);
+ drm_printf(p, "\t\tpossible_crtcs=0x%x\n",
+ omap_state->r_overlay->possible_crtcs);
+ }
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -226,12 +527,30 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = omap_plane_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property,
+ .atomic_print_state = omap_plane_atomic_print_state,
};
+static bool omap_plane_supports_yuv(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ const u32 *formats =
+ priv->dispc_ops->ovl_get_color_modes(priv->dispc, omap_plane->id);
+ int i;
+
+ for (i = 0; formats[i]; i++)
+ if (formats[i] == DRM_FORMAT_YUYV ||
+ formats[i] == DRM_FORMAT_UYVY ||
+ formats[i] == DRM_FORMAT_NV12)
+ return true;
+
+ return false;
+}
+
static const char *plane_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
@@ -239,13 +558,6 @@ static const char *plane_id_to_name[] = {
[OMAP_DSS_VIDEO3] = "vid3",
};
-static const enum omap_plane_id plane_idx_to_id[] = {
- OMAP_DSS_GFX,
- OMAP_DSS_VIDEO1,
- OMAP_DSS_VIDEO2,
- OMAP_DSS_VIDEO3,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int idx, enum drm_plane_type type,
@@ -255,27 +567,28 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
- enum omap_plane_id id;
int ret;
u32 nformats;
const u32 *formats;
- if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id)))
+ if (WARN_ON(idx >= num_planes))
return ERR_PTR(-EINVAL);
- id = plane_idx_to_id[idx];
-
- DBG("%s: type=%d", plane_id_to_name[id], type);
-
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
+ omap_plane->id = idx;
+ omap_plane->name = plane_id_to_name[idx];
+
+ DBG("%s: type=%d", omap_plane->name, type);
+ DBG(" omap_plane->id: %d", omap_plane->id);
+ DBG(" crtc_mask: 0x%04x", possible_crtcs);
+
+ formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc,
+ omap_plane->id);
for (nformats = 0; formats[nformats]; ++nformats)
;
- omap_plane->id = id;
- omap_plane->name = plane_id_to_name[id];
plane = &omap_plane->base;
@@ -293,12 +606,85 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ if (omap_plane_supports_yuv(plane))
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
error:
dev_err(dev->dev, "%s(): could not create plane: %s\n",
- __func__, plane_id_to_name[id]);
+ __func__, omap_plane->name);
kfree(omap_plane);
return NULL;
}
+
+enum omap_plane_id omap_plane_id_wb(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ return omap_plane->reserved_wb_overlay->overlay_id;
+}
+
+struct drm_plane *omap_plane_reserve_wb(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ int i, ret;
+
+ /*
+ * Look from the last plane to the first to lessen chances of the
+ * display side trying to use the same plane as writeback.
+ */
+ for (i = priv->num_planes - 1; i >= 0; --i) {
+ struct drm_plane *plane = priv->planes[i];
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_hw_overlay *new_ovl = NULL;
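+		/* the writeback overlay may be fed from any CRTC, allow them all */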
+ u32 crtc_mask = (1 << priv->num_pipes) - 1;
+ u32 fourcc = DRM_FORMAT_YUYV;
+ u32 caps = OMAP_DSS_OVL_CAP_SCALE;
+
+ if (plane->state->crtc || plane->state->fb)
+ continue;
+
+ if (omap_plane->reserved_wb_overlay)
+ continue;
+
+ ret = omap_overlay_assign_wb(priv, plane, caps, fourcc,
+ crtc_mask, &new_ovl);
+ if (ret) {
+ DBG("%s: failed to assign hw_overlay for wb!",
+ plane->name);
+ return NULL;
+ }
+
+ omap_plane->reserved_wb_overlay = new_ovl;
+
+ return plane;
+ }
+
+ return NULL;
+}
+
+void omap_plane_release_wb(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane;
+
+ /*
+	 * This is also called on module unload, at which point plane might
+ * not be set. In that case just return as there is nothing to do.
+ */
+ if (!plane)
+ return;
+
+ omap_plane = to_omap_plane(plane);
+
+ omap_overlay_release_wb(priv, plane, omap_plane->reserved_wb_overlay);
+ omap_plane->reserved_wb_overlay = NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h
index 0c28fe8ffa20..378b06345a70 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.h
+++ b/drivers/gpu/drm/omapdrm/omap_plane.h
@@ -22,5 +22,10 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
+bool is_omap_plane_dual_overlay(struct drm_plane_state *state);
+
+enum omap_plane_id omap_plane_id_wb(struct drm_plane *plane);
+struct drm_plane *omap_plane_reserve_wb(struct drm_device *dev);
+void omap_plane_release_wb(struct drm_plane *plane);
#endif /* __OMAPDRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_wb.c b/drivers/gpu/drm/omapdrm/omap_wb.c
new file mode 100644
index 000000000000..7655a747cf22
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_wb.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "omap_wb.h"
+
+unsigned int wbdebug;
+module_param(wbdebug, uint, 0644);
+MODULE_PARM_DESC(wbdebug, "activates debug info");
+
+struct wb_fmt wb_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .coplanar = 0,
+ .depth = {8, 4},
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .coplanar = 1,
+ .depth = {8, 4},
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .coplanar = 0,
+ .depth = {16, 0},
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .coplanar = 0,
+ .depth = {16, 0},
+ },
+ {
+ /* "XR24", DRM_FORMAT_XRGB8888 */
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .coplanar = 0,
+ .depth = {32, 0},
+ },
+};
+
+unsigned int num_wb_formats = ARRAY_SIZE(wb_formats);
+
+/* find our format description corresponding to the passed v4l2_format */
+struct wb_fmt *find_format(struct v4l2_format *f)
+{
+ struct wb_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < num_wb_formats; k++) {
+ fmt = &wb_formats[k];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+int omap_wb_fourcc_v4l2_to_drm(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
+ return DRM_FORMAT_NV12;
+ case V4L2_PIX_FMT_YUYV:
+ return DRM_FORMAT_YUYV;
+ case V4L2_PIX_FMT_UYVY:
+ return DRM_FORMAT_UYVY;
+ case V4L2_PIX_FMT_XBGR32:
+ return DRM_FORMAT_XRGB8888;
+ default:
+ WARN(1, "WB: unsupported fourcc\n");
+ return 0;
+ }
+}
+
+void omap_wb_irq(void *priv, u32 irqstatus)
+{
+ struct wb_dev *dev = (struct wb_dev *)priv;
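+	/*
+	 * WB events plus every manager's VSYNC: writeback capture is paced
+	 * by the VSYNC of the manager it captures from.
+	 */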
+ const u32 mask = OMAP_WB_IRQ_MASK |
+ DISPC_IRQ_VSYNC |
+ DISPC_IRQ_VSYNC2 |
+ DISPC_IRQ_VSYNC3 |
+ DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD;
+
+ if (!dev)
+ return;
+
+ irqstatus &= mask;
+ if (!irqstatus)
+ return;
+
+ if (!atomic_read(&dev->irq_enabled))
+ return;
+
+ switch (dev->mode) {
+ case OMAP_WB_NOT_CONFIGURED:
+ break;
+ case OMAP_WB_MEM2MEM_OVL:
+ wbm2m_irq(dev->m2m, irqstatus);
+ break;
+ case OMAP_WB_MEM2MEM_MGR:
+ /* To be added */
+ break;
+ case OMAP_WB_CAPTURE_MGR:
+ wbcap_irq(dev->cap, irqstatus);
+ break;
+ default:
+ WARN_ONCE(1, "WB: unknown WB mode: 0x%x\n", dev->mode);
+ break;
+ }
+}
+
+/*
+ * The initial setup of this device instance. Note that the initial state of
+ * the driver should be complete. So the initial format, standard, timings
+ * and video input should all be initialized to some reasonable value.
+ */
+int omap_wb_init(struct drm_device *drmdev)
+{
+ struct omap_drm_private *priv = drmdev->dev_private;
+ struct wb_dev *dev;
+ int ret = 0;
+
+ /* Allocate a new instance */
+ dev = devm_kzalloc(drmdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->drm_dev = drmdev;
+
+ /* set pseudo v4l2 device name so we can use v4l2_printk */
+ strlcpy(dev->v4l2_dev.name, WB_MODULE_NAME,
+ sizeof(dev->v4l2_dev.name));
+
+ priv->wb_private = dev;
+
+ mutex_init(&dev->lock);
+
+ atomic_set(&dev->irq_enabled, 0);
+
+ dev->mode = OMAP_WB_NOT_CONFIGURED;
+
+ ret = wbcap_init(dev);
+ if (ret) {
+ log_err(dev, "Failed to initialize wb capture\n");
+ goto error;
+ }
+
+ ret = wbm2m_init(dev);
+ if (ret) {
+ log_err(dev, "Failed to initialize wb m2m\n");
+ goto free_cap;
+ }
+
+ log_dbg(dev, "WB loaded\n");
+ return 0;
+
+free_cap:
+ wbcap_cleanup(dev);
+error:
+ return ret;
+}
+
+void omap_wb_cleanup(struct drm_device *drmdev)
+{
+ struct omap_drm_private *priv = drmdev->dev_private;
+ struct wb_dev *dev = priv->wb_private;
+
+ log_dbg(dev, "Cleanup WB\n");
+
+ wbcap_cleanup(dev);
+ wbm2m_cleanup(dev);
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_wb.h b/drivers/gpu/drm/omapdrm/omap_wb.h
new file mode 100644
index 000000000000..744149978991
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_wb.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#ifndef __OMAP_WB_H__
+#define __OMAP_WB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/hrtimer.h>
+#include <drm/drm_fourcc.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dss/omapdss.h"
+#include "omap_drv.h"
+
+#define WB_MODULE_NAME "omapwb"
+#define WBM2M_MODULE_NAME "omapwb-m2m"
+#define WBCAP_MODULE_NAME "omapwb-cap"
+
+extern unsigned int wbdebug;
+
+#define log_dbg(dev, fmt, arg...) \
+ v4l2_dbg(1, wbdebug, &dev->v4l2_dev, "%s: " fmt, \
+ __func__, ## arg)
+#define log_err(dev, fmt, arg...) \
+ v4l2_err(&dev->v4l2_dev, fmt, ## arg)
+#define log_info(dev, fmt, arg...) \
+ v4l2_info(&dev->v4l2_dev, fmt, ## arg)
+
+/* minimum and maximum frame sizes */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 2048
+#define MAX_H 2048
+
+/* required alignments */
+#define S_ALIGN 0 /* multiple of 1 */
+#define H_ALIGN 0 /* multiple of 2 */
+
+/* used as plane indices */
+#define MAX_PLANES 2
+#define LUMA_PLANE 0
+#define CHROMA_PLANE 1
+
+enum omap_wb_mode {
+ OMAP_WB_NOT_CONFIGURED = 0,
+ /* mem2mem from single ovl to wb */
+ OMAP_WB_MEM2MEM_OVL = 1,
+ /* mem2mem from N overlays via single mgr to wb */
+ OMAP_WB_MEM2MEM_MGR = 2,
+ /* capture from single mgr to wb */
+ OMAP_WB_CAPTURE_MGR = 3
+};
+
+enum wb_state {
+ WB_STATE_NONE = 0,
+ WB_STATE_FIRST_FRAME,
+ WB_STATE_CAPTURING,
+ WB_STATE_STOPPING,
+ WB_STATE_STOPPED,
+};
+
+/* driver info for each of the supported video formats */
+struct wb_fmt {
+ u32 fourcc; /* standard format identifier */
+ u8 coplanar; /* set for unpacked Luma and Chroma */
+	u8	depth[MAX_PLANES];	/* Bits per pixel per plane */
+};
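+
+/*
+ * Note: depth[] holds the average bits per pixel of each plane; NV12's
+ * 2x2-subsampled CbCr plane therefore contributes 4 bpp.
+ */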
+
+extern struct wb_fmt wb_formats[];
+extern unsigned int num_wb_formats;
+
+struct wb_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/*
+ * per-queue, driver-specific private data.
+ * MEM-2-MEM: Source: V4L2_BUF_TYPE_VIDEO_OUTPUT*
+ * Destination: V4L2_BUF_TYPE_VIDEO_CAPTURE*
+ * CAPTURE: Destination: V4L2_BUF_TYPE_VIDEO_CAPTURE* only
+ */
+struct wb_q_data {
+ /* format info */
+ struct v4l2_format format;
+ /* crop/compose rectangle */
+ struct v4l2_rect c_rect;
+ /* format info */
+ struct wb_fmt *fmt;
+};
+
+enum {
+ Q_DATA_SRC = 0,
+ Q_DATA_DST = 1,
+};
+
+/* find our format description corresponding to the passed v4l2_format */
+struct wb_fmt *find_format(struct v4l2_format *f);
+
+struct wb_dev {
+ struct v4l2_device v4l2_dev;
+ struct drm_device *drm_dev;
+
+ atomic_t irq_enabled;
+
+ /* v4l2_ioctl mutex */
+ struct mutex lock;
+
+ enum omap_wb_mode mode;
+ struct wbcap_dev *cap;
+ struct wbm2m_dev *m2m;
+};
+
+/*
+ * There is one wbcap_dev structure in the driver.
+ */
+struct wbcap_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_fh fh;
+ struct wb_dev *dev;
+ struct v4l2_ctrl_handler hdl;
+
+ /* dst queue data */
+ struct wb_q_data q_data[2];
+
+ unsigned int input;
+
+ struct vb2_queue queue;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ spinlock_t qlock;
+ struct list_head buf_list;
+
+ /* Current v4l2_buffer */
+ struct wb_buffer *cur_frm;
+ /* Next v4l2_buffer */
+ struct wb_buffer *next_frm;
+
+ unsigned int field;
+ unsigned int sequence;
+
+ bool stopping;
+ wait_queue_head_t event;
+
+ enum wb_state state;
+
+ /* timer used to wait for wb go bit to be cleared */
+ struct hrtimer wbgo_timer;
+};
+
+/*
+ * There is one wbm2m_dev structure in the driver.
+ */
+struct wbm2m_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct wb_dev *dev;
+ struct drm_plane *plane;
+
+ /* v4l2 buffers lock */
+ spinlock_t lock;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+};
+
+/*
+ * There is one wbm2m_ctx structure for each m2m context.
+ */
+struct wbm2m_ctx {
+ struct v4l2_fh fh;
+ struct wbm2m_dev *dev;
+ struct v4l2_ctrl_handler hdl;
+
+ /* current frame seq */
+ unsigned int sequence;
+ /* abort after next irq */
+ unsigned int aborting;
+
+ /* src & dst queue data */
+ struct wb_q_data q_data[2];
+};
+
+static inline struct wb_buffer *to_wb_buffer(struct vb2_buffer *vb2)
+{
+ return container_of(vb2, struct wb_buffer, vb.vb2_buf);
+}
+
+int omap_wb_fourcc_v4l2_to_drm(u32 fourcc);
+
+void wbm2m_irq(struct wbm2m_dev *dev, uint32_t irqstatus);
+int wbm2m_init(struct wb_dev *dev);
+void wbm2m_cleanup(struct wb_dev *dev);
+
+void wbcap_irq(struct wbcap_dev *dev, u32 irqstatus);
+int wbcap_init(struct wb_dev *dev);
+void wbcap_cleanup(struct wb_dev *dev);
+
+#endif /* __OMAP_WB_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_wb_cap.c b/drivers/gpu/drm/omapdrm/omap_wb_cap.c
new file mode 100644
index 000000000000..1f0ac5d872f7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_wb_cap.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Benoit Parrot <bparrot@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <drm/drm_vblank.h>
+
+#include "omap_wb.h"
+
+static int omap_channel_to_wb_channel(int oc)
+{
+ switch (oc) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return DSS_WB_LCD1_MGR;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return DSS_WB_TV_MGR;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return DSS_WB_LCD2_MGR;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return DSS_WB_LCD3_MGR;
+ default:
+ return DSS_WB_LCD1_MGR;
+ }
+}
+
+static char *omap_channel_to_name(int oc)
+{
+ switch (oc) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return "LCD1";
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return "DIGIT/TV";
+ case OMAP_DSS_CHANNEL_LCD2:
+ return "LCD2";
+ case OMAP_DSS_CHANNEL_LCD3:
+ return "LCD3";
+ default:
+ return "LCD1";
+ }
+}
+
+/* driver info for each of the supported input overlay/mgr */
+struct wb_input {
+ char name[64];
+ u32 wb_channel;
+ u32 omap_channel;
+ u32 crtc_index;
+};
+
+static struct wb_input wb_inputs[8];
+static int num_wb_input;
+
+static bool is_input_active(struct wbcap_dev *wbcap)
+{
+ struct omap_drm_private *priv = wbcap->dev->drm_dev->dev_private;
+ u32 oc = wb_inputs[wbcap->input].omap_channel;
+
+ return priv->dispc_ops->mgr_is_enabled(priv->dispc, oc);
+}
+
+static bool is_input_enabled(struct wbcap_dev *wbcap)
+{
+ struct omap_drm_private *priv = wbcap->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct wb_input *input;
+
+ input = &wb_inputs[wbcap->input];
+ crtc = priv->pipes[input->crtc_index].crtc;
+
+ return crtc->enabled;
+}
+
+static void build_input_table(struct wbcap_dev *wbcap)
+{
+ struct omap_drm_private *priv = wbcap->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct wb_input *input;
+ int i;
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ crtc = priv->pipes[i].crtc;
+ input = &wb_inputs[i];
+
+ input->crtc_index = i;
+ input->omap_channel = omap_crtc_channel(crtc);
+ input->wb_channel =
+ omap_channel_to_wb_channel(input->omap_channel);
+ snprintf(input->name, sizeof(input->name), "CRTC#%d - %s",
+ i, omap_channel_to_name(input->omap_channel));
+
+ log_dbg(wbcap, "Input# %d, name:'%s' omap_channel:%d wb_channel:%d\n",
+ i, input->name, input->omap_channel, input->wb_channel);
+ }
+ num_wb_input = i;
+}
+
+static struct wb_q_data *get_q_data(struct wbcap_dev *dev,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &dev->q_data[Q_DATA_DST];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static bool wb_cap_setup(struct wbcap_dev *dev,
+ enum dss_writeback_channel wb_channel,
+ const struct omap_dss_writeback_info *wb_info)
+{
+ struct omap_drm_private *priv = dev->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct videomode *ct;
+ int r;
+
+ crtc = priv->pipes[wb_inputs[dev->input].crtc_index].crtc;
+ ct = omap_crtc_timings(crtc);
+
+ /* configure wb */
+ r = priv->dispc_ops->wb_setup(priv->dispc, wb_info, false, ct, wb_channel);
+ if (r)
+ return false;
+
+ if (is_input_active(dev)) {
+ priv->dispc_ops->ovl_enable(priv->dispc, OMAP_DSS_WB, true);
+ priv->dispc_ops->wb_go(priv->dispc);
+ } else {
+ log_err(dev, "CHANNEL %u not enabled, skip WB GO\n",
+ wb_inputs[dev->input].omap_channel);
+ }
+
+ return true;
+}
+
+static bool is_input_irq_vsync_set(struct wbcap_dev *dev, u32 irqstatus)
+{
+ struct omap_drm_private *priv = dev->dev->drm_dev->dev_private;
+ u32 oc = wb_inputs[dev->input].omap_channel;
+
+ if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, oc))
+ return true;
+ return false;
+}
+
+static int wbcap_schedule_next_buffer(struct wbcap_dev *dev)
+{
+ struct wb_buffer *buf;
+ unsigned long addr_y = 0;
+ unsigned long addr_uv = 0;
+ struct wb_q_data *q_data;
+ int num_planes;
+ bool ok;
+ struct omap_dss_writeback_info wb_info = { 0 };
+ struct v4l2_pix_format_mplane *pix;
+ unsigned long flags;
+
+ if (!is_input_active(dev)) {
+ dev->next_frm = NULL;
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev->qlock, flags);
+ if (list_empty(&dev->buf_list)) {
+ dev->next_frm = NULL;
+ spin_unlock_irqrestore(&dev->qlock, flags);
+ return 0;
+ }
+
+ buf = list_entry(dev->buf_list.next, struct wb_buffer, list);
+ dev->next_frm = buf;
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&dev->qlock, flags);
+
+ q_data = get_q_data(dev, buf->vb.vb2_buf.type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
+ num_planes = pix->num_planes;
+
+ addr_y = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ if (num_planes == 2)
+ addr_uv = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
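+	/* single-allocation NV12: the CbCr plane follows the Y plane */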
+ else if (pix->pixelformat == V4L2_PIX_FMT_NV12)
+ addr_uv = addr_y + (pix->plane_fmt[0].bytesperline *
+ pix->height);
+
+ /* fill WB DSS info */
+ wb_info.paddr = (u32)addr_y;
+ wb_info.p_uv_addr = (u32)addr_uv;
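+	/* buf_width is in pixels: bytes per line divided by bytes per pixel */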
+ wb_info.buf_width = pix->plane_fmt[0].bytesperline /
+ (q_data->fmt->depth[LUMA_PLANE] / 8);
+
+ wb_info.width = pix->width;
+ wb_info.height = pix->height;
+ wb_info.fourcc = omap_wb_fourcc_v4l2_to_drm(pix->pixelformat);
+ wb_info.pre_mult_alpha = 1;
+
+ wb_info.rotation = DRM_MODE_ROTATE_0;
+ wb_info.rotation_type = OMAP_DSS_ROT_NONE;
+
+ ok = wb_cap_setup(dev,
+ wb_inputs[dev->input].wb_channel,
+ &wb_info);
+ if (!ok)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void wbcap_process_buffer_complete(struct wbcap_dev *dev)
+{
+ dev->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ dev->cur_frm->vb.field = dev->field;
+ dev->cur_frm->vb.sequence = dev->sequence++;
+
+ vb2_buffer_done(&dev->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ dev->cur_frm = dev->next_frm;
+}
+
+static enum hrtimer_restart wbcap_wbgo_timer(struct hrtimer *timer)
+{
+ struct wbcap_dev *dev = container_of(timer,
+ struct wbcap_dev, wbgo_timer);
+ struct omap_drm_private *priv = dev->dev->drm_dev->dev_private;
+
+ if (priv->dispc_ops->wb_go_busy(priv->dispc))
+ log_err(dev, "WARNING, WB BUSY at hrtimer, state %u\n",
+ dev->state);
+
+ switch (dev->state) {
+ case WB_STATE_NONE:
+ break;
+
+ case WB_STATE_FIRST_FRAME:
+ dev->cur_frm = dev->next_frm;
+ wbcap_schedule_next_buffer(dev);
+ dev->state = WB_STATE_CAPTURING;
+ break;
+
+ case WB_STATE_CAPTURING:
+ if (dev->cur_frm && dev->next_frm) {
+ /*
+ * We have cur_frm that was just captured, and next_frm
+			 * into which the HW will now start capturing.
+ * This means cur_frm is now released from DSS HW.
+ */
+ wbcap_process_buffer_complete(dev);
+ dev->next_frm = NULL;
+ } else {
+ /*
+ * We have cur_frm which has a captured frame,
+ * but we don't have next_frm.
+			 * This means cur_frm will still be used by
+			 * DSS for capture.
+ */
+ }
+
+ if (dev->stopping) {
+ /* XXX should we set WB GO? */
+ priv->dispc_ops->ovl_enable(priv->dispc, OMAP_DSS_WB,
+ false);
+ dev->state = WB_STATE_STOPPING;
+ } else {
+ wbcap_schedule_next_buffer(dev);
+ }
+ break;
+
+ case WB_STATE_STOPPING:
+ if (dev->cur_frm)
+ wbcap_process_buffer_complete(dev);
+
+ dev->state = WB_STATE_STOPPED;
+ atomic_dec(&dev->dev->irq_enabled);
+ dev->stopping = false;
+ wake_up(&dev->event);
+ break;
+
+ case WB_STATE_STOPPED:
+ log_err(dev, "ERROR: timer triggered in the stopped state. This shouldn't happen\n");
+ break;
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void wbcap_handle_vsync(struct wbcap_dev *dev)
+{
+ /*
+ * In writeback capture mode, the GO bit doesn't get reset
+ * at the manager's VSYNC interrupt. It takes an extra
+ * 'WBDELAYCOUNTER' time after VSYNC when the writeback
+ * FIFOs are flushed and the shadow registers are taken in.
+ * There isn't any DSS interrupt to notify this point in time.
+	 * The correct solution is to set a timer far enough out that it
+	 * covers the period defined by WBDELAYCOUNTER.
+	 * The max value allowed in WBDELAYCOUNTER is 255, which
+	 * corresponds to 255 lines. So waiting anywhere from 1/4 to
+	 * 1/2 of a frame period (roughly 2 ms to 4 ms at 120 to 60 fps)
+	 * should be safe enough.
+ */
+
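+	/* 3 ms plus up to 1 ms of slack; see the rationale above */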
+ hrtimer_start_range_ns(&dev->wbgo_timer, ms_to_ktime(3), 1000000,
+ HRTIMER_MODE_REL);
+}
+
+void wbcap_irq(struct wbcap_dev *dev, u32 irqstatus)
+{
+ if (irqstatus & DISPC_IRQ_FRAMEDONEWB)
+ log_dbg(dev, "WB: FRAMEDONE\n");
+
+ if (irqstatus & DISPC_IRQ_WBBUFFEROVERFLOW)
+ log_err(dev, "WB: UNDERFLOW\n");
+
+ if (irqstatus & DISPC_IRQ_WBUNCOMPLETEERROR)
+ log_err(dev, "WB: WBUNCOMPLETEERROR\n");
+
+ if (is_input_irq_vsync_set(dev, irqstatus)) {
+ if (dev->field != V4L2_FIELD_NONE) {
+ if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ dev->field = V4L2_FIELD_BOTTOM;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ dev->field = V4L2_FIELD_TOP;
+ }
+ wbcap_handle_vsync(dev);
+ }
+}
+
+/*
+ * Setup the constraints of the queue: besides setting the number of planes
+ * per buffer and the size and allocation context of each plane, it also
+ * checks if sufficient buffers have been allocated. Usually 3 is a good
+ * minimum number: many DMA engines need a minimum of 2 buffers in the
+ * queue and you need to have another available for userspace processing.
+ */
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ int i;
+ struct wbcap_dev *wbcap = vb2_get_drv_priv(vq);
+ struct wb_q_data *q_data;
+
+ q_data = get_q_data(wbcap, vq->type);
+
+ if (!q_data)
+ return -EINVAL;
+
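+	/* make sure at least two buffers end up allocated in total */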
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = q_data->format.fmt.pix_mp.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+
+ log_dbg(wbcap, "get %d buffer(s) of size %d\n", *nbuffers,
+ sizes[LUMA_PLANE]);
+ if (*nplanes == 2)
+ log_dbg(wbcap, " and %d\n", sizes[CHROMA_PLANE]);
+
+ return 0;
+}
+
+/*
+ * Prepare the buffer for queueing to the DMA engine: check and set the
+ * payload size.
+ */
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct wbcap_dev *wbcap = vb2_get_drv_priv(vb->vb2_queue);
+ struct wb_q_data *q_data;
+ struct v4l2_pix_format_mplane *mp;
+ int i, num_planes;
+
+ q_data = get_q_data(wbcap, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+ num_planes = q_data->format.fmt.pix_mp.num_planes;
+
+ for (i = 0; i < num_planes; i++) {
+ mp = &q_data->format.fmt.pix_mp;
+ if (vb2_plane_size(vb, i) < mp->plane_fmt[i].sizeimage) {
+ log_err(wbcap,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long)mp->plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, mp->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+/*
+ * Queue this buffer to the DMA engine.
+ */
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct wbcap_dev *wbcap = vb2_get_drv_priv(vb->vb2_queue);
+ struct wb_buffer *buf = to_wb_buffer(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wbcap->qlock, flags);
+ list_add_tail(&buf->list, &wbcap->buf_list);
+
+ spin_unlock_irqrestore(&wbcap->qlock, flags);
+}
+
+static void return_all_buffers(struct wbcap_dev *wbcap,
+ enum vb2_buffer_state state)
+{
+ struct wb_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wbcap->qlock, flags);
+ list_for_each_entry_safe(buf, node, &wbcap->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ if (wbcap->cur_frm) {
+ vb2_buffer_done(&wbcap->cur_frm->vb.vb2_buf, state);
+ wbcap->cur_frm = NULL;
+ }
+
+ if (wbcap->next_frm) {
+ vb2_buffer_done(&wbcap->next_frm->vb.vb2_buf, state);
+ wbcap->next_frm = NULL;
+ }
+
+ spin_unlock_irqrestore(&wbcap->qlock, flags);
+}
+
+/*
+ * Start streaming. First check if the minimum number of buffers have been
+ * queued. If not, then return -ENOBUFS and the vb2 framework will call
+ * this function again the next time a buffer has been queued until enough
+ * buffers are available to actually start the DMA engine.
+ */
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct wbcap_dev *wbcap = vb2_get_drv_priv(vq);
+ struct omap_drm_private *priv = wbcap->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ int ret;
+ struct wb_q_data *q_data;
+
+ priv->dispc_ops->runtime_get(priv->dispc);
+
+ wbcap->sequence = 0;
+ q_data = get_q_data(wbcap, wbcap->queue.type);
+ if (!q_data) {
+ log_err(wbcap, "ERROR: getting q_data failed\n");
+ return_all_buffers(wbcap, VB2_BUF_STATE_QUEUED);
+ priv->dispc_ops->runtime_put(priv->dispc);
+ return -EINVAL;
+ }
+
+ if (q_data->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE)
+ wbcap->field = V4L2_FIELD_TOP;
+ else
+ wbcap->field = V4L2_FIELD_NONE;
+
+ log_dbg(wbcap, "Input (%s) is %s : %s\n",
+ wb_inputs[wbcap->input].name,
+ is_input_enabled(wbcap) ? "enabled" : "disabled",
+ is_input_active(wbcap) ? "active" : "inactive");
+
+ if (!is_input_active(wbcap)) {
+ log_err(wbcap, "ERROR: Selected input (%s) is not active, bailing out\n",
+ wb_inputs[wbcap->input].name);
+ return_all_buffers(wbcap, VB2_BUF_STATE_QUEUED);
+ priv->dispc_ops->runtime_put(priv->dispc);
+ return -EINVAL;
+ }
+
+ /* Enable vsync irq on the input crtc */
+ crtc = priv->pipes[wb_inputs[wbcap->input].crtc_index].crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ON(ret != 0);
+
+ if (wbcap_schedule_next_buffer(wbcap)) {
+ return_all_buffers(wbcap, VB2_BUF_STATE_QUEUED);
+ priv->dispc_ops->runtime_put(priv->dispc);
+ return -EINVAL;
+ }
+
+ wbcap->state = WB_STATE_FIRST_FRAME;
+ atomic_inc(&wbcap->dev->irq_enabled);
+ return 0;
+}
+
+/*
+ * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued
+ * and passed on to the vb2 framework marked as STATE_ERROR.
+ */
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct wbcap_dev *wbcap = vb2_get_drv_priv(vq);
+ struct omap_drm_private *priv = wbcap->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ int ret;
+
+ log_dbg(wbcap, "Stopping WB\n");
+ log_dbg(wbcap, "current state: %d\n", wbcap->state);
+
+ wbcap->stopping = true;
+ ret = wait_event_timeout(wbcap->event,
+ !wbcap->stopping,
+ msecs_to_jiffies(250));
+
+ log_dbg(wbcap, "Returning VB2 buffers\n");
+
+ if (priv->dispc_ops->wb_go_busy(priv->dispc))
+ log_err(wbcap, "WARNING, WB BUSY when stopping\n");
+
+ /* Release all active buffers */
+ return_all_buffers(wbcap, VB2_BUF_STATE_ERROR);
+
+ /* Disable vsync irq on the input crtc */
+ crtc = priv->pipes[wb_inputs[wbcap->input].crtc_index].crtc;
+ drm_crtc_vblank_put(crtc);
+
+ priv->dispc_ops->runtime_put(priv->dispc);
+}
+
+/*
+ * The vb2 queue ops. Note that since q->lock is set we can use the standard
+ * vb2_ops_wait_prepare/finish helper functions. If q->lock would be NULL,
+ * then this driver would have to provide these ops.
+ */
+static struct vb2_ops wbcap_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * Required ioctl querycap. Note that the version field is prefilled with
+ * the version of the kernel.
+ */
+static int wbcap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+
+ strscpy(cap->driver, WBCAP_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, WBCAP_MODULE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ wbcap->v4l2_dev.name);
+ return 0;
+}
+
+/*
+ * Helper function to check and correct struct v4l2_pix_format. It's used
+ * not only in VIDIOC_TRY/S_FMT, but also elsewhere if changes to the SDTV
+ * standard, HDTV timings or the video input would require updating the
+ * current format.
+ */
+static int wbcap_fill_pix_format(struct wbcap_dev *wbcap,
+ struct v4l2_format *f)
+{
+ struct wb_fmt *fmt = find_format(f);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ unsigned int w_align;
+ int i, depth, depth_bytes;
+
+ if (!fmt) {
+ log_dbg(wbcap, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+ fmt = &wb_formats[1];
+ }
+
+ /* we only allow V4L2_FIELD_NONE or V4L2_FIELD_ALTERNATE */
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE)
+ pix->field = V4L2_FIELD_NONE;
+
+ depth = fmt->depth[LUMA_PLANE];
+
+ /*
+	 * The line stride needs to be even.
+	 * As a special case, the YUV422 interleaved formats also
+	 * require an even number of pixels per line.
+ */
+ depth_bytes = depth >> 3;
+
+ w_align = 0;
+ if ((depth_bytes == 3) || (depth_bytes == 1))
+ w_align = 1;
+ else if ((depth_bytes == 2) &&
+ (fmt->fourcc == V4L2_PIX_FMT_YUYV ||
+ fmt->fourcc == V4L2_PIX_FMT_UYVY))
+ w_align = 1;
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ pix->pixelformat = fmt->fourcc;
+
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+ depth = fmt->depth[i];
+
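+		/*
+		 * The interleaved CbCr plane of NV12 covers pix->width bytes
+		 * per line despite the 2x horizontal chroma subsampling.
+		 */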
+ if (i == LUMA_PLANE)
+ plane_fmt->bytesperline = pix->width * depth / 8;
+ else
+ plane_fmt->bytesperline = pix->width;
+
+ plane_fmt->sizeimage = (pix->height * pix->width *
+ depth) / 8;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ /*
+ * Since we are using a single plane buffer
+ * we need to adjust the reported sizeimage
+ * to include the colocated UV part.
+ */
+ plane_fmt->sizeimage += (pix->height / 2 *
+ plane_fmt->bytesperline);
+ }
+
+ memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
+ }
+
+ return 0;
+}
+
+static int wbcap_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+ struct omap_drm_private *drmpriv = wbcap->dev->drm_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct videomode *ct;
+
+ log_dbg(wbcap, "requested fourcc:%4.4s size: %dx%d\n",
+ (char *)&f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ /*
+ * Scaling currently does not work properly for Capture mode.
+ * So we are temporarily forcing the frame size to be the
+ * same as the source crtc for now.
+ */
+ crtc = drmpriv->pipes[wb_inputs[wbcap->input].crtc_index].crtc;
+ ct = omap_crtc_timings(crtc);
+
+ f->fmt.pix.width = ct->hactive;
+ f->fmt.pix.height = ct->vactive;
+
+ if (ct->flags & DISPLAY_FLAGS_INTERLACED) {
+ f->fmt.pix.height /= 2;
+ f->fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
+ }
+
+ log_dbg(wbcap, "replied fourcc:%4.4s size: %dx%d\n",
+ (char *)&f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ return wbcap_fill_pix_format(wbcap, f);
+}
+
+static int wbcap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+ int ret;
+ struct wb_q_data *q_data;
+
+ log_dbg(wbcap, "type:%d\n", f->type);
+
+ ret = wbcap_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ q_data = get_q_data(wbcap, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ /*
+ * It is not allowed to change the format while buffers for use with
+ * streaming have already been allocated.
+ */
+ if (vb2_is_busy(&wbcap->queue))
+ return -EBUSY;
+
+ q_data->format = *f;
+ q_data->fmt = find_format(f);
+
+ log_dbg(wbcap, "Setting format for type %d, %dx%d, fmt: %4.4s bpl_y %d",
+ f->type, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ (char *)&f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.plane_fmt[LUMA_PLANE].bytesperline);
+ if (f->fmt.pix_mp.num_planes == 2)
+ log_dbg(wbcap, " bpl_uv %d\n",
+ f->fmt.pix_mp.plane_fmt[CHROMA_PLANE].bytesperline);
+
+ return 0;
+}
+
+static int wbcap_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+ struct wb_q_data *q_data;
+
+ log_dbg(wbcap, "type:%d\n", f->type);
+
+ q_data = get_q_data(wbcap, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *f = q_data->format;
+ return 0;
+}
+
+static int wbcap_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= num_wb_formats)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->pixelformat = wb_formats[f->index].fourcc;
+ return 0;
+}
+
+static int wbcap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index >= num_wb_input)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, wb_inputs[i->index].name, sizeof(i->name));
+ return 0;
+}
+
+static int wbcap_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+ struct wb_q_data *q_data;
+
+ log_dbg(wbcap, "%d\n", i);
+
+ q_data = get_q_data(wbcap, wbcap->queue.type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (i >= num_wb_input)
+ return -EINVAL;
+
+ /*
+ * Changing the input implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&wbcap->queue))
+ return -EBUSY;
+
+ wbcap->input = i;
+
+ /* Update the internal format to match the selected input */
+ wbcap_try_fmt_vid_cap(file, priv, &q_data->format);
+ return 0;
+}
+
+static int wbcap_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct wbcap_dev *wbcap = video_drvdata(file);
+
+ log_dbg(wbcap, "%d\n", wbcap->input);
+
+ *i = wbcap->input;
+ return 0;
+}
+
+/*
+ * File operations
+ */
+static int wbcap_open(struct file *file)
+{
+ struct wbcap_dev *dev = video_drvdata(file);
+ int ret;
+
+ log_dbg(dev, "enter\n");
+
+ if (mutex_lock_interruptible(&dev->dev->lock)) {
+ ret = -ERESTARTSYS;
+ goto unlock;
+ }
+
+ if ((dev->dev->mode != OMAP_WB_NOT_CONFIGURED) &&
+ (dev->dev->mode != OMAP_WB_CAPTURE_MGR)) {
+ /* WB is already open for other modes */
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ log_err(dev, "v4l2_fh_open failed\n");
+ goto unlock;
+ }
+
+ if (v4l2_fh_is_singular_file(file))
+ dev->dev->mode = OMAP_WB_CAPTURE_MGR;
+
+unlock:
+ mutex_unlock(&dev->dev->lock);
+ return ret;
+}
+
+static int wbcap_release(struct file *file)
+{
+ struct wbcap_dev *dev = video_drvdata(file);
+ bool fh_singular;
+ int ret;
+
+ log_dbg(dev, "releasing\n");
+
+ mutex_lock(&dev->dev->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ /* the release helper will cleanup any on-going streaming */
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ dev->dev->mode = OMAP_WB_NOT_CONFIGURED;
+
+ mutex_unlock(&dev->dev->lock);
+
+ return ret;
+}
+
+/*
+ * The set of all supported ioctls. Note that all the streaming ioctls
+ * use the vb2 helper functions that take care of all the locking and
+ * that also do ownership tracking (i.e. only the filehandle that requested
+ * the buffers can call the streaming ioctls, all other filehandles will
+ * receive -EBUSY if they attempt to call the same streaming ioctls).
+ *
+ * The last three ioctls also use standard helper functions: these implement
+ * standard behavior for drivers with controls.
+ */
+static const struct v4l2_ioctl_ops wbcap_ioctl_ops = {
+ .vidioc_querycap = wbcap_querycap,
+ .vidioc_try_fmt_vid_cap_mplane = wbcap_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = wbcap_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = wbcap_g_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = wbcap_enum_fmt_vid_cap,
+
+ .vidioc_enum_input = wbcap_enum_input,
+ .vidioc_g_input = wbcap_g_input,
+ .vidioc_s_input = wbcap_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * The set of file operations. Note that all these ops are standard core
+ * helper functions.
+ */
+static const struct v4l2_file_operations wbcap_fops = {
+ .owner = THIS_MODULE,
+ .open = wbcap_open,
+ .release = wbcap_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+/*
+ * The initial setup of this device instance. Note that the initial state of
+ * the driver should be complete. So the initial format, standard, timings
+ * and video input should all be initialized to some reasonable value.
+ */
+int wbcap_init(struct wb_dev *dev)
+{
+ struct wbcap_dev *wbcap;
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ struct wb_q_data *q_data;
+ int ret;
+
+ if (!dev)
+ return -ENOMEM;
+
+ /* Allocate a new instance */
+ wbcap = devm_kzalloc(dev->drm_dev->dev, sizeof(*wbcap), GFP_KERNEL);
+ if (!wbcap)
+ return -ENOMEM;
+
+ dev->cap = wbcap;
+ wbcap->dev = dev;
+
+ /* Fill in the initial format-related settings */
+ q_data = &wbcap->q_data[Q_DATA_DST];
+ q_data->fmt = &wb_formats[1];
+ q_data->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q_data->format.fmt.pix_mp.pixelformat = q_data->fmt->fourcc;
+ q_data->format.fmt.pix_mp.width = 1920;
+ q_data->format.fmt.pix_mp.height = 1080;
+ wbcap_fill_pix_format(wbcap, &q_data->format);
+
+ /* Initialize the top-level structure */
+ strlcpy(wbcap->v4l2_dev.name, WBCAP_MODULE_NAME,
+ sizeof(wbcap->v4l2_dev.name));
+ ret = v4l2_device_register(dev->drm_dev->dev, &wbcap->v4l2_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * This lock is now created by the main level.
+ * We might need one per sub structure in the future
+	 * We might need one per sub-structure in the future.
+ * mutex_init(&dev->lock);
+ */
+
+ /* Initialize the vb2 queue */
+ q = &wbcap->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = wbcap;
+ q->buf_struct_size = sizeof(struct wb_buffer);
+ q->ops = &wbcap_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = wbcap->v4l2_dev.dev;
+
+ /*
+ * Assume that this DMA engine needs to have at least two buffers
+ * available before it can be started. The start_streaming() op
+ * won't be called until at least this many buffers are queued up.
+ */
+ q->min_buffers_needed = 2;
+ /*
+ * The serialization lock for the streaming ioctls. This is the same
+ * as the main serialization lock, but if some of the non-streaming
+ * ioctls could take a long time to execute, then you might want to
+ * have a different lock here to prevent VIDIOC_DQBUF from being
+ * blocked while waiting for another action to finish. This is
+ * generally not needed for PCI devices, but USB devices usually do
+ * want a separate lock here.
+ */
+ q->lock = &dev->lock;
+ /*
+ * Since this driver can only do 32-bit DMA we must make sure that
+ * the vb2 core will allocate the buffers in 32-bit DMA memory.
+ */
+ q->gfp_flags = GFP_DMA32;
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto free_hdl;
+
+ INIT_LIST_HEAD(&wbcap->buf_list);
+ spin_lock_init(&wbcap->qlock);
+
+ /* Initialize the video_device structure */
+ vdev = &wbcap->vdev;
+ strlcpy(vdev->name, WBCAP_MODULE_NAME, sizeof(vdev->name));
+ /*
+ * There is nothing to clean up, so release is set to an empty release
+ * function. The release callback must be non-NULL.
+ */
+ vdev->release = video_device_release_empty;
+	vdev->fops = &wbcap_fops;
+	vdev->ioctl_ops = &wbcap_ioctl_ops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ /*
+ * The main serialization lock. All ioctls are serialized by this
+ * lock. Exception: if q->lock is set, then the streaming ioctls
+ * are serialized by that separate lock.
+ */
+ vdev->lock = &dev->lock;
+ vdev->queue = q;
+ vdev->v4l2_dev = &wbcap->v4l2_dev;
+ video_set_drvdata(vdev, wbcap);
+
+ hrtimer_init(&wbcap->wbgo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ wbcap->wbgo_timer.function = wbcap_wbgo_timer;
+
+ init_waitqueue_head(&wbcap->event);
+ wbcap->stopping = false;
+
+ build_input_table(wbcap);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, 11);
+ if (ret)
+ goto free_hdl;
+
+ log_dbg(wbcap, "Device registered as %s\n",
+ video_device_node_name(vdev));
+ return 0;
+
+free_hdl:
+ v4l2_device_unregister(&wbcap->v4l2_dev);
+ return ret;
+}
+
+void wbcap_cleanup(struct wb_dev *dev)
+{
+ log_dbg(dev, "Cleanup WB Capture\n");
+
+ video_unregister_device(&dev->cap->vdev);
+ v4l2_device_unregister(&dev->cap->v4l2_dev);
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_wb_m2m.c b/drivers/gpu/drm/omapdrm/omap_wb_m2m.c
new file mode 100644
index 000000000000..d9428d83866d
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_wb_m2m.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Benoit Parrot <bparrot@ti.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "omap_wb.h"
+
+MODULE_DESCRIPTION("TI OMAP WB M2M driver");
+MODULE_AUTHOR("Benoit Parrot <bparrot@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * M2M devices get 2 queues.
+ * Return the queue given the type.
+ */
+static struct wb_q_data *get_q_data(struct wbm2m_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[Q_DATA_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[Q_DATA_DST];
+ default:
+ return NULL;
+ }
+}
+
+static bool wbm2m_convert(struct wbm2m_dev *dev, enum omap_plane_id src_plane,
+ const struct omap_overlay_info *src_info,
+ const struct omap_dss_writeback_info *wb_info)
+{
+ struct omap_drm_private *priv = dev->dev->drm_dev->dev_private;
+ enum dss_writeback_channel wb_channel;
+ struct videomode t = { 0 };
+ int r;
+
+ t.hactive = src_info->out_width;
+ t.vactive = src_info->out_height;
+
+ /* configure input */
+
+ r = priv->dispc_ops->ovl_setup(priv->dispc, src_plane, src_info, &t,
+ true, OMAP_DSS_CHANNEL_WB);
+ if (r)
+ return false;
+
+ priv->dispc_ops->ovl_enable(priv->dispc, src_plane, true);
+
+ /* configure output */
+
+ switch (src_plane) {
+ case OMAP_DSS_GFX:
+ wb_channel = DSS_WB_OVL0; break;
+ case OMAP_DSS_VIDEO1:
+ wb_channel = DSS_WB_OVL1; break;
+ case OMAP_DSS_VIDEO2:
+ wb_channel = DSS_WB_OVL2; break;
+ case OMAP_DSS_VIDEO3:
+ wb_channel = DSS_WB_OVL3; break;
+ default:
+ /*
+ * An invalid src_plane should have been flagged by the
+ * ovl_setup() step above; set a default here regardless.
+ */
+ wb_channel = DSS_WB_OVL3; break;
+ }
+
+ r = priv->dispc_ops->wb_setup(priv->dispc, wb_info, true, &t,
+ wb_channel);
+ if (r) {
+ priv->dispc_ops->ovl_enable(priv->dispc, src_plane, false);
+ return false;
+ }
+
+ priv->dispc_ops->ovl_enable(priv->dispc, OMAP_DSS_WB, true);
+
+ return true;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct wbm2m_ctx *ctx = priv;
+
+ /*
+ * This check is needed because this op may be called directly by the
+ * driver and not only by the m2m framework. When called by the
+ * framework the condition always holds, but when called from
+ * wbm2m_irq it may fail (e.g. a source stream with zero buffers ready).
+ */
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
+ return 0;
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct wbm2m_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+
+ log_dbg(ctx->dev, "Aborting transaction\n");
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This function is only called when both the source and destination
+ * buffers are in place.
+ */
+static void device_run(void *priv)
+{
+ struct wbm2m_ctx *ctx = priv;
+ struct wbm2m_dev *dev = ctx->dev;
+ struct wb_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+ struct wb_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
+ struct vb2_buffer *s_vb, *d_vb;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ dma_addr_t src_dma_addr[2] = {0, 0};
+ dma_addr_t dst_dma_addr[2] = {0, 0};
+ struct omap_overlay_info src_info = { 0 };
+ struct omap_dss_writeback_info wb_info = { 0 };
+ struct v4l2_pix_format_mplane *spix, *dpix;
+ struct v4l2_rect *srect, *drect;
+ u32 stride, depth;
+ bool ok;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_vb) {
+ log_err(dev, "getting next source buffer failed\n");
+ return;
+ }
+
+ s_vb = &src_vb->vb2_buf;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ if (!dst_vb) {
+ log_err(dev, "getting next dest buffer failed\n");
+ return;
+ }
+
+ d_vb = &dst_vb->vb2_buf;
+
+ srect = &s_q_data->c_rect;
+ spix = &s_q_data->format.fmt.pix_mp;
+ src_dma_addr[0] = vb2_dma_contig_plane_dma_addr(s_vb, 0);
+ if (spix->num_planes == 2)
+ src_dma_addr[1] = vb2_dma_contig_plane_dma_addr(s_vb, 1);
+ else if (spix->pixelformat == V4L2_PIX_FMT_NV12)
+ src_dma_addr[1] = src_dma_addr[0] +
+ (spix->plane_fmt[0].bytesperline * spix->height);
+ if (!src_dma_addr[0]) {
+ log_err(dev,
+ "acquiring source buffer(%d) dma_addr failed\n",
+ s_vb->index);
+ return;
+ }
+
+ drect = &d_q_data->c_rect;
+ dpix = &d_q_data->format.fmt.pix_mp;
+ dst_dma_addr[0] = vb2_dma_contig_plane_dma_addr(d_vb, 0);
+ if (dpix->num_planes == 2)
+ dst_dma_addr[1] = vb2_dma_contig_plane_dma_addr(d_vb, 1);
+ else if (dpix->pixelformat == V4L2_PIX_FMT_NV12)
+ dst_dma_addr[1] = dst_dma_addr[0] +
+ (dpix->plane_fmt[0].bytesperline * dpix->height);
+ if (!dst_dma_addr[0]) {
+ log_err(dev,
+ "acquiring destination buffer(%d) dma_addr failed\n",
+ d_vb->index);
+ return;
+ }
+
+ /* fill source DSS info */
+ src_info.paddr = (u32)src_dma_addr[0];
+ src_info.p_uv_addr = (u32)src_dma_addr[1];
+
+ /* update addr based on cropping window */
+ stride = spix->plane_fmt[0].bytesperline;
+ depth = s_q_data->fmt->depth[0];
+ src_info.paddr += srect->top * stride + (srect->left * depth / 8);
+
+ if (src_info.p_uv_addr) {
+ u32 top = srect->top;
+
+ if (spix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ spix->pixelformat == V4L2_PIX_FMT_NV12M) {
+ top >>= 1;
+ depth = 8;
+ }
+ src_info.p_uv_addr += top * stride + (srect->left * depth / 8);
+ }
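+ /*
+ * Worked example (illustrative): for NV12 with a 1920-byte stride
+ * and a crop window at left=64, top=100, the luma address above
+ * advances by 100 * 1920 + 64 bytes, while the chroma address
+ * advances by only 50 * 1920 + 64 bytes, since the interleaved
+ * CbCr plane has half the rows but the same horizontal byte offset.
+ */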
+
+ src_info.screen_width = spix->plane_fmt[0].bytesperline /
+ (s_q_data->fmt->depth[0] / 8);
+
+ src_info.width = srect->width;
+ src_info.height = srect->height;
+
+ src_info.pos_x = 0;
+ src_info.pos_y = 0;
+ src_info.out_width = srect->width;
+ src_info.out_height = srect->height;
+
+ src_info.fourcc = omap_wb_fourcc_v4l2_to_drm(spix->pixelformat);
+ src_info.global_alpha = 0xff;
+
+ src_info.rotation = DRM_MODE_ROTATE_0;
+ src_info.rotation_type = OMAP_DSS_ROT_NONE;
+
+ log_dbg(dev, "SRC: ctx %pa buf_index %d %dx%d, sw %d\n",
+ &ctx, s_vb->index,
+ src_info.width, src_info.height, src_info.screen_width);
+
+ /* fill WB DSS info */
+ wb_info.paddr = (u32)dst_dma_addr[0];
+ wb_info.p_uv_addr = (u32)dst_dma_addr[1];
+
+ wb_info.buf_width = dpix->plane_fmt[0].bytesperline /
+ (d_q_data->fmt->depth[0] / 8);
+
+ /* update addr based on compose window */
+ stride = dpix->plane_fmt[0].bytesperline;
+ depth = d_q_data->fmt->depth[0];
+ wb_info.paddr += drect->top * stride + (drect->left * depth / 8);
+
+ if (wb_info.p_uv_addr) {
+ u32 top = drect->top;
+
+ if (dpix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ dpix->pixelformat == V4L2_PIX_FMT_NV12M) {
+ top >>= 1;
+ depth = 8;
+ }
+ wb_info.p_uv_addr += top * stride + (drect->left * depth / 8);
+ }
+
+ wb_info.width = drect->width;
+ wb_info.height = drect->height;
+ wb_info.fourcc = omap_wb_fourcc_v4l2_to_drm(dpix->pixelformat);
+ wb_info.pre_mult_alpha = 1;
+
+ wb_info.rotation = DRM_MODE_ROTATE_0;
+ wb_info.rotation_type = OMAP_DSS_ROT_NONE;
+
+ log_dbg(dev, "DST: ctx %pa buf_index %d %dx%d, sw %d\n",
+ &ctx, d_vb->index,
+ wb_info.width, wb_info.height, wb_info.buf_width);
+
+ ok = wbm2m_convert(dev, omap_plane_id_wb(dev->plane), &src_info,
+ &wb_info);
+ if (!ok) {
+ log_err(dev,
+ "Conversion setup failed, check source and destination parameters\n"
+ );
+ log_err(dev, "\tSRC: %dx%d, fmt: %4.4s sw %d\n",
+ src_info.width, src_info.height,
+ (char *)&spix->pixelformat,
+ src_info.screen_width);
+ log_err(dev, "\tDST: %dx%d, fmt: %4.4s sw %d\n",
+ wb_info.width, wb_info.height,
+ (char *)&dpix->pixelformat,
+ wb_info.buf_width);
+ return;
+ }
+}
+
+void wbm2m_irq(struct wbm2m_dev *wbm2m, u32 irqstatus)
+{
+ struct wbm2m_ctx *ctx;
+ struct vb2_v4l2_buffer *s_vb, *d_vb;
+ unsigned long flags;
+
+ if (irqstatus & DISPC_IRQ_WBBUFFEROVERFLOW)
+ log_err(wbm2m, "WB: UNDERFLOW\n");
+
+ if (irqstatus & DISPC_IRQ_WBUNCOMPLETEERROR)
+ log_err(wbm2m, "WB: DISPC_IRQ_WBUNCOMPLETEERROR\n");
+
+ /* If DISPC_IRQ_FRAMEDONEWB is not set then we are done */
+ if (!(irqstatus & DISPC_IRQ_FRAMEDONEWB))
+ goto handled;
+
+ log_dbg(wbm2m, "WB: FRAMEDONE\n");
+
+ ctx = v4l2_m2m_get_curr_priv(wbm2m->m2m_dev);
+ if (!ctx) {
+ log_err(wbm2m, "instance released before end of transaction\n");
+ goto handled;
+ }
+
+ log_dbg(ctx->dev, "ctx %pa\n", &ctx);
+
+ s_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ d_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!s_vb || !d_vb) {
+ log_err(wbm2m, "source or dest vb pointer is NULL!!");
+ goto handled;
+ }
+
+ d_vb->flags = s_vb->flags;
+
+ d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;
+ if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ d_vb->timecode = s_vb->timecode;
+
+ d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
+ log_dbg(wbm2m, "ctx %pa sequence %d\n",
+ &ctx, ctx->sequence);
+
+ d_vb->field = V4L2_FIELD_NONE;
+ ctx->sequence++;
+
+ spin_lock_irqsave(&wbm2m->lock, flags);
+
+ v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+
+ spin_unlock_irqrestore(&wbm2m->lock, flags);
+
+ v4l2_m2m_job_finish(wbm2m->m2m_dev, ctx->fh.m2m_ctx);
+handled:
+ return;
+}
+
+/*
+ * video ioctls
+ */
+static int wbm2m_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct wbm2m_ctx *ctx = file->private_data;
+
+ strscpy(cap->driver, WBM2M_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, WBM2M_MODULE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ ctx->dev->v4l2_dev.name);
+ return 0;
+}
+
+static int wbm2m_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= num_wb_formats)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->pixelformat = wb_formats[f->index].fourcc;
+ return 0;
+}
+
+static int wbm2m_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct wbm2m_ctx *ctx = file->private_data;
+ struct vb2_queue *vq;
+ struct wb_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *f = q_data->format;
+
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+ struct wb_q_data *s_q_data;
+
+ /* get colorspace from the source queue */
+ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ f->fmt.pix_mp.colorspace =
+ s_q_data->format.fmt.pix_mp.colorspace;
+ }
+
+ log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %4.4s bpl_y %d",
+ &ctx, f->type, pix->width, pix->height,
+ (char *)&pix->pixelformat,
+ pix->plane_fmt[LUMA_PLANE].bytesperline);
+ if (pix->num_planes == 2)
+ log_dbg(ctx->dev, " bpl_uv %d\n",
+ pix->plane_fmt[CHROMA_PLANE].bytesperline);
+
+ return 0;
+}
+
+static int wbm2m_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct wbm2m_ctx *ctx = file->private_data;
+ struct wb_fmt *fmt = find_format(f);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ unsigned int w_align;
+ int i, depth, depth_bytes;
+
+ if (!fmt) {
+ log_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+ fmt = &wb_formats[1];
+ }
+
+ /* we only allow V4L2_FIELD_NONE */
+ if (pix->field != V4L2_FIELD_NONE)
+ pix->field = V4L2_FIELD_NONE;
+
+ depth = fmt->depth[LUMA_PLANE];
+
+ /*
+ * The line stride needs to be even. As a special case, the YUV422
+ * interleaved formats also require an even number of pixels.
+ */
+ depth_bytes = depth >> 3;
+
+ w_align = 0;
+ if ((depth_bytes == 3) || (depth_bytes == 1))
+ w_align = 1;
+ else if ((depth_bytes == 2) &&
+ (fmt->fourcc == V4L2_PIX_FMT_YUYV ||
+ fmt->fourcc == V4L2_PIX_FMT_UYVY))
+ w_align = 1;
+
+ v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ pix->pixelformat = fmt->fourcc;
+
+ /* Default the colorspace if none was given; this heuristic is rough */
+ if (!pix->colorspace) {
+ if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
+ fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
+ fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ if (pix->height > 1280) /* HD */
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ else /* SD */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ }
+ }
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+ for (i = 0; i < pix->num_planes; i++) {
+ plane_fmt = &pix->plane_fmt[i];
+ depth = fmt->depth[i];
+
+ if (i == LUMA_PLANE)
+ plane_fmt->bytesperline = pix->width * depth / 8;
+ else
+ plane_fmt->bytesperline = pix->width;
+
+ plane_fmt->sizeimage = (pix->height * pix->width *
+ depth) / 8;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+ /*
+ * Since we are using a single plane buffer
+ * we need to adjust the reported sizeimage
+ * to include the colocated UV part.
+ */
+ plane_fmt->sizeimage += (pix->height / 2 *
+ plane_fmt->bytesperline);
+ }
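+ /*
+ * Illustrative numbers, assuming NV12 reports a luma depth
+ * of 8: at 1920x1080 this gives 1920 * 1080 luma bytes plus
+ * 540 * 1920 UV bytes, i.e. the usual 1.5 bytes per pixel.
+ */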
+
+ memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
+ }
+
+ return 0;
+}
+
+static int __wbm2m_s_fmt(struct wbm2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct wb_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ log_err(ctx->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ q_data->fmt = find_format(f);
+ q_data->format = *f;
+
+ q_data->c_rect.left = 0;
+ q_data->c_rect.top = 0;
+ q_data->c_rect.width = pix->width;
+ q_data->c_rect.height = pix->height;
+
+ log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %4.4s bpl_y %d",
+ &ctx, f->type, pix->width, pix->height,
+ (char *)&pix->pixelformat,
+ pix->plane_fmt[LUMA_PLANE].bytesperline);
+ if (pix->num_planes == 2)
+ log_dbg(ctx->dev, " bpl_uv %d\n",
+ pix->plane_fmt[CHROMA_PLANE].bytesperline);
+
+ return 0;
+}
+
+static int wbm2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ int ret;
+ struct wbm2m_ctx *ctx = file->private_data;
+
+ ret = wbm2m_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = __wbm2m_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ ctx->sequence = 0;
+
+ return 0;
+}
+
+static int __wbm2m_try_selection(struct wbm2m_ctx *ctx,
+ struct v4l2_selection *s)
+{
+ struct wb_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
+ unsigned int w_align;
+ int depth_bytes;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ /*
+ * bound and default crop/compose targets are invalid targets to
+ * try/set
+ */
+ default:
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ log_err(ctx->dev, "negative values for top and left\n");
+ s->r.top = 0;
+ s->r.left = 0;
+ }
+
+ depth_bytes = q_data->fmt->depth[LUMA_PLANE] >> 3;
+
+ w_align = 0;
+ if ((depth_bytes == 3) || (depth_bytes == 1))
+ w_align = 1;
+ else if ((depth_bytes == 2) &&
+ (pix->pixelformat == V4L2_PIX_FMT_YUYV ||
+ pix->pixelformat == V4L2_PIX_FMT_UYVY))
+ w_align = 1;
+
+ v4l_bound_align_image(&s->r.width, MIN_W, pix->width, w_align,
+ &s->r.height, MIN_H, pix->height,
+ H_ALIGN, S_ALIGN);
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (s->r.left + s->r.width > pix->width)
+ s->r.left = pix->width - s->r.width;
+ if (s->r.top + s->r.height > pix->height)
+ s->r.top = pix->height - s->r.height;
+
+ return 0;
+}
+
+static int wbm2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct wbm2m_ctx *ctx = file->private_data;
+ struct wb_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
+ bool use_c_rect = false;
+
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_c_rect = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_c_rect = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_c_rect) {
+ /*
+ * for CROP/COMPOSE target type, return c_rect params from the
+ * respective buffer type
+ */
+ s->r = q_data->c_rect;
+ } else {
+ /*
+ * for DEFAULT/BOUNDS target type, return width and height from
+ * S_FMT of the respective buffer type
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = pix->width;
+ s->r.height = pix->height;
+ }
+
+ return 0;
+}
+
+static int wbm2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct wbm2m_ctx *ctx = file->private_data;
+ struct wb_q_data *q_data;
+ struct v4l2_selection sel = *s;
+ int ret;
+
+ ret = __wbm2m_try_selection(ctx, &sel);
+ if (ret)
+ return ret;
+
+ q_data = get_q_data(ctx, sel.type);
+ if (!q_data)
+ return -EINVAL;
+
+ if ((q_data->c_rect.left == sel.r.left) &&
+ (q_data->c_rect.top == sel.r.top) &&
+ (q_data->c_rect.width == sel.r.width) &&
+ (q_data->c_rect.height == sel.r.height)) {
+ log_dbg(ctx->dev,
+ "type: %d, requested crop/compose values are already set\n",
+ sel.type);
+ return 0;
+ }
+
+ q_data->c_rect = sel.r;
+
+ ctx->sequence = 0;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops wbm2m_ioctl_ops = {
+ .vidioc_querycap = wbm2m_querycap,
+
+ .vidioc_enum_fmt_vid_cap = wbm2m_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = wbm2m_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = wbm2m_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = wbm2m_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = wbm2m_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = wbm2m_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = wbm2m_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = wbm2m_s_fmt,
+
+ .vidioc_g_selection = wbm2m_g_selection,
+ .vidioc_s_selection = wbm2m_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+static int wbm2m_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ int i;
+ struct wbm2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct wb_q_data *q_data;
+
+ q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *nplanes = q_data->format.fmt.pix_mp.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+
+ log_dbg(ctx->dev, "get %d buffer(s) of size %d\n", *nbuffers,
+ sizes[LUMA_PLANE]);
+ if (*nplanes == 2)
+ log_dbg(ctx->dev, " and %d\n", sizes[CHROMA_PLANE]);
+
+ return 0;
+}
+
+static int wbm2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct wbm2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct wb_q_data *q_data;
+ struct v4l2_pix_format_mplane *mp;
+ int i, num_planes;
+
+ log_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ num_planes = q_data->format.fmt.pix_mp.num_planes;
+
+ for (i = 0; i < num_planes; i++) {
+ mp = &q_data->format.fmt.pix_mp;
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (vb2_get_plane_payload(vb, i) <
+ mp->plane_fmt[i].sizeimage) {
+ log_dbg(ctx->dev,
+ "the payload is too small for plane plane (%lu < %lu)\n",
+ vb2_get_plane_payload(vb, i),
+ (long)mp->plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+ } else {
+ if (vb2_plane_size(vb, i) <
+ mp->plane_fmt[i].sizeimage) {
+ log_dbg(ctx->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long)mp->plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i,
+ mp->plane_fmt[i].sizeimage);
+ }
+ }
+
+ if (num_planes == 2) {
+ if (vb->planes[0].m.fd ==
+ vb->planes[1].m.fd) {
+ /*
+ * It appears both planes share a single memory
+ * buffer (a 2-plane format in one buffer); we then
+ * need to also set data_offset for the second plane.
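+ *
+ * For example (illustrative): NV12 1920x1080 carried in
+ * one dmabuf gives plane 0 a payload of 1920 * 1080
+ * bytes, so plane 1's data_offset becomes 2073600.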
+ */
+ vb->planes[1].data_offset =
+ vb2_get_plane_payload(vb, 0);
+ }
+ }
+ return 0;
+}
+
+static void wbm2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct wbm2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ log_dbg(ctx->dev, "queueing buffer: %s index %d\n",
+ V4L2_TYPE_IS_OUTPUT(vb->type) ? "OUTPUT" : "CAPTURE",
+ vb->index);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int wbm2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct wbm2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct omap_drm_private *priv = ctx->dev->dev->drm_dev->dev_private;
+
+ log_dbg(ctx->dev, "ctx %pa queue: %s\n", &ctx,
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "OUTPUT" : "CAPTURE");
+
+ ctx->sequence = 0;
+
+ priv->dispc_ops->runtime_get(priv->dispc);
+ atomic_inc(&ctx->dev->dev->irq_enabled);
+
+ return 0;
+}
+
+static void wbm2m_stop_streaming(struct vb2_queue *q)
+{
+ struct wbm2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct omap_drm_private *priv = ctx->dev->dev->drm_dev->dev_private;
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ log_dbg(ctx->dev, "ctx %pa queue: %s\n", &ctx,
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "OUTPUT" : "CAPTURE");
+
+ atomic_dec(&ctx->dev->dev->irq_enabled);
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vb)
+ break;
+ log_dbg(ctx->dev, "returning from queue: buffer index %d\n",
+ vb->vb2_buf.index);
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
+ }
+
+ /*
+ * Clean up the in-transit vb2 buffers that have already been
+ * removed from their respective queue but for which
+ * processing has not completed yet.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * DRA7xx errata i829 (Reusing Pipe Connected To Writeback
+ * Pipeline On The Fly To An Active Panel)
+ */
+ priv->dispc_ops->ovl_enable(priv->dispc,
+ omap_plane_id_wb(ctx->dev->plane),
+ false);
+ priv->dispc_ops->ovl_enable(priv->dispc, OMAP_DSS_WB, true);
+ priv->dispc_ops->ovl_enable(priv->dispc, OMAP_DSS_WB, false);
+ }
+
+ priv->dispc_ops->runtime_put(priv->dispc);
+}
+
+static const struct vb2_ops wbm2m_qops = {
+ .queue_setup = wbm2m_queue_setup,
+ .buf_prepare = wbm2m_buf_prepare,
+ .buf_queue = wbm2m_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = wbm2m_start_streaming,
+ .stop_streaming = wbm2m_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct wbm2m_ctx *ctx = priv;
+ struct wbm2m_dev *dev = ctx->dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &wbm2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &dev->dev->lock;
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &wbm2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &dev->dev->lock;
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int wbm2m_open(struct file *file)
+{
+ struct wbm2m_dev *dev = video_drvdata(file);
+ struct wb_q_data *s_q_data;
+ struct wbm2m_ctx *ctx;
+ struct v4l2_pix_format_mplane *pix;
+ int ret;
+
+ log_dbg(dev, "enter\n");
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (mutex_lock_interruptible(&dev->dev->lock)) {
+ ret = -ERESTARTSYS;
+ goto free_ctx;
+ }
+
+ if ((dev->dev->mode != OMAP_WB_NOT_CONFIGURED) &&
+ (dev->dev->mode != OMAP_WB_MEM2MEM_OVL)) {
+ /* WB is already open for other modes */
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = ctx;
+
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ s_q_data->fmt = &wb_formats[1];
+ pix = &s_q_data->format.fmt.pix_mp;
+ pix->pixelformat = s_q_data->fmt->fourcc;
+ s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ pix->width = 1920;
+ pix->height = 1080;
+ pix->plane_fmt[LUMA_PLANE].bytesperline = (pix->width *
+ s_q_data->fmt->depth[LUMA_PLANE]) >> 3;
+ pix->plane_fmt[LUMA_PLANE].sizeimage =
+ pix->plane_fmt[LUMA_PLANE].bytesperline *
+ pix->height;
+ pix->num_planes = s_q_data->fmt->coplanar ? 2 : 1;
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->field = V4L2_FIELD_NONE;
+ s_q_data->c_rect.left = 0;
+ s_q_data->c_rect.top = 0;
+ s_q_data->c_rect.width = pix->width;
+ s_q_data->c_rect.height = pix->height;
+
+ ctx->q_data[Q_DATA_DST] = *s_q_data;
+ ctx->q_data[Q_DATA_DST].format.type =
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ ctx->sequence = 0;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto exit_fh;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ if (v4l2_fh_is_singular_file(file)) {
+ log_dbg(dev, "first instance created\n");
+
+ drm_modeset_lock_all(dev->dev->drm_dev);
+ dev->plane = omap_plane_reserve_wb(dev->dev->drm_dev);
+ drm_modeset_unlock_all(dev->dev->drm_dev);
+
+ if (!dev->plane) {
+ log_dbg(dev, "Could not reserve plane!\n");
+ ret = -EBUSY;
+ goto free_fh;
+ }
+
+ dev->dev->mode = OMAP_WB_MEM2MEM_OVL;
+ }
+
+ log_dbg(dev, "created instance %pa, m2m_ctx: %pa\n",
+ &ctx, &ctx->fh.m2m_ctx);
+
+ mutex_unlock(&dev->dev->lock);
+
+ return 0;
+
+free_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+exit_fh:
+ v4l2_fh_exit(&ctx->fh);
+unlock:
+ mutex_unlock(&dev->dev->lock);
+free_ctx:
+ kfree(ctx);
+ return ret;
+}
+
+static int wbm2m_release(struct file *file)
+{
+ struct wbm2m_dev *dev = video_drvdata(file);
+ struct wbm2m_ctx *ctx = file->private_data;
+ bool fh_singular;
+
+ log_dbg(dev, "releasing instance %pa\n", &ctx);
+
+ mutex_lock(&dev->dev->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ if (fh_singular) {
+ log_dbg(dev, "last instance released\n");
+
+ drm_modeset_lock_all(dev->dev->drm_dev);
+ omap_plane_release_wb(dev->plane);
+ drm_modeset_unlock_all(dev->dev->drm_dev);
+ dev->dev->mode = OMAP_WB_NOT_CONFIGURED;
+ }
+
+ mutex_unlock(&dev->dev->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations wbm2m_fops = {
+ .owner = THIS_MODULE,
+ .open = wbm2m_open,
+ .release = wbm2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct video_device wbm2m_videodev = {
+ .name = WBM2M_MODULE_NAME,
+ .fops = &wbm2m_fops,
+ .ioctl_ops = &wbm2m_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+int wbm2m_init(struct wb_dev *dev)
+{
+ struct wbm2m_dev *wbm2m;
+ struct video_device *vfd;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ /* Allocate a new instance */
+ wbm2m = devm_kzalloc(dev->drm_dev->dev, sizeof(*wbm2m), GFP_KERNEL);
+ if (!wbm2m)
+ return -ENOMEM;
+
+ dev->m2m = wbm2m;
+ wbm2m->dev = dev;
+
+ spin_lock_init(&wbm2m->lock);
+
+ snprintf(wbm2m->v4l2_dev.name, sizeof(wbm2m->v4l2_dev.name),
+ "%s", WBM2M_MODULE_NAME);
+ ret = v4l2_device_register(dev->drm_dev->dev, &wbm2m->v4l2_dev);
+ if (ret)
+ return ret;
+
+ wbm2m->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(wbm2m->m2m_dev)) {
+ log_err(wbm2m, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(wbm2m->m2m_dev);
+ goto v4l2_dev_unreg;
+ }
+
+ vfd = &wbm2m->vfd;
+ *vfd = wbm2m_videodev;
+ vfd->lock = &dev->lock;
+ vfd->v4l2_dev = &wbm2m->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 10);
+ if (ret) {
+ log_err(wbm2m, "Failed to register video device\n");
+ goto rel_m2m;
+ }
+
+ video_set_drvdata(vfd, wbm2m);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", wbm2m_videodev.name);
+ log_dbg(wbm2m, "Device registered as %s\n",
+ video_device_node_name(vfd));
+
+ return 0;
+
+rel_m2m:
+ v4l2_m2m_release(wbm2m->m2m_dev);
+v4l2_dev_unreg:
+ v4l2_device_unregister(&wbm2m->v4l2_dev);
+
+ return ret;
+}
+
+void wbm2m_cleanup(struct wb_dev *dev)
+{
+ log_dbg(dev->m2m, "Cleanup WB M2M\n");
+
+ v4l2_m2m_release(dev->m2m->m2m_dev);
+ video_unregister_device(&dev->m2m->vfd);
+ v4l2_device_unregister(&dev->m2m->v4l2_dev);
+}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 1a87cc445b5e..1a76eb1a640a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -858,6 +858,63 @@ static const struct panel_desc auo_b116xak01 = {
.connector_type = DRM_MODE_CONNECTOR_eDP,
};
+static const struct drm_display_mode ti_panel_edp_mode[] = {
+ {
+ .clock = 36000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 48,
+ .hsync_end = 800 + 48 + 32,
+ .htotal = 800 + 48 + 32 + 80,
+ .vdisplay = 600,
+ .vsync_start = 600 + 3,
+ .vsync_end = 600 + 3 + 4,
+ .vtotal = 600 + 3 + 4 + 9,
+
+ .crtc_clock = 36000,
+ .crtc_hdisplay = 800,
+ .crtc_hsync_start = 800 + 48,
+ .crtc_hsync_end = 800 + 48 + 32,
+ .crtc_htotal = 800 + 48 + 32 + 80,
+ .crtc_vdisplay = 600,
+ .crtc_vsync_start = 600 + 3,
+ .crtc_vsync_end = 600 + 3 + 4,
+ .crtc_vtotal = 600 + 3 + 4 + 9,
+
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+ {
+ .clock = 108000,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 112,
+ .htotal = 1280 + 48 + 112 + 248,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 1,
+ .vsync_end = 1024 + 1 + 3,
+ .vtotal = 1024 + 1 + 3 + 38,
+
+ .crtc_clock = 108000,
+ .crtc_hdisplay = 1280,
+ .crtc_hsync_start = 1280 + 48,
+ .crtc_hsync_end = 1280 + 48 + 112,
+ .crtc_htotal = 1280 + 48 + 112 + 248,
+ .crtc_vdisplay = 1024,
+ .crtc_vsync_start = 1024 + 1,
+ .crtc_vsync_end = 1024 + 1 + 3,
+ .crtc_vtotal = 1024 + 1 + 3 + 38,
+
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
+
+static const struct panel_desc ti_panel_edp = {
+ .modes = ti_panel_edp_mode,
+ .num_modes = ARRAY_SIZE(ti_panel_edp_mode),
+ .bpc = 8,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode auo_b116xw03_mode = {
.clock = 70589,
.hdisplay = 1366,
@@ -2761,6 +2818,35 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
+static const struct drm_display_mode microtips_13_101hieb0hf0_s_mode = {
+ .clock = 150275,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 32,
+ .hsync_end = 1920 + 32 + 52,
+ .htotal = 1920 + 32 + 52 + 24,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 24,
+ .vsync_end = 1200 + 24 + 8,
+ .vtotal = 1200 + 24 + 8 + 3,
+};
+
+static const struct panel_desc microtips_13_101hieb0hf0_s = {
+ .modes = &microtips_13_101hieb0hf0_s_mode,
+ .bpc = 8,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@@ -3235,6 +3321,7 @@ static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
static const struct panel_desc rocktech_rk101ii01d_ct = {
.modes = &rocktech_rk101ii01d_ct_mode,
+ .bpc = 8,
.num_modes = 1,
.size = {
.width = 217,
@@ -4155,6 +4242,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logictechno,lt170410-2whc",
.data = &logictechno_lt170410_2whc,
}, {
+ .compatible = "microtips,13-101hieb0hf0-s",
+ .data = &microtips_13_101hieb0hf0_s,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -4302,6 +4392,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "vxt,vl050-8048nt-c01",
.data = &vl050_8048nt_c01,
}, {
+ .compatible = "ti,panel-edp",
+ .data = &ti_panel_edp,
+ }, {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index b669168ae7cb..5571ddfe3c44 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -25,6 +25,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
@@ -272,6 +273,55 @@ const struct dispc_features dispc_j721e_feats = {
.vid_order = { 1, 3, 0, 2 },
};
+const struct dispc_features dispc_am625_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_OLDI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM625,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+};
+
static const u16 *dispc_common_regmap;
struct dss_vp_data {
@@ -299,6 +349,8 @@ struct dispc_device {
struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+ enum dss_oldi_modes oldi_mode;
+
u32 *fourccs;
u32 num_fourccs;
@@ -775,6 +827,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM65X:
case DISPC_J721E:
+ case DISPC_AM625:
return dispc_k3_read_and_clear_irqstatus(dispc);
default:
WARN_ON(1);
@@ -790,6 +843,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
break;
case DISPC_AM65X:
case DISPC_J721E:
+ case DISPC_AM625:
dispc_k3_set_irqenable(dispc, mask);
break;
default:
@@ -860,21 +914,54 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
{
- u32 val = power ? 0 : OLDI_PWRDN_TX;
+ u32 val;
if (WARN_ON(!dispc->oldi_io_ctrl))
return;
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
- OLDI_PWRDN_TX, val);
- regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
- OLDI_PWRDN_TX, val);
+ if (dispc->feat->subrev == DISPC_AM65X) {
+ val = power ? 0 : OLDI_PWRDN_TX;
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+
+ } else if (dispc->feat->subrev == DISPC_AM625) {
+ if (power) {
+ switch (dispc->oldi_mode) {
+ case OLDI_SINGLE_LINK_SINGLE_MODE:
+ /* Power down OLDI TX 1 */
+ val = OLDI1_PWRDN_TX;
+ break;
+
+ case OLDI_SINGLE_LINK_DUPLICATE_MODE:
+ case OLDI_DUAL_LINK:
+ /* No Power down */
+ val = 0;
+ break;
+
+ default:
+ /* Power down both OLDI TXes and the LVDS Bandgap */
+ val = OLDI_BG_PWRDN | OLDI0_PWRDN_TX | OLDI1_PWRDN_TX;
+ break;
+ }
+
+ } else {
+ /* Power down both OLDI TXes and the LVDS Bandgap */
+ val = OLDI_BG_PWRDN | OLDI0_PWRDN_TX | OLDI1_PWRDN_TX;
+ }
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_PD_CTRL,
+ OLDI_BG_PWRDN | OLDI0_PWRDN_TX |
+ OLDI1_PWRDN_TX, val);
+ }
}
static void dispc_set_num_datalines(struct dispc_device *dispc,
@@ -911,8 +998,8 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
int count = 0;
/*
- * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
- * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+ * For the moment MASTERSLAVE, and SRC bits of DISPC_VP_DSS_OLDI_CFG are
+ * set statically to 0.
*/
if (fmt->data_width == 24)
@@ -929,6 +1016,30 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
oldi_cfg |= BIT(0); /* ENABLE */
+ switch (dispc->oldi_mode) {
+ case OLDI_MODE_OFF:
+ oldi_cfg &= ~BIT(0); /* DISABLE */
+ break;
+
+ case OLDI_SINGLE_LINK_SINGLE_MODE:
+ /* All configuration is done for this mode. */
+ break;
+
+ case OLDI_SINGLE_LINK_DUPLICATE_MODE:
+ oldi_cfg |= BIT(5); /* DUPLICATE MODE */
+ break;
+
+ case OLDI_DUAL_LINK:
+ oldi_cfg |= BIT(11); /* DUALMODESYNC */
+ oldi_cfg |= BIT(3); /* data-mapping field also indicates dual-link mode */
+ break;
+
+ default:
+ dev_warn(dispc->dev, "%s: Incorrect oldi mode. Returning.\n",
+ __func__);
+ return;
+ }
+
dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
@@ -1216,6 +1327,16 @@ int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
int r;
unsigned long new_rate;
+ /*
+ * For AM625 OLDI video ports, the requested pixel clock needs to take into account the
+ * serial clock required for the serialization of DPI signals into LVDS signals. The
+ * incoming pixel clock on the OLDI video port gets divided by 7 whenever OLDI enable bit
+ * gets set.
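+ *
+ * E.g. (illustrative): a 74.25 MHz pixel clock is requested from the
+ * VP clock as 7 * 74.25 = 519.75 MHz below; the serializer's
+ * divide-by-7 then restores the 74.25 MHz pixel rate.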
+ */
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI &&
+ dispc->feat->subrev == DISPC_AM625)
+ rate *= 7;
+
r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
if (r) {
dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
@@ -1279,6 +1400,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
x, y, layer);
break;
case DISPC_AM65X:
+ case DISPC_AM625:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
break;
@@ -1855,8 +1977,8 @@ static const struct {
{ DRM_FORMAT_XBGR4444, 0x21, },
{ DRM_FORMAT_RGBX4444, 0x22, },
- { DRM_FORMAT_ARGB1555, 0x25, },
- { DRM_FORMAT_ABGR1555, 0x26, },
+ { DRM_FORMAT_XRGB1555, 0x25, },
+ { DRM_FORMAT_XBGR1555, 0x26, },
{ DRM_FORMAT_XRGB8888, 0x27, },
{ DRM_FORMAT_XBGR8888, 0x28, },
@@ -2202,6 +2324,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
break;
case DISPC_AM65X:
case DISPC_J721E:
+ case DISPC_AM625:
dispc_k3_plane_init(dispc);
break;
default:
@@ -2307,6 +2430,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
break;
case DISPC_AM65X:
+ case DISPC_AM625:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
case DISPC_J721E:
@@ -2580,7 +2704,7 @@ int dispc_runtime_resume(struct dispc_device *dispc)
REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
- if (dispc->feat->subrev == DISPC_AM65X)
+ if (dispc->feat->subrev == DISPC_AM65X || dispc->feat->subrev == DISPC_AM625)
dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
@@ -2657,6 +2781,74 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
+static void dispc_softreset(struct dispc_device *dispc)
+{
+ u32 val;
+ int ret = 0;
+
+ /* Soft reset */
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ /* Wait for reset to complete */
+ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
+ val, val & 1, 100, 5000);
+ if (ret)
+ dev_warn(dispc->dev, "failed to reset dispc\n");
+}
+
+static void dispc_get_oldi_mode(struct dispc_device *dispc)
+{
+ int pixel_order;
+ struct device_node *dss_ports, *oldi0_port, *oldi1_port;
+
+ dss_ports = of_get_next_child(dispc->dev->of_node, NULL);
+ oldi0_port = of_graph_get_port_by_id(dss_ports, 0);
+ oldi1_port = of_graph_get_port_by_id(dss_ports, 2);
+
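+ /*
+ * Mode selection summary (as implemented below): no OLDI ports means
+ * OLDI is off; exactly one port means single-link single mode; both
+ * ports without dual-link pixel-order markers means duplicate mode;
+ * both ports with odd/even markers means dual link.
+ */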
+ if (!oldi0_port && !oldi1_port) {
+ dispc->oldi_mode = OLDI_MODE_OFF;
+ } else if ((oldi0_port && !oldi1_port) ||
+ (!oldi0_port && oldi1_port)) {
+ dispc->oldi_mode = OLDI_SINGLE_LINK_SINGLE_MODE;
+ } else {
+ /*
+ * OLDI Ports found for both the OLDI TXes. The DSS is
+ * to be configured in either Dual Link or Cloning Mode.
+ */
+ pixel_order = drm_of_lvds_get_dual_link_pixel_order(oldi0_port,
+ oldi1_port);
+ switch (pixel_order) {
+ case -EINVAL:
+ /*
+ * The dual link properties were not found in at least
+ * one of the sink nodes. Since the ports are present in
+ * the DT, we can safely assume the required configuration
+ * is Duplicate Modes.
+ */
+ dispc->oldi_mode = OLDI_SINGLE_LINK_DUPLICATE_MODE;
+ break;
+
+ case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
+ case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
+ /*
+ * Note that the OLDI TX 0 transmits the odd set of pixels
+ * while the OLDI TX 1 transmits the even set. This is a
+ * fixed configuration in the IP integration and is not
+ * changeable. These properties have been used to merely
+ * identify if a Dual Link configuration is required.
+ * Swapping these properties in the panel port DT
+ * nodes makes no difference; the values are
+ * effectively don't-cares here.
+ */
+ dispc->oldi_mode = OLDI_DUAL_LINK;
+ break;
+
+ default:
+ dispc->oldi_mode = OLDI_MODE_OFF;
+ break;
+ }
+ }
+}
+
int dispc_init(struct tidss_device *tidss)
{
struct device *dev = tidss->dev;
@@ -2716,6 +2908,10 @@ int dispc_init(struct tidss_device *tidss)
return r;
}
+ /* K2G display controller does not support soft reset */
+ if (feat->subrev != DISPC_K2G)
+ dispc_softreset(dispc);
+
for (i = 0; i < dispc->feat->num_vps; i++) {
u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
u32 *gamma_table;
@@ -2747,10 +2943,19 @@ int dispc_init(struct tidss_device *tidss)
dispc->vp_data[i].gamma_table = gamma_table;
}
- if (feat->subrev == DISPC_AM65X) {
- r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
- if (r)
- return r;
+ /*
+ * K2G and J721E DSS do not support these properties.
+ * Furthermore, the default mode for the OLDI TXes is OFF.
+ */
+ dispc->oldi_mode = OLDI_MODE_OFF;
+
+ if (feat->subrev == DISPC_AM65X || feat->subrev == DISPC_AM625) {
+ dispc_get_oldi_mode(dispc);
+ if (dispc->oldi_mode) {
+ r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+ if (r)
+ return r;
+ }
}
dispc->fclk = devm_clk_get(dev, "fck");
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index e49432f0abf5..6cb0a6ab6eb4 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -61,6 +61,14 @@ enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM65X,
DISPC_J721E,
+ DISPC_AM625,
+};
+
+enum dss_oldi_modes {
+ OLDI_MODE_OFF, /* OLDI turned off / tied off in IP. */
+ OLDI_SINGLE_LINK_SINGLE_MODE, /* Single Output over OLDI 0. */
+ OLDI_SINGLE_LINK_DUPLICATE_MODE, /* Duplicate Output over OLDI 0 and 1. */
+ OLDI_DUAL_LINK, /* Combined Output over OLDI 0 and 1. */
};
struct dispc_features {
@@ -88,6 +96,7 @@ struct dispc_features {
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
+extern const struct dispc_features dispc_am625_feats;
void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 13feedfe5d6d..bdb82b487d79 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -238,6 +238,15 @@ enum dispc_common_regs {
#define OLDI_DAT3_IO_CTRL 0x0C
#define OLDI_CLK_IO_CTRL 0x10
+/* Only for AM625 OLDI TX */
+#define OLDI_PD_CTRL 0x100
+#define OLDI_LB_CTRL 0x104
+
#define OLDI_PWRDN_TX BIT(8)
+#define OLDI0_PWRDN_TX BIT(0)
+#define OLDI1_PWRDN_TX BIT(1)
+
+/* Bit to power on/off Bandgap reference for LVDS Buffers */
+#define OLDI_BG_PWRDN BIT(8)
#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 9179ea18f625..b2edeefabdd5 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -244,6 +244,7 @@ static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+ { .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ }
};
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 43e72d0b2d84..35067ae674ea 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
@@ -150,6 +151,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
}
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
.atomic_disable = tidss_plane_atomic_disable,
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 3b05560456ea..33eeff94fc2a 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -2,11 +2,12 @@
/*
* OMAP hardware spinlock driver
*
- * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
+ * Suman Anna <s-anna@ti.com>
*/
#include <linux/kernel.h>
@@ -164,6 +165,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
static const struct of_device_id omap_hwspinlock_of_match[] = {
{ .compatible = "ti,omap4-hwspinlock", },
+ { .compatible = "ti,am64-hwspinlock", },
{ .compatible = "ti,am654-hwspinlock", },
{ /* end */ },
};
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 438905e2a1d0..c6d1a345ea6d 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -71,6 +71,15 @@ config I2C_MUX
source "drivers/i2c/muxes/Kconfig"
+config I2C_ATR
+ tristate "I2C Address Translator (ATR) support"
+ help
+ Enable support for I2C Address Translator (ATR) chips.
+
+ An ATR allows accessing multiple I2C busses from a single
+ physical bus via address translation instead of bus selection as
+ i2c-muxes do.
+
config I2C_HELPER_AUTO
bool "Autoselect pertinent helper modules"
default y
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index c1d493dc9bac..3f71ce4711e3 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -13,6 +13,7 @@ i2c-core-$(CONFIG_OF) += i2c-core-of.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
+obj-$(CONFIG_I2C_ATR) += i2c-atr.o
obj-y += algos/ busses/ muxes/
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
new file mode 100644
index 000000000000..699446be1e90
--- /dev/null
+++ b/drivers/i2c/i2c-atr.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ *
+ * An I2C Address Translator (ATR) is a device with an I2C slave parent
+ * ("upstream") port and N I2C master child ("downstream") ports, and
+ * forwards transactions from upstream to the appropriate downstream port
+ * with a modified slave address. The address used on the parent bus is
+ * called the "alias" and is (potentially) different from the physical
+ * slave address of the child bus. Address translation is done by the
+ * hardware.
+ *
+ * An ATR looks similar to an i2c-mux except:
+ * - the address on the parent and child busses can be different
+ * - there is normally no need to select the child port; the alias used on
+ * the parent bus implies it
+ *
+ * The ATR functionality can be provided by a chip with many other
+ * features. This file provides a helper to implement an ATR within your
+ * driver.
+ *
+ * The ATR creates a new I2C "child" adapter on each child bus. Adding
+ * devices on the child bus ends up invoking the driver code to select
+ * an available alias. Maintaining an appropriate pool of available aliases
+ * and picking one for each new device is up to the driver implementer. The
+ * ATR maintains a table of currently assigned aliases and uses it to modify
+ * all I2C transactions directed to devices on the child buses.
+ *
+ * A typical example follows.
+ *
+ * Topology:
+ *
+ * Slave X @ 0x10
+ * .-----. |
+ * .-----. | |---+---- B
+ * | CPU |--A--| ATR |
+ * `-----' | |---+---- C
+ * `-----' |
+ * Slave Y @ 0x10
+ *
+ * Alias table:
+ *
+ * Client Alias
+ * -------------
+ * X 0x20
+ * Y 0x30
+ *
+ * Transaction:
+ *
+ * - Slave X driver sends a transaction (on adapter B), slave address 0x10
+ * - ATR driver rewrites messages with address 0x20, forwards to adapter A
+ * - Physical I2C transaction on bus A, slave address 0x20
+ * - ATR chip propagates transaction on bus B with address translated to 0x10
+ * - Slave X chip replies on bus B
+ * - ATR chip forwards reply on bus A
+ * - ATR driver rewrites messages with address 0x10
+ * - Slave X driver gets back the msgs[], with reply and address 0x10
+ *
+ * Usage:
+ *
+ * 1. In your driver (typically in the probe function) add an ATR by
+ * calling i2c_atr_new() passing your attach/detach callbacks
+ * 2. When the attach callback is called pick an appropriate alias,
+ * configure it in your chip and return the chosen alias in the
+ * alias_id parameter
+ * 3. When the detach callback is called, deconfigure the alias from
+ * your chip and put it back in the pool for later usage
+ *
+ * Originally based on i2c-mux.c
+ */
+
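+/*
+ * Illustrative sketch of an attach callback, assuming a driver-managed
+ * alias pool: my_alias_pool_get() and my_chip_set_translation() are
+ * hypothetical helpers; only the callback signature below follows the
+ * attach_client usage in this file.
+ *
+ *	static int my_attach_client(struct i2c_atr *atr, u32 chan_id,
+ *				    const struct i2c_board_info *info,
+ *				    const struct i2c_client *client,
+ *				    u16 *alias_id)
+ *	{
+ *		u16 alias = my_alias_pool_get(chan_id);
+ *
+ *		if (!alias)
+ *			return -ENOSPC;
+ *		my_chip_set_translation(chan_id, client->addr, alias);
+ *		*alias_id = alias;
+ *		return 0;
+ *	}
+ */
+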
+#include <linux/i2c.h>
+#include <linux/i2c-atr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/**
+ * struct i2c_atr_cli2alias_pair - Hold the alias assigned to a client.
+ * @node: List node
+ * @client: Pointer to the client on the child bus
+ * @alias: I2C alias address assigned by the driver.
+ * This is the address that will be used to issue I2C transactions
+ * on the parent (physical) bus.
+ */
+struct i2c_atr_cli2alias_pair {
+ struct list_head node;
+ const struct i2c_client *client;
+ u16 alias;
+};
+
+/*
+ * Data for each channel (child bus)
+ */
+struct i2c_atr_chan {
+ struct i2c_adapter adap;
+ struct i2c_atr *atr;
+ u32 chan_id;
+
+ struct list_head alias_list;
+
+ u16 *orig_addrs;
+ unsigned int orig_addrs_size;
+ struct mutex orig_addrs_lock; /* Lock orig_addrs during xfer */
+};
+
+static struct i2c_atr_cli2alias_pair *
+i2c_atr_find_mapping_by_client(struct list_head *list,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_cli2alias_pair *c2a;
+
+ list_for_each_entry(c2a, list, node) {
+ if (c2a->client == client)
+ return c2a;
+ }
+
+ return NULL;
+}
+
+static struct i2c_atr_cli2alias_pair *
+i2c_atr_find_mapping_by_addr(struct list_head *list,
+ u16 phys_addr)
+{
+ struct i2c_atr_cli2alias_pair *c2a;
+
+ list_for_each_entry(c2a, list, node) {
+ if (c2a->client->addr == phys_addr)
+ return c2a;
+ }
+
+ return NULL;
+}
+
+/*
+ * Replace all message addresses with their aliases, saving the original
+ * addresses.
+ *
+ * This function is internal for use in i2c_atr_master_xfer(). It must be
+ * followed by i2c_atr_unmap_msgs() to restore the original addresses.
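+ *
+ * Using the alias table from the example in the header: a message
+ * addressed to client X at 0x10 on a child bus leaves this function
+ * with msgs[i].addr == 0x20; i2c_atr_unmap_msgs() later restores 0x10.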
+ */
+static int i2c_atr_map_msgs(struct i2c_atr_chan *chan,
+ struct i2c_msg msgs[], int num)
+
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_cli2alias_pair *c2a;
+ int i;
+
+ /* Ensure we have enough room to save the original addresses */
+ if (unlikely(chan->orig_addrs_size < num)) {
+ void *new_buf = kmalloc_array(num, sizeof(chan->orig_addrs[0]),
+ GFP_KERNEL);
+ if (!new_buf)
+ return -ENOMEM;
+
+ kfree(chan->orig_addrs);
+ chan->orig_addrs = new_buf;
+ chan->orig_addrs_size = num;
+ }
+
+ for (i = 0; i < num; i++) {
+ chan->orig_addrs[i] = msgs[i].addr;
+
+ c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
+ msgs[i].addr);
+ if (c2a) {
+ msgs[i].addr = c2a->alias;
+ } else {
+ dev_err(atr->dev, "client 0x%02x not mapped!\n",
+ msgs[i].addr);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Restore all message address aliases with the original addresses.
+ *
+ * This function is internal for use in i2c_atr_master_xfer().
+ *
+ * @see i2c_atr_map_msgs()
+ */
+static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan,
+ struct i2c_msg msgs[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ msgs[i].addr = chan->orig_addrs[i];
+}
+
+static int i2c_atr_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_adapter *parent = atr->parent;
+ int ret = 0;
+
+ /* Switch to the right atr port */
+ if (atr->ops->select) {
+ ret = atr->ops->select(atr, chan->chan_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Translate addresses */
+ mutex_lock(&chan->orig_addrs_lock);
+ ret = i2c_atr_map_msgs(chan, msgs, num);
+ if (ret < 0) {
+ mutex_unlock(&chan->orig_addrs_lock);
+ goto out;
+ }
+
+ /* Perform the transfer */
+ ret = i2c_transfer(parent, msgs, num);
+
+ /* Restore addresses */
+ i2c_atr_unmap_msgs(chan, msgs, num);
+ mutex_unlock(&chan->orig_addrs_lock);
+
+out:
+ if (atr->ops->deselect)
+ atr->ops->deselect(atr, chan->chan_id);
+
+ return ret;
+}
+
+static int i2c_atr_smbus_xfer(struct i2c_adapter *adap,
+ u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_adapter *parent = atr->parent;
+ struct i2c_atr_cli2alias_pair *c2a;
+ int err = 0;
+
+ c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
+ if (!c2a) {
+ dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ return -ENXIO;
+ }
+
+ if (atr->ops->select)
+ err = atr->ops->select(atr, chan->chan_id);
+ if (!err)
+ err = i2c_smbus_xfer(parent, c2a->alias, flags,
+ read_write, command, size, data);
+ if (atr->ops->deselect)
+ atr->ops->deselect(atr, chan->chan_id);
+
+ return err;
+}
+
+static u32 i2c_atr_functionality(struct i2c_adapter *adap)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_adapter *parent = chan->atr->parent;
+
+ return parent->algo->functionality(parent);
+}
+
+static void i2c_atr_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ mutex_lock(&atr->lock);
+}
+
+static int i2c_atr_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ return mutex_trylock(&atr->lock);
+}
+
+static void i2c_atr_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ mutex_unlock(&atr->lock);
+}
+
+static const struct i2c_lock_operations i2c_atr_lock_ops = {
+ .lock_bus = i2c_atr_lock_bus,
+ .trylock_bus = i2c_atr_trylock_bus,
+ .unlock_bus = i2c_atr_unlock_bus,
+};
+
+static int i2c_atr_attach_client(struct i2c_adapter *adapter,
+ const struct i2c_board_info *info,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_cli2alias_pair *c2a;
+ u16 alias_id = 0;
+ int err = 0;
+
+ c2a = kzalloc(sizeof(struct i2c_atr_cli2alias_pair), GFP_KERNEL);
+ if (!c2a) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ err = atr->ops->attach_client(atr, chan->chan_id, info, client,
+ &alias_id);
+ if (err)
+ goto err_attach;
+ if (alias_id == 0) {
+ err = -EINVAL;
+ goto err_attach;
+ }
+
+ c2a->client = client;
+ c2a->alias = alias_id;
+ list_add(&c2a->node, &chan->alias_list);
+
+ return 0;
+
+err_attach:
+ kfree(c2a);
+err_alloc:
+ return err;
+}
+
+static void i2c_atr_detach_client(struct i2c_adapter *adapter,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_cli2alias_pair *c2a;
+
+ atr->ops->detach_client(atr, chan->chan_id, client);
+
+ c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ if (c2a != NULL) {
+ list_del(&c2a->node);
+ kfree(c2a);
+ }
+}
+
+static const struct i2c_attach_operations i2c_atr_attach_ops = {
+ .attach_client = i2c_atr_attach_client,
+ .detach_client = i2c_atr_detach_client,
+};
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ *
+ * After calling this function, a new I2C bus will appear. Adding and
+ * removing devices on the downstream bus will result in calls to the
+ * `attach_client` and `detach_client` callbacks for the driver to assign
+ * an alias to the device.
+ *
+ * If there is a device tree node under "i2c-atr" whose "reg" property
+ * equals chan_id, the new adapter will receive that node and perhaps start
+ * adding devices under it. The callbacks for those additions will be made
+ * before i2c_atr_add_adapter() returns.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id)
+{
+ struct i2c_adapter *parent = atr->parent;
+ struct device *dev = atr->dev;
+ struct i2c_atr_chan *chan;
+ char symlink_name[20];
+ int err;
+
+ if (chan_id >= atr->max_adapters)
+ return -EINVAL;
+
+ if (atr->adapter[chan_id]) {
+ dev_err(dev, "Adapter %d already present\n", chan_id);
+ return -EEXIST;
+ }
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->atr = atr;
+ chan->chan_id = chan_id;
+ INIT_LIST_HEAD(&chan->alias_list);
+ mutex_init(&chan->orig_addrs_lock);
+
+ snprintf(chan->adap.name, sizeof(chan->adap.name),
+ "i2c-%d-atr-%d", i2c_adapter_id(parent), chan_id);
+ chan->adap.owner = THIS_MODULE;
+ chan->adap.algo = &atr->algo;
+ chan->adap.algo_data = chan;
+ chan->adap.dev.parent = dev;
+ chan->adap.retries = parent->retries;
+ chan->adap.timeout = parent->timeout;
+ chan->adap.quirks = parent->quirks;
+ chan->adap.lock_ops = &i2c_atr_lock_ops;
+ chan->adap.attach_ops = &i2c_atr_attach_ops;
+
+ if (dev->of_node) {
+ struct device_node *atr_node;
+ struct device_node *child;
+ u32 reg;
+
+ atr_node = of_get_child_by_name(dev->of_node, "i2c-atr");
+
+ for_each_child_of_node(atr_node, child) {
+ err = of_property_read_u32(child, "reg", &reg);
+ if (err)
+ continue;
+ if (chan_id == reg)
+ break;
+ }
+
+ chan->adap.dev.of_node = child;
+ of_node_put(atr_node);
+ }
+
+ err = i2c_add_adapter(&chan->adap);
+ if (err) {
+ dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
+ chan_id, err);
+ goto err_add_adapter;
+ }
+
+ WARN(sysfs_create_link(&chan->adap.dev.kobj, &dev->kobj, "atr_device"),
+ "can't create symlink to atr device\n");
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan_id);
+ WARN(sysfs_create_link(&dev->kobj, &chan->adap.dev.kobj, symlink_name),
+ "can't create symlink for channel %u\n", chan_id);
+
+ dev_dbg(dev, "Added ATR child bus %d\n", i2c_adapter_id(&chan->adap));
+
+ atr->adapter[chan_id] = &chan->adap;
+ return 0;
+
+err_add_adapter:
+ mutex_destroy(&chan->orig_addrs_lock);
+ kfree(chan);
+ return err;
+}
+EXPORT_SYMBOL_GPL(i2c_atr_add_adapter);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter().
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
+{
+	struct i2c_adapter *adap = atr->adapter[chan_id];
+	struct device *dev = atr->dev;
+	struct i2c_atr_chan *chan;
+	struct device_node *np;
+	char symlink_name[20];
+
+	if (adap == NULL) {
+		dev_err(dev, "Adapter %d does not exist\n", chan_id);
+		return;
+	}
+
+	chan = adap->algo_data;
+	np = adap->dev.of_node;
+
+ dev_dbg(dev, "Removing ATR child bus %d\n", i2c_adapter_id(adap));
+
+ atr->adapter[chan_id] = NULL;
+
+ snprintf(symlink_name, sizeof(symlink_name),
+ "channel-%u", chan->chan_id);
+ sysfs_remove_link(&dev->kobj, symlink_name);
+ sysfs_remove_link(&chan->adap.dev.kobj, "atr_device");
+
+ i2c_del_adapter(adap);
+ of_node_put(np);
+ mutex_destroy(&chan->orig_addrs_lock);
+ kfree(chan->orig_addrs);
+ kfree(chan);
+}
+EXPORT_SYMBOL_GPL(i2c_atr_del_adapter);
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or an ERR_PTR() on failure
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters)
+{
+ struct i2c_atr *atr;
+
+ if (!ops || !ops->attach_client || !ops->detach_client)
+ return ERR_PTR(-EINVAL);
+
+ atr = devm_kzalloc(dev, sizeof(*atr)
+ + max_adapters * sizeof(atr->adapter[0]),
+ GFP_KERNEL);
+ if (!atr)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&atr->lock);
+
+ atr->parent = parent;
+ atr->dev = dev;
+ atr->ops = ops;
+ atr->max_adapters = max_adapters;
+
+ if (parent->algo->master_xfer)
+ atr->algo.master_xfer = i2c_atr_master_xfer;
+ if (parent->algo->smbus_xfer)
+ atr->algo.smbus_xfer = i2c_atr_smbus_xfer;
+ atr->algo.functionality = i2c_atr_functionality;
+
+ return atr;
+}
+EXPORT_SYMBOL_GPL(i2c_atr_new);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr)
+{
+ mutex_destroy(&atr->lock);
+}
+EXPORT_SYMBOL_GPL(i2c_atr_delete);
+
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_DESCRIPTION("I2C Address Translator");
+MODULE_LICENSE("GPL v2");
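
For context on how the exports above fit together, here is a minimal usage sketch from the point of view of a hypothetical translator driver. Everything named my_* or MY_* is an assumption for illustration, not part of this patch; only i2c_atr_new(), i2c_atr_add_adapter() and the i2c_atr_ops callback signatures come from the code above.

	/* Hypothetical consumer of the ATR helper (sketch only) */
	static int my_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
					const struct i2c_board_info *info,
					const struct i2c_client *client,
					u16 *alias_id)
	{
		/*
		 * Program the translation in hardware, then report the
		 * chosen alias; a placeholder allocation policy is used.
		 */
		*alias_id = 0x40 + chan_id;
		return 0;
	}

	static void my_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
					 const struct i2c_client *client)
	{
		/* Undo the hardware mapping programmed in attach */
	}

	static const struct i2c_atr_ops my_atr_ops = {
		.attach_client = my_atr_attach_client,
		.detach_client = my_atr_detach_client,
		/* .select/.deselect are optional, for muxed translators */
	};

	/* In the owning driver's probe(), with "parent" the upstream bus: */
	atr = i2c_atr_new(parent, dev, &my_atr_ops, MY_NUM_CHANNELS);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	for (i = 0; i < MY_NUM_CHANNELS; i++) {
		err = i2c_atr_add_adapter(atr, i);
		if (err)
			goto err_remove_adapters; /* del + i2c_atr_delete() */
	}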
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 34fecf97a355..3af60892406c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -921,15 +921,23 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
}
}
+ if (adap->attach_ops &&
+ adap->attach_ops->attach_client &&
+ adap->attach_ops->attach_client(adap, info, client) != 0)
+ goto out_free_props;
+
status = device_register(&client->dev);
if (status)
- goto out_free_props;
+ goto out_detach_client;
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
return client;
+out_detach_client:
+ if (adap->attach_ops && adap->attach_ops->detach_client)
+ adap->attach_ops->detach_client(adap, client);
out_free_props:
if (info->properties)
device_remove_properties(&client->dev);
@@ -952,9 +960,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ struct i2c_adapter *adap;
+
if (IS_ERR_OR_NULL(client))
return;
+ adap = client->adapter;
+
+ if (adap->attach_ops &&
+ adap->attach_ops->detach_client)
+ adap->attach_ops->detach_client(adap, client);
+
if (client->dev.of_node) {
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
of_node_put(client->dev.of_node);
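
The ordering contract introduced by this hunk: attach_client runs before device_register() and may veto the new client; detach_client runs if registration fails, or later on i2c_unregister_device(). A minimal sketch of an adapter-side implementation, assuming a hypothetical driver — only struct i2c_attach_operations and the two callback signatures come from this series:

	static int my_attach_client(struct i2c_adapter *adap,
				    const struct i2c_board_info *info,
				    const struct i2c_client *client)
	{
		dev_dbg(&adap->dev, "attach 0x%02x\n", client->addr);
		return 0;	/* non-zero aborts i2c_new_client_device() */
	}

	static void my_detach_client(struct i2c_adapter *adap,
				     const struct i2c_client *client)
	{
		dev_dbg(&adap->dev, "detach 0x%02x\n", client->addr);
	}

	static const struct i2c_attach_operations my_attach_ops = {
		.attach_client = my_attach_client,
		.detach_client = my_detach_client,
	};

	/* Before i2c_add_adapter(): adap->attach_ops = &my_attach_ops; */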
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 362e8a01980c..1a7b5bb3d0dd 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -458,6 +458,16 @@ config INPUT_TPS65218_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called tps65218-pwrbutton.
+config INPUT_TPS65219_PWRBUTTON
+ tristate "TPS65219 Power button driver"
+ depends on MFD_TPS65219
+ help
+ Say Y here if you want to enable power button reporting for
+ TPS65219 Power Management IC devices.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tps65219-pwrbutton.
+
config INPUT_AXP20X_PEK
tristate "X-Powers AXP20X power button driver"
depends on MFD_AXP20X
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a48e5f2d859d..b9ff86b5445a 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
obj-$(CONFIG_INPUT_TPS65218_PWRBUTTON) += tps65218-pwrbutton.o
+obj-$(CONFIG_INPUT_TPS65219_PWRBUTTON) += tps65219-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
diff --git a/drivers/input/misc/tps65219-pwrbutton.c b/drivers/input/misc/tps65219-pwrbutton.c
new file mode 100644
index 000000000000..c3285424c0f7
--- /dev/null
+++ b/drivers/input/misc/tps65219-pwrbutton.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for TPS65219 Push Button
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps65219.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct tps65219_pwrbutton {
+ struct device *dev;
+ struct input_dev *idev;
+ char phys[32];
+};
+
+static irqreturn_t tps65219_pb_push_irq(int irq, void *_pwr)
+{
+ struct tps65219_pwrbutton *pwr = _pwr;
+
+ input_report_key(pwr->idev, KEY_POWER, 1);
+ pm_wakeup_event(pwr->dev, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tps65219_pb_release_irq(int irq, void *_pwr)
+{
+ struct tps65219_pwrbutton *pwr = _pwr;
+
+ input_report_key(pwr->idev, KEY_POWER, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int tps65219_pb_probe(struct platform_device *pdev)
+{
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct tps65219_pwrbutton *pwr;
+ struct input_dev *idev;
+ int error;
+ int push_irq;
+ int release_irq;
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = pdev->name;
+ snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
+ pdev->name);
+ idev->phys = pwr->phys;
+ idev->dev.parent = dev;
+ idev->id.bustype = BUS_I2C;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ pwr->dev = dev;
+ pwr->idev = idev;
+ device_init_wakeup(dev, true);
+
+ push_irq = platform_get_irq(pdev, 0);
+ if (push_irq < 0)
+ return -EINVAL;
+
+ release_irq = platform_get_irq(pdev, 1);
+ if (release_irq < 0)
+ return -EINVAL;
+
+ error = devm_request_threaded_irq(dev, push_irq, NULL,
+ tps65219_pb_push_irq,
+ IRQF_ONESHOT,
+ dev->init_name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request push IRQ #%d: %d\n", push_irq,
+ error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, release_irq, NULL,
+ tps65219_pb_release_irq,
+ IRQF_ONESHOT,
+ dev->init_name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request release IRQ #%d: %d\n",
+ release_irq, error);
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Can't register power button: %d\n", error);
+ return error;
+ }
+
+ /* Enable interrupts for the pushbutton */
+ regmap_clear_bits(tps->regmap, TPS65219_REG_MASK_CONFIG,
+ TPS65219_REG_MASK_INT_FOR_PB_MASK);
+
+ /* Set PB/EN/VSENSE pin to be a pushbutton */
+ regmap_update_bits(tps->regmap, TPS65219_REG_MFP_2_CONFIG,
+ TPS65219_MFP_2_EN_PB_VSENSE_MASK, TPS65219_MFP_2_PB);
+
+ return 0;
+}
+
+static int tps65219_pb_remove(struct platform_device *pdev)
+{
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+
+ /* Disable interrupt for the pushbutton */
+ return regmap_update_bits(tps->regmap, TPS65219_REG_MASK_CONFIG,
+ TPS65219_REG_MASK_INT_FOR_PB_MASK,
+ TPS65219_REG_MASK_INT_FOR_PB_MASK);
+}
+
+static const struct platform_device_id tps65219_pwrbtn_id_table[] = {
+ { "tps65219-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65219_pwrbtn_id_table);
+
+static struct platform_driver tps65219_pb_driver = {
+ .probe = tps65219_pb_probe,
+ .remove = tps65219_pb_remove,
+ .driver = {
+ .name = "tps65219_pwrbutton",
+ },
+ .id_table = tps65219_pwrbtn_id_table,
+};
+module_platform_driver(tps65219_pb_driver);
+
+MODULE_DESCRIPTION("TPS65219 Power Button");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cc18f54ea887..088a1dd41095 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -418,6 +418,7 @@ config TOUCHSCREEN_HIDEEP
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
+ select CRC_CCITT
help
Say Y here if you have a ILI210X based touchscreen
controller. This driver supports models ILI2102,
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b7f87ad4b9a9..5f8962719b98 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -677,31 +677,30 @@ static int goodix_reset(struct goodix_ts_data *ts)
{
int error;
- /* begin select I2C slave addr */
- error = gpiod_direction_output(ts->gpiod_rst, 0);
- if (error)
- return error;
-
msleep(20); /* T2: > 10ms */
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
+#ifdef ACPI_GPIO_SUPPORT
error = goodix_irq_direction_output(ts, ts->client->addr == 0x14);
if (error)
return error;
+#else
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_ACPI_GPIO)
+ /*
+		 * The IRQ pin triggers on a falling edge, so it gets marked
+		 * as active-low; manually invert the value.
+ */
+ gpiod_set_value_cansleep(ts->gpiod_int, ts->client->addr != 0x14);
+ else
+ gpiod_set_value_cansleep(ts->gpiod_int, ts->client->addr == 0x14);
+#endif
usleep_range(100, 2000); /* T3: > 100us */
- error = gpiod_direction_output(ts->gpiod_rst, 1);
- if (error)
- return error;
+ gpiod_set_value_cansleep(ts->gpiod_rst, 1);
usleep_range(6000, 10000); /* T4: > 5ms */
- /* end select I2C slave addr */
- error = gpiod_direction_input(ts->gpiod_rst);
- if (error)
- return error;
-
error = goodix_int_sync(ts);
if (error)
return error;
@@ -863,7 +862,8 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
retry_get_irq_gpio:
/* Get the interrupt GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME,
+ GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
@@ -880,7 +880,8 @@ retry_get_irq_gpio:
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME,
+ GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
@@ -1381,6 +1382,13 @@ static SIMPLE_DEV_PM_OPS(goodix_pm_ops, goodix_suspend, goodix_resume);
static const struct i2c_device_id goodix_ts_id[] = {
{ "GDIX1001:00", 0 },
+ { "gt911", 0 },
+ { "gt9110", 0 },
+ { "gt912", 0 },
+ { "gt927", 0 },
+ { "gt9271", 0 },
+ { "gt928", 0 },
+ { "gt967", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index f437eefec94a..e9bd36adbe47 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/crc-ccitt.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/ihex.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
@@ -12,7 +14,7 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
-#define ILI2XXX_POLL_PERIOD 20
+#define ILI2XXX_POLL_PERIOD 15
#define ILI210X_DATA_SIZE 64
#define ILI211X_DATA_SIZE 43
@@ -22,8 +24,23 @@
/* Touchscreen commands */
#define REG_TOUCHDATA 0x10
#define REG_PANEL_INFO 0x20
+#define REG_FIRMWARE_VERSION 0x40
+#define REG_PROTOCOL_VERSION 0x42
+#define REG_KERNEL_VERSION 0x61
+#define REG_IC_BUSY 0x80
+#define REG_IC_BUSY_NOT_BUSY 0x50
+#define REG_GET_MODE 0xc0
+#define REG_GET_MODE_AP 0x5a
+#define REG_GET_MODE_BL 0x55
+#define REG_SET_MODE_AP 0xc1
+#define REG_SET_MODE_BL 0xc2
+#define REG_WRITE_DATA 0xc3
+#define REG_WRITE_ENABLE 0xc4
+#define REG_READ_DATA_CRC 0xc7
#define REG_CALIBRATE 0xcc
+#define ILI251X_FW_FILENAME "ilitek/ili251x.bin"
+
struct ili2xxx_chip {
int (*read_reg)(struct i2c_client *client, u8 reg,
void *buf, size_t len);
@@ -35,6 +52,7 @@ struct ili2xxx_chip {
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_firmware_proto;
bool has_pressure_reg;
};
@@ -44,6 +62,10 @@ struct ili210x {
struct gpio_desc *reset_gpio;
struct touchscreen_properties prop;
const struct ili2xxx_chip *chip;
+ u8 version_firmware[8];
+ u8 version_kernel[5];
+ u8 version_proto[2];
+ u8 ic_mode[2];
bool stop;
};
@@ -202,15 +224,17 @@ static const struct ili2xxx_chip ili212x_chip = {
.has_calibrate_reg = true,
};
-static int ili251x_read_reg(struct i2c_client *client,
- u8 reg, void *buf, size_t len)
+static int ili251x_read_reg_common(struct i2c_client *client,
+ u8 reg, void *buf, size_t len,
+ unsigned int delay)
{
int error;
int ret;
ret = i2c_master_send(client, &reg, 1);
if (ret == 1) {
- usleep_range(5000, 5500);
+ if (delay)
+ usleep_range(delay, delay + 500);
ret = i2c_master_recv(client, buf, len);
if (ret == len)
@@ -222,12 +246,18 @@ static int ili251x_read_reg(struct i2c_client *client,
return ret;
}
+static int ili251x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
+{
+ return ili251x_read_reg_common(client, reg, buf, len, 5000);
+}
+
static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
{
int error;
- error = ili251x_read_reg(client, REG_TOUCHDATA,
- data, ILI251X_DATA_SIZE1);
+ error = ili251x_read_reg_common(client, REG_TOUCHDATA,
+ data, ILI251X_DATA_SIZE1, 0);
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
@@ -268,6 +298,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_firmware_proto = true,
.has_pressure_reg = true,
};
@@ -303,10 +334,13 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data)
const struct ili2xxx_chip *chip = priv->chip;
u8 touchdata[ILI210X_DATA_SIZE] = { 0 };
bool keep_polling;
+ ktime_t time_next;
+ s64 time_delta;
bool touch;
int error;
do {
+ time_next = ktime_add_ms(ktime_get(), ILI2XXX_POLL_PERIOD);
error = chip->get_touch_data(client, touchdata);
if (error) {
dev_err(&client->dev,
@@ -316,13 +350,201 @@ static irqreturn_t ili210x_irq(int irq, void *irq_data)
touch = ili210x_report_events(priv, touchdata);
keep_polling = chip->continue_polling(touchdata, touch);
- if (keep_polling)
- msleep(ILI2XXX_POLL_PERIOD);
+ if (keep_polling) {
+ time_delta = ktime_us_delta(time_next, ktime_get());
+ if (time_delta > 0)
+ usleep_range(time_delta, time_delta + 1000);
+ }
} while (!priv->stop && keep_polling);
return IRQ_HANDLED;
}
+static int ili251x_firmware_update_resolution(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u16 resx, resy;
+ u8 rs[10];
+ int error;
+
+ /* The firmware update blob might have changed the resolution. */
+ error = priv->chip->read_reg(client, REG_PANEL_INFO, &rs, sizeof(rs));
+ if (error)
+ return error;
+
+ resx = le16_to_cpup((__le16 *)rs);
+ resy = le16_to_cpup((__le16 *)(rs + 2));
+
+ /* The value reported by the firmware is invalid. */
+ if (!resx || resx == 0xffff || !resy || resy == 0xffff)
+ return -EINVAL;
+
+ input_abs_set_max(priv->input, ABS_X, resx - 1);
+ input_abs_set_max(priv->input, ABS_Y, resy - 1);
+ input_abs_set_max(priv->input, ABS_MT_POSITION_X, resx - 1);
+ input_abs_set_max(priv->input, ABS_MT_POSITION_Y, resy - 1);
+
+ return 0;
+}
+
+static ssize_t ili251x_firmware_update_firmware_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 fw[8];
+
+ /* Get firmware version */
+ error = priv->chip->read_reg(client, REG_FIRMWARE_VERSION,
+ &fw, sizeof(fw));
+ if (!error)
+ memcpy(priv->version_firmware, fw, sizeof(fw));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_kernel_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 kv[5];
+
+ /* Get kernel version */
+ error = priv->chip->read_reg(client, REG_KERNEL_VERSION,
+ &kv, sizeof(kv));
+ if (!error)
+ memcpy(priv->version_kernel, kv, sizeof(kv));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_protocol_version(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 pv[2];
+
+ /* Get protocol version */
+ error = priv->chip->read_reg(client, REG_PROTOCOL_VERSION,
+ &pv, sizeof(pv));
+ if (!error)
+ memcpy(priv->version_proto, pv, sizeof(pv));
+
+ return error;
+}
+
+static ssize_t ili251x_firmware_update_ic_mode(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+ u8 md[2];
+
+ /* Get chip boot mode */
+ error = priv->chip->read_reg(client, REG_GET_MODE, &md, sizeof(md));
+ if (!error)
+ memcpy(priv->ic_mode, md, sizeof(md));
+
+ return error;
+}
+
+static int ili251x_firmware_update_cached_state(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error;
+
+ if (!priv->chip->has_firmware_proto)
+ return 0;
+
+ /* Wait for firmware to boot and stabilize itself. */
+ msleep(200);
+
+	/* The firmware should now report valid information. */
+ error = ili251x_firmware_update_resolution(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_firmware_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_kernel_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_protocol_version(dev);
+ if (error)
+ return error;
+
+ error = ili251x_firmware_update_ic_mode(dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static ssize_t ili251x_firmware_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *fw = priv->version_firmware;
+
+ return sysfs_emit(buf, "%02x%02x.%02x%02x.%02x%02x.%02x%02x\n",
+ fw[0], fw[1], fw[2], fw[3],
+ fw[4], fw[5], fw[6], fw[7]);
+}
+static DEVICE_ATTR(firmware_version, 0444, ili251x_firmware_version_show, NULL);
+
+static ssize_t ili251x_kernel_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *kv = priv->version_kernel;
+
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x.%02x\n",
+ kv[0], kv[1], kv[2], kv[3], kv[4]);
+}
+static DEVICE_ATTR(kernel_version, 0444, ili251x_kernel_version_show, NULL);
+
+static ssize_t ili251x_protocol_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *pv = priv->version_proto;
+
+ return sysfs_emit(buf, "%02x.%02x\n", pv[0], pv[1]);
+}
+static DEVICE_ATTR(protocol_version, 0444, ili251x_protocol_version_show, NULL);
+
+static ssize_t ili251x_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 *md = priv->ic_mode;
+	const char *mode;
+
+ if (md[0] == REG_GET_MODE_AP) /* Application Mode */
+ mode = "AP";
+ else if (md[0] == REG_GET_MODE_BL) /* BootLoader Mode */
+ mode = "BL";
+ else /* Unknown Mode */
+ mode = "??";
+
+ return sysfs_emit(buf, "%02x.%02x:%s\n", md[0], md[1], mode);
+}
+static DEVICE_ATTR(mode, 0444, ili251x_mode_show, NULL);
+
static ssize_t ili210x_calibrate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -349,24 +571,331 @@ static ssize_t ili210x_calibrate(struct device *dev,
}
static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate);
+static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ u8 **buf, u16 *ac_end, u16 *df_end)
+{
+ const struct ihex_binrec *rec;
+	u32 fw_addr = 0, fw_last_addr = 0;
+	u16 fw_len = 0;
+ u8 *fw_buf;
+ int error;
+
+ /*
+ * The firmware ihex blob can never be bigger than 64 kiB, so make this
+ * simple -- allocate a 64 kiB buffer, iterate over the ihex blob records
+ * once, copy them all into this buffer at the right locations, and then
+ * do all operations on this linear buffer.
+ */
+ fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
+ if (!fw_buf)
+ return -ENOMEM;
+
+ rec = (const struct ihex_binrec *)fw->data;
+ while (rec) {
+ fw_addr = be32_to_cpu(rec->addr);
+ fw_len = be16_to_cpu(rec->len);
+
+		/* The last 32-byte firmware block can start at 0xffe0 */
+ if (fw_addr + fw_len > SZ_64K || fw_addr > SZ_64K - 32) {
+ error = -EFBIG;
+ goto err_big;
+ }
+
+ /* Find the last address before DF start address, that is AC end */
+ if (fw_addr == 0xf000)
+ *ac_end = fw_last_addr;
+ fw_last_addr = fw_addr + fw_len;
+
+ memcpy(fw_buf + fw_addr, rec->data, fw_len);
+ rec = ihex_next_binrec(rec);
+ }
+
+ /* DF end address is the last address in the firmware blob */
+ *df_end = fw_addr + fw_len;
+ *buf = fw_buf;
+ return 0;
+
+err_big:
+ kfree(fw_buf);
+ return error;
+}
+
+/* Switch mode between Application and BootLoader */
+static int ili251x_switch_ic_mode(struct i2c_client *client, u8 cmd_mode)
+{
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 cmd_wren[3] = { REG_WRITE_ENABLE, 0x5a, 0xa5 };
+ u8 md[2];
+ int error;
+
+ error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md));
+ if (error)
+ return error;
+ /* Mode already set */
+ if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) ||
+ (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL))
+ return 0;
+
+ /* Unlock writes */
+ error = i2c_master_send(client, cmd_wren, sizeof(cmd_wren));
+ if (error != sizeof(cmd_wren))
+ return -EINVAL;
+
+ mdelay(20);
+
+ /* Select mode (BootLoader or Application) */
+ error = i2c_master_send(client, &cmd_mode, 1);
+ if (error != 1)
+ return -EINVAL;
+
+ mdelay(200); /* Reboot into bootloader takes a lot of time ... */
+
+ /* Read back mode */
+ error = priv->chip->read_reg(client, REG_GET_MODE, md, sizeof(md));
+ if (error)
+ return error;
+ /* Check if mode is correct now. */
+ if ((cmd_mode == REG_SET_MODE_AP && md[0] == REG_GET_MODE_AP) ||
+ (cmd_mode == REG_SET_MODE_BL && md[0] == REG_GET_MODE_BL))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int ili251x_firmware_busy(struct i2c_client *client)
+{
+ struct ili210x *priv = i2c_get_clientdata(client);
+ int error, i = 0;
+ u8 data;
+
+ do {
+		/* The read_reg callback already includes a suitable delay */
+ error = priv->chip->read_reg(client, REG_IC_BUSY, &data, 1);
+ if (error)
+ return error;
+ if (i++ == 100000)
+ return -ETIMEDOUT;
+ } while (data != REG_IC_BUSY_NOT_BUSY);
+
+ return 0;
+}
+
+static int ili251x_firmware_write_to_ic(struct device *dev, u8 *fwbuf,
+ u16 start, u16 end, u8 dataflash)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ u8 cmd_crc = REG_READ_DATA_CRC;
+ u8 crcrb[4] = { 0 };
+ u8 fw_data[33];
+ u16 fw_addr;
+ int error;
+
+ /*
+ * The DF (dataflash) needs 2 bytes offset for unknown reasons,
+ * the AC (application) has 2 bytes CRC16-CCITT at the end.
+ */
+ u16 crc = crc_ccitt(0, fwbuf + start + (dataflash ? 2 : 0),
+ end - start - 2);
+
+ /* Unlock write to either AC (application) or DF (dataflash) area */
+ u8 cmd_wr[10] = {
+ REG_WRITE_ENABLE, 0x5a, 0xa5, dataflash,
+ (end >> 16) & 0xff, (end >> 8) & 0xff, end & 0xff,
+ (crc >> 16) & 0xff, (crc >> 8) & 0xff, crc & 0xff
+ };
+
+ error = i2c_master_send(client, cmd_wr, sizeof(cmd_wr));
+ if (error != sizeof(cmd_wr))
+ return -EINVAL;
+
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+
+ for (fw_addr = start; fw_addr < end; fw_addr += 32) {
+ fw_data[0] = REG_WRITE_DATA;
+ memcpy(&(fw_data[1]), fwbuf + fw_addr, 32);
+ error = i2c_master_send(client, fw_data, 33);
+ if (error != sizeof(fw_data))
+			return error < 0 ? error : -EINVAL;
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+ }
+
+ error = i2c_master_send(client, &cmd_crc, 1);
+ if (error != 1)
+ return -EINVAL;
+
+ error = ili251x_firmware_busy(client);
+ if (error)
+ return error;
+
+ error = priv->chip->read_reg(client, REG_READ_DATA_CRC,
+ &crcrb, sizeof(crcrb));
+ if (error)
+ return error;
+
+ /* Check CRC readback */
+ if ((crcrb[0] != (crc & 0xff)) || crcrb[1] != ((crc >> 8) & 0xff))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ili251x_firmware_reset(struct i2c_client *client)
+{
+ u8 cmd_reset[2] = { 0xf2, 0x01 };
+ int error;
+
+ error = i2c_master_send(client, cmd_reset, sizeof(cmd_reset));
+ if (error != sizeof(cmd_reset))
+ return -EINVAL;
+
+ return ili251x_firmware_busy(client);
+}
+
+static void ili210x_hardware_reset(struct gpio_desc *reset_gpio)
+{
+ /* Reset the controller */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(12000, 15000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(300);
+}
+
+static ssize_t ili210x_firmware_update_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+ const char *fwname = ILI251X_FW_FILENAME;
+ const struct firmware *fw;
+ u16 ac_end, df_end;
+ u8 *fwbuf;
+ int error;
+ int i;
+
+ error = request_ihex_firmware(&fw, fwname, dev);
+ if (error) {
+ dev_err(dev, "Failed to request firmware %s, error=%d\n",
+ fwname, error);
+ return error;
+ }
+
+ error = ili251x_firmware_to_buffer(fw, &fwbuf, &ac_end, &df_end);
+ release_firmware(fw);
+ if (error)
+ return error;
+
+ /*
+	 * Disable the touchscreen IRQ so that no spurious touch interrupt
+	 * fires during the firmware update and the IRQ handler cannot
+	 * trigger and interfere with it. The touch controller has no bit
+	 * to mask its IRQs during an update, so it has to be done on the
+	 * host side.
+ */
+ disable_irq(client->irq);
+
+ dev_dbg(dev, "Firmware update started, firmware=%s\n", fwname);
+
+ ili210x_hardware_reset(priv->reset_gpio);
+
+ error = ili251x_firmware_reset(client);
+ if (error)
+ goto exit;
+
+ /* This may not succeed on first try, so re-try a few times. */
+ for (i = 0; i < 5; i++) {
+ error = ili251x_switch_ic_mode(client, REG_SET_MODE_BL);
+ if (!error)
+ break;
+ }
+
+ if (error)
+ goto exit;
+
+ dev_dbg(dev, "IC is now in BootLoader mode\n");
+
+ msleep(200); /* The bootloader seems to need some time too. */
+
+ error = ili251x_firmware_write_to_ic(dev, fwbuf, 0xf000, df_end, 1);
+ if (error) {
+ dev_err(dev, "DF firmware update failed, error=%d\n", error);
+ goto exit;
+ }
+
+ dev_dbg(dev, "DataFlash firmware written\n");
+
+ error = ili251x_firmware_write_to_ic(dev, fwbuf, 0x2000, ac_end, 0);
+ if (error) {
+ dev_err(dev, "AC firmware update failed, error=%d\n", error);
+ goto exit;
+ }
+
+ dev_dbg(dev, "Application firmware written\n");
+
+ /* This may not succeed on first try, so re-try a few times. */
+ for (i = 0; i < 5; i++) {
+ error = ili251x_switch_ic_mode(client, REG_SET_MODE_AP);
+ if (!error)
+ break;
+ }
+
+ if (error)
+ goto exit;
+
+ dev_dbg(dev, "IC is now in Application mode\n");
+
+ error = ili251x_firmware_update_cached_state(dev);
+ if (error)
+ goto exit;
+
+ error = count;
+
+exit:
+ ili210x_hardware_reset(priv->reset_gpio);
+ dev_dbg(dev, "Firmware update ended, error=%i\n", error);
+ enable_irq(client->irq);
+ kfree(fwbuf);
+ return error;
+}
+
+static DEVICE_ATTR(firmware_update, 0200, NULL, ili210x_firmware_update_store);
+
static struct attribute *ili210x_attributes[] = {
&dev_attr_calibrate.attr,
+ &dev_attr_firmware_update.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_kernel_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_mode.attr,
NULL,
};
-static umode_t ili210x_calibrate_visible(struct kobject *kobj,
+static umode_t ili210x_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = kobj_to_dev(kobj);
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);
- return priv->chip->has_calibrate_reg ? attr->mode : 0;
+	/* Calibrate is present on all ILI2xxx parts that have a calibrate register */
+ if (attr == &dev_attr_calibrate.attr)
+ return priv->chip->has_calibrate_reg ? attr->mode : 0;
+
+	/* Firmware/Kernel/Protocol/BootMode is implemented only for ILI251x */
+ if (!priv->chip->has_firmware_proto)
+ return 0;
+
+ return attr->mode;
}
static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
- .is_visible = ili210x_calibrate_visible,
+ .is_visible = ili210x_attributes_visible,
};
static void ili210x_power_down(void *data)
@@ -420,9 +949,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;
- usleep_range(12000, 15000);
- gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(160);
+ ili210x_hardware_reset(reset_gpio);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -449,6 +976,12 @@ static int ili210x_i2c_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
if (priv->chip->has_pressure_reg)
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
+ error = ili251x_firmware_update_cached_state(dev);
+ if (error) {
+ dev_err(dev, "Unable to cache firmware information, err: %d\n",
+ error);
+ return error;
+ }
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
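
One detail of ili251x_firmware_write_to_ic() worth spelling out: the unlock command packs "end" and the CRC as 3-byte big-endian fields, and since crc_ccitt() returns a 16-bit value, the top CRC byte is always zero. A standalone model of that layout (plain C, the end/crc values are assumed for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t end = 0xf000, crc = 0xbeef;
		uint8_t cmd_wr[10] = {
			0xc4, 0x5a, 0xa5, 1,	/* REG_WRITE_ENABLE, key, DF area */
			(end >> 16) & 0xff, (end >> 8) & 0xff, end & 0xff,
			(crc >> 16) & 0xff, (crc >> 8) & 0xff, crc & 0xff
		};
		int i;

		for (i = 0; i < 10; i++)
			printf("%02x ", cmd_wr[i]);
		printf("\n");	/* prints: c4 5a a5 01 00 f0 00 00 be ef */
		return 0;
	}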
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index ff2c692c0db4..2894cba15544 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -848,7 +848,7 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
int err;
- spin_lock(&obj->iommu_lock);
+ mutex_lock(&obj->iommu_lock);
obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
@@ -864,14 +864,14 @@ static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
goto out_err;
flush_iotlb_all(obj);
- spin_unlock(&obj->iommu_lock);
+ mutex_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
return 0;
out_err:
- spin_unlock(&obj->iommu_lock);
+ mutex_unlock(&obj->iommu_lock);
return err;
}
@@ -885,7 +885,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
if (!obj || IS_ERR(obj))
return;
- spin_lock(&obj->iommu_lock);
+ mutex_lock(&obj->iommu_lock);
dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
@@ -893,7 +893,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
obj->iopgd = NULL;
iommu_disable(obj);
- spin_unlock(&obj->iommu_lock);
+ mutex_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
@@ -1021,12 +1021,16 @@ static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
omap2_iommu_disable(obj);
+ if (!obj->hwmod_mode)
+ goto skip_hwmod_ops;
+
if (pdata && pdata->device_idle)
pdata->device_idle(pdev);
if (pdata && pdata->assert_reset)
pdata->assert_reset(pdev, pdata->reset_name);
+skip_hwmod_ops:
if (pdata && pdata->set_pwrdm_constraint) {
ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
if (ret) {
@@ -1065,6 +1069,9 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}
}
+ if (!obj->hwmod_mode)
+ goto skip_hwmod_ops;
+
if (pdata && pdata->deassert_reset) {
ret = pdata->deassert_reset(pdev, pdata->reset_name);
if (ret) {
@@ -1076,6 +1083,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
if (pdata && pdata->device_enable)
pdata->device_enable(pdev);
+skip_hwmod_ops:
/* restore the TLBs only during resume, and not for power up */
if (obj->domain)
omap_iommu_restore_tlb_entries(obj);
@@ -1194,6 +1202,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
return -EINVAL;
if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
+ obj->hwmod_mode = of_get_property(of, "ti,hwmods", NULL);
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
@@ -1203,7 +1212,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
if (!obj->cr_ctx)
return -ENOMEM;
- spin_lock_init(&obj->iommu_lock);
+ mutex_init(&obj->iommu_lock);
spin_lock_init(&obj->page_table_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1471,7 +1480,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EINVAL;
}
- spin_lock(&omap_domain->lock);
+ mutex_lock(&omap_domain->lock);
/* only a single client device can be attached to a domain */
if (omap_domain->dev) {
@@ -1517,7 +1526,7 @@ attach_fail:
init_fail:
omap_iommu_detach_fini(omap_domain);
out:
- spin_unlock(&omap_domain->lock);
+ mutex_unlock(&omap_domain->lock);
return ret;
}
@@ -1565,9 +1574,9 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- spin_lock(&omap_domain->lock);
+ mutex_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
- spin_unlock(&omap_domain->lock);
+ mutex_unlock(&omap_domain->lock);
}
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
@@ -1581,7 +1590,7 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
if (!omap_domain)
return NULL;
- spin_lock_init(&omap_domain->lock);
+ mutex_init(&omap_domain->lock);
omap_domain->domain.geometry.aperture_start = 0;
omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 18ee713ede78..b8b94cdd8ee0 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -47,7 +47,7 @@ struct omap_iommu_domain {
u32 num_iommus;
struct omap_iommu_device *iommus;
struct device *dev;
- spinlock_t lock;
+ struct mutex lock;
struct iommu_domain domain;
};
@@ -58,8 +58,9 @@ struct omap_iommu {
struct device *dev;
struct iommu_domain *domain;
struct dentry *debug_dir;
+ const char *hwmod_mode;
- spinlock_t iommu_lock; /* global for this whole object */
+ struct mutex iommu_lock; /* global for this whole object */
/*
* We don't change iopgd for a situation like pgd for a task,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3c24bf45263c..7ca6995d09d2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -482,9 +482,10 @@ config LS1X_IRQ
Support for the Loongson-1 platform Interrupt Controller.
config TI_SCI_INTR_IRQCHIP
- bool
+ tristate "TI SCI INTR Interrupt Controller"
depends on TI_SCI_PROTOCOL
select IRQ_DOMAIN_HIERARCHY
+ default ARCH_K3
help
This enables the irqchip driver support for K3 Interrupt router
over TI System Control Interface available on some new TI's SoCs.
@@ -492,10 +493,12 @@ config TI_SCI_INTR_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_SCI_INTA_IRQCHIP
- bool
+ tristate "TI SCI INTA Interrupt Controller"
+ depends on ARCH_K3
depends on TI_SCI_PROTOCOL
select IRQ_DOMAIN_HIERARCHY
select TI_SCI_INTA_MSI_DOMAIN
+ default ARCH_K3
help
This enables the irqchip driver support for K3 Interrupt aggregator
over TI System Control Interface available on some new TI's SoCs.
@@ -503,8 +506,9 @@ config TI_SCI_INTA_IRQCHIP
TI System Controller, say Y here. Otherwise, say N.
config TI_PRUSS_INTC
- tristate "TI PRU-ICSS Interrupt Controller"
- depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ tristate
+ depends on TI_PRUSS
+ default TI_PRUSS
select IRQ_DOMAIN
help
This enables support for the PRU-ICSS Local Interrupt Controller
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index 92fb5780dc10..00dd8ee40b82 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -70,6 +70,8 @@
#define MAX_PRU_SYS_EVENTS 160
#define MAX_PRU_CHANNELS 20
+#define MAX_PRU_INT_EVENTS 64
+
/**
* struct pruss_intc_map_record - keeps track of actual mapping state
* @value: The currently mapped value (channel or host)
@@ -85,10 +87,13 @@ struct pruss_intc_map_record {
* @num_system_events: number of input system events handled by the PRUSS INTC
* @num_host_events: number of host events (which is equal to number of
* channels) supported by the PRUSS INTC
+ * @quirky_events: bitmask of events that need quirky IRQ handling (limited
+ *		   to internal sources only for now, so 64 bits suffice)
*/
struct pruss_intc_match_data {
u8 num_system_events;
u8 num_host_events;
+ u64 quirky_events;
};
/**
@@ -101,6 +106,7 @@ struct pruss_intc_match_data {
* @soc_config: cached PRUSS INTC IP configuration data
* @dev: PRUSS INTC device pointer
* @lock: mutex to serialize interrupts mapping
+ * @irqs_reserved: bit-mask of reserved host interrupts
*/
struct pruss_intc {
struct pruss_intc_map_record event_channel[MAX_PRU_SYS_EVENTS];
@@ -111,6 +117,7 @@ struct pruss_intc {
const struct pruss_intc_match_data *soc_config;
struct device *dev;
struct mutex lock; /* PRUSS INTC lock */
+ u8 irqs_reserved;
};
/**
@@ -178,6 +185,7 @@ static void pruss_intc_update_hmr(struct pruss_intc *intc, u8 ch, u8 host)
static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
{
struct device *dev = intc->dev;
+ bool enable_hwirq = false;
u8 ch, host, reg_idx;
u32 val;
@@ -187,6 +195,9 @@ static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
ch = intc->event_channel[hwirq].value;
host = intc->channel_host[ch].value;
+ enable_hwirq = (host < FIRST_PRU_HOST_INT ||
+ host >= FIRST_PRU_HOST_INT + MAX_NUM_HOST_IRQS ||
+ intc->irqs_reserved & BIT(host - FIRST_PRU_HOST_INT));
pruss_intc_update_cmr(intc, hwirq, ch);
@@ -194,8 +205,10 @@ static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
val = BIT(hwirq % 32);
/* clear and enable system event */
- pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val);
pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
+	/* unmask only events going to the PRU cores or other hosts by default */
+ if (enable_hwirq)
+ pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val);
if (++intc->channel_host[ch].ref_count == 1) {
pruss_intc_update_hmr(intc, ch, host);
@@ -204,7 +217,8 @@ static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
pruss_intc_write_reg(intc, PRU_INTC_HIEISR, host);
}
- dev_dbg(dev, "mapped system_event = %lu channel = %d host = %d",
+ dev_dbg(dev, "mapped%s system_event = %lu channel = %d host = %d",
+ enable_hwirq ? " and enabled" : "",
hwirq, ch, host);
mutex_unlock(&intc->lock);
@@ -268,11 +282,14 @@ static void pruss_intc_init(struct pruss_intc *intc)
/*
* configure polarity (SIPR register) to active high and
- * type (SITR register) to level interrupt for all system events
+ * type (SITR register) to level interrupt for all system events,
+ * and disable and clear all the system events
*/
for (i = 0; i < num_event_type_regs; i++) {
pruss_intc_write_reg(intc, PRU_INTC_SIPR(i), 0xffffffff);
pruss_intc_write_reg(intc, PRU_INTC_SITR(i), 0);
+ pruss_intc_write_reg(intc, PRU_INTC_ECR(i), 0xffffffff);
+ pruss_intc_write_reg(intc, PRU_INTC_SECR(i), 0xffffffff);
}
/* clear all interrupt channel map registers, 4 events per register */
@@ -292,6 +309,10 @@ static void pruss_intc_irq_ack(struct irq_data *data)
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
unsigned int hwirq = data->hwirq;
+ if (hwirq < MAX_PRU_INT_EVENTS &&
+ intc->soc_config->quirky_events & BIT_ULL(hwirq))
+ return;
+
pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
}
@@ -308,6 +329,9 @@ static void pruss_intc_irq_unmask(struct irq_data *data)
struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
unsigned int hwirq = data->hwirq;
+ if (hwirq < MAX_PRU_INT_EVENTS &&
+ intc->soc_config->quirky_events & BIT_ULL(hwirq))
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
pruss_intc_write_reg(intc, PRU_INTC_EISR, hwirq);
}
@@ -361,6 +385,14 @@ static int pruss_intc_irq_set_irqchip_state(struct irq_data *data,
return 0;
}
+static int pruss_intc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
static struct irq_chip pruss_irqchip = {
.name = "pruss-intc",
.irq_ack = pruss_intc_irq_ack,
@@ -370,6 +402,7 @@ static struct irq_chip pruss_irqchip = {
.irq_release_resources = pruss_intc_irq_relres,
.irq_get_irqchip_state = pruss_intc_irq_get_irqchip_state,
.irq_set_irqchip_state = pruss_intc_irq_set_irqchip_state,
+	.irq_set_type = pruss_intc_irq_set_type,
};
static int pruss_intc_validate_mapping(struct pruss_intc *intc, int event,
@@ -524,7 +557,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
struct pruss_intc *intc;
struct pruss_host_irq_data *host_data;
int i, irq, ret;
- u8 max_system_events, irqs_reserved = 0;
+ u8 max_system_events;
data = of_device_get_match_data(dev);
if (!data)
@@ -545,7 +578,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
return PTR_ERR(intc->base);
ret = of_property_read_u8(dev->of_node, "ti,irqs-reserved",
- &irqs_reserved);
+ &intc->irqs_reserved);
/*
* The irqs-reserved is used only for some SoC's therefore not having
@@ -564,7 +597,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
- if (irqs_reserved & BIT(i))
+ if (intc->irqs_reserved & BIT(i))
continue;
irq = platform_get_irq_byname(pdev, irq_names[i]);
@@ -626,11 +659,13 @@ static int pruss_intc_remove(struct platform_device *pdev)
static const struct pruss_intc_match_data pruss_intc_data = {
.num_system_events = 64,
.num_host_events = 10,
+ .quirky_events = BIT_ULL(7), /* IEP capture/compare event */
};
static const struct pruss_intc_match_data icssg_intc_data = {
.num_system_events = 160,
.num_host_events = 20,
+ .quirky_events = BIT_ULL(7) | BIT_ULL(56), /* IEP{0,1} capture/compare events */
};
static const struct of_device_id pruss_intc_of_match[] = {
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 532d0ae172d9..51e3abe16741 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -64,6 +64,7 @@ struct ti_sci_inta_event_desc {
* @events: Array of event descriptors assigned to this vint.
* @parent_virq: Linux IRQ number that gets attached to parent
* @vint_id: TISCI vint ID
+ * @affinity_managed: flag to indicate VINT affinity is managed
*/
struct ti_sci_inta_vint_desc {
struct irq_domain *domain;
@@ -72,6 +73,7 @@ struct ti_sci_inta_vint_desc {
struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
unsigned int parent_virq;
u16 vint_id;
+ bool affinity_managed;
};
/**
@@ -205,7 +207,8 @@ static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
*
* Return 0 if all went well else corresponding error value.
*/
-static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
+static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain,
+ u16 vint_id)
{
struct ti_sci_inta_irq_domain *inta = domain->host_data;
struct ti_sci_inta_vint_desc *vint_desc;
@@ -213,11 +216,6 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
struct device_node *parent_node;
unsigned int parent_virq;
int p_hwirq, ret;
- u16 vint_id;
-
- vint_id = ti_sci_get_free_resource(inta->vint);
- if (vint_id == TI_SCI_RESOURCE_NULL)
- return ERR_PTR(-EINVAL);
p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
if (p_hwirq < 0) {
@@ -331,29 +329,42 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
struct ti_sci_inta_vint_desc *vint_desc = NULL;
struct ti_sci_inta_event_desc *event_desc;
u16 free_bit;
+ u16 vint_id;
mutex_lock(&inta->vint_mutex);
- list_for_each_entry(vint_desc, &inta->vint_list, list) {
+ /*
+	 * Allocate a new VINT each time until we run out, then start
+	 * aggregating events on the existing VINTs
+ */
+ vint_id = ti_sci_get_free_resource(inta->vint);
+ if (vint_id == TI_SCI_RESOURCE_NULL) {
+ list_for_each_entry(vint_desc, &inta->vint_list, list) {
+ if (vint_desc->affinity_managed)
+ continue;
+ free_bit = find_first_zero_bit(vint_desc->event_map,
+ MAX_EVENTS_PER_VINT);
+ if (free_bit != MAX_EVENTS_PER_VINT) {
+ set_bit(free_bit, vint_desc->event_map);
+ break;
+ }
+ }
+ } else {
+ vint_desc = ti_sci_inta_alloc_parent_irq(domain, vint_id);
+ if (IS_ERR(vint_desc)) {
+ event_desc = ERR_CAST(vint_desc);
+ goto unlock;
+ }
+
free_bit = find_first_zero_bit(vint_desc->event_map,
MAX_EVENTS_PER_VINT);
- if (free_bit != MAX_EVENTS_PER_VINT) {
- set_bit(free_bit, vint_desc->event_map);
- goto alloc_event;
- }
+ set_bit(free_bit, vint_desc->event_map);
}
- /* No free bits available. Allocate a new vint */
- vint_desc = ti_sci_inta_alloc_parent_irq(domain);
- if (IS_ERR(vint_desc)) {
- event_desc = ERR_CAST(vint_desc);
+ if (free_bit == MAX_EVENTS_PER_VINT) {
+ event_desc = ERR_PTR(-EINVAL);
goto unlock;
}
- free_bit = find_first_zero_bit(vint_desc->event_map,
- MAX_EVENTS_PER_VINT);
- set_bit(free_bit, vint_desc->event_map);
-
-alloc_event:
event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
if (IS_ERR(event_desc))
clear_bit(free_bit, vint_desc->event_map);
@@ -432,6 +443,7 @@ static int ti_sci_inta_request_resources(struct irq_data *data)
return PTR_ERR(event_desc);
data->chip_data = event_desc;
+ irq_data_update_effective_affinity(data, cpu_online_mask);
return 0;
}
@@ -502,11 +514,45 @@ static void ti_sci_inta_ack_irq(struct irq_data *data)
ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
}
+#ifdef CONFIG_SMP
+static int ti_sci_inta_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct irq_data *parent_irq_data;
+
+ if (cpumask_equal(irq_data_get_effective_affinity_mask(d), mask_val))
+ return 0;
+
+ event_desc = irq_data_get_irq_chip_data(d);
+ if (event_desc) {
+ vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+
+ /*
+		 * Cannot set the affinity if more than one event is
+		 * mapped to the same VINT
+ */
+ if (bitmap_weight(vint_desc->event_map, MAX_EVENTS_PER_VINT) > 1)
+ return -EINVAL;
+
+ vint_desc->affinity_managed = true;
+
+ irq_data_update_effective_affinity(d, mask_val);
+ parent_irq_data = irq_get_irq_data(vint_desc->parent_virq);
+ if (parent_irq_data->chip->irq_set_affinity)
+ return parent_irq_data->chip->irq_set_affinity(parent_irq_data, mask_val, force);
+ }
+
+ return -EINVAL;
+}
+#else
static int ti_sci_inta_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
return -EINVAL;
}
+#endif
/**
* ti_sci_inta_set_type() - Update the trigger type of the irq.
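
The allocation policy in ti_sci_inta_alloc_irq() above — hand out a fresh VINT while the TISCI resource pool lasts, then aggregate further events onto existing VINTs that are not affinity-managed — can be modelled in a few lines of standalone C (sizes and names here are assumptions, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_VINTS	4
	#define EVENTS_PER_VINT	8

	static struct {
		bool in_use;
		bool affinity_managed;
		int events;
	} vints[NUM_VINTS];

	static int alloc_event(void)
	{
		int i;

		/* Prefer a brand-new VINT (ti_sci_get_free_resource()) */
		for (i = 0; i < NUM_VINTS; i++) {
			if (!vints[i].in_use) {
				vints[i].in_use = true;
				vints[i].events = 1;
				return i;
			}
		}

		/* Pool exhausted: pack onto a non-affinity-managed VINT */
		for (i = 0; i < NUM_VINTS; i++) {
			if (vints[i].affinity_managed)
				continue;
			if (vints[i].events < EVENTS_PER_VINT) {
				vints[i].events++;
				return i;
			}
		}

		return -1;	/* mirrors the driver's -EINVAL path */
	}

	int main(void)
	{
		int n;

		for (n = 0; n < 36; n++)	/* last 4 overflow to -1 */
			printf("event %d -> vint %d\n", n, alloc_event());
		return 0;
	}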
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index fe8fad22bcf9..c23ed3bac8e9 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -25,6 +25,7 @@
* @dev: Struct device pointer.
* @ti_sci_id: TI-SCI device identifier
* @type: Specifies the trigger type supported by this Interrupt Router
+ * @mapping: Pointer to out_irq <-> hwirq mapping table
*/
struct ti_sci_intr_irq_domain {
const struct ti_sci_handle *sci;
@@ -32,6 +33,7 @@ struct ti_sci_intr_irq_domain {
struct device *dev;
u32 ti_sci_id;
u32 type;
+ u32 *mapping;
};
static struct irq_chip ti_sci_intr_irq_chip = {
@@ -100,6 +102,25 @@ static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq)
}
/**
+ * ti_sci_intr_free_irq - Free the irq entry in the out_irq <-> hwirq
+ * mapping table
+ * @intr: IRQ domain corresponding to Interrupt Router
+ * @out_irq: Out irq number
+ */
+static void ti_sci_intr_free_irq(struct ti_sci_intr_irq_domain *intr,
+ u16 out_irq)
+{
+ u16 start = intr->out_irqs->desc->start;
+ u16 num = intr->out_irqs->desc->num;
+ u16 start_sec = intr->out_irqs->desc->start_sec;
+
+ if (out_irq < start + num)
+ intr->mapping[out_irq - start] = UINT_MAX;
+ else
+ intr->mapping[out_irq - start_sec + num] = UINT_MAX;
+}
+
+/**
* ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
* @domain: Domain to which the irqs belong
* @virq: Linux virtual IRQ to be freed.
@@ -118,12 +139,33 @@ static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
intr->sci->ops.rm_irq_ops.free_irq(intr->sci,
intr->ti_sci_id, data->hwirq,
intr->ti_sci_id, out_irq);
+ ti_sci_intr_free_irq(intr, out_irq);
ti_sci_release_resource(intr->out_irqs, out_irq);
irq_domain_free_irqs_parent(domain, virq, 1);
irq_domain_reset_irq_data(data);
}
/**
+ * ti_sci_intr_add_irq - Add the irq entry in the out_irq <-> hwirq
+ * mapping table
+ * @intr: IRQ domain corresponding to Interrupt Router
+ * @hwirq: Input irq number
+ * @out_irq: Out irq number
+ */
+static void ti_sci_intr_add_irq(struct ti_sci_intr_irq_domain *intr, u32 hwirq,
+ u16 out_irq)
+{
+ u16 start = intr->out_irqs->desc->start;
+ u16 num = intr->out_irqs->desc->num;
+ u16 start_sec = intr->out_irqs->desc->start_sec;
+
+ if (out_irq < start + num)
+ intr->mapping[out_irq - start] = hwirq;
+ else
+ intr->mapping[out_irq - start_sec + num] = hwirq;
+}
+
+/**
* ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ
* @domain: Pointer to the interrupt router IRQ domain
* @virq: Corresponding Linux virtual IRQ number
@@ -173,6 +215,9 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
if (err)
goto err_msg;
+	/* Record the out_irq <-> hwirq pair in the mapping table */
+ ti_sci_intr_add_irq(intr, hwirq, out_irq);
+
return out_irq;
err_msg:
@@ -221,6 +266,28 @@ static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = {
.translate = ti_sci_intr_irq_domain_translate,
};
+/**
+ * ti_sci_intr_initialize_mapping - Initialize the out_irq <-> hwirq mapping table
+ * @intr: IRQ domain corresponding to Interrupt Router
+ */
+static int ti_sci_intr_initialize_mapping(struct ti_sci_intr_irq_domain *intr)
+{
+ int i;
+ int mapping_len = intr->out_irqs->desc->num +
+ intr->out_irqs->desc->num_sec;
+
+ intr->mapping = devm_kzalloc(intr->dev, mapping_len * sizeof(u32),
+ GFP_KERNEL);
+ if (!intr->mapping)
+ return -ENOMEM;
+
+	/* Mark all entries unused by setting them to the max value of u32 */
+ for (i = 0; i < mapping_len; i++)
+ intr->mapping[i] = UINT_MAX;
+
+ return 0;
+}
+
static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain, *domain;
@@ -246,6 +313,8 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return -ENOMEM;
intr->dev = dev;
+ platform_set_drvdata(pdev, intr);
+
ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
&intr->type);
if (ret) {
@@ -273,6 +342,10 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(intr->out_irqs);
}
+ ret = ti_sci_intr_initialize_mapping(intr);
+ if (ret)
+ return ret;
+
domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
&ti_sci_intr_irq_domain_ops, intr);
if (!domain) {
@@ -285,6 +358,44 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return 0;
}
+static int ti_sci_intr_restore_irqs(struct ti_sci_intr_irq_domain *intr)
+{
+ const struct ti_sci_rm_irq_ops *iops = &intr->sci->ops.rm_irq_ops;
+ u16 start = intr->out_irqs->desc->start;
+ u16 num = intr->out_irqs->desc->num;
+ u16 start_sec = intr->out_irqs->desc->start_sec;
+ u16 num_sec = intr->out_irqs->desc->num_sec;
+ int i, err = 0;
+
+ for (i = 0; i < num + num_sec; i++) {
+ if (intr->mapping[i] == UINT_MAX)
+ continue;
+ if (i < num)
+ err = iops->set_irq(intr->sci,
+ intr->ti_sci_id, intr->mapping[i],
+ intr->ti_sci_id, i + start);
+ else
+ err = iops->set_irq(intr->sci,
+ intr->ti_sci_id, intr->mapping[i],
+					    intr->ti_sci_id, i - num + start_sec);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_intr_resume(struct device *dev)
+{
+ struct ti_sci_intr_irq_domain *intr = dev_get_drvdata(dev);
+
+ return ti_sci_intr_restore_irqs(intr);
+}
+
+static const struct dev_pm_ops ti_sci_intr_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, ti_sci_intr_resume)
+};
+
static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = {
{ .compatible = "ti,sci-intr", },
{ /* sentinel */ },
@@ -295,6 +406,7 @@ static struct platform_driver ti_sci_intr_irq_domain_driver = {
.probe = ti_sci_intr_irq_domain_probe,
.driver = {
.name = "ti-sci-intr",
+ .pm = &ti_sci_intr_dev_pm_ops,
.of_match_table = ti_sci_intr_irq_domain_of_match,
},
};
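The out_irq <-> hwirq table introduced above flattens the Interrupt Router's primary and secondary output ranges into one array, so the index arithmetic in ti_sci_intr_add_irq()/ti_sci_intr_free_irq(), and its inverse in the resume path, deserves a close look. A standalone sketch of both directions, using made-up range values (the real ones come from the TI-SCI resource descriptor):

#include <assert.h>
#include <stdint.h>

/* Hypothetical ranges: primary outputs 8..15, secondary outputs 40..43 */
enum { START = 8, NUM = 8, START_SEC = 40, NUM_SEC = 4 };

/* Flatten an out_irq from either range into an index 0..NUM+NUM_SEC-1 */
static unsigned int out_irq_to_index(uint16_t out_irq)
{
	if (out_irq < START + NUM)
		return out_irq - START;		/* primary range */
	return out_irq - START_SEC + NUM;	/* secondary range follows primary */
}

/* Inverse mapping, as needed when replaying the table on resume */
static uint16_t index_to_out_irq(unsigned int i)
{
	if (i < NUM)
		return i + START;
	return i - NUM + START_SEC;
}

int main(void)
{
	assert(out_irq_to_index(8) == 0 && index_to_out_irq(0) == 8);
	assert(out_irq_to_index(15) == 7 && index_to_out_irq(7) == 15);
	assert(out_irq_to_index(40) == 8 && index_to_out_irq(8) == 40);
	assert(out_irq_to_index(43) == 11 && index_to_out_irq(11) == 43);
	return 0;
}

Note that the secondary-range inverse is i - num + start_sec, which is what the restore loop in ti_sci_intr_restore_irqs() must use to undo the offset applied by ti_sci_intr_add_irq().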
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 93fe08aef3ca..7295e3835e30 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -3,7 +3,7 @@
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
- * Copyright (C) 2013-2019 Texas Instruments Incorporated - https://www.ti.com
+ * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
@@ -664,6 +664,10 @@ static const struct of_device_id omap_mailbox_of_match[] = {
.data = &omap4_data,
},
{
+ .compatible = "ti,am64-mailbox",
+ .data = &omap4_data,
+ },
+ {
/* end */
},
};
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 0130628f4d9d..3b7e13c66501 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' Message Manager Driver
*
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
@@ -100,6 +101,7 @@ struct ti_msgmgr_desc {
* @queue_ctrl: Queue Control register
* @chan: Mailbox channel
* @rx_buff: Receive buffer pointer allocated at probe, max_message_size
+ * @polled_rx_mode: Use polling for rx instead of interrupts
*/
struct ti_queue_inst {
char name[30];
@@ -113,6 +115,7 @@ struct ti_queue_inst {
void __iomem *queue_ctrl;
struct mbox_chan *chan;
u32 *rx_buff;
+ bool polled_rx_mode;
};
/**
@@ -190,6 +193,73 @@ static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
return val ? true : false;
}
+static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
+ const struct ti_msgmgr_desc *desc)
+{
+ int num_words;
+ struct ti_msgmgr_message message;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ /*
+ * I have no idea about the protocol being used to communicate with the
+ * remote producer - 0 could be valid data, so I won't make a judgement
+ * of how many bytes I should be reading. Let the client figure this
+ * out.. I just read the full message and pass it on..
+ */
+ message.len = desc->max_message_size;
+ message.buf = (u8 *)qinst->rx_buff;
+
+ /*
+ * NOTE about register access involved here:
+ * the hardware block is implemented with 32bit access operations and no
+ * support for data splitting. We don't want the hardware to misbehave
+ * with sub 32bit access - For example: if the last register read is
+ * split into byte wise access, it can result in the queue getting
+ * stuck or indeterminate behavior. An out of order read operation may
+ * result in weird data results as well.
+ * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
+ * we depend on readl for the purpose.
+ *
+ * Also note that the final register read automatically marks the
+ * queue message as read.
+ */
+ for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
+ num_words = (desc->max_message_size / sizeof(u32));
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ *word_data = readl(data_reg);
+
+ /*
+ * Last register read automatically clears the IRQ if only 1 message
+ * is pending - so send the data up the stack..
+ * NOTE: Client is expected to be as optimal as possible, since
+ * we invoke the handler in IRQ context.
+ */
+ mbox_chan_received_data(chan, (void *)&message);
+
+ return 0;
+}
+
+static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
+ (msg_count & desc->status_cnt_mask),
+ 10, timeout_us);
+ if (ret != 0)
+ return ret;
+
+ ti_msgmgr_queue_rx_data(chan, qinst, desc);
+
+ return 0;
+}
+
/**
* ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
* @irq: Interrupt number
@@ -206,10 +276,7 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
struct ti_queue_inst *qinst = chan->con_priv;
const struct ti_msgmgr_desc *desc;
- int msg_count, num_words;
- struct ti_msgmgr_message message;
- void __iomem *data_reg;
- u32 *word_data;
+ int msg_count;
if (WARN_ON(!inst)) {
dev_err(dev, "no platform drv data??\n");
@@ -237,41 +304,7 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
return IRQ_NONE;
}
- /*
- * I have no idea about the protocol being used to communicate with the
- * remote producer - 0 could be valid data, so I wont make a judgement
- * of how many bytes I should be reading. Let the client figure this
- * out.. I just read the full message and pass it on..
- */
- message.len = desc->max_message_size;
- message.buf = (u8 *)qinst->rx_buff;
-
- /*
- * NOTE about register access involved here:
- * the hardware block is implemented with 32bit access operations and no
- * support for data splitting. We don't want the hardware to misbehave
- * with sub 32bit access - For example: if the last register read is
- * split into byte wise access, it can result in the queue getting
- * stuck or indeterminate behavior. An out of order read operation may
- * result in weird data results as well.
- * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
- * we depend on readl for the purpose.
- *
- * Also note that the final register read automatically marks the
- * queue message as read.
- */
- for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
- num_words = (desc->max_message_size / sizeof(u32));
- num_words; num_words--, data_reg += sizeof(u32), word_data++)
- *word_data = readl(data_reg);
-
- /*
- * Last register read automatically clears the IRQ if only 1 message
- * is pending - so send the data up the stack..
- * NOTE: Client is expected to be as optimal as possible, since
- * we invoke the handler in IRQ context.
- */
- mbox_chan_received_data(chan, (void *)&message);
+ ti_msgmgr_queue_rx_data(chan, qinst, desc);
return IRQ_HANDLED;
}
@@ -336,6 +369,17 @@ static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
return msg_count ? false : true;
}
+static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst;
+
+ if (!chan)
+ return false;
+
+ qinst = chan->con_priv;
+ return qinst->polled_rx_mode;
+}
+
/**
* ti_msgmgr_send_data() - Send data
* @chan: Channel Pointer
@@ -353,6 +397,7 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
struct ti_msgmgr_message *message = data;
void __iomem *data_reg;
u32 *word_data;
+ int ret = 0;
if (WARN_ON(!inst)) {
dev_err(dev, "no platform drv data??\n");
@@ -394,7 +439,12 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
if (data_reg <= qinst->queue_buff_end)
writel(0, qinst->queue_buff_end);
- return 0;
+ /* If we are in polled mode, wait for a response before proceeding */
+ if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
+ ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
+ message->timeout_rx_ms * 1000);
+
+ return ret;
}
/**
@@ -642,6 +692,54 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
return 0;
}
+static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable)
+{
+ if (enable) {
+ disable_irq(qinst->irq);
+ qinst->polled_rx_mode = true;
+ } else {
+ enable_irq(qinst->irq);
+ qinst->polled_rx_mode = false;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ti_msgmgr_suspend(struct device *dev)
+{
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst;
+ int i;
+
+ /*
+ * We must switch operation to polled mode now as drivers and the genpd
+ * layer may make late TI SCI calls to change clock and device states
+ * from the noirq phase of suspend.
+ */
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
+ if (!qinst->is_tx)
+ ti_msgmgr_queue_rx_set_polled_mode(qinst, true);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ti_msgmgr_resume(struct device *dev)
+{
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst;
+ int i;
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
+ if (!qinst->is_tx)
+ ti_msgmgr_queue_rx_set_polled_mode(qinst, false);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume);
+
/* Queue operations */
static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
.startup = ti_msgmgr_queue_startup,
@@ -829,6 +927,7 @@ static struct platform_driver ti_msgmgr_driver = {
.driver = {
.name = "ti-msgmgr",
.of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ .pm = &ti_msgmgr_pm_ops,
},
};
module_platform_driver(ti_msgmgr_driver);
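The polled receive path added above reduces to two steps: spin on the queue's message-count status register until a message is pending (or the caller-supplied timeout expires), then drain the message with strictly 32-bit reads. A condensed, kernel-style sketch of that pattern, not the driver's exact code (error handling and the mailbox plumbing are trimmed):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Wait up to timeout_us for a pending message, then drain it word by word */
static int poll_and_drain(void __iomem *queue_state, u32 status_cnt_mask,
			  void __iomem *queue_buff, u32 *rx_buff,
			  int num_words, int timeout_us)
{
	int msg_count, ret;

	/* Safe in atomic context: busy-waits in 10 us steps */
	ret = readl_poll_timeout_atomic(queue_state, msg_count,
					msg_count & status_cnt_mask,
					10, timeout_us);
	if (ret)
		return ret;

	/* 32-bit reads only; the final read marks the message as consumed */
	for (; num_words; num_words--, queue_buff += sizeof(u32), rx_buff++)
		*rx_buff = readl(queue_buff);

	return 0;
}

This is also why ti_msgmgr_suspend() flips every rx queue to polled mode: TI-SCI calls made during the noirq phase of suspend cannot rely on the rx interrupt being delivered.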
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index f886fab1283f..d86b3de85c6a 100644
--- a/drivers/media/dvb-frontends/ascot2e.h
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -33,7 +33,7 @@ struct ascot2e_config {
#if IS_REACHABLE(CONFIG_DVB_ASCOT2E)
/**
- * Attach an ascot2e tuner
+ * ascot2e_attach - Attach an ascot2e tuner
*
* @fe: frontend to be attached
* @config: pointer to &struct ascot2e_config with tuner configuration.
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index a28b8754932b..4aa6cf4fb913 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -96,7 +96,7 @@ struct cxd2820r_config {
#if IS_REACHABLE(CONFIG_DVB_CXD2820R)
/**
- * Attach a cxd2820r demod
+ * cxd2820r_attach - Attach a cxd2820r demod
*
* @config: pointer to &struct cxd2820r_config with demod configuration.
* @i2c: i2c adapter to use.
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index ee06e89187e4..69fdca00f364 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -54,7 +54,7 @@ struct drxk_config {
#if IS_REACHABLE(CONFIG_DVB_DRXK)
/**
- * Attach a drxk demod
+ * drxk_attach - Attach a drxk demod
*
* @config: pointer to &struct drxk_config with demod configuration.
* @i2c: i2c adapter to use.
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index 973a66a82e27..71838888743b 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -38,7 +38,7 @@ struct dvb_pll_config {
#if IS_REACHABLE(CONFIG_DVB_PLL)
/**
- * Attach a dvb-pll to the supplied frontend structure.
+ * dvb_pll_attach - Attach a dvb-pll to the supplied frontend structure.
*
* @fe: Frontend to attach to.
* @pll_addr: i2c address of the PLL (if used).
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index c026bdcf548d..32e0b1fb268c 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -44,7 +44,7 @@ struct helene_config {
#if IS_REACHABLE(CONFIG_DVB_HELENE)
/**
- * Attach a helene tuner (terrestrial and cable standards)
+ * helene_attach - Attach a helene tuner (terrestrial and cable standards)
*
* @fe: frontend to be attached
* @config: pointer to &struct helene_config with tuner configuration.
@@ -57,7 +57,7 @@ extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c);
/**
- * Attach a helene tuner (satellite standards)
+ * helene_attach_s - Attach a helene tuner (satellite standards)
*
* @fe: frontend to be attached
* @config: pointer to &struct helene_config with tuner configuration.
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index 366c399e3329..91dbe20169cd 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -33,7 +33,7 @@ struct horus3a_config {
#if IS_REACHABLE(CONFIG_DVB_HORUS3A)
/**
- * Attach a horus3a tuner
+ * horus3a_attach - Attach a horus3a tuner
*
* @fe: frontend to be attached
* @config: pointer to &struct helene_config with tuner configuration.
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index 671c0e0959f7..175569131365 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
*
* Copyright (C) 2010 Malcolm Priestley
@@ -31,7 +31,7 @@ struct ix2505v_config {
#if IS_REACHABLE(CONFIG_DVB_IX2505V)
/**
- * Attach a ix2505v tuner to the supplied frontend structure.
+ * ix2505v_attach - Attach a ix2505v tuner to the supplied frontend structure.
*
* @fe: Frontend to attach to.
* @config: pointer to &struct ix2505v_config
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
index 46b722495e4c..e32b68c0df70 100644
--- a/drivers/media/dvb-frontends/m88ds3103.h
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -128,7 +128,7 @@ struct m88ds3103_config {
#if defined(CONFIG_DVB_M88DS3103) || \
(defined(CONFIG_DVB_M88DS3103_MODULE) && defined(MODULE))
/**
- * Attach a m88ds3103 demod
+ * m88ds3103_attach - Attach a m88ds3103 demod
*
* @config: pointer to &struct m88ds3103_config with demod configuration.
* @i2c: i2c adapter to use.
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index 00a6b6e9b5e4..d20d22bf7580 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -26,7 +26,7 @@ struct mb86a20s_config {
#if IS_REACHABLE(CONFIG_DVB_MB86A20S)
/**
- * Attach a mb86a20s demod
+ * mb86a20s_attach - Attach a mb86a20s demod
*
* @config: pointer to &struct mb86a20s_config with demod configuration.
* @i2c: i2c adapter to use.
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index 570a4b1d07d6..38da55af7ea9 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -15,7 +15,7 @@
#if IS_REACHABLE(CONFIG_DVB_STB6000)
/**
- * Attach a stb6000 tuner to the supplied frontend structure.
+ * stb6000_attach - Attach a stb6000 tuner to the supplied frontend structure.
*
* @fe: Frontend to attach to.
* @addr: i2c address of the tuner.
diff --git a/drivers/media/dvb-frontends/tda826x.h b/drivers/media/dvb-frontends/tda826x.h
index bb575a251b04..e1d33edbb8ec 100644
--- a/drivers/media/dvb-frontends/tda826x.h
+++ b/drivers/media/dvb-frontends/tda826x.h
@@ -14,7 +14,7 @@
#include <media/dvb_frontend.h>
/**
- * Attach a tda826x tuner to the supplied frontend structure.
+ * tda826x_attach - Attach a tda826x tuner to the supplied frontend structure.
*
* @fe: Frontend to attach to.
* @addr: i2c address of the tuner.
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 91eea777eaf1..ad83e6344e7f 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Driver for Zarlink ZL10036 DVB-S silicon tuner
*
* Copyright (C) 2006 Tino Reichardt
@@ -19,7 +19,7 @@ struct zl10036_config {
#if IS_REACHABLE(CONFIG_DVB_ZL10036)
/**
- * Attach a zl10036 tuner to the supplied frontend structure.
+ * zl10036_attach - Attach a zl10036 tuner to the supplied frontend structure.
*
* @fe: Frontend to attach to.
* @config: zl10036_config structure.
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 878f66ef2719..ab76776e890c 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -825,6 +825,30 @@ config VIDEO_IMX355
To compile this driver as a module, choose M here: the
module will be called imx355.
+config VIDEO_IMX390
+ tristate "Sony IMX390 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX390 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx390.
+
+config VIDEO_OV2312
+ tristate "OmniVision OV2312 sensor support"
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2312 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2312.
+
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -886,7 +910,7 @@ config VIDEO_OV2740
config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
- depends on OF
+ depends on OF && PM
depends on GPIOLIB && VIDEO_V4L2 && I2C
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
@@ -1060,6 +1084,18 @@ config VIDEO_OV13858
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
+config VIDEO_OV1063X
+ tristate "OmniVision OV1063X sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT && OF && GPIOLIB
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV1063X Sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov1063x.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -1335,4 +1371,26 @@ config VIDEO_LM3646
flash, torch LEDs.
endmenu
+#
+# Video serializers and deserializers (e.g. FPDLink)
+#
+
+menu "Video serializers and deserializers"
+
+config VIDEO_DS90UB953
+ tristate "TI DS90UB953 Serializer"
+ help
+ Device driver for the Texas Instruments DS90UB953
+ FPD-Link III Serializer.
+
+config VIDEO_DS90UB960
+ tristate "TI DS90UB960 Deserializer"
+ depends on OF_GPIO
+ select I2C_ATR
+ help
+ Device driver for the Texas Instruments DS90UB960
+	  FPD-Link III Deserializer.
+
+endmenu
+
endif # VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f0a77473979d..5689e649b4bb 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_OV2312) += ov2312.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
@@ -84,6 +85,7 @@ obj-$(CONFIG_VIDEO_OV8856) += ov8856.o
obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_OV1063X) += ov1063x.o
obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
@@ -119,9 +121,12 @@ obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
+obj-$(CONFIG_VIDEO_IMX390) += imx390.o
obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
rdacm20-camera_module-objs := rdacm20.o max9271.o
obj-$(CONFIG_VIDEO_RDACM20) += rdacm20-camera_module.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
+obj-$(CONFIG_VIDEO_DS90UB953) += ds90ub953.o
+obj-$(CONFIG_VIDEO_DS90UB960) += ds90ub960.o
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index e4e8fda51ad8..714e31f993e1 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -250,7 +250,7 @@ static int adv7170_s_routing(struct v4l2_subdev *sd,
}
static int adv7170_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(adv7170_codes))
@@ -261,7 +261,7 @@ static int adv7170_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7170_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -284,7 +284,7 @@ static int adv7170_get_fmt(struct v4l2_subdev *sd,
}
static int adv7170_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
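The v4l2_subdev_pad_config to v4l2_subdev_state conversions here and in the files that follow are mechanical: each pad operation's signature changes, and TRY formats are looked up through the state object instead of the pad config. A minimal sketch of the resulting get_fmt pattern, using a hypothetical mysensor driver rather than any file in this series:

#include <media/v4l2-subdev.h>

/* Hypothetical driver state; only the active format matters here */
struct mysensor_state {
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt fmt;
};

static int mysensor_get_fmt(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state,
			    struct v4l2_subdev_format *format)
{
	struct mysensor_state *state =
		container_of(sd, struct mysensor_state, sd);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		/* TRY formats now live in the subdev state object */
		format->format = *v4l2_subdev_get_try_format(sd, sd_state,
							     format->pad);
	else
		format->format = state->fmt;

	return 0;
}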
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 0cdd8e033197..1813f67f0fe1 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -288,7 +288,7 @@ static int adv7175_s_routing(struct v4l2_subdev *sd,
}
static int adv7175_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(adv7175_codes))
@@ -299,7 +299,7 @@ static int adv7175_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7175_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -322,7 +322,7 @@ static int adv7175_get_fmt(struct v4l2_subdev *sd,
}
static int adv7175_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 4498d14d3429..1b6bde8a21a9 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -633,7 +633,7 @@ static void adv7180_exit_controls(struct adv7180_state *state)
}
static int adv7180_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -699,13 +699,13 @@ static int adv7180_set_field_mode(struct adv7180_state *state)
}
static int adv7180_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
} else {
adv7180_mbus_fmt(sd, &format->format);
format->format.field = state->field;
@@ -715,7 +715,7 @@ static int adv7180_get_pad_format(struct v4l2_subdev *sd,
}
static int adv7180_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7180_state *state = to_state(sd);
@@ -742,7 +742,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
adv7180_set_power(state, true);
}
} else {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
*framefmt = format->format;
}
@@ -750,14 +750,14 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
}
static int adv7180_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
};
- return adv7180_set_pad_format(sd, cfg, &fmt);
+ return adv7180_set_pad_format(sd, sd_state, &fmt);
}
static int adv7180_get_mbus_config(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 8bcd632c081a..92cafdea3f1f 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -409,7 +409,7 @@ static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int adv7183_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -420,7 +420,7 @@ static int adv7183_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv7183_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7183 *decoder = to_adv7183(sd);
@@ -443,12 +443,12 @@ static int adv7183_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
decoder->fmt = *fmt;
else
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
static int adv7183_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7183 *decoder = to_adv7183(sd);
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index dbbb1e4d6363..47e36d4e422c 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -331,7 +331,7 @@ static int adv748x_afe_propagate_pixelrate(struct adv748x_afe *afe)
}
static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -343,7 +343,7 @@ static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv748x_afe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
@@ -354,7 +354,8 @@ static int adv748x_afe_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_afe_fill_format(afe, &sdformat->format);
@@ -365,7 +366,7 @@ static int adv748x_afe_get_format(struct v4l2_subdev *sd,
}
static int adv748x_afe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *mbusformat;
@@ -375,9 +376,9 @@ static int adv748x_afe_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return adv748x_afe_get_format(sd, cfg, sdformat);
+ return adv748x_afe_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 99bb63d05eef..dd3a8138f000 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -142,26 +142,26 @@ static const struct v4l2_subdev_video_ops adv748x_csi2_video_ops = {
static struct v4l2_mbus_framefmt *
adv748x_csi2_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
return &tx->format;
}
static int adv748x_csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
struct adv748x_state *state = tx->state;
struct v4l2_mbus_framefmt *mbusformat;
- mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ mbusformat = adv748x_csi2_get_pad_format(sd, sd_state, sdformat->pad,
sdformat->which);
if (!mbusformat)
return -EINVAL;
@@ -176,7 +176,7 @@ static int adv748x_csi2_get_format(struct v4l2_subdev *sd,
}
static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
@@ -184,7 +184,7 @@ static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mbusformat;
int ret = 0;
- mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ mbusformat = adv748x_csi2_get_pad_format(sd, sd_state, sdformat->pad,
sdformat->which);
if (!mbusformat)
return -EINVAL;
@@ -194,7 +194,7 @@ static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
if (sdformat->pad == ADV748X_CSI2_SOURCE) {
const struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = adv748x_csi2_get_pad_format(sd, cfg,
+ sink_fmt = adv748x_csi2_get_pad_format(sd, sd_state,
ADV748X_CSI2_SINK,
sdformat->which);
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index c557f8fdf11a..52fa7bd75660 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -409,7 +409,7 @@ static int adv748x_hdmi_propagate_pixelrate(struct adv748x_hdmi *hdmi)
}
static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -421,7 +421,7 @@ static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd,
}
static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
@@ -431,7 +431,8 @@ static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_hdmi_fill_format(hdmi, &sdformat->format);
@@ -442,7 +443,7 @@ static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
}
static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *mbusformat;
@@ -451,9 +452,9 @@ static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return adv748x_hdmi_get_format(sd, cfg, sdformat);
+ return adv748x_hdmi_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 9f5713b76794..aed95defda37 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1227,7 +1227,7 @@ static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
}
static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad != 0)
@@ -1258,7 +1258,7 @@ static void adv7511_fill_format(struct adv7511_state *state,
}
static int adv7511_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1272,7 +1272,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
format->format.colorspace = fmt->colorspace;
format->format.ycbcr_enc = fmt->ycbcr_enc;
@@ -1290,7 +1290,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
}
static int adv7511_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1327,7 +1327,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
fmt->colorspace = format->format.colorspace;
fmt->ycbcr_enc = format->format.ycbcr_enc;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 8cf1704308bf..0ef651b49845 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1819,7 +1819,7 @@ static int adv76xx_s_routing(struct v4l2_subdev *sd,
}
static int adv76xx_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct adv76xx_state *state = to_state(sd);
@@ -1899,7 +1899,7 @@ static void adv76xx_setup_format(struct adv76xx_state *state)
}
static int adv76xx_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv76xx_state *state = to_state(sd);
@@ -1912,7 +1912,7 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -1922,7 +1922,7 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
}
static int adv76xx_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct adv76xx_state *state = to_state(sd);
@@ -1942,7 +1942,7 @@ static int adv76xx_get_selection(struct v4l2_subdev *sd,
}
static int adv76xx_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv76xx_state *state = to_state(sd);
@@ -1961,7 +1961,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index a870117feb44..50530d34e766 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1995,7 +1995,7 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
}
static int adv7842_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(adv7842_formats))
@@ -2071,7 +2071,7 @@ static void adv7842_setup_format(struct adv7842_state *state)
}
static int adv7842_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7842_state *state = to_state(sd);
@@ -2099,7 +2099,7 @@ static int adv7842_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -2109,7 +2109,7 @@ static int adv7842_get_format(struct v4l2_subdev *sd,
}
static int adv7842_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct adv7842_state *state = to_state(sd);
@@ -2119,7 +2119,7 @@ static int adv7842_set_format(struct v4l2_subdev *sd,
return -EINVAL;
if (state->mode == ADV7842_MODE_SDP)
- return adv7842_get_format(sd, cfg, format);
+ return adv7842_get_format(sd, sd_state, format);
info = adv7842_format_info(state, format->format.code);
if (info == NULL)
@@ -2131,7 +2131,7 @@ static int adv7842_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index 1adaf470c75a..dc569d5a4d9d 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -91,7 +91,7 @@ static int ak881x_s_register(struct v4l2_subdev *sd,
#endif
static int ak881x_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -111,7 +111,7 @@ static int ak881x_fill_fmt(struct v4l2_subdev *sd,
}
static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
@@ -122,7 +122,7 @@ static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ak881x_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e2e935f78986..dc31944c7d5b 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -1746,7 +1746,7 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
/* ----------------------------------------------------------------------- */
static int cx25840_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
new file mode 100644
index 000000000000..837587c2d9a5
--- /dev/null
+++ b/drivers/media/i2c/ds90ub953.c
@@ -0,0 +1,1169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DS90UB953 video serializer
+ *
+ * Based on a driver from Luca Ceresoli <luca@lucaceresoli.net>
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2021 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define UB953_PAD_SINK 0
+#define UB953_PAD_SOURCE 1
+
+#define UB953_NUM_GPIOS 4
+#define UB953_MAX_DATA_LANES 4
+
+#define UB953_REG_RESET_CTL 0x01
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
+
+#define UB953_REG_GENERAL_CFG 0x02
+#define UB953_REG_MODE_SEL 0x03
+
+#define UB953_REG_CLKOUT_CTRL0 0x06
+#define UB953_REG_CLKOUT_CTRL1 0x07
+
+#define UB953_REG_SCL_HIGH_TIME 0x0B
+#define UB953_REG_SCL_LOW_TIME 0x0C
+
+#define UB953_REG_LOCAL_GPIO_DATA 0x0d
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
+
+#define UB953_REG_GPIO_INPUT_CTRL 0x0e
+#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
+#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+
+#define UB953_REG_REV_MASK_ID 0x50
+
+#define UB953_REG_GPIO_PIN_STS 0x53
+#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
+
+#define UB953_REG_IND_ACC_CTL 0xb0
+#define UB953_REG_IND_ACC_CTL_IA_AUTO_INC BIT(1)
+#define UB953_REG_IND_ACC_CTL_IA_SEL_MASK GENMASK(4, 2)
+#define UB953_REG_IND_ACC_ADDR 0xb1
+#define UB953_REG_IND_ACC_DATA 0xb2
+
+#define UB953_IND_PGEN_CTL 0x01
+#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
+#define UB953_IND_PGEN_CFG 0x02
+#define UB953_IND_PGEN_CSI_DI 0x03
+#define UB953_IND_PGEN_LINE_SIZE1 0x04
+#define UB953_IND_PGEN_LINE_SIZE0 0x05
+#define UB953_IND_PGEN_BAR_SIZE1 0x06
+#define UB953_IND_PGEN_BAR_SIZE0 0x07
+#define UB953_IND_PGEN_ACT_LPF1 0x08
+#define UB953_IND_PGEN_ACT_LPF0 0x09
+#define UB953_IND_PGEN_TOT_LPF1 0x0A
+#define UB953_IND_PGEN_TOT_LPF0 0x0B
+#define UB953_IND_PGEN_LINE_PD1 0x0C
+#define UB953_IND_PGEN_LINE_PD0 0x0D
+#define UB953_IND_PGEN_VBP 0x0E
+#define UB953_IND_PGEN_VFP 0x0F
+#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
+
+struct ub953_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+
+ struct clk *line_rate_clk;
+ struct clk_hw clk_out_hw;
+
+ u32 gpio_func[UB953_NUM_GPIOS];
+
+ struct gpio_chip gpio_chip;
+ char gpio_chip_name[64];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+
+ struct v4l2_async_notifier notifier;
+
+ struct v4l2_subdev *source_sd;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ bool streaming;
+
+ struct device_node *rx_ep_np;
+ struct device_node *tx_ep_np;
+
+ bool use_1v8_i2c;
+
+ u8 clkout_mul;
+ u8 clkout_div;
+ u8 clkout_ctrl0;
+ u8 clkout_ctrl1;
+};
+
+static inline struct ub953_data *sd_to_ub953(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ub953_data, sd);
+}
+
+/*
+ * HW Access
+ */
+
+static int ub953_read(const struct ub953_data *priv, u8 reg, u8 *val)
+{
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret < 0) {
+ dev_err(&priv->client->dev,
+ "Cannot read register 0x%02x: %d!\n", reg, ret);
+ return ret;
+ }
+
+ *val = v;
+ return 0;
+}
+
+static int ub953_write(const struct ub953_data *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret < 0)
+ dev_err(&priv->client->dev,
+ "Cannot write register 0x%02x: %d!\n", reg, ret);
+
+ return ret;
+}
+
+static int ub953_write_ind8(const struct ub953_data *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = ub953_write(priv, UB953_REG_IND_ACC_ADDR, reg);
+ if (!ret)
+ ret = ub953_write(priv, UB953_REG_IND_ACC_DATA, val);
+ return ret;
+}
+
+/* Assumes IA_AUTO_INC is set in UB953_REG_IND_ACC_CTL */
+static int ub953_write_ind16(const struct ub953_data *priv, u8 reg, u16 val)
+{
+ int ret;
+
+ ret = ub953_write(priv, UB953_REG_IND_ACC_ADDR, reg);
+ if (!ret)
+ ret = ub953_write(priv, UB953_REG_IND_ACC_DATA, val >> 8);
+ if (!ret)
+ ret = ub953_write(priv, UB953_REG_IND_ACC_DATA, val & 0xff);
+ return ret;
+}
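+
+/*
+ * Illustrative sequence: writing 0x1234 to indirect register 0x04 issues
+ * IND_ACC_ADDR = 0x04, IND_ACC_DATA = 0x12, IND_ACC_DATA = 0x34, with
+ * auto-increment advancing the address to 0x05 between the two data
+ * writes.
+ */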
+
+/*
+ * Clock output
+ */
+
+/*
+ * Assume mode 0 "CSI-2 Synchronous mode" (strap, reg 0x03) is always
+ * used. In this mode all clocks are derived from the deserializer. Other
+ * modes are not implemented.
+ */
+
+/*
+ * We always use 4 as a pre-divider (HS_CLK_DIV = 2).
+ *
+ * According to the datasheet:
+ * - "HS_CLK_DIV typically should be set to either 16, 8, or 4 (default)."
+ * - "if it is not possible to have an integer ratio of N/M, it is best to
+ * select a smaller value for HS_CLK_DIV.
+ *
+ * For above reasons the default HS_CLK_DIV seems the best in the average
+ * case. Use always that value to keep the code simple.
+ */
+static const unsigned long hs_clk_div = 2;
+static const unsigned long prediv = (1 << hs_clk_div);
+
+static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+	struct ub953_data *priv = container_of(hw, struct ub953_data, clk_out_hw);
+	u8 ctrl0, ctrl1;
+	unsigned long mul, div, ret;
+	int err0, err1;
+
+	err0 = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0);
+	err1 = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1);
+
+	if (err0 || err1) {
+		/* Perhaps link down, use cached values */
+		ctrl0 = priv->clkout_ctrl0;
+		ctrl1 = priv->clkout_ctrl1;
+	}
+
+ mul = ctrl0 & 0x1f;
+ div = ctrl1 & 0xff;
+
+ if (div == 0)
+ return 0;
+
+ ret = parent_rate / prediv * mul / div;
+
+ return ret;
+}
+
+static long ub953_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ub953_data *priv = container_of(hw, struct ub953_data, clk_out_hw);
+ struct device *dev = &priv->client->dev;
+ unsigned long mul, div, res;
+
+ rational_best_approximation(rate, *parent_rate / prediv,
+ (1 << 5) - 1, (1 << 8) - 1,
+ &mul, &div);
+ priv->clkout_mul = mul;
+ priv->clkout_div = div;
+
+ res = *parent_rate / prediv * priv->clkout_mul / priv->clkout_div;
+
+ dev_dbg(dev, "%lu / %lu * %lu / %lu = %lu (wanted %lu)",
+ *parent_rate, prediv, mul, div, res, rate);
+
+ return res;
+}
+
+static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ub953_data *priv = container_of(hw, struct ub953_data, clk_out_hw);
+
+ priv->clkout_ctrl0 = (hs_clk_div << 5) | priv->clkout_mul;
+ priv->clkout_ctrl1 = priv->clkout_div;
+
+ ub953_write(priv, UB953_REG_CLKOUT_CTRL0, priv->clkout_ctrl0);
+ ub953_write(priv, UB953_REG_CLKOUT_CTRL1, priv->clkout_ctrl1);
+
+ return 0;
+}
+
+static const struct clk_ops ub953_clkout_ops = {
+ .recalc_rate = ub953_clkout_recalc_rate,
+ .round_rate = ub953_clkout_round_rate,
+ .set_rate = ub953_clkout_set_rate,
+};
+
+static int ub953_register_clkout(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ const char *parent_names[1] = { __clk_get_name(priv->line_rate_clk) };
+ const struct clk_init_data init = {
+ .name = kasprintf(GFP_KERNEL, "%s.clk_out", dev_name(dev)),
+ .ops = &ub953_clkout_ops,
+ .parent_names = parent_names,
+ .num_parents = 1,
+ };
+ int err;
+
+ priv->clk_out_hw.init = &init;
+
+ err = devm_clk_hw_register(dev, &priv->clk_out_hw);
+ kfree(init.name); /* clock framework made a copy of the name */
+ if (err)
+ return dev_err_probe(dev, err, "Cannot register clock HW\n");
+
+ err = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &priv->clk_out_hw);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot add OF clock provider\n");
+
+ return 0;
+}
+
+/*
+ * GPIO chip
+ */
+static int ub953_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v);
+ if (ret)
+ return ret;
+
+ if (v & UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset))
+ return GPIO_LINE_DIRECTION_IN;
+ else
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int ub953_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+
+ return regmap_update_bits(
+ priv->regmap, UB953_REG_GPIO_INPUT_CTRL,
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset) |
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset),
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset));
+}
+
+static int ub953_gpio_direction_out(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+
+ ret = regmap_update_bits(
+ priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
+ UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
+ value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) : 0);
+
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(
+ priv->regmap, UB953_REG_GPIO_INPUT_CTRL,
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset) |
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset),
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset));
+}
+
+static int ub953_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v);
+ if (ret)
+ return ret;
+
+ return !!(v & UB953_REG_GPIO_PIN_STS_GPIO_STS(offset));
+}
+
+static void ub953_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+
+ regmap_update_bits(
+ priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
+ UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
+ value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) : 0);
+}
+
+static int ub953_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0];
+}
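+
+/*
+ * Hypothetical consumer binding, for illustration only:
+ *   enable-gpios = <&serializer 2 GPIO_ACTIVE_HIGH>;
+ * args[0] (here 2) is the GPIO offset and args[1] carries the flags.
+ */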
+
+static int ub953_gpiochip_probe(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct gpio_chip *gc = &priv->gpio_chip;
+ int ret;
+
+ /* Set all GPIOs to local mode */
+ ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+
+ scnprintf(priv->gpio_chip_name, sizeof(priv->gpio_chip_name), "%s",
+ dev_name(dev));
+
+ gc->label = priv->gpio_chip_name;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->base = -1;
+ gc->can_sleep = 1;
+ gc->ngpio = UB953_NUM_GPIOS;
+ gc->get_direction = ub953_gpio_get_direction;
+ gc->direction_input = ub953_gpio_direction_in;
+ gc->direction_output = ub953_gpio_direction_out;
+ gc->get = ub953_gpio_get;
+ gc->set = ub953_gpio_set;
+ gc->of_xlate = ub953_gpio_of_xlate;
+ gc->of_node = priv->client->dev.of_node;
+ gc->of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(gc, priv);
+ if (ret) {
+ dev_err(dev, "Failed to add GPIOs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub953_gpiochip_remove(struct ub953_data *priv)
+{
+ gpiochip_remove(&priv->gpio_chip);
+}
+
+/*
+ * V4L2
+ */
+
+static int ub953_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ int ret;
+
+ priv->streaming = enable;
+
+ ret = v4l2_subdev_call(priv->source_sd, video, s_stream, enable);
+ if (ret && enable)
+ priv->streaming = false;
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops ub953_video_ops = {
+ .s_stream = ub953_s_stream,
+};
+
+static int _ub953_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ /*
+ * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
+ * frame desc is made dynamically allocated.
+ */
+
+ if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -EINVAL;
+
+ ret = v4l2_routing_simple_verify(routing);
+ if (ret)
+ return ret;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+
+ v4l2_subdev_unlock_state(state);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+
+static int ub953_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ return _ub953_set_routing(sd, state, routing);
+}
+
+static int ub953_get_source_frame_desc(struct ub953_data *priv,
+ struct v4l2_mbus_frame_desc *desc)
+{
+ struct media_pad *pad;
+ int ret;
+
+ pad = media_entity_remote_pad(&priv->pads[UB953_PAD_SINK]);
+ if (!pad)
+ return -EPIPE;
+
+ ret = v4l2_subdev_call(priv->source_sd, pad, get_frame_desc, pad->index,
+ desc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub953_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ const struct v4l2_subdev_krouting *routing;
+ struct v4l2_mbus_frame_desc source_fd;
+ struct v4l2_subdev_state *state;
+ unsigned int i;
+ int ret = 0;
+
+	if (pad != UB953_PAD_SOURCE) /* first tx pad */
+ return -EINVAL;
+
+ ret = ub953_get_source_frame_desc(priv, &source_fd);
+ if (ret)
+ return ret;
+
+ state = v4l2_subdev_lock_active_state(sd);
+
+ routing = &state->routing;
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routing->routes[i];
+ struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
+ unsigned int j;
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (route->source_pad != pad)
+ continue;
+
+ for (j = 0; j < source_fd.num_entries; ++j)
+ if (source_fd.entry[j].stream == route->sink_stream) {
+ source_entry = &source_fd.entry[j];
+ break;
+ }
+
+ if (!source_entry) {
+ dev_err(&priv->client->dev,
+ "Failed to find stream from source frame desc\n");
+ ret = -EPIPE;
+ goto out;
+ }
+
+ fd->entry[fd->num_entries].stream = route->source_stream;
+
+ fd->entry[fd->num_entries].flags =
+ V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
+ fd->entry[fd->num_entries].length = source_entry->length;
+ fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;
+ fd->entry[fd->num_entries].bus.csi2.vc =
+ source_entry->bus.csi2.vc;
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ source_entry->bus.csi2.dt;
+
+ fd->num_entries++;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub953_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ /* No transcoding, source and sink formats must match. */
+	if (format->pad == UB953_PAD_SOURCE)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ v4l2_subdev_lock_state(state);
+
+ /* Set sink format */
+ fmt = v4l2_state_get_stream_format(state, format->pad, format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+ /* Propagate to source format */
+ fmt = v4l2_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub953_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = 1,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ return _ub953_set_routing(sd, state, &routing);
+}
+
+static const struct v4l2_subdev_pad_ops ub953_pad_ops = {
+ .set_routing = ub953_set_routing,
+ .get_frame_desc = ub953_get_frame_desc,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ub953_set_fmt,
+ .init_cfg = ub953_init_cfg,
+};
+
+static const struct v4l2_subdev_ops ub953_subdev_ops = {
+ .video = &ub953_video_ops,
+ .pad = &ub953_pad_ops,
+};
+
+static const struct media_entity_operations ub953_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+enum {
+ TEST_PATTERN_DISABLED = 0,
+ TEST_PATTERN_V_COLOR_BARS_1,
+ TEST_PATTERN_V_COLOR_BARS_2,
+ TEST_PATTERN_V_COLOR_BARS_4,
+ TEST_PATTERN_V_COLOR_BARS_8,
+};
+
+static const char *const ub953_tpg_qmenu[] = {
+ "Disabled",
+ "1 vertical color bar",
+ "2 vertical color bars",
+ "4 vertical color bars",
+ "8 vertical color bars",
+};
+
+static void ub953_enable_tpg(struct ub953_data *priv, int tpg_num)
+{
+ struct v4l2_subdev *sd = &priv->sd;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+ u8 vbp, vfp;
+ u16 blank_lines;
+ u16 width;
+ u16 height;
+
+ u16 bytespp = 2; /* For MEDIA_BUS_FMT_UYVY8_1X16 */
+ u8 cbars_idx = tpg_num - TEST_PATTERN_V_COLOR_BARS_1;
+ u8 num_cbars = 1 << cbars_idx;
+
+ u16 line_size; /* Line size [bytes] */
+ u16 bar_size; /* cbar size [bytes] */
+ u16 act_lpf; /* active lines/frame */
+ u16 tot_lpf; /* tot lines/frame */
+ u16 line_pd; /* Line period in 10-ns units */
+
+ u16 fps = 30;
+
+ vbp = 33;
+ vfp = 10;
+ blank_lines = vbp + vfp + 2; /* total blanking lines */
+
+ state = v4l2_subdev_lock_active_state(sd);
+
+ fmt = v4l2_state_get_stream_format(state, UB953_PAD_SOURCE, 0);
+
+ width = fmt->width;
+ height = fmt->height;
+
+ line_size = width * bytespp;
+ bar_size = line_size / num_cbars;
+ act_lpf = height;
+ tot_lpf = act_lpf + blank_lines;
+ line_pd = 100000000 / fps / tot_lpf;
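+
+	/*
+	 * Worked example (illustrative): for a 1920x1080 UYVY source,
+	 * line_size = 1920 * 2 = 3840 bytes, tot_lpf = 1080 + 45 = 1125
+	 * lines, and line_pd = 10^8 / 30 / 1125 = 2962, i.e. a line period
+	 * of ~29.6 us for the nominal 30 fps frame rate.
+	 */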
+
+ /* Access Indirect Pattern Gen */
+ ub953_write(priv, UB953_REG_IND_ACC_CTL,
+ UB953_REG_IND_ACC_CTL_IA_AUTO_INC | (0 << 2));
+
+ ub953_write_ind8(priv, UB953_IND_PGEN_CTL,
+ UB953_IND_PGEN_CTL_PGEN_ENABLE);
+
+ /* YUV422 8bit: 2 bytes/block, CSI-2 data type 0x1e */
+ ub953_write_ind8(priv, UB953_IND_PGEN_CFG, cbars_idx << 4 | 0x2);
+ ub953_write_ind8(priv, UB953_IND_PGEN_CSI_DI, 0x1e);
+
+ ub953_write_ind16(priv, UB953_IND_PGEN_LINE_SIZE1, line_size);
+ ub953_write_ind16(priv, UB953_IND_PGEN_BAR_SIZE1, bar_size);
+ ub953_write_ind16(priv, UB953_IND_PGEN_ACT_LPF1, act_lpf);
+ ub953_write_ind16(priv, UB953_IND_PGEN_TOT_LPF1, tot_lpf);
+ ub953_write_ind16(priv, UB953_IND_PGEN_LINE_PD1, line_pd);
+ ub953_write_ind8(priv, UB953_IND_PGEN_VBP, vbp);
+ ub953_write_ind8(priv, UB953_IND_PGEN_VFP, vfp);
+
+ v4l2_subdev_unlock_state(state);
+}
+
+static void ub953_disable_tpg(struct ub953_data *priv)
+{
+ ub953_write_ind8(priv, UB953_IND_PGEN_CTL, 0x00);
+}
+
+static int ub953_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ub953_data *priv =
+ container_of(ctrl->handler, struct ub953_data, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+		if (ctrl->val == TEST_PATTERN_DISABLED)
+ ub953_disable_tpg(priv);
+ else
+ ub953_enable_tpg(priv, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ub953_ctrl_ops = {
+ .s_ctrl = ub953_s_ctrl,
+};
+
+static int ub953_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *source_subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct ub953_data *priv = sd_to_ub953(notifier->sd);
+ struct device *dev = &priv->client->dev;
+ unsigned int src_pad;
+ int ret;
+
+ dev_dbg(dev, "Bind %s\n", source_subdev->name);
+
+ ret = media_entity_get_fwnode_pad(&source_subdev->entity,
+ source_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find pad for %s\n",
+ source_subdev->name);
+ return ret;
+ }
+
+ priv->source_sd = source_subdev;
+ src_pad = ret;
+
+ ret = media_create_pad_link(
+ &source_subdev->entity, src_pad, &priv->sd.entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "Unable to link %s:%u -> %s:0\n",
+ source_subdev->name, src_pad, priv->sd.name);
+ return ret;
+ }
+
+ dev_dbg(dev, "Bound %s:%u\n", source_subdev->name, src_pad);
+
+ dev_dbg(dev, "All subdevs bound\n");
+
+ return 0;
+}
+
+static void ub953_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *source_subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct ub953_data *priv = sd_to_ub953(notifier->sd);
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "Unbind %s\n", source_subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations ub953_notify_ops = {
+ .bound = ub953_notify_bound,
+ .unbind = ub953_notify_unbind,
+};
+
+static int ub953_v4l2_notifier_register(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_async_subdev *asd;
+ struct device_node *ep_node;
+ int ret;
+
+ dev_dbg(dev, "register async notif\n");
+
+ ep_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ if (!ep_node) {
+ dev_err(dev, "No graph endpoint\n");
+ return -ENODEV;
+ }
+
+ v4l2_async_notifier_init(&priv->notifier);
+
+ asd = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &priv->notifier, of_fwnode_handle(ep_node),
+ sizeof(*asd));
+
+ of_node_put(ep_node);
+
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ v4l2_async_notifier_cleanup(&priv->notifier);
+ return PTR_ERR(asd);
+ }
+
+ priv->notifier.ops = &ub953_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&priv->sd, &priv->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register subdev_notifier");
+ v4l2_async_notifier_cleanup(&priv->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub953_v4l2_notifier_unregister(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "Unregister async notif\n");
+
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
+}
+
+/*
+ * Probing
+ */
+
+static void ub953_soft_reset(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int retries;
+
+ ub953_write(priv, UB953_REG_RESET_CTL,
+ UB953_REG_RESET_CTL_DIGITAL_RESET_1);
+
+ usleep_range(10000, 30000);
+
+ retries = 10;
+ while (retries-- > 0) {
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_RESET_CTL, &v);
+
+ if (ret >= 0 &&
+ (v & UB953_REG_RESET_CTL_DIGITAL_RESET_1) == 0) {
+ dev_dbg(dev, "reset done\n");
+ break;
+ }
+
+ usleep_range(1000, 3000);
+ }
+
+	if (retries < 0)
+		dev_err(dev, "reset timeout\n");
+}
+
+static int ub953_i2c_init(struct ub953_data *priv)
+{
+ /* i2c fast mode */
+ u32 scl_high = 915; /* ns */
+ u32 scl_low = 1641; /* ns */
+ u32 ref = 25000000; /* TODO: get refclock from deserializer */
+ int ret = 0;
+
+ scl_high = div64_u64((u64)scl_high * ref, 1000000000) - 5;
+ scl_low = div64_u64((u64)scl_low * ref, 1000000000) - 5;
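+
+	/*
+	 * Worked example (illustrative): with the assumed 25 MHz refclock,
+	 * scl_high = 915 * 25e6 / 1e9 - 5 = 17 and
+	 * scl_low = 1641 * 25e6 / 1e9 - 5 = 36 register ticks.
+	 */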
+
+ ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub953_general_cfg(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u32 num_data_lanes;
+ bool clock_continuous;
+ int ret;
+
+	ret = of_property_count_u32_elems(priv->rx_ep_np, "data-lanes");
+	if (ret < 1 || ret > UB953_MAX_DATA_LANES) {
+		dev_err(dev, "DT: invalid data-lanes (%d), only 1-4 lanes supported\n", ret);
+		return ret < 0 ? ret : -EINVAL;
+	}
+
+	num_data_lanes = ret;
+
+ clock_continuous = !of_property_read_bool(priv->rx_ep_np, "clock-noncontinuous");
+
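+	/*
+	 * GENERAL_CFG layout as used below: bit 6 = continuous CSI-2 clock,
+	 * bits 5:4 = data lane count - 1, bit 1 = CRC generator enable,
+	 * bit 0 = 1.8 V I2C signaling.
+	 */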
+ return ub953_write(priv, UB953_REG_GENERAL_CFG,
+ ((clock_continuous) << 6) |
+ ((num_data_lanes - 1) << 4) |
+ (1 << 1) | /* CRC TX gen */
+ (priv->use_1v8_i2c << 0));
+}
+
+static int ub953_parse_dt(struct ub953_data *priv)
+{
+ struct device_node *np = priv->client->dev.of_node;
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "OF: no device tree node!\n");
+ return -ENOENT;
+ }
+
+ /* optional, if absent all GPIO pins are unused */
+ ret = of_property_read_u32_array(np, "gpio-functions", priv->gpio_func,
+ ARRAY_SIZE(priv->gpio_func));
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "DT: invalid gpio-functions property (%d)", ret);
+
+ /* read i2c voltage level */
+ priv->use_1v8_i2c = of_property_read_bool(np, "i2c-1_8v");
+
+ return 0;
+}
+
+static const struct regmap_config ub953_regmap_config = {
+ .name = "ds90ub953",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_DEFAULT,
+ .val_format_endian = REGMAP_ENDIAN_DEFAULT,
+};
+
+static int ub953_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ub953_data *priv;
+ int ret;
+ u8 rev;
+
+ dev_dbg(dev, "probing, addr 0x%02x\n", client->addr);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ub953_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->line_rate_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->line_rate_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->line_rate_clk),
+ "Cannot get line rate clock\n");
+ dev_dbg(dev, "line rate: %lu Hz\n", clk_get_rate(priv->line_rate_clk));
+
+ ret = ub953_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ ub953_soft_reset(priv);
+
+ ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &rev);
+ if (ret) {
+ dev_err(dev, "Failed to read revision: %d", ret);
+ return ret;
+ }
+
+ dev_info(dev, "Found rev %u, mask %u\n", rev >> 4, rev & 0xf);
+
+ ret = ub953_i2c_init(priv);
+ if (ret) {
+ dev_err(dev, "i2c init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ub953_gpiochip_probe(priv);
+ if (ret) {
+ dev_err(dev, "Failed to init gpiochip\n");
+ return ret;
+ }
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub953_subdev_ops);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_handler,
+ ARRAY_SIZE(ub953_tpg_qmenu) - 1);
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, &ub953_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ub953_tpg_qmenu) - 1, 0, 0,
+ ub953_tpg_qmenu);
+
+ if (priv->ctrl_handler.error) {
+ ret = priv->ctrl_handler.error;
+ goto err_gpiochip_remove;
+ }
+
+ priv->sd.flags |=
+ V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_MULTIPLEXED;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &ub953_entity_ops;
+
+	priv->pads[UB953_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	priv->pads[UB953_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, 2, priv->pads);
+ if (ret) {
+ dev_err(dev, "Failed to init pads\n");
+ goto err_remove_ctrls;
+ }
+
+ priv->rx_ep_np = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ priv->tx_ep_np = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0);
+ priv->sd.fwnode = of_fwnode_handle(priv->tx_ep_np);
+
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = ub953_v4l2_notifier_register(priv);
+ if (ret) {
+ dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
+ goto err_free_state;
+ }
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret) {
+ dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
+ goto err_unreg_notif;
+ }
+
+ /* Default values for clock multiplier and divider registers */
+ priv->clkout_ctrl0 = 0x41;
+ priv->clkout_ctrl1 = 0x28;
+	ret = ub953_register_clkout(priv);
+	if (ret)
+		goto err_unreg_async;
+
+	ret = ub953_general_cfg(priv);
+	if (ret)
+		goto err_unreg_async;
+
+ dev_dbg(dev, "Successfully probed\n");
+
+ return 0;
+
+err_unreg_async:
+	v4l2_async_unregister_subdev(&priv->sd);
+err_unreg_notif:
+ ub953_v4l2_notifier_unregister(priv);
+err_free_state:
+ v4l2_subdev_cleanup(&priv->sd);
+err_entity_cleanup:
+	of_node_put(priv->rx_ep_np);
+	of_node_put(priv->tx_ep_np);
+
+ media_entity_cleanup(&priv->sd.entity);
+err_remove_ctrls:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+err_gpiochip_remove:
+ ub953_gpiochip_remove(priv);
+
+ return ret;
+}
+
+static int ub953_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ub953_data *priv = sd_to_ub953(sd);
+
+ dev_dbg(&client->dev, "Removing\n");
+
+ ub953_v4l2_notifier_unregister(priv);
+ v4l2_async_unregister_subdev(&priv->sd);
+
+ v4l2_subdev_cleanup(&priv->sd);
+
+	of_node_put(priv->rx_ep_np);
+	of_node_put(priv->tx_ep_np);
+
+ media_entity_cleanup(&priv->sd.entity);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+
+ ub953_gpiochip_remove(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id ub953_id[] = { { "ds90ub953-q1", 0 }, {} };
+MODULE_DEVICE_TABLE(i2c, ub953_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ub953_dt_ids[] = {
+ { .compatible = "ti,ds90ub953-q1", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ub953_dt_ids);
+#endif
+
+static struct i2c_driver ds90ub953_driver = {
+ .probe_new = ub953_probe,
+ .remove = ub953_remove,
+ .id_table = ub953_id,
+ .driver = {
+ .name = "ds90ub953",
+ .of_match_table = of_match_ptr(ub953_dt_ids),
+ },
+};
+
+module_i2c_driver(ds90ub953_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Texas Instruments DS90UB953 serializer driver");
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
new file mode 100644
index 000000000000..b876063a4201
--- /dev/null
+++ b/drivers/media/i2c/ds90ub960.c
@@ -0,0 +1,2504 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2021 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-atr.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#define UB960_MAX_RX_NPORTS 4
+#define UB960_MAX_TX_NPORTS 2
+#define UB960_MAX_NPORTS (UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
+
+#define UB960_NUM_SLAVE_ALIASES 8
+#define UB960_MAX_POOL_ALIASES (UB960_MAX_RX_NPORTS * UB960_NUM_SLAVE_ALIASES)
+
+#define UB960_MAX_VC 4
+
+/*
+ * Register map
+ *
+ * 0x00-0x32 Shared (UB960_SR)
+ * 0x33-0x3A CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
+ * 0x4C Shared (UB960_SR)
+ * 0x4D-0x7F FPD-Link RX, per-port paged (UB960_RR)
+ * 0xB0-0xBF Shared (UB960_SR)
+ * 0xD0-0xDF FPD-Link RX, per-port paged (UB960_RR)
+ * 0xF0-0xF5 Shared (UB960_SR)
+ * 0xF8-0xFB Shared (UB960_SR)
+ * All others Reserved
+ *
+ * Register prefixes:
+ * UB960_SR_* = Shared register
+ * UB960_RR_* = FPD-Link RX, per-port paged register
+ * UB960_TR_* = CSI-2 TX, per-port paged register
+ * UB960_XR_* = Reserved register
+ * UB960_IR_* = Indirect register
+ */
+
+#define UB960_SR_I2C_DEV_ID 0x00
+#define UB960_SR_RESET 0x01
+#define UB960_SR_GEN_CONFIG 0x02
+#define UB960_SR_REV_MASK 0x03
+#define UB960_SR_DEVICE_STS 0x04
+#define UB960_SR_PAR_ERR_THOLD_HI 0x05
+#define UB960_SR_PAR_ERR_THOLD_LO 0x06
+#define UB960_SR_BCC_WDOG_CTL 0x07
+#define UB960_SR_I2C_CTL1 0x08
+#define UB960_SR_I2C_CTL2 0x09
+#define UB960_SR_SCL_HIGH_TIME 0x0A
+#define UB960_SR_SCL_LOW_TIME 0x0B
+#define UB960_SR_RX_PORT_CTL 0x0C
+#define UB960_SR_IO_CTL 0x0D
+#define UB960_SR_GPIO_PIN_STS 0x0E
+#define UB960_SR_GPIO_INPUT_CTL 0x0F
+#define UB960_SR_GPIO_PIN_CTL(n) (0x10 + (n)) /* n < UB960_NUM_GPIOS */
+#define UB960_SR_FS_CTL 0x18
+#define UB960_SR_FS_HIGH_TIME_1 0x19
+#define UB960_SR_FS_HIGH_TIME_0 0x1A
+#define UB960_SR_FS_LOW_TIME_1 0x1B
+#define UB960_SR_FS_LOW_TIME_0 0x1C
+#define UB960_SR_MAX_FRM_HI 0x1D
+#define UB960_SR_MAX_FRM_LO 0x1E
+#define UB960_SR_CSI_PLL_CTL 0x1F
+
+#define UB960_SR_FWD_CTL1 0x20
+#define UB960_SR_FWD_CTL1_PORT_DIS(n) BIT((n) + 4)
+
+#define UB960_SR_FWD_CTL2 0x21
+#define UB960_SR_FWD_STS 0x22
+
+#define UB960_SR_INTERRUPT_CTL 0x23
+#define UB960_SR_INTERRUPT_CTL_INT_EN BIT(7)
+#define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0 BIT(4)
+#define UB960_SR_INTERRUPT_CTL_IE_RX(n) BIT((n)) /* rxport[n] IRQ */
+#define UB960_SR_INTERRUPT_CTL_ALL 0x83 /* TODO 0x93 to enable CSI */
+
+#define UB960_SR_INTERRUPT_STS 0x24
+#define UB960_SR_INTERRUPT_STS_INT BIT(7)
+#define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n) BIT(4 + (n)) /* txport[n] IRQ */
+#define UB960_SR_INTERRUPT_STS_IS_RX(n) BIT((n)) /* rxport[n] IRQ */
+
+#define UB960_SR_TS_CONFIG 0x25
+#define UB960_SR_TS_CONTROL 0x26
+#define UB960_SR_TS_LINE_HI 0x27
+#define UB960_SR_TS_LINE_LO 0x28
+#define UB960_SR_TS_STATUS 0x29
+#define UB960_SR_TIMESTAMP_P0_HI 0x2A
+#define UB960_SR_TIMESTAMP_P0_LO 0x2B
+#define UB960_SR_TIMESTAMP_P1_HI 0x2C
+#define UB960_SR_TIMESTAMP_P1_LO 0x2D
+
+#define UB960_SR_CSI_PORT_SEL 0x32
+
+#define UB960_TR_CSI_CTL 0x33
+#define UB960_TR_CSI_CTL_CSI_CAL_EN BIT(6)
+#define UB960_TR_CSI_CTL_CSI_ENABLE BIT(0)
+
+#define UB960_TR_CSI_CTL2 0x34
+#define UB960_TR_CSI_STS 0x35
+#define UB960_TR_CSI_TX_ICR 0x36
+
+#define UB960_TR_CSI_TX_ISR 0x37
+#define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR BIT(3)
+#define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR BIT(1)
+
+#define UB960_TR_CSI_TEST_CTL 0x38
+#define UB960_TR_CSI_TEST_PATT_HI 0x39
+#define UB960_TR_CSI_TEST_PATT_LO 0x3A
+
+#define UB960_XR_AEQ_CTL1 0x42
+#define UB960_XR_AEQ_ERR_THOLD 0x43
+
+#define UB960_RR_BCC_ERR_CTL 0x46
+#define UB960_RR_BCC_STATUS 0x47
+
+#define UB960_RR_FPD3_CAP 0x4A
+#define UB960_RR_RAW_EMBED_DTYPE 0x4B
+
+#define UB960_SR_FPD3_PORT_SEL 0x4C
+
+#define UB960_RR_RX_PORT_STS1 0x4D
+#define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR BIT(5)
+#define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG BIT(4)
+#define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR BIT(3)
+#define UB960_RR_RX_PORT_STS1_PARITY_ERROR BIT(2)
+#define UB960_RR_RX_PORT_STS1_PORT_PASS BIT(1)
+#define UB960_RR_RX_PORT_STS1_LOCK_STS BIT(0)
+
+#define UB960_RR_RX_PORT_STS2 0x4E
+#define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE BIT(7)
+#define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG BIT(6)
+#define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR BIT(5)
+#define UB960_RR_RX_PORT_STS2_BUFFER_ERROR BIT(4)
+#define UB960_RR_RX_PORT_STS2_CSI_ERROR BIT(3)
+#define UB960_RR_RX_PORT_STS2_FREQ_STABLE BIT(2)
+#define UB960_RR_RX_PORT_STS2_CABLE_FAULT BIT(1)
+#define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG BIT(0)
+
+#define UB960_RR_RX_FREQ_HIGH 0x4F
+#define UB960_RR_RX_FREQ_LOW 0x50
+#define UB960_RR_SENSOR_STS_0 0x51
+#define UB960_RR_SENSOR_STS_1 0x52
+#define UB960_RR_SENSOR_STS_2 0x53
+#define UB960_RR_SENSOR_STS_3 0x54
+#define UB960_RR_RX_PAR_ERR_HI 0x55
+#define UB960_RR_RX_PAR_ERR_LO 0x56
+#define UB960_RR_BIST_ERR_COUNT 0x57
+
+#define UB960_RR_BCC_CONFIG 0x58
+#define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6)
+
+#define UB960_RR_DATAPATH_CTL1 0x59
+#define UB960_RR_DATAPATH_CTL2 0x5A
+#define UB960_RR_SER_ID 0x5B
+#define UB960_RR_SER_ALIAS_ID 0x5C
+
+/* For these two register sets: n < UB960_NUM_SLAVE_ALIASES */
+#define UB960_RR_SLAVE_ID(n) (0x5D + (n))
+#define UB960_RR_SLAVE_ALIAS(n) (0x65 + (n))
+
+#define UB960_RR_PORT_CONFIG 0x6D
+#define UB960_RR_BC_GPIO_CTL(n) (0x6E + (n)) /* n < 2 */
+#define UB960_RR_RAW10_ID 0x70
+#define UB960_RR_RAW12_ID 0x71
+#define UB960_RR_CSI_VC_MAP 0x72
+#define UB960_RR_LINE_COUNT_HI 0x73
+#define UB960_RR_LINE_COUNT_LO 0x74
+#define UB960_RR_LINE_LEN_1 0x75
+#define UB960_RR_LINE_LEN_0 0x76
+#define UB960_RR_FREQ_DET_CTL 0x77
+#define UB960_RR_MAILBOX_1 0x78
+#define UB960_RR_MAILBOX_2 0x79
+
+#define UB960_RR_CSI_RX_STS 0x7A
+#define UB960_RR_CSI_RX_STS_LENGTH_ERR BIT(3)
+#define UB960_RR_CSI_RX_STS_CKSUM_ERR BIT(2)
+#define UB960_RR_CSI_RX_STS_ECC2_ERR BIT(1)
+#define UB960_RR_CSI_RX_STS_ECC1_ERR BIT(0)
+
+#define UB960_RR_CSI_ERR_COUNTER 0x7B
+#define UB960_RR_PORT_CONFIG2 0x7C
+#define UB960_RR_PORT_PASS_CTL 0x7D
+#define UB960_RR_SEN_INT_RISE_CTL 0x7E
+#define UB960_RR_SEN_INT_FALL_CTL 0x7F
+
+#define UB960_XR_REFCLK_FREQ 0xA5
+
+#define UB960_SR_IND_ACC_CTL 0xB0
+#define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1)
+
+#define UB960_SR_IND_ACC_ADDR 0xB1
+#define UB960_SR_IND_ACC_DATA 0xB2
+#define UB960_SR_BIST_CONTROL 0xB3
+#define UB960_SR_MODE_IDX_STS 0xB8
+#define UB960_SR_LINK_ERROR_COUNT 0xB9
+#define UB960_SR_FPD3_ENC_CTL 0xBA
+#define UB960_SR_FV_MIN_TIME 0xBC
+#define UB960_SR_GPIO_PD_CTL 0xBE
+
+#define UB960_RR_PORT_DEBUG 0xD0
+#define UB960_RR_AEQ_CTL2 0xD2
+#define UB960_RR_AEQ_STATUS 0xD3
+#define UB960_RR_AEQ_BYPASS 0xD4
+#define UB960_RR_AEQ_MIN_MAX 0xD5
+#define UB960_RR_PORT_ICR_HI 0xD8
+#define UB960_RR_PORT_ICR_LO 0xD9
+#define UB960_RR_PORT_ISR_HI 0xDA
+#define UB960_RR_PORT_ISR_LO 0xDB
+#define UB960_RR_FC_GPIO_STS 0xDC
+#define UB960_RR_FC_GPIO_ICR 0xDD
+#define UB960_RR_SEN_INT_RISE_STS 0xDE
+#define UB960_RR_SEN_INT_FALL_STS 0xDF
+
+#define UB960_SR_FPD3_RX_ID0 0xF0
+#define UB960_SR_FPD3_RX_ID1 0xF1
+#define UB960_SR_FPD3_RX_ID2 0xF2
+#define UB960_SR_FPD3_RX_ID3 0xF3
+#define UB960_SR_FPD3_RX_ID4 0xF4
+#define UB960_SR_FPD3_RX_ID5 0xF5
+#define UB960_SR_I2C_RX_ID(n) (0xF8 + (n)) /* < UB960_FPD_RX_NPORTS */
+
+/* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
+
+#define UB960_IR_PGEN_CTL 0x01
+#define UB960_IR_PGEN_CTL_PGEN_ENABLE BIT(0)
+
+#define UB960_IR_PGEN_CFG 0x02
+#define UB960_IR_PGEN_CSI_DI 0x03
+#define UB960_IR_PGEN_LINE_SIZE1 0x04
+#define UB960_IR_PGEN_LINE_SIZE0 0x05
+#define UB960_IR_PGEN_BAR_SIZE1 0x06
+#define UB960_IR_PGEN_BAR_SIZE0 0x07
+#define UB960_IR_PGEN_ACT_LPF1 0x08
+#define UB960_IR_PGEN_ACT_LPF0 0x09
+#define UB960_IR_PGEN_TOT_LPF1 0x0A
+#define UB960_IR_PGEN_TOT_LPF0 0x0B
+#define UB960_IR_PGEN_LINE_PD1 0x0C
+#define UB960_IR_PGEN_LINE_PD0 0x0D
+#define UB960_IR_PGEN_VBP 0x0E
+#define UB960_IR_PGEN_VFP 0x0F
+#define UB960_IR_PGEN_COLOR(n)		(0x10 + (n)) /* n < 15 */
+
+struct ub960_hw_data {
+ u8 num_rxports;
+ u8 num_txports;
+};
+
+enum ub960_rxport_mode {
+ RXPORT_MODE_RAW10 = 0,
+ RXPORT_MODE_RAW12_HF = 1,
+ RXPORT_MODE_RAW12_LF = 2,
+ RXPORT_MODE_CSI2 = 3,
+};
+
+struct ub960_rxport {
+ struct ub960_data *priv;
+ u8 nport; /* RX port number, and index in priv->rxport[] */
+
+ struct v4l2_subdev *sd; /* Connected subdev */
+ struct fwnode_handle *fwnode;
+
+ enum ub960_rxport_mode mode;
+
+ struct device_node *remote_of_node; /* "remote-chip" OF node */
+ struct i2c_client *ser_client; /* remote serializer */
+ unsigned short ser_alias; /* ser i2c alias (lower 7 bits) */
+ bool locked;
+};
+
+struct ub960_asd {
+ struct v4l2_async_subdev base;
+ struct ub960_rxport *rxport;
+};
+
+static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_subdev *asd)
+{
+ return container_of(asd, struct ub960_asd, base);
+}
+
+struct ub960_txport {
+ u32 num_data_lanes;
+};
+
+struct ub960_vc_map {
+ u8 vc_map[UB960_MAX_RX_NPORTS];
+ bool port_en[UB960_MAX_RX_NPORTS];
+};
+
+struct ub960_data {
+ const struct ub960_hw_data *hw_data;
+ struct i2c_client *client; /* for shared local registers */
+ struct regmap *regmap;
+ struct gpio_desc *pd_gpio;
+ struct task_struct *kthread;
+ struct i2c_atr *atr;
+ struct ub960_rxport *rxports[UB960_MAX_RX_NPORTS];
+ struct ub960_txport *txports[UB960_MAX_TX_NPORTS];
+ struct ub960_vc_map vc_map;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[UB960_MAX_NPORTS];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_async_notifier notifier;
+
+ struct clk *refclk;
+ struct clk_hw *line_clk_hw;
+
+ u32 tx_data_rate; /* Nominal data rate (Gb/s) */
+ s64 tx_link_freq[1];
+
+ /* Address Translator alias-to-slave map table */
+ size_t atr_alias_num; /* Number of aliases configured */
+ u16 atr_alias_id[UB960_MAX_POOL_ALIASES]; /* 0 = no alias */
+ u16 atr_slave_id[UB960_MAX_POOL_ALIASES]; /* 0 = not in use */
+ struct mutex alias_table_lock;
+
+ u8 current_read_rxport;
+ u8 current_write_rxport_mask;
+
+ u8 current_read_csiport;
+ u8 current_write_csiport_mask;
+
+ bool streaming;
+};
+
+static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ub960_data, sd);
+}
+
+enum {
+ TEST_PATTERN_DISABLED = 0,
+ TEST_PATTERN_V_COLOR_BARS_1,
+ TEST_PATTERN_V_COLOR_BARS_2,
+ TEST_PATTERN_V_COLOR_BARS_4,
+ TEST_PATTERN_V_COLOR_BARS_8,
+};
+
+static const char * const ub960_tpg_qmenu[] = {
+ "Disabled",
+ "1 vertical color bar",
+ "2 vertical color bars",
+ "4 vertical color bars",
+ "8 vertical color bars",
+};
+
+static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
+{
+ return pad < priv->hw_data->num_rxports;
+}
+
+static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
+{
+ return pad >= priv->hw_data->num_rxports &&
+ pad < (priv->hw_data->num_rxports + priv->hw_data->num_txports);
+}
+
+struct ub960_format_info {
+ u32 code;
+ u32 bpp;
+ u8 datatype;
+ bool meta;
+};
+
+static const struct ub960_format_info ub960_formats[] = {
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = 0x1e, },
+
+ /* Legacy */
+ { .code = MEDIA_BUS_FMT_YUYV8_2X8, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_UYVY8_2X8, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_VYUY8_2X8, .bpp = 16, .datatype = 0x1e, },
+ { .code = MEDIA_BUS_FMT_YVYU8_2X8, .bpp = 16, .datatype = 0x1e, },
+
+ /* RAW */
+ { .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = 0x2c, },
+ { .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = 0x2c, },
+};
+
+static const struct ub960_format_info *ub960_find_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ub960_formats); ++i) {
+ if (ub960_formats[i].code == code)
+ return &ub960_formats[i];
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Basic device access
+ */
+
+static int ub960_read(const struct ub960_data *priv, u8 reg, u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ *val = v;
+
+ return 0;
+}
+
+static int ub960_write(const struct ub960_data *priv, u8 reg, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret < 0)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
+static int ub960_update_bits_shared(const struct ub960_data *priv, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret < 0)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
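+/*
+ * The FPD-Link RX registers (UB960_RR_*) are paged: UB960_SR_FPD3_PORT_SEL
+ * selects the port targeted by reads (bits 7:4) and holds a write-enable
+ * mask of the ports targeted by writes (bits 3:0). The helpers below cache
+ * the current selection so repeated accesses to one port skip the select
+ * write.
+ */
+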
+static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ if (priv->current_read_rxport == nport &&
+ priv->current_write_rxport_mask == BIT(nport))
+ return 0;
+
+ ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
+ (nport << 4) | (1 << nport));
+ if (ret) {
+ dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
+ nport, ret);
+ return ret;
+ }
+
+ priv->current_read_rxport = nport;
+ priv->current_write_rxport_mask = BIT(nport);
+
+ return 0;
+}
+
+static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+	ret = ub960_rxport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ *val = v;
+
+ return 0;
+}
+
+static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+	ret = ub960_rxport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
+static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+	ret = ub960_rxport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+
+ if (ret)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
+static int ub960_csiport_select(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ if (priv->current_read_csiport == nport &&
+ priv->current_write_csiport_mask == BIT(nport))
+ return 0;
+
+ ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
+ (nport << 4) | (1 << nport));
+ if (ret) {
+ dev_err(dev, "%s: cannot select csi port %d (%d)!\n", __func__,
+ nport, ret);
+ return ret;
+ }
+
+ priv->current_read_csiport = nport;
+ priv->current_write_csiport_mask = BIT(nport);
+
+ return 0;
+}
+
+static int ub960_csiport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+	ret = ub960_csiport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ return ret;
+ }
+
+ *val = v;
+
+ return 0;
+}
+
+static int ub960_csiport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+	ret = ub960_csiport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
+static int __maybe_unused
+ub960_csiport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+	ret = ub960_csiport_select(priv, nport);
+	if (ret)
+		return ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+
+ if (ret)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ return ret;
+}
+
+static int ub960_write_ind8(const struct ub960_data *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = ub960_write(priv, UB960_SR_IND_ACC_ADDR, reg);
+ if (!ret)
+ ret = ub960_write(priv, UB960_SR_IND_ACC_DATA, val);
+ return ret;
+}
+
+/* Assumes IA_AUTO_INC is set in UB960_SR_IND_ACC_CTL */
+static int ub960_write_ind16(const struct ub960_data *priv, u8 reg, u16 val)
+{
+ int ret;
+
+ ret = ub960_write(priv, UB960_SR_IND_ACC_ADDR, reg);
+ if (!ret)
+ ret = ub960_write(priv, UB960_SR_IND_ACC_DATA, val >> 8);
+ if (!ret)
+ ret = ub960_write(priv, UB960_SR_IND_ACC_DATA, val & 0xff);
+ return ret;
+}
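+
+/*
+ * Example (illustrative): the 16-bit pattern generator values span two
+ * consecutive indirect registers, so
+ *	ub960_write_ind16(priv, UB960_IR_PGEN_LINE_SIZE1, 3840);
+ * writes 0x0f to UB960_IR_PGEN_LINE_SIZE1 (0x04) and 0x00 to
+ * UB960_IR_PGEN_LINE_SIZE0 (0x05) via the auto-incremented data register.
+ */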
+
+/* -----------------------------------------------------------------------------
+ * I2C-ATR (address translator)
+ */
+
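+/*
+ * attach_client() claims a free alias from the pool and programs a
+ * SLAVE_ID/SLAVE_ALIAS register pair on the RX port; detach_client()
+ * clears the pair and returns the alias to the pool.
+ */
+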
+static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_board_info *info,
+ const struct i2c_client *client,
+ u16 *alias_id)
+{
+ struct ub960_data *priv = i2c_atr_get_clientdata(atr);
+ struct ub960_rxport *rxport = priv->rxports[chan_id];
+ struct device *dev = &priv->client->dev;
+ unsigned int reg_idx;
+ unsigned int pool_idx;
+ u16 alias = 0;
+ int ret = 0;
+
+ dev_dbg(dev, "rx%d: %s\n", chan_id, __func__);
+
+ mutex_lock(&priv->alias_table_lock);
+
+ /* Find unused alias in table */
+
+ for (pool_idx = 0; pool_idx < priv->atr_alias_num; pool_idx++)
+ if (priv->atr_slave_id[pool_idx] == 0)
+ break;
+
+ if (pool_idx == priv->atr_alias_num) {
+ dev_warn(dev, "rx%d: alias pool exhausted\n", rxport->nport);
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ alias = priv->atr_alias_id[pool_idx];
+
+ /* Find first unused alias register */
+
+ for (reg_idx = 0; reg_idx < UB960_NUM_SLAVE_ALIASES; reg_idx++) {
+ u8 regval;
+
+ ret = ub960_rxport_read(priv, chan_id,
+ UB960_RR_SLAVE_ALIAS(reg_idx), &regval);
+ if (!ret && regval == 0)
+ break;
+ }
+
+ if (reg_idx == UB960_NUM_SLAVE_ALIASES) {
+ dev_warn(dev, "rx%d: all aliases in use\n", rxport->nport);
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ /* Map alias to slave */
+
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
+ client->addr << 1);
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
+ alias << 1);
+
+ priv->atr_slave_id[pool_idx] = client->addr;
+
+ *alias_id = alias; /* tell the atr which alias we chose */
+
+ dev_dbg(dev, "rx%d: client 0x%02x mapped at alias 0x%02x (%s)\n",
+ rxport->nport, client->addr, alias, client->name);
+
+out:
+ mutex_unlock(&priv->alias_table_lock);
+ return ret;
+}
+
+static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client)
+{
+ struct ub960_data *priv = i2c_atr_get_clientdata(atr);
+ struct ub960_rxport *rxport = priv->rxports[chan_id];
+ struct device *dev = &priv->client->dev;
+ unsigned int reg_idx;
+ unsigned int pool_idx;
+ u16 alias = 0;
+
+ mutex_lock(&priv->alias_table_lock);
+
+ /* Find alias mapped to this client */
+
+ for (pool_idx = 0; pool_idx < priv->atr_alias_num; pool_idx++)
+ if (priv->atr_slave_id[pool_idx] == client->addr)
+ break;
+
+ if (pool_idx == priv->atr_alias_num) {
+ dev_err(dev, "rx%d: client 0x%02x is not mapped!\n",
+ rxport->nport, client->addr);
+ goto out;
+ }
+
+ alias = priv->atr_alias_id[pool_idx];
+
+ /* Find alias register used for this client */
+
+ for (reg_idx = 0; reg_idx < UB960_NUM_SLAVE_ALIASES; reg_idx++) {
+ u8 regval;
+ int ret;
+
+ ret = ub960_rxport_read(priv, chan_id,
+ UB960_RR_SLAVE_ALIAS(reg_idx), &regval);
+ if (!ret && regval == (alias << 1))
+ break;
+ }
+
+ if (reg_idx == UB960_NUM_SLAVE_ALIASES) {
+ dev_err(dev,
+ "rx%d: cannot find alias 0x%02x reg (client 0x%02x)!\n",
+ rxport->nport, alias, client->addr);
+ goto out;
+ }
+
+ /* Unmap */
+
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0);
+ priv->atr_slave_id[pool_idx] = 0;
+
+ dev_dbg(dev, "rx%d: client 0x%02x unmapped from alias 0x%02x (%s)\n",
+ rxport->nport, client->addr, alias, client->name);
+
+out:
+ mutex_unlock(&priv->alias_table_lock);
+}
+
+static const struct i2c_atr_ops ub960_atr_ops = {
+ .attach_client = ub960_atr_attach_client,
+ .detach_client = ub960_atr_detach_client,
+};
+
+/* -----------------------------------------------------------------------------
+ * CSI ports
+ */
+
+static int ub960_csiport_probe_one(struct ub960_data *priv,
+ const struct device_node *np,
+ u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ struct ub960_txport *txport;
+ int ret;
+
+ if (priv->txports[nport]) {
+ dev_err(dev, "OF: %s: duplicate tx port\n",
+ of_node_full_name(np));
+ return -EADDRINUSE;
+ }
+
+ txport = kzalloc(sizeof(*txport), GFP_KERNEL);
+ if (!txport)
+ return -ENOMEM;
+
+ priv->txports[nport] = txport;
+
+	ret = of_property_count_u32_elems(np, "data-lanes");
+	if (ret <= 0) {
+		dev_err(dev, "OF: %s: failed to parse data-lanes: %d\n",
+			of_node_full_name(np), ret);
+		if (ret == 0)
+			ret = -EINVAL;
+		goto err_free_txport;
+	}
+
+ txport->num_data_lanes = ret;
+
+ return 0;
+
+err_free_txport:
+	priv->txports[nport] = NULL;
+	kfree(txport);
+
+ return ret;
+}
+
+static void ub960_txport_remove_one(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_txport *txport = priv->txports[nport];
+
+ kfree(txport);
+ priv->txports[nport] = NULL;
+}
+
+static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 csi_tx_isr;
+ int ret;
+
+ ret = ub960_csiport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
+
+ if (!ret) {
+ if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
+ dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
+
+ if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
+ dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * RX ports
+ */
+
+/*
+ * Instantiate serializer and i2c adapter for a locked remote end.
+ *
+ * Must be called with priv->alias_table_lock not held! The added i2c adapter
+ * will probe new slaves, which can request i2c transfers, ending up in
+ * calling ub960_atr_attach_client() where the lock is taken.
+ */
+static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ struct device *dev = &priv->client->dev;
+ struct i2c_board_info ser_info = {
+ .of_node = rxport->remote_of_node,
+ };
+
+ /*
+ * Adding the serializer under rxport->adap would be cleaner, but it
+ * would need tweaks to bypass the alias table. Adding to the
+ * upstream adapter is way simpler.
+ */
+ ser_info.addr = rxport->ser_alias;
+ rxport->ser_client =
+ i2c_new_client_device(priv->client->adapter, &ser_info);
+ if (!rxport->ser_client) {
+ dev_err(dev, "rx%d: cannot add %s i2c device", nport,
+ ser_info.type);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "rx%d: remote serializer at alias 0x%02x\n", nport,
+ rxport->ser_client->addr);
+
+ return 0;
+}
+
+static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (rxport->ser_client) {
+ i2c_unregister_device(rxport->ser_client);
+ rxport->ser_client = NULL;
+ }
+}
+
+static int ub960_rxport_probe_serializers(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned long timeout;
+ u8 nport;
+ unsigned int missing = 0;
+
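+	/* Poll for up to 750 ms for the remote serializers to indicate lock */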
+ timeout = jiffies + msecs_to_jiffies(750);
+
+ while (time_before(jiffies, timeout)) {
+ missing = 0;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ u8 rx_port_sts1;
+ int ret;
+
+ /* No serializer in DT? */
+ if (!rxport)
+ continue;
+
+ /* Serializer already added? */
+ if (rxport->ser_client)
+ continue;
+
+ ret = ub960_rxport_read(priv, nport,
+ UB960_RR_RX_PORT_STS1,
+ &rx_port_sts1);
+ if (ret)
+ return ret;
+
+ /* Serializer not locked yet? */
+ if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
+ missing++;
+ continue;
+ }
+
+ ret = ub960_rxport_add_serializer(priv, nport);
+ if (ret)
+ return ret;
+
+ rxport->locked = true;
+ }
+
+ if (missing == 0)
+ return 0;
+
+ usleep_range(500, 5000);
+ }
+
+ dev_err(dev, "timeout, continuing with %u missing serializer(s)\n",
+ missing);
+
+ return 0;
+}
+
+/*
+ * Return the local alias for a given remote serializer.
+ */
+static int ub960_of_get_reg(struct device_node *np, const char *serializer_name)
+{
+ u32 alias;
+ int ret;
+ int idx;
+
+ if (!np)
+ return -ENODEV;
+
+ idx = of_property_match_string(np, "reg-names", serializer_name);
+ if (idx < 0)
+ return idx;
+
+ ret = of_property_read_u32_index(np, "reg", idx, &alias);
+ if (ret)
+ return ret;
+
+ return alias;
+}
+
+static int ub960_rxport_probe_one(struct ub960_data *priv,
+ const struct device_node *np,
+ u8 nport)
+{
+ const char *ser_names[UB960_MAX_RX_NPORTS] = { "ser0", "ser1", "ser2",
+ "ser3" };
+ struct device *dev = &priv->client->dev;
+ struct ub960_rxport *rxport;
+ u32 bc_freq, bc_freq_val;
+ int ret;
+ u32 mode;
+
+ if (priv->rxports[nport]) {
+ dev_err(dev, "OF: %s: reg value %d is duplicated\n",
+ of_node_full_name(np), nport);
+ return -EADDRINUSE;
+ }
+
+ rxport = kzalloc(sizeof(*rxport), GFP_KERNEL);
+ if (!rxport)
+ return -ENOMEM;
+
+ priv->rxports[nport] = rxport;
+
+ rxport->nport = nport;
+ rxport->priv = priv;
+
+ ret = ub960_of_get_reg(priv->client->dev.of_node, ser_names[nport]);
+ if (ret < 0)
+ goto err_free_rxport;
+
+ rxport->ser_alias = ret;
+
+ ret = of_property_read_u32(np, "mode", &mode);
+ if (ret < 0) {
+ dev_err(dev, "Missing RX port mode: %d\n", ret);
+ goto err_free_rxport;
+ }
+
+	if (mode > RXPORT_MODE_CSI2) {
+		dev_err(dev, "Bad RX port mode %u\n", mode);
+		ret = -EINVAL;
+		goto err_free_rxport;
+	}
+
+ rxport->mode = mode;
+
+ ret = of_property_read_u32(np, "bc-freq", &bc_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read BC freq for port %u: %d\n", nport,
+ ret);
+ goto err_free_rxport;
+ }
+
+	switch (bc_freq) {
+	case 2500000:
+		bc_freq_val = 0;
+		break;
+	case 10000000:
+		bc_freq_val = 2;
+		break;
+	case 50000000:
+		bc_freq_val = 6;
+		break;
+	default:
+		dev_err(dev, "Bad BC freq %u\n", bc_freq);
+		ret = -EINVAL;
+		goto err_free_rxport;
+	}
+
+ rxport->remote_of_node = of_get_child_by_name(np, "remote-chip");
+ if (!rxport->remote_of_node) {
+ dev_err(dev, "OF: %s: missing remote-chip child\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ goto err_free_rxport;
+ }
+
+ rxport->fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(np));
+ if (!rxport->fwnode) {
+ dev_err(dev, "No remote endpoint for rxport%d\n", nport);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ /*
+ * Back channel frequency select.
+ * Override FREQ_SELECT from the strap.
+ * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
+ * 2 - 10 Mbps
+ * 6 - 50 Mbps (DS90UB953-Q1)
+ *
+ * Note that changing this setting will result in some errors on the back
+ * channel for a short period of time.
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
+ bc_freq_val);
+
+ switch (rxport->mode) {
+ default:
+ WARN_ON(true);
+ fallthrough;
+
+ case RXPORT_MODE_RAW10:
+ /* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
+ 0x3);
+
+ /*
+ * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
+ * 0b10 : 8-bit processing using upper 8 bits
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
+ 0x3 << 6, 0x2 << 6);
+
+ break;
+
+ case RXPORT_MODE_CSI2:
+ /* CSI-2 Mode (DS90UB953-Q1 compatible) */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
+ 0x0);
+
+ break;
+ }
+
+ /* LV_POLARITY & FV_POLARITY */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3, 0x1);
+
+ /* Enable all interrupt sources from this port */
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+
+ /* Enable I2C_PASS_THROUGH */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+
+ /* Enable I2C communication to the serializer via the alias addr */
+ ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
+ rxport->ser_alias << 1);
+
+ dev_dbg(dev, "ser%d: at alias 0x%02x\n", nport, rxport->ser_alias);
+
+ ret = i2c_atr_add_adapter(priv->atr, nport);
+ if (ret) {
+ dev_err(dev, "rx%d: cannot add adapter", nport);
+ goto err_node_put;
+ }
+
+ return 0;
+
+err_node_put:
+ of_node_put(rxport->remote_of_node);
+err_free_rxport:
+ priv->rxports[nport] = NULL;
+ kfree(rxport);
+ return ret;
+}
+
+static void ub960_rxport_remove_one(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ i2c_atr_del_adapter(priv->atr, nport);
+ ub960_rxport_remove_serializer(priv, nport);
+ of_node_put(rxport->remote_of_node);
+ kfree(rxport);
+}
+
+static int ub960_atr_probe(struct ub960_data *priv)
+{
+ struct i2c_adapter *parent_adap = priv->client->adapter;
+ struct device *dev = &priv->client->dev;
+
+ priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
+ priv->hw_data->num_rxports);
+ if (IS_ERR(priv->atr))
+ return PTR_ERR(priv->atr);
+
+ i2c_atr_set_clientdata(priv->atr, priv);
+
+ return 0;
+}
+
+static void ub960_atr_remove(struct ub960_data *priv)
+{
+ i2c_atr_delete(priv->atr);
+ priv->atr = NULL;
+}
+
+static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 rx_port_sts1;
+ u8 rx_port_sts2;
+ u8 csi_rx_sts;
+ u8 bcc_sts;
+	int ret;
+
+	/* Read interrupts (also clears most of them) */
+	ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
+				&rx_port_sts1);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
+ &rx_port_sts2);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS,
+ &csi_rx_sts);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS,
+ &bcc_sts);
+
+ if (ret)
+ return;
+
+ dev_dbg(dev, "Handle RX%d events: STS: %x, %x, %x, BCC %x\n", nport,
+ rx_port_sts1, rx_port_sts2, csi_rx_sts, bcc_sts);
+
+	if (rx_port_sts1 & (UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR |
+			    UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR |
+			    UB960_RR_RX_PORT_STS1_PARITY_ERROR))
+		dev_err(dev, "RX%u STS1 error: 0x%02x\n", nport, rx_port_sts1);
+
+	if (rx_port_sts2 & (UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR |
+			    UB960_RR_RX_PORT_STS2_BUFFER_ERROR |
+			    UB960_RR_RX_PORT_STS2_CSI_ERROR |
+			    UB960_RR_RX_PORT_STS2_CABLE_FAULT))
+		dev_err(dev, "RX%u STS2 error: 0x%02x\n", nport, rx_port_sts2);
+
+	if (csi_rx_sts)
+		dev_err(dev, "RX%u CSI error: 0x%02x\n", nport, csi_rx_sts);
+
+	if (bcc_sts)
+		dev_err(dev, "RX%u BCC error: 0x%02x\n", nport, bcc_sts);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2
+ */
+
+static int ub960_start_streaming(struct ub960_data *priv)
+{
+ const struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev_state *state;
+ unsigned int i;
+ u8 nport;
+ int ret;
+ u32 csi_ctl;
+ u32 speed_select;
+ u32 fwd_ctl;
+ struct {
+ u32 num_streams;
+ u8 pixel_dt;
+ u8 meta_dt;
+ u32 meta_lines;
+ u32 tx_port;
+ } rx_data[UB960_MAX_RX_NPORTS] = { 0 };
+
+ ret = 0;
+
+ state = v4l2_subdev_lock_active_state(&priv->sd);
+
+ routing = &state->routing;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+ u32 port = route->sink_pad;
+ struct ub960_rxport *rxport = priv->rxports[port];
+ struct v4l2_mbus_framefmt *fmt;
+ const struct ub960_format_info *ub960_fmt;
+
+ if (!rxport)
+ continue;
+
+ rx_data[port].tx_port =
+ route->source_pad - priv->hw_data->num_rxports;
+
+ /* For the rest, we are only interested in parallel busses */
+ if (rxport->mode == RXPORT_MODE_CSI2)
+ continue;
+
+ rx_data[port].num_streams++;
+
+ if (rx_data[port].num_streams > 2) {
+ ret = -EPIPE;
+ break;
+ }
+
+ fmt = v4l2_state_get_stream_format(state, port,
+ route->sink_stream);
+ if (!fmt) {
+ ret = -EPIPE;
+ break;
+ }
+
+ ub960_fmt = ub960_find_format(fmt->code);
+ if (!ub960_fmt) {
+ ret = -EPIPE;
+ break;
+ }
+
+ if (ub960_fmt->meta) {
+ if (fmt->height > 3) {
+ dev_err(&priv->client->dev,
+ "Unsupported metadata height %u\n",
+ fmt->height);
+ ret = -EPIPE;
+ break;
+ }
+
+ rx_data[port].meta_dt = ub960_fmt->datatype;
+ rx_data[port].meta_lines = fmt->height;
+ } else {
+ rx_data[port].pixel_dt = ub960_fmt->datatype;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ if (ret)
+ return ret;
+
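+	/*
+	 * CSI_PLL_CTL speed select, from the nominal TX data rate:
+	 * 0 = 1.6 Gbps (default), 1 = 1.2 Gbps, 2 = 800 Mbps, 3 = 400 Mbps.
+	 */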
+ switch (priv->tx_data_rate) {
+ case 1600000000:
+ default:
+ speed_select = 0;
+ break;
+ case 1200000000:
+ speed_select = 1;
+ break;
+ case 800000000:
+ speed_select = 2;
+ break;
+ case 400000000:
+ speed_select = 3;
+ break;
+ }
+
+ ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select);
+
+ for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ struct ub960_txport *txport = priv->txports[nport];
+
+ if (!txport)
+ continue;
+
+ csi_ctl = UB960_TR_CSI_CTL_CSI_ENABLE;
+
+ /*
+ * From the datasheet: "initial CSI Skew-Calibration
+ * sequence [...] should be set when operating at 1.6 Gbps"
+ */
+ if (speed_select == 0)
+ csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;
+
+ csi_ctl |= (4 - txport->num_data_lanes) << 4;
+
+ ub960_csiport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl);
+ }
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->locked)
+ continue;
+
+ switch (rxport->mode) {
+ default:
+ WARN_ON(true);
+ fallthrough;
+
+ case RXPORT_MODE_RAW10:
+ /* VC=nport */
+ ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
+ rx_data[nport].pixel_dt |
+ (nport << 6));
+
+ ub960_rxport_write(priv, rxport->nport,
+ UB960_RR_RAW_EMBED_DTYPE,
+ (rx_data[nport].meta_lines << 6) |
+ rx_data[nport].meta_dt);
+
+ break;
+
+ case RXPORT_MODE_CSI2:
+ if (priv->vc_map.port_en[nport]) {
+ /* Map VCs from this port */
+ ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
+ priv->vc_map.vc_map[nport]);
+ } else {
+ /* Disable port */
+ ub960_update_bits_shared(priv, UB960_SR_RX_PORT_CTL,
+ BIT(nport), 0);
+ }
+
+ break;
+ }
+ }
+
+ /* Start all cameras */
+
+ priv->streaming = true;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->locked)
+ continue;
+
+ ret = v4l2_subdev_call(rxport->sd, video, s_stream, 1);
+ if (ret) {
+ for (; nport > 0; --nport) {
+ rxport = priv->rxports[nport - 1];
+				if (!rxport || !rxport->locked)
+					continue;
+
+ v4l2_subdev_call(rxport->sd, video, s_stream,
+ 0);
+ }
+
+ priv->streaming = false;
+
+ return ret;
+ }
+ }
+
+ /* Forwarding */
+
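+	/*
+	 * FWD_CTL1: a set bit in 7:4 (UB960_SR_FWD_CTL1_PORT_DIS(n)) disables
+	 * forwarding for RX port n; a set bit in 3:0 steers RX port n to CSI
+	 * TX port 1 instead of the default TX port 0.
+	 */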
+ fwd_ctl = 0;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->locked) {
+			fwd_ctl |= UB960_SR_FWD_CTL1_PORT_DIS(nport);
+ continue;
+ }
+
+ if (rx_data[nport].tx_port == 1)
+ fwd_ctl |= BIT(nport); /* forward to TX1 */
+ }
+
+ ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);
+
+ return 0;
+}
+
+static int ub960_stop_streaming(struct ub960_data *priv)
+{
+ unsigned int i;
+
+ /* Disable forwarding */
+ ub960_write(priv, UB960_SR_FWD_CTL1,
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3)) << 4);
+
+ /* Stop all cameras */
+ for (i = 0; i < priv->hw_data->num_rxports; ++i) {
+ struct ub960_rxport *rxport = priv->rxports[i];
+
+ if (!rxport || !rxport->locked)
+ continue;
+
+ v4l2_subdev_call(rxport->sd, video, s_stream, 0);
+ }
+
+ for (i = 0; i < priv->hw_data->num_txports; i++) {
+ struct ub960_txport *txport = priv->txports[i];
+
+ if (!txport)
+ continue;
+
+ ub960_csiport_write(priv, i, UB960_TR_CSI_CTL, 0);
+ }
+
+ priv->streaming = false;
+
+ return 0;
+}
+
+static int ub960_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+
+ if (enable)
+ return ub960_start_streaming(priv);
+ else
+ return ub960_stop_streaming(priv);
+}
+
+static const struct v4l2_subdev_video_ops ub960_video_ops = {
+ .s_stream = ub960_s_stream,
+};
+
+static int _ub960_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ /*
+	 * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX routes
+	 * until the frame descriptor is dynamically allocated.
+ */
+
+ if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -EINVAL;
+
+ ret = v4l2_routing_simple_verify(routing);
+ if (ret)
+ return ret;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+
+ v4l2_subdev_unlock_state(state);
+
+	return ret;
+}
+
+static int ub960_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ return _ub960_set_routing(sd, state, routing);
+}
+
+static int ub960_get_source_frame_desc(struct ub960_data *priv,
+ struct v4l2_mbus_frame_desc *desc,
+ u8 nport)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
+
+ pad = media_entity_remote_pad(&priv->pads[nport]);
+ if (!pad)
+ return -EPIPE;
+
+ sd = priv->rxports[nport]->sd;
+
+ ret = v4l2_subdev_call(sd, pad, get_frame_desc, pad->index,
+ desc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline u8 ub960_get_output_vc(u8 map, u8 input_vc)
+{
+	return (map >> (2 * input_vc)) & 0x03;
+}
+
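+/*
+ * Assign distinct output virtual channels across the RX ports. Each map
+ * byte packs four 2-bit fields; field n holds the output VC for input VC n.
+ * Worked example (illustrative): with two ports each sending on input VC 0,
+ * port 0 gets map 0x00 (VC 0 -> VC 0) and port 1 gets map 0x55
+ * (VC 0 -> VC 1), so the combined CSI-2 output carries unique VCs.
+ */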
+static void ub960_map_virtual_channels(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct ub960_vc_map vc_map = {.vc_map = {0x00}, .port_en = {false}};
+ u8 nport, available_vc = 0;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ struct v4l2_mbus_frame_desc source_fd;
+ bool used_vc[UB960_MAX_VC] = {false};
+ u8 vc, cur_vc = available_vc;
+ int j, ret;
+ u8 map;
+
+ ret = ub960_get_source_frame_desc(priv, &source_fd, nport);
+ /* Mark channels used in source in used_vc[] */
+ if (!ret) {
+			for (j = 0; j < source_fd.num_entries; ++j) {
+				u8 source_vc = source_fd.entry[j].bus.csi2.vc;
+
+				if (source_vc < UB960_MAX_VC)
+					used_vc[source_vc] = true;
+			}
+ } else if (ret == -ENOIOCTLCMD) {
+			/* Assume VC 0 is used if the sensor driver provides no info */
+ used_vc[0] = true;
+ } else {
+ continue;
+ }
+
+ /* Start with all channels mapped to first free output */
+ map = (cur_vc << 6) | (cur_vc << 4) | (cur_vc << 2) |
+ (cur_vc << 0);
+
+ /* Map actually used to channels to distinct free outputs */
+ for (vc = 0; vc < UB960_MAX_VC; ++vc) {
+ if (used_vc[vc]) {
+ map &= ~(0x03 << (2*vc));
+ map |= (cur_vc << (2*vc));
+ ++cur_vc;
+ }
+ }
+
+ /* Don't enable port if we ran out of available channels */
+ if (cur_vc > UB960_MAX_VC) {
+ dev_err(dev,
+ "No VCs available, RX ports %d will be disabled\n",
+ nport);
+ continue;
+ }
+
+ /* Enable port and update map */
+ vc_map.vc_map[nport] = map;
+ vc_map.port_en[nport] = true;
+ available_vc = cur_vc;
+ dev_dbg(dev, "%s: VC map for port %d is 0x%02x",
+ __func__, nport, map);
+ }
+
+	priv->vc_map = vc_map;
+}
+
+static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ const struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+ unsigned int i;
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "%s for pad %d\n", __func__, pad);
+
+ if (!ub960_pad_is_source(priv, pad))
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_active_state(&priv->sd);
+
+ routing = &state->routing;
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ ub960_map_virtual_channels(priv);
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routing->routes[i];
+ struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
+ struct v4l2_mbus_frame_desc source_fd;
+ unsigned int j;
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (route->source_pad != pad)
+ continue;
+
+ ret = ub960_get_source_frame_desc(priv, &source_fd,
+ route->sink_pad);
+ if (ret) {
+ dev_err(dev,
+ "Failed to get source frame desc for port %u\n",
+ route->sink_pad);
+ goto out;
+ }
+
+ for (j = 0; j < source_fd.num_entries; ++j)
+ if (source_fd.entry[j].stream == route->sink_stream) {
+ source_entry = &source_fd.entry[j];
+ break;
+ }
+
+ if (!source_entry) {
+ dev_err(dev,
+ "Failed to find stream from source frame desc\n");
+ ret = -EPIPE;
+ goto out;
+ }
+
+ fd->entry[fd->num_entries].stream = route->source_stream;
+
+ fd->entry[fd->num_entries].flags =
+ V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
+ fd->entry[fd->num_entries].length = source_entry->length;
+ fd->entry[fd->num_entries].pixelcode =
+ source_entry->pixelcode;
+
+ fd->entry[fd->num_entries].bus.csi2.vc =
+ ub960_get_output_vc(priv->vc_map.vc_map[route->sink_pad],
+ source_entry->bus.csi2.vc);
+ dev_dbg(dev, "Mapping sink %d/%d to output VC %d",
+ route->sink_pad, route->sink_stream,
+ fd->entry[fd->num_entries].bus.csi2.vc);
+
+ if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ source_entry->bus.csi2.dt;
+ } else {
+ const struct ub960_format_info *ub960_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_state_get_stream_format(
+ state, pad, route->source_stream);
+
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ub960_fmt = ub960_find_format(fmt->code);
+ if (!ub960_fmt) {
+ dev_err(dev, "Unable to find format\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ ub960_fmt->datatype;
+ }
+
+ fd->num_entries++;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub960_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ /* No transcoding, source and sink formats must match. */
+ if (ub960_pad_is_source(priv, format->pad))
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ /* TODO: implement fmt validation */
+
+ v4l2_subdev_lock_state(state);
+
+ fmt = v4l2_state_get_stream_format(state, format->pad, format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+ fmt = v4l2_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub960_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+
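+ /*
+ * Default routing: RX port 0, stream 0 to the first CSI-2 TX
+ * pad (source pads start at index num_rxports).
+ */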
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = priv->hw_data->num_rxports,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ return _ub960_set_routing(sd, state, &routing);
+}
+
+static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
+ .set_routing = ub960_set_routing,
+ .get_frame_desc = ub960_get_frame_desc,
+
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ub960_set_fmt,
+
+ .init_cfg = ub960_init_cfg,
+};
+
+static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ub960_subdev_ops = {
+ .core = &ub960_subdev_core_ops,
+ .video = &ub960_video_ops,
+ .pad = &ub960_pad_ops,
+};
+
+static const struct media_entity_operations ub960_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = v4l2_subdev_has_route
+};
+
+static void ub960_enable_tpg(struct ub960_data *priv, int tpg_num)
+{
+ /*
+ * Note: no need to write UB960_REG_IND_ACC_CTL: the only indirect
+ * register target we use is "CSI-2 Pattern Generator & Timing
+ * Registers", which is the default one.
+ */
+
+ /*
+ * TPG can only provide a single stream per CSI TX port. If
+ * multiple streams are currently enabled, only the first
+ * one will use the TPG, other streams will be halted.
+ */
+
+ struct v4l2_mbus_framefmt *fmt;
+ u8 vbp, vfp;
+ u16 blank_lines;
+ u16 width;
+ u16 height;
+
+ u16 bytespp = 2; /* For MEDIA_BUS_FMT_UYVY8_1X16 */
+ u8 cbars_idx = tpg_num - TEST_PATTERN_V_COLOR_BARS_1;
+ u8 num_cbars = 1 << cbars_idx;
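+ /* 1, 2, 4 or 8 bars, assuming the test pattern menu lists them in that order */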
+
+ u16 line_size; /* Line size [bytes] */
+ u16 bar_size; /* cbar size [bytes] */
+ u16 act_lpf; /* active lines/frame */
+ u16 tot_lpf; /* tot lines/frame */
+ u16 line_pd; /* Line period in 10-ns units */
+
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_active_state(&priv->sd);
+
+ vbp = 33;
+ vfp = 10;
+ blank_lines = vbp + vfp + 2; /* total blanking lines */
+
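+ /*
+ * Pad 4 is the first source (CSI-2 TX) pad on the UB960, whose
+ * pads 0-3 are the FPD-Link RX sinks.
+ */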
+ fmt = v4l2_state_get_stream_format(state, 4, 0);
+
+ width = fmt->width;
+ height = fmt->height;
+
+ line_size = width * bytespp;
+ bar_size = line_size / num_cbars;
+ act_lpf = height;
+ tot_lpf = act_lpf + blank_lines;
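+ /*
+ * Line period: one frame at the fixed 60 fps used here, spread
+ * over tot_lpf lines, in 10 ns units (1 s == 10^8 * 10 ns).
+ */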
+ line_pd = 100000000 / 60 / tot_lpf;
+
+ /* Disable forwarding from FPD-3 RX ports */
+ ub960_write(priv, UB960_SR_FWD_CTL1,
+ UB960_SR_FWD_CTL1_PORT_DIS(0) |
+ UB960_SR_FWD_CTL1_PORT_DIS(1));
+
+ /* Access Indirect Pattern Gen */
+ ub960_write(priv, UB960_SR_IND_ACC_CTL,
+ UB960_SR_IND_ACC_CTL_IA_AUTO_INC | 0);
+
+ ub960_write_ind8(priv, UB960_IR_PGEN_CTL,
+ UB960_IR_PGEN_CTL_PGEN_ENABLE);
+
+ /* YUV422 8bit: 2 bytes/block, CSI-2 data type 0x1e */
+ ub960_write_ind8(priv, UB960_IR_PGEN_CFG, cbars_idx << 4 | 0x2);
+ ub960_write_ind8(priv, UB960_IR_PGEN_CSI_DI, 0x1e);
+
+ ub960_write_ind16(priv, UB960_IR_PGEN_LINE_SIZE1, line_size);
+ ub960_write_ind16(priv, UB960_IR_PGEN_BAR_SIZE1, bar_size);
+ ub960_write_ind16(priv, UB960_IR_PGEN_ACT_LPF1, act_lpf);
+ ub960_write_ind16(priv, UB960_IR_PGEN_TOT_LPF1, tot_lpf);
+ ub960_write_ind16(priv, UB960_IR_PGEN_LINE_PD1, line_pd);
+ ub960_write_ind8(priv, UB960_IR_PGEN_VBP, vbp);
+ ub960_write_ind8(priv, UB960_IR_PGEN_VFP, vfp);
+
+ v4l2_subdev_unlock_state(state);
+}
+
+static void ub960_disable_tpg(struct ub960_data *priv)
+{
+ /* TPG off, enable forwarding from FPD-3 RX ports */
+ ub960_write(priv, UB960_SR_FWD_CTL1, 0x00);
+
+ ub960_write_ind8(priv, UB960_IR_PGEN_CTL, 0x00);
+}
+
+static int ub960_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ub960_data *priv =
+ container_of(ctrl->handler, struct ub960_data, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ if (ctrl->val == 0)
+ ub960_disable_tpg(priv);
+ else
+ ub960_enable_tpg(priv, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ub960_ctrl_ops = {
+ .s_ctrl = ub960_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * Core
+ */
+
+static irqreturn_t ub960_handle_events(int irq, void *arg)
+{
+ struct ub960_data *priv = arg;
+ unsigned int i;
+ u8 int_sts;
+ int ret;
+
+ ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
+
+ if (!ret && int_sts) {
+ u8 fwd_sts;
+
+ dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
+
+ ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
+
+ dev_dbg(&priv->client->dev, "FWD_STS %#x\n", fwd_sts);
+
+ for (i = 0; i < priv->hw_data->num_txports; ++i) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
+ ub960_csi_handle_events(priv, i);
+ }
+
+ for (i = 0; i < priv->hw_data->num_rxports; i++) {
+ if (!priv->rxports[i] || !priv->rxports[i]->locked)
+ continue;
+
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(i))
+ ub960_rxport_handle_events(priv, i);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ub960_run(void *arg)
+{
+ struct ub960_data *priv = arg;
+
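+ /* Poll the interrupt status every 500 ms when no IRQ line is available */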
+ while (!kthread_should_stop()) {
+ ub960_handle_events(0, priv);
+
+ msleep(500);
+ }
+
+ return 0;
+}
+
+static void ub960_remove_ports(struct ub960_data *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < priv->hw_data->num_rxports; i++)
+ if (priv->rxports[i])
+ ub960_rxport_remove_one(priv, i);
+
+ for (i = 0; i < priv->hw_data->num_txports; i++)
+ if (priv->txports[i])
+ ub960_txport_remove_one(priv, i);
+}
+
+static int ub960_register_clocks(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ const char *name;
+ int err;
+
+ /* Get our input clock (REFCLK, 23..26 MHz) */
+
+ priv->refclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->refclk))
+ return dev_err_probe(dev, PTR_ERR(priv->refclk), "Cannot get REFCLK");
+
+ dev_dbg(dev, "REFCLK: %lu Hz\n", clk_get_rate(priv->refclk));
+
+ /* Provide FPD-Link III line rate (160 * REFCLK in Synchronous mode) */
+
+ name = kasprintf(GFP_KERNEL, "%s.fpd_line_rate", dev_name(dev));
+ priv->line_clk_hw =
+ clk_hw_register_fixed_factor(dev, name,
+ __clk_get_name(priv->refclk),
+ 0, 160, 1);
+ kfree(name);
+ if (IS_ERR(priv->line_clk_hw))
+ return dev_err_probe(dev, PTR_ERR(priv->line_clk_hw),
+ "Cannot register clock HW\n");
+
+ dev_dbg(dev, "line rate: %lu Hz\n", clk_hw_get_rate(priv->line_clk_hw));
+
+ /* Expose the line rate to OF */
+
+ err = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, priv->line_clk_hw);
+ if (err) {
+ clk_hw_unregister_fixed_factor(priv->line_clk_hw);
+ return dev_err_probe(dev, err, "Cannot add OF clock provider\n");
+ }
+
+ return 0;
+}
+
+static void ub960_unregister_clocks(struct ub960_data *priv)
+{
+ clk_hw_unregister_fixed_factor(priv->line_clk_hw);
+}
+
+static int ub960_parse_dt(struct ub960_data *priv)
+{
+ struct device_node *np = priv->client->dev.of_node;
+ struct device *dev = &priv->client->dev;
+ int ret = 0;
+ int n;
+
+ if (!np) {
+ dev_err(dev, "OF: no device tree node!\n");
+ return -ENOENT;
+ }
+
+ n = of_property_read_variable_u16_array(np, "i2c-alias-pool",
+ priv->atr_alias_id,
+ 2, UB960_MAX_POOL_ALIASES);
+ if (n < 0)
+ dev_warn(dev,
+ "OF: no i2c-alias-pool, can't access remote I2C slaves");
+
+ priv->atr_alias_num = n;
+
+ dev_dbg(dev, "i2c-alias-pool has %zu aliases", priv->atr_alias_num);
+
+ if (of_property_read_u32(np, "data-rate", &priv->tx_data_rate) != 0) {
+ dev_err(dev, "OF: %s: missing \"data-rate\" node\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (priv->tx_data_rate != 1600000000 &&
+ priv->tx_data_rate != 1200000000 &&
+ priv->tx_data_rate != 800000000 &&
+ priv->tx_data_rate != 400000000) {
+ dev_err(dev, "OF: %s: invalid \"data-rate\" node\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ priv->tx_link_freq[0] = priv->tx_data_rate / 2;
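+ /* CSI-2 lanes use DDR signalling, so link frequency is half the per-lane bit rate */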
+
+ dev_dbg(dev, "Nominal data rate: %u", priv->tx_data_rate);
+
+ for (n = 0; n < priv->hw_data->num_rxports + priv->hw_data->num_txports; ++n) {
+ struct device_node *ep_np;
+
+ ep_np = of_graph_get_endpoint_by_regs(np, n, 0);
+ if (!ep_np)
+ continue;
+
+ if (n < priv->hw_data->num_rxports)
+ ret = ub960_rxport_probe_one(priv, ep_np, n);
+ else
+ ret = ub960_csiport_probe_one(
+ priv, ep_np, n - priv->hw_data->num_rxports);
+
+ of_node_put(ep_np);
+
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ ub960_remove_ports(priv);
+
+ return ret;
+}
+
+static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct ub960_data *priv = sd_to_ub960(notifier->sd);
+ struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
+ struct device *dev = &priv->client->dev;
+ u8 nport = rxport->nport;
+ unsigned int src_pad;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(dev, "Bind %s\n", subdev->name);
+
+ ret = media_entity_get_fwnode_pad(&subdev->entity, rxport->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find pad for %s\n", subdev->name);
+ return ret;
+ }
+
+ rxport->sd = subdev;
+ src_pad = ret;
+
+ ret = media_create_pad_link(&rxport->sd->entity, src_pad,
+ &priv->sd.entity, nport,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "Unable to link %s:%u -> %s:%u\n",
+ rxport->sd->name, src_pad, priv->sd.name, nport);
+ return ret;
+ }
+
+ dev_dbg(dev, "Bound %s pad: %u on index %u\n", subdev->name, src_pad,
+ nport);
+
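+ /* Only locked ports are added to the notifier, so only they can bind a subdev */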
+ for (i = 0; i < priv->hw_data->num_rxports; ++i) {
+ if (priv->rxports[i] && priv->rxports[i]->locked && !priv->rxports[i]->sd) {
+ dev_dbg(dev, "Waiting for more subdevs to be bound\n");
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "All subdevs bound\n");
+
+ return 0;
+}
+
+static void ub960_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct ub960_data *priv = sd_to_ub960(notifier->sd);
+ struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "Unbind %s\n", subdev->name);
+
+ rxport->sd = NULL;
+}
+
+static const struct v4l2_async_notifier_operations ub960_notify_ops = {
+ .bound = ub960_notify_bound,
+ .unbind = ub960_notify_unbind,
+};
+
+static int ub960_v4l2_notifier_register(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_async_notifier_init(&priv->notifier);
+
+ for (i = 0; i < priv->hw_data->num_rxports; ++i) {
+ struct ub960_rxport *rxport = priv->rxports[i];
+ struct v4l2_async_subdev *asd;
+ struct ub960_asd *ubasd;
+
+ if (!rxport || !rxport->locked)
+ continue;
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+ rxport->fwnode,
+ sizeof(*ubasd));
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev for source %u: %ld",
+ i, PTR_ERR(asd));
+ v4l2_async_notifier_cleanup(&priv->notifier);
+ return PTR_ERR(asd);
+ }
+
+ ubasd = to_ub960_asd(asd);
+ ubasd->rxport = rxport;
+ }
+
+ priv->notifier.ops = &ub960_notify_ops;
+
+ ret = v4l2_async_subdev_notifier_register(&priv->sd, &priv->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register subdev_notifier");
+ v4l2_async_notifier_cleanup(&priv->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub960_v4l2_notifier_unregister(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "Unregister async notif\n");
+
+ v4l2_async_notifier_unregister(&priv->notifier);
+ v4l2_async_notifier_cleanup(&priv->notifier);
+}
+
+static int ub960_create_subdev(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
+ v4l2_ctrl_handler_init(&priv->ctrl_handler,
+ ARRAY_SIZE(ub960_tpg_qmenu) - 1);
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler, &ub960_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ub960_tpg_qmenu) - 1, 0, 0,
+ ub960_tpg_qmenu);
+
+ v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(priv->tx_link_freq) - 1, 0,
+ priv->tx_link_freq);
+
+ if (priv->ctrl_handler.error) {
+ ret = priv->ctrl_handler.error;
+ goto err_free_ctrl;
+ }
+
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS |
+ V4L2_SUBDEV_FL_MULTIPLEXED;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &ub960_entity_ops;
+
+ for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) {
+ priv->pads[i].flags = ub960_pad_is_sink(priv, i) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&priv->sd.entity,
+ priv->hw_data->num_rxports +
+ priv->hw_data->num_txports,
+ priv->pads);
+ if (ret)
+ goto err_free_ctrl;
+
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = ub960_v4l2_notifier_register(priv);
+ if (ret) {
+ dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
+ goto err_free_state;
+ }
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret) {
+ dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
+ goto err_unreg_notif;
+ }
+
+ return 0;
+
+err_unreg_notif:
+ ub960_v4l2_notifier_unregister(priv);
+err_free_state:
+ v4l2_subdev_cleanup(&priv->sd);
+err_entity_cleanup:
+ media_entity_cleanup(&priv->sd.entity);
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+
+ return ret;
+}
+
+static void ub960_destroy_subdev(struct ub960_data *priv)
+{
+ ub960_v4l2_notifier_unregister(priv);
+ v4l2_async_unregister_subdev(&priv->sd);
+
+ v4l2_subdev_cleanup(&priv->sd);
+
+ media_entity_cleanup(&priv->sd.entity);
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+}
+
+static const struct regmap_config ub960_regmap_config = {
+ .name = "ds90ub960",
+
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+};
+
+static void ub960_sw_reset(struct ub960_data *priv)
+{
+ unsigned int i;
+
+ ub960_write(priv, UB960_SR_RESET, BIT(1));
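+ /*
+ * The reset bit is assumed to self-clear once the reset has
+ * completed; poll briefly until it does.
+ */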
+
+ for (i = 0; i < 10; ++i) {
+ int ret;
+ u8 v;
+
+ ret = ub960_read(priv, UB960_SR_RESET, &v);
+
+ if (ret || v == 0)
+ break;
+ }
+}
+
+static int ub960_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ub960_data *priv;
+ unsigned int nport;
+ u8 rev_mask;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->hw_data = of_device_get_match_data(dev);
+ if (!priv->hw_data)
+ return -ENODEV;
+
+ mutex_init(&priv->alias_table_lock);
+
+ priv->regmap = devm_regmap_init_i2c(client, &ub960_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ /* get power-down pin from DT */
+ priv->pd_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->pd_gpio)) {
+ ret = PTR_ERR(priv->pd_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot get powerdown GPIO (%d)", ret);
+ return ret;
+ }
+
+ if (priv->pd_gpio) {
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ /* wait min 2 ms for reset to complete */
+ usleep_range(2000, 5000);
+ gpiod_set_value_cansleep(priv->pd_gpio, 0);
+ /* wait min 2 ms for power up to finish */
+ usleep_range(2000, 5000);
+ } else {
+ /* Use SW reset if we don't have PD gpio */
+ ub960_sw_reset(priv);
+ }
+
+ ret = ub960_register_clocks(priv);
+ if (ret)
+ return ret;
+
+ /* Check at runtime that the device's registers are accessible */
+ ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
+ if (ret) {
+ dev_err(dev, "Cannot read first register (%d), abort\n", ret);
+ goto err_reg_read;
+ }
+
+ ret = ub960_atr_probe(priv);
+ if (ret)
+ goto err_atr_probe;
+
+ ret = ub960_parse_dt(priv);
+ if (ret)
+ goto err_parse_dt;
+
+ ret = ub960_rxport_probe_serializers(priv);
+ if (ret)
+ goto err_parse_dt;
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+ for (nport = 0; nport < priv->hw_data->num_rxports; ++nport) {
+ u8 dummy;
+
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &dummy);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &dummy);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &dummy);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &dummy);
+ }
+
+ ret = ub960_create_subdev(priv);
+ if (ret)
+ goto err_subdev;
+
+ if (client->irq) {
+ dev_dbg(dev, "using IRQ %d\n", client->irq);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ ub960_handle_events,
+ IRQF_ONESHOT, client->name,
+ priv);
+ if (ret) {
+ dev_err(dev, "Cannot enable IRQ (%d)\n", ret);
+ goto err_irq;
+ }
+
+ /* Disable GPIO3 as input */
+ ub960_update_bits_shared(priv, UB960_SR_GPIO_INPUT_CTL, BIT(3),
+ 0);
+ /* Enable GPIO3 as output, active low interrupt */
+ ub960_write(priv, UB960_SR_GPIO_PIN_CTL(3), 0xd1);
+
+ ub960_write(priv, UB960_SR_INTERRUPT_CTL,
+ UB960_SR_INTERRUPT_CTL_ALL);
+ } else {
+ /* No IRQ, fall back to polling */
+
+ priv->kthread = kthread_run(ub960_run, priv, dev_name(dev));
+ if (IS_ERR(priv->kthread)) {
+ ret = PTR_ERR(priv->kthread);
+ dev_err(dev, "Cannot create kthread (%d)\n", ret);
+ goto err_kthread;
+ }
+ dev_dbg(dev, "using polling mode\n");
+ }
+
+ dev_info(dev, "Successfully probed (rev/mask %02x)\n", rev_mask);
+
+ return 0;
+
+err_kthread:
+err_irq:
+ ub960_destroy_subdev(priv);
+err_subdev:
+ ub960_remove_ports(priv);
+err_parse_dt:
+ ub960_atr_remove(priv);
+err_atr_probe:
+err_reg_read:
+ ub960_unregister_clocks(priv);
+ if (priv->pd_gpio)
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ mutex_destroy(&priv->alias_table_lock);
+ return ret;
+}
+
+static int ub960_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ub960_data *priv = sd_to_ub960(sd);
+
+ dev_dbg(&client->dev, "Removing\n");
+
+ if (priv->kthread)
+ kthread_stop(priv->kthread);
+ ub960_destroy_subdev(priv);
+ ub960_remove_ports(priv);
+ ub960_atr_remove(priv);
+ ub960_unregister_clocks(priv);
+ if (priv->pd_gpio)
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ mutex_destroy(&priv->alias_table_lock);
+
+ dev_dbg(&client->dev, "Remove done\n");
+
+ return 0;
+}
+
+static const struct ub960_hw_data ds90ub960_hw = {
+ .num_rxports = 4,
+ .num_txports = 2,
+};
+
+static const struct i2c_device_id ub960_id[] = {
+ { "ds90ub960-q1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ub960_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ub960_dt_ids[] = {
+ { .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ub960_dt_ids);
+#endif
+
+static struct i2c_driver ds90ub960_driver = {
+ .probe_new = ub960_probe,
+ .remove = ub960_remove,
+ .id_table = ub960_id,
+ .driver = {
+ .name = "ds90ub960",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ub960_dt_ids),
+ },
+};
+
+module_i2c_driver(ds90ub960_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Texas Instruments DS90UB960-Q1 FPDLink-3 deserializer driver");
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 256acf73d5ea..54ee656843a2 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -882,7 +882,7 @@ out:
*/
#define MAX_FMTS 4
static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct et8ek8_reglist **list =
@@ -920,7 +920,7 @@ static int et8ek8_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct et8ek8_reglist **list =
@@ -958,7 +958,7 @@ static int et8ek8_enum_frame_size(struct v4l2_subdev *subdev,
}
static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct et8ek8_reglist **list =
@@ -990,12 +990,13 @@ static int et8ek8_enum_frame_ival(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&sensor->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -1004,13 +1005,14 @@ __et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
}
static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
struct v4l2_mbus_framefmt *format;
- format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ format = __et8ek8_get_pad_format(sensor, sd_state, fmt->pad,
+ fmt->which);
if (!format)
return -EINVAL;
@@ -1020,14 +1022,15 @@ static int et8ek8_get_pad_format(struct v4l2_subdev *subdev,
}
static int et8ek8_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
struct v4l2_mbus_framefmt *format;
struct et8ek8_reglist *reglist;
- format = __et8ek8_get_pad_format(sensor, cfg, fmt->pad, fmt->which);
+ format = __et8ek8_get_pad_format(sensor, sd_state, fmt->pad,
+ fmt->which);
if (!format)
return -EINVAL;
@@ -1327,7 +1330,7 @@ static int et8ek8_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct et8ek8_reglist *reglist;
reglist = et8ek8_reglist_find_type(&meta_reglist, ET8EK8_REGLIST_MODE);
- format = __et8ek8_get_pad_format(sensor, fh->pad, 0,
+ format = __et8ek8_get_pad_format(sensor, fh->state, 0,
V4L2_SUBDEV_FORMAT_TRY);
et8ek8_reglist_to_mbus(reglist, format);
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index c66cd1446c0f..808cef9f2e58 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -878,7 +878,7 @@ error:
}
static int hi556_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct hi556 *hi556 = to_hi556(sd);
@@ -893,7 +893,7 @@ static int hi556_set_format(struct v4l2_subdev *sd,
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
hi556->cur_mode = mode;
__v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
@@ -920,14 +920,15 @@ static int hi556_set_format(struct v4l2_subdev *sd,
}
static int hi556_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct hi556 *hi556 = to_hi556(sd);
mutex_lock(&hi556->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd,
+ sd_state,
fmt->pad);
else
hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
@@ -938,7 +939,7 @@ static int hi556_get_format(struct v4l2_subdev *sd,
}
static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -950,7 +951,7 @@ static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
}
static int hi556_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -973,7 +974,7 @@ static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&hi556->mutex);
return 0;
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index cee1a4817af9..bff314905ec4 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -474,7 +474,7 @@ static int __maybe_unused imx214_power_off(struct device *dev)
}
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -486,7 +486,7 @@ static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx214_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != IMX214_MBUS_CODE)
@@ -534,13 +534,13 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
static struct v4l2_mbus_framefmt *
__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&imx214->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&imx214->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->fmt;
default:
@@ -549,13 +549,14 @@ __imx214_get_pad_format(struct imx214 *imx214,
}
static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct imx214 *imx214 = to_imx214(sd);
mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, cfg, format->pad,
+ format->format = *__imx214_get_pad_format(imx214, sd_state,
+ format->pad,
format->which);
mutex_unlock(&imx214->mutex);
@@ -563,12 +564,13 @@ static int imx214_get_format(struct v4l2_subdev *sd,
}
static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_pad_config *cfg,
+__imx214_get_pad_crop(struct imx214 *imx214,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx214->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&imx214->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->crop;
default:
@@ -577,7 +579,7 @@ __imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_pad_config *cfg,
}
static int imx214_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct imx214 *imx214 = to_imx214(sd);
@@ -587,7 +589,8 @@ static int imx214_set_format(struct v4l2_subdev *sd,
mutex_lock(&imx214->mutex);
- __crop = __imx214_get_pad_crop(imx214, cfg, format->pad, format->which);
+ __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
+ format->which);
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
@@ -597,7 +600,7 @@ static int imx214_set_format(struct v4l2_subdev *sd,
__crop->width = mode->width;
__crop->height = mode->height;
- __format = __imx214_get_pad_format(imx214, cfg, format->pad,
+ __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
format->which);
__format->width = __crop->width;
__format->height = __crop->height;
@@ -617,7 +620,7 @@ static int imx214_set_format(struct v4l2_subdev *sd,
}
static int imx214_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imx214 *imx214 = to_imx214(sd);
@@ -626,22 +629,22 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, cfg, sel->pad,
+ sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
sel->which);
mutex_unlock(&imx214->mutex);
return 0;
}
static int imx214_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = imx214_modes[0].width;
fmt.format.height = imx214_modes[0].height;
- imx214_set_format(subdev, cfg, &fmt);
+ imx214_set_format(subdev, sd_state, &fmt);
return 0;
}
@@ -810,7 +813,7 @@ static int imx214_g_frame_interval(struct v4l2_subdev *subdev,
}
static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
const struct imx214_mode *mode;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 4771d0ef2c46..0c1ac2ee7b01 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -89,6 +89,12 @@
#define IMX219_REG_ORIENTATION 0x0172
+/* Binning Mode */
+#define IMX219_REG_BINNING_MODE 0x0174
+#define IMX219_BINNING_NONE 0x0000
+#define IMX219_BINNING_2X2 0x0101
+#define IMX219_BINNING_2X2_ANALOG 0x0303
+
/* Test Pattern Control */
#define IMX219_REG_TEST_PATTERN 0x0600
#define IMX219_TEST_PATTERN_DISABLE 0
@@ -143,6 +149,9 @@ struct imx219_mode {
/* Default register values */
struct imx219_reg_list reg_list;
+
+ /* 2x2 binning is used */
+ bool binning;
};
/*
@@ -176,8 +185,6 @@ static const struct imx219_reg mode_3280x2464_regs[] = {
{0x016f, 0xa0},
{0x0170, 0x01},
{0x0171, 0x01},
- {0x0174, 0x00},
- {0x0175, 0x00},
{0x0301, 0x05},
{0x0303, 0x01},
{0x0304, 0x03},
@@ -235,8 +242,6 @@ static const struct imx219_reg mode_1920_1080_regs[] = {
{0x016f, 0x38},
{0x0170, 0x01},
{0x0171, 0x01},
- {0x0174, 0x00},
- {0x0175, 0x00},
{0x0301, 0x05},
{0x0303, 0x01},
{0x0304, 0x03},
@@ -292,8 +297,6 @@ static const struct imx219_reg mode_1640_1232_regs[] = {
{0x016f, 0xd0},
{0x0170, 0x01},
{0x0171, 0x01},
- {0x0174, 0x01},
- {0x0175, 0x01},
{0x0301, 0x05},
{0x0303, 0x01},
{0x0304, 0x03},
@@ -351,8 +354,6 @@ static const struct imx219_reg mode_640_480_regs[] = {
{0x016f, 0xe0},
{0x0170, 0x01},
{0x0171, 0x01},
- {0x0174, 0x03},
- {0x0175, 0x03},
{0x0301, 0x05},
{0x0303, 0x01},
{0x0304, 0x03},
@@ -483,6 +484,7 @@ static const struct imx219_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
.regs = mode_3280x2464_regs,
},
+ .binning = false,
},
{
/* 1080P 30fps cropped */
@@ -499,6 +501,7 @@ static const struct imx219_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
.regs = mode_1920_1080_regs,
},
+ .binning = false,
},
{
/* 2x2 binned 30fps mode */
@@ -515,6 +518,7 @@ static const struct imx219_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
.regs = mode_1640_1232_regs,
},
+ .binning = true,
},
{
/* 640x480 30fps mode */
@@ -531,6 +535,7 @@ static const struct imx219_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_640_480_regs),
.regs = mode_640_480_regs,
},
+ .binning = true,
},
};
@@ -686,7 +691,7 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx219 *imx219 = to_imx219(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
struct v4l2_rect *try_crop;
mutex_lock(&imx219->mutex);
@@ -699,7 +704,7 @@ static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
try_fmt->field = V4L2_FIELD_NONE;
/* Initialize try_crop rectangle. */
- try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ try_crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
try_crop->top = IMX219_PIXEL_ARRAY_TOP;
try_crop->left = IMX219_PIXEL_ARRAY_LEFT;
try_crop->width = IMX219_PIXEL_ARRAY_WIDTH;
@@ -800,7 +805,7 @@ static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
};
static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -814,7 +819,7 @@ static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx219_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -854,12 +859,13 @@ static void imx219_update_pad_format(struct imx219 *imx219,
}
static int __imx219_get_pad_format(struct imx219 *imx219,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(&imx219->sd, cfg, fmt->pad);
+ v4l2_subdev_get_try_format(&imx219->sd, sd_state,
+ fmt->pad);
/* update the code which could change due to vflip or hflip: */
try_fmt->code = imx219_get_format_code(imx219, try_fmt->code);
fmt->format = *try_fmt;
@@ -873,21 +879,21 @@ static int __imx219_get_pad_format(struct imx219 *imx219,
}
static int imx219_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx219 *imx219 = to_imx219(sd);
int ret;
mutex_lock(&imx219->mutex);
- ret = __imx219_get_pad_format(imx219, cfg, fmt);
+ ret = __imx219_get_pad_format(imx219, sd_state, fmt);
mutex_unlock(&imx219->mutex);
return ret;
}
static int imx219_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx219 *imx219 = to_imx219(sd);
@@ -913,7 +919,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else if (imx219->mode != mode ||
imx219->fmt.code != fmt->format.code) {
@@ -969,13 +975,43 @@ static int imx219_set_framefmt(struct imx219 *imx219)
return -EINVAL;
}
+static int imx219_set_binning(struct imx219 *imx219)
+{
+ if (!imx219->mode->binning) {
+ return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+ IMX219_REG_VALUE_16BIT,
+ IMX219_BINNING_NONE);
+ }
+
+ switch (imx219->fmt.code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+ IMX219_REG_VALUE_16BIT,
+ IMX219_BINNING_2X2_ANALOG);
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+ IMX219_REG_VALUE_16BIT,
+ IMX219_BINNING_2X2);
+ }
+
+ return -EINVAL;
+}
+
static const struct v4l2_rect *
-__imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg,
+__imx219_get_pad_crop(struct imx219 *imx219,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx219->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&imx219->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx219->mode->crop;
}
@@ -984,7 +1020,7 @@ __imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg,
}
static int imx219_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
@@ -992,7 +1028,7 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
struct imx219 *imx219 = to_imx219(sd);
mutex_lock(&imx219->mutex);
- sel->r = *__imx219_get_pad_crop(imx219, cfg, sel->pad,
+ sel->r = *__imx219_get_pad_crop(imx219, sd_state, sel->pad,
sel->which);
mutex_unlock(&imx219->mutex);
@@ -1047,6 +1083,13 @@ static int imx219_start_streaming(struct imx219 *imx219)
goto err_rpm_put;
}
+ ret = imx219_set_binning(imx219);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set binning: %d\n",
+ __func__, ret);
+ goto err_rpm_put;
+ }
+
/* Apply customized values from user */
ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
if (ret)
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index e6104ee97ed2..73165e8d6bdb 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -695,7 +695,7 @@ static int imx258_write_regs(struct imx258 *imx258,
static int imx258_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
/* Initialize try_fmt */
try_fmt->width = supported_modes[0].width;
@@ -789,7 +789,7 @@ static const struct v4l2_ctrl_ops imx258_ctrl_ops = {
};
static int imx258_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order(GRBG) is supported */
@@ -802,7 +802,7 @@ static int imx258_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx258_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -829,11 +829,12 @@ static void imx258_update_pad_format(const struct imx258_mode *mode,
}
static int __imx258_get_pad_format(struct imx258 *imx258,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&imx258->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&imx258->sd,
+ sd_state,
fmt->pad);
else
imx258_update_pad_format(imx258->cur_mode, fmt);
@@ -842,21 +843,21 @@ static int __imx258_get_pad_format(struct imx258 *imx258,
}
static int imx258_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
int ret;
mutex_lock(&imx258->mutex);
- ret = __imx258_get_pad_format(imx258, cfg, fmt);
+ ret = __imx258_get_pad_format(imx258, sd_state, fmt);
mutex_unlock(&imx258->mutex);
return ret;
}
static int imx258_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
@@ -878,7 +879,7 @@ static int imx258_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx258_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx258->cur_mode = mode;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index e6aa9f32b6a8..c62ada9fc07d 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -870,7 +870,7 @@ static int imx274_binning_goodness(struct stimx274 *imx274,
* available (when called from set_fmt)
*/
static int __imx274_change_compose(struct stimx274 *imx274,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which,
u32 *width,
u32 *height,
@@ -884,8 +884,8 @@ static int __imx274_change_compose(struct stimx274 *imx274,
int best_goodness = INT_MIN;
if (which == V4L2_SUBDEV_FORMAT_TRY) {
- cur_crop = &cfg->try_crop;
- tgt_fmt = &cfg->try_fmt;
+ cur_crop = &sd_state->pads->try_crop;
+ tgt_fmt = &sd_state->pads->try_fmt;
} else {
cur_crop = &imx274->crop;
tgt_fmt = &imx274->format;
@@ -933,7 +933,7 @@ static int __imx274_change_compose(struct stimx274 *imx274,
* Return: 0 on success
*/
static int imx274_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -955,7 +955,7 @@ static int imx274_get_fmt(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int imx274_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -964,7 +964,7 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&imx274->lock);
- err = __imx274_change_compose(imx274, cfg, format->which,
+ err = __imx274_change_compose(imx274, sd_state, format->which,
&fmt->width, &fmt->height, 0);
if (err)
@@ -977,7 +977,7 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
*/
fmt->field = V4L2_FIELD_NONE;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
else
imx274->format = *fmt;
@@ -988,7 +988,7 @@ out:
}
static int imx274_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -1008,8 +1008,8 @@ static int imx274_get_selection(struct v4l2_subdev *sd,
}
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- src_crop = &cfg->try_crop;
- src_fmt = &cfg->try_fmt;
+ src_crop = &sd_state->pads->try_crop;
+ src_fmt = &sd_state->pads->try_fmt;
} else {
src_crop = &imx274->crop;
src_fmt = &imx274->format;
@@ -1043,7 +1043,7 @@ static int imx274_get_selection(struct v4l2_subdev *sd,
}
static int imx274_set_selection_crop(struct stimx274 *imx274,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *tgt_crop;
@@ -1080,7 +1080,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
sel->r = new_crop;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
- tgt_crop = &cfg->try_crop;
+ tgt_crop = &sd_state->pads->try_crop;
else
tgt_crop = &imx274->crop;
@@ -1094,7 +1094,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
/* if crop size changed then reset the output image size */
if (size_changed)
- __imx274_change_compose(imx274, cfg, sel->which,
+ __imx274_change_compose(imx274, sd_state, sel->which,
&new_crop.width, &new_crop.height,
sel->flags);
@@ -1104,7 +1104,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
}
static int imx274_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct stimx274 *imx274 = to_imx274(sd);
@@ -1113,13 +1113,13 @@ static int imx274_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->target == V4L2_SEL_TGT_CROP)
- return imx274_set_selection_crop(imx274, cfg, sel);
+ return imx274_set_selection_crop(imx274, sd_state, sel);
if (sel->target == V4L2_SEL_TGT_COMPOSE) {
int err;
mutex_lock(&imx274->lock);
- err = __imx274_change_compose(imx274, cfg, sel->which,
+ err = __imx274_change_compose(imx274, sd_state, sel->which,
&sel->r.width, &sel->r.height,
sel->flags);
mutex_unlock(&imx274->lock);
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index adcddf3204f7..b07335c57bb5 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -516,7 +516,7 @@ static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
};
static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(imx290_formats))
@@ -528,7 +528,7 @@ static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx290_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct imx290 *imx290 = to_imx290(sd);
@@ -550,7 +550,7 @@ static int imx290_enum_frame_size(struct v4l2_subdev *sd,
}
static int imx290_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx290 *imx290 = to_imx290(sd);
@@ -559,7 +559,7 @@ static int imx290_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&imx290->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, sd_state,
fmt->pad);
else
framefmt = &imx290->current_format;
@@ -596,8 +596,8 @@ static u64 imx290_calc_pixel_rate(struct imx290 *imx290)
}
static int imx290_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct imx290 *imx290 = to_imx290(sd);
const struct imx290_mode *mode;
@@ -624,7 +624,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ format = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
} else {
format = &imx290->current_format;
imx290->current_mode = mode;
@@ -646,15 +646,15 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
}
static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { 0 };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = 1920;
fmt.format.height = 1080;
- imx290_set_fmt(subdev, cfg, &fmt);
+ imx290_set_fmt(subdev, sd_state, &fmt);
return 0;
}
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 17c2e4b41221..bb2b94baad70 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1860,7 +1860,7 @@ static int imx319_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx319 *imx319 = to_imx319(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&imx319->mutex);
@@ -1947,7 +1947,7 @@ static const struct v4l2_ctrl_ops imx319_ctrl_ops = {
};
static int imx319_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -1963,7 +1963,7 @@ static int imx319_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx319_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -1997,14 +1997,14 @@ static void imx319_update_pad_format(struct imx319 *imx319,
}
static int imx319_do_get_pad_format(struct imx319 *imx319,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &imx319->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx319_update_pad_format(imx319, imx319->cur_mode, fmt);
@@ -2014,14 +2014,14 @@ static int imx319_do_get_pad_format(struct imx319 *imx319,
}
static int imx319_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx319 *imx319 = to_imx319(sd);
int ret;
mutex_lock(&imx319->mutex);
- ret = imx319_do_get_pad_format(imx319, cfg, fmt);
+ ret = imx319_do_get_pad_format(imx319, sd_state, fmt);
mutex_unlock(&imx319->mutex);
return ret;
@@ -2029,7 +2029,7 @@ static int imx319_get_pad_format(struct v4l2_subdev *sd,
static int
imx319_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx319 *imx319 = to_imx319(sd);
@@ -2055,7 +2055,7 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx319_update_pad_format(imx319, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx319->cur_mode = mode;
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index bed293b60e50..329a3fd06159 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1161,7 +1161,7 @@ static int imx355_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx355 *imx355 = to_imx355(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&imx355->mutex);
@@ -1248,7 +1248,7 @@ static const struct v4l2_ctrl_ops imx355_ctrl_ops = {
};
static int imx355_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1264,7 +1264,7 @@ static int imx355_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx355_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1298,14 +1298,14 @@ static void imx355_update_pad_format(struct imx355 *imx355,
}
static int imx355_do_get_pad_format(struct imx355 *imx355,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &imx355->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx355_update_pad_format(imx355, imx355->cur_mode, fmt);
@@ -1315,14 +1315,14 @@ static int imx355_do_get_pad_format(struct imx355 *imx355,
}
static int imx355_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx355 *imx355 = to_imx355(sd);
int ret;
mutex_lock(&imx355->mutex);
- ret = imx355_do_get_pad_format(imx355, cfg, fmt);
+ ret = imx355_do_get_pad_format(imx355, sd_state, fmt);
mutex_unlock(&imx355->mutex);
return ret;
@@ -1330,7 +1330,7 @@ static int imx355_get_pad_format(struct v4l2_subdev *sd,
static int
imx355_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx355 *imx355 = to_imx355(sd);
@@ -1356,7 +1356,7 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx355_update_pad_format(imx355, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx355->cur_mode = mode;
diff --git a/drivers/media/i2c/imx390.c b/drivers/media/i2c/imx390.c
new file mode 100644
index 000000000000..6c6f077c9970
--- /dev/null
+++ b/drivers/media/i2c/imx390.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony IMX390 CMOS Image Sensor Driver
+ *
+ * Copyright (c) 2021 Apurva Nandan <a-nandan@ti.com>
+ *
+ * Copyright (c) 2021 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+#include "imx390.h"
+
+static inline struct imx390 *to_imx390(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct imx390, subdev);
+}
+
+static int imx390_read(struct imx390 *imx390, u16 addr, u32 *val, size_t nbytes)
+{
+ int ret;
+ __le32 val_le = 0;
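+ /* Registers are little-endian; nbytes must not exceed sizeof(val_le) */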
+
+ ret = regmap_bulk_read(imx390->regmap, addr, &val_le, nbytes);
+ if (ret < 0) {
+ dev_err(imx390->dev, "%s: failed to read reg 0x%04x: %d\n",
+ __func__, addr, ret);
+ return ret;
+ }
+
+ *val = le32_to_cpu(val_le);
+ return 0;
+}
+
+static int imx390_write(struct imx390 *imx390, u16 addr, u32 val, size_t nbytes)
+{
+ int ret;
+ __le32 val_le = cpu_to_le32(val);
+
+ ret = regmap_bulk_write(imx390->regmap, addr, &val_le, nbytes);
+ if (ret < 0)
+ dev_err(imx390->dev, "%s: failed to write reg 0x%04x: %d\n",
+ __func__, addr, ret);
+ return ret;
+}
+
+static int imx390_update_bits(struct imx390 *imx390, u16 addr, u32 val,
+ u32 mask, size_t nbytes)
+{
+ int ret;
+ u32 cfg;
+
+ ret = imx390_read(imx390, addr, &cfg, nbytes);
+ if (ret < 0)
+ return ret;
+
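+ /* A non-zero val sets the mask bits, zero clears them */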
+ cfg = (val) ? (cfg | mask) : (cfg & (~mask));
+
+ return imx390_write(imx390, addr, cfg, nbytes);
+}
+
+static int imx390_write_table(struct imx390 *imx390,
+ const struct reg_sequence *regs,
+ unsigned int nr_regs)
+{
+ int ret;
+
+ ret = regmap_multi_reg_write(imx390->regmap, regs, nr_regs);
+ if (ret < 0)
+ dev_err(imx390->dev,
+ "%s: failed to write reg table (%d)!\n", __func__, ret);
+ return ret;
+}
+
+static void imx390_init_formats(struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_state_get_stream_format(state, 0, 0);
+ format->code = imx390_mbus_formats[0];
+ format->width = imx390_framesizes[0].width;
+ format->height = imx390_framesizes[0].height;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+}
+
+static int _imx390_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .source_pad = 0,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_IMMUTABLE |
+ V4L2_SUBDEV_ROUTE_FL_SOURCE |
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ {
+ .source_pad = 0,
+ .source_stream = 1,
+ .flags = V4L2_SUBDEV_ROUTE_FL_IMMUTABLE |
+ V4L2_SUBDEV_ROUTE_FL_SOURCE,
+ }
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ int ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, &routing);
+ if (ret < 0)
+ return ret;
+
+ imx390_init_formats(state);
+
+ return 0;
+}
+
+static int imx390_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ int ret;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = _imx390_set_routing(sd, state);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int imx390_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(imx390_mbus_formats))
+ return -EINVAL;
+
+ code->code = imx390_mbus_formats[code->index];
+
+ return 0;
+}
+
+static int imx390_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx390_mbus_formats); ++i) {
+ if (imx390_mbus_formats[i] == fse->code)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(imx390_mbus_formats))
+ return -EINVAL;
+
+ if (fse->index >= ARRAY_SIZE(imx390_framesizes))
+ return -EINVAL;
+
+ fse->min_width = imx390_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = imx390_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int imx390_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx390 *imx390 = to_imx390(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct v4l2_area *fsize;
+ unsigned int i;
+ u32 code;
+ int ret = 0;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ if (fmt->stream != 0)
+ return -EINVAL;
+
+ /*
+ * Validate the media bus code, defaulting to the first one if the
+ * requested code isn't supported.
+ */
+ for (i = 0; i < ARRAY_SIZE(imx390_mbus_formats); ++i) {
+ if (imx390_mbus_formats[i] == fmt->format.code) {
+ code = fmt->format.code;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(imx390_mbus_formats))
+ code = imx390_mbus_formats[0];
+
+ /* Find the nearest supported frame size. */
+ fsize = v4l2_find_nearest_size(imx390_framesizes,
+ ARRAY_SIZE(imx390_framesizes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ v4l2_subdev_lock_state(state);
+
+ /* Update the stored format and return it. */
+ format = v4l2_state_get_stream_format(state, fmt->pad, fmt->stream);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE && imx390->streaming) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ format->code = code;
+ format->width = fsize->width;
+ format->height = fsize->height;
+
+ fmt->format = *format;
+
+done:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int imx390_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+ u32 bpp;
+ int ret = 0;
+
+ if (pad != 0)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_active_state(sd);
+
+ fmt = v4l2_state_get_stream_format(state, 0, 0);
+ if (!fmt) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ /* Pixel stream: RAW12, 12 bits per pixel */
+
+ bpp = 12;
+
+ fd->entry[fd->num_entries].stream = 0;
+
+ fd->entry[fd->num_entries].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
+ fd->entry[fd->num_entries].length = fmt->width * fmt->height * bpp / 8;
+ fd->entry[fd->num_entries].pixelcode = fmt->code;
+ fd->entry[fd->num_entries].bus.csi2.vc = 0;
+ fd->entry[fd->num_entries].bus.csi2.dt = 0x2c; /* MIPI CSI-2 RAW12 */
+
+ fd->num_entries++;
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int imx390_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ int ret;
+
+ if (routing->num_routes != 1)
+ return -EINVAL;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = _imx390_set_routing(sd, state);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int imx390_check_non_wdr_mode_fps(struct imx390 *imx390, bool enable)
+{
+ if (!enable && imx390->fps > IMX390_FRAMERATE_MAX_LINEAR) {
+ dev_err(imx390->dev,
+ "%s: failed, %dFPS unsupported in non-WDR mode\n",
+ __func__, imx390->fps);
+ return -EINVAL;
+ }
+ return 0;
+}
+
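+/*
+ * Control values set while the sensor is powered down are only cached by the
+ * control framework; they are written back in imx390_start_stream() via
+ * __v4l2_ctrl_handler_setup(), so s_ctrl touches the hardware only when the
+ * device is already runtime-active.
+ */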
+static int imx390_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx390 *imx390 = container_of(ctrl->handler,
+ struct imx390, ctrl.handler);
+ int ret;
+
+ dev_dbg(imx390->dev,
+ "%s: %s, value: %d\n", __func__, ctrl->name, ctrl->val);
+
+ /* V4L2 control values are applied only while the sensor is powered up */
+ if (!pm_runtime_get_if_in_use(imx390->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ ret = imx390_check_non_wdr_mode_fps(imx390, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_SHS1,
+ IMX390_EXPOSURE_SHS_VAL(ctrl->val,
+ imx390->fps), 3);
+ break;
+
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_AGAIN_SP1H,
+ ctrl->val, 2);
+ if (ret < 0)
+ break;
+
+ ret = imx390_write(imx390, IMX390_REG_CAT0_AGAIN_SP1L,
+ ctrl->val / IMX390_AGAIN_CONV_GAIN_RATIO, 2);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_PGA_GAIN,
+ ctrl->val, 2);
+ break;
+
+ case V4L2_CID_RED_BALANCE:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_WBGAIN_R,
+ ctrl->val, 2);
+ break;
+
+ case V4L2_CID_BLUE_BALANCE:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_WBGAIN_B,
+ ctrl->val, 2);
+ break;
+
+ case V4L2_CID_HFLIP:
+ ret = imx390_update_bits(imx390, IMX390_REG_CAT0_V_H_REVERSE,
+ ctrl->val, IMX390_H_REV_MASK, 1);
+ if (ret < 0)
+ break;
+
+ ret = imx390_update_bits(imx390, IMX390_REG_SM_CFG_REVERSE_APL,
+ ctrl->val, IMX390_H_REV_APL_MASK, 1);
+ break;
+
+ case V4L2_CID_VFLIP:
+ ret = imx390_update_bits(imx390, IMX390_REG_CAT0_V_H_REVERSE,
+ ctrl->val, IMX390_V_REV_MASK, 1);
+ if (ret < 0)
+ break;
+
+ ret = imx390_update_bits(imx390, IMX390_REG_SM_CFG_REVERSE_APL,
+ ctrl->val, IMX390_V_REV_APL_MASK, 1);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx390_write(imx390, IMX390_REG_CAT0_PGMODE_PGREGEN,
+ imx390_pg_mode_reg_val[ctrl->val], 1);
+ if (ret < 0)
+ break;
+
+ ret = imx390_update_bits(imx390, IMX390_SM_CFG_SM_PGREGEN_APL,
+ ctrl->val, IMX390_SM_PG_APL_MASK, 1);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_noidle(imx390->dev);
+ return ret;
+}
+
+static int imx390_detect(struct imx390 *imx390)
+{
+ int ret;
+ u32 id;
+
+ ret = imx390_read(imx390, IMX390_REG_VERSION_ROM_VERSION, &id, 2);
+ if (ret < 0)
+ return ret;
+
+ if (id != IMX390_ROM_VERSION) {
+ dev_err(imx390->dev,
+ "%s: unknown chip ID 0x%04x\n", __func__, id);
+ return -ENODEV;
+ }
+
+ dev_dbg(imx390->dev, "%s: detected chip ID 0x%04x\n", __func__, id);
+ return 0;
+}
+
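+/*
+ * XCLR is the sensor's active-low clear (reset) pin. The GPIO descriptor is
+ * expected to be flagged active-low in DT, so setting a logical 1 below
+ * drives the physical pin low and holds the sensor in reset.
+ */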
+static int imx390_power_on(struct imx390 *imx390)
+{
+ int ret;
+
+ ret = clk_prepare_enable(imx390->clk);
+ if (ret < 0)
+ return ret;
+
+ if (imx390->xclr_gpio) {
+ gpiod_set_value_cansleep(imx390->xclr_gpio, 1);
+ /* Keep the XCLR pin low for 100 us or longer */
+ usleep_range(100, 1000);
+ gpiod_set_value_cansleep(imx390->xclr_gpio, 0);
+ /* It takes max 30 ms for the sensor to be ready */
+ msleep(30);
+ }
+ return 0;
+}
+
+static void imx390_power_off(struct imx390 *imx390)
+{
+ if (imx390->xclr_gpio) {
+ gpiod_set_value_cansleep(imx390->xclr_gpio, 1);
+ /* Keep the XCLR pin low for at least 1 us */
+ usleep_range(1, 10);
+ }
+
+ clk_disable_unprepare(imx390->clk);
+}
+
+static int imx390_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct imx390 *imx390 = to_imx390(sd);
+
+ fi->interval.numerator = 1;
+ fi->interval.denominator = imx390->fps;
+ return 0;
+}
+
+static int imx390_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct imx390 *imx390 = to_imx390(sd);
+ struct v4l2_ctrl *ctrl = imx390->ctrl.exposure;
+ u32 req_fps;
+ int ret;
+
+ mutex_lock(&imx390->lock);
+
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0) {
+ fi->interval.denominator = IMX390_FRAMERATE_DEFAULT;
+ fi->interval.numerator = 1;
+ }
+
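+ /*
+ * For example, a requested interval of 1/60 s gives req_fps = 60;
+ * rates outside [25, 60] fps are clamped to the nearest bound.
+ */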
+ req_fps = clamp_val(DIV_ROUND_CLOSEST(fi->interval.denominator,
+ fi->interval.numerator),
+ IMX390_FRAMERATE_MIN, IMX390_FRAMERATE_MAX);
+
+ fi->interval.numerator = 1;
+ fi->interval.denominator = req_fps;
+
+ imx390->fps = req_fps;
+
+ ret = __v4l2_ctrl_modify_range(ctrl, 0, IMX390_EXPOSURE_MAX(req_fps), 1,
+ IMX390_EXPOSURE_DEFAULT);
+ if (ret < 0) {
+ dev_err(imx390->dev,
+ "%s: exposure ctrl range update failed %d\n",
+ __func__, ret);
+ }
+
+ mutex_unlock(&imx390->lock);
+ dev_dbg(imx390->dev, "%s frame rate = %d\n", __func__, imx390->fps);
+
+ return ret;
+}
+
+static int imx390_start_stream(struct imx390 *imx390)
+{
+ int ret;
+
+ if (!imx390->ctrl.wdr->val &&
+ imx390->fps <= IMX390_FRAMERATE_MAX_LINEAR)
+ ret = imx390_write_table(imx390, imx390_linear_1936x1096,
+ ARRAY_SIZE(imx390_linear_1936x1096));
+ else
+ ret = imx390_write_table(imx390, imx390_wdr_1936x1096,
+ ARRAY_SIZE(imx390_wdr_1936x1096));
+ if (ret < 0)
+ return ret;
+
+ msleep(100);
+
+ /* Restore the V4L2 control values into the registers */
+ ret = __v4l2_ctrl_handler_setup(imx390->subdev.ctrl_handler);
+ if (ret < 0) {
+ dev_err(imx390->dev,
+ "%s: failed to apply v4l2 ctrls: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = imx390_write(imx390, IMX390_REG_MODE_HMAX,
+ (u32)IMX390_FPS_TO_MODE_HMAX(imx390->fps), 2);
+ if (ret < 0)
+ return ret;
+
+ /* Set active */
+ ret = imx390_write(imx390, IMX390_REG_CAT0_STANDBY, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ /* No communication is possible for a while after exiting standby */
+ msleep(20);
+
+ return 0;
+}
+
+static int imx390_stop_stream(struct imx390 *imx390)
+{
+ int ret;
+
+ /* Set standby */
+ ret = imx390_write(imx390, IMX390_REG_CAT0_STANDBY, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ /* No communication is possible for a while after entering standby */
+ usleep_range(10000, 20000);
+ return 0;
+}
+
+static int imx390_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx390 *imx390 = to_imx390(sd);
+ int ret;
+
+ mutex_lock(&imx390->lock);
+ if (imx390->streaming == enable) {
+ mutex_unlock(&imx390->lock);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx390->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx390->dev);
+ goto err_unlock;
+ }
+
+ ret = imx390_start_stream(imx390);
+ if (ret < 0)
+ goto err_runtime_put;
+ } else {
+ ret = imx390_stop_stream(imx390);
+ if (ret < 0)
+ goto err_runtime_put;
+ pm_runtime_mark_last_busy(imx390->dev);
+ pm_runtime_put_autosuspend(imx390->dev);
+ }
+
+ imx390->streaming = enable;
+ /* WDR, HFLIP, VFLIP, TEST PATTERN cannot change during streaming */
+ __v4l2_ctrl_grab(imx390->ctrl.wdr, enable);
+ __v4l2_ctrl_grab(imx390->ctrl.h_flip, enable);
+ __v4l2_ctrl_grab(imx390->ctrl.v_flip, enable);
+ __v4l2_ctrl_grab(imx390->ctrl.pg_mode, enable);
+
+ mutex_unlock(&imx390->lock);
+ return 0;
+
+err_runtime_put:
+ pm_runtime_put(imx390->dev);
+
+err_unlock:
+ mutex_unlock(&imx390->lock);
+ dev_err(imx390->dev,
+ "%s: failed to setup streaming %d\n", __func__, ret);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops imx390_subdev_video_ops = {
+ .g_frame_interval = imx390_get_frame_interval,
+ .s_frame_interval = imx390_set_frame_interval,
+ .s_stream = imx390_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx390_subdev_pad_ops = {
+ .init_cfg = imx390_init_cfg,
+ .enum_mbus_code = imx390_enum_mbus_code,
+ .enum_frame_size = imx390_enum_frame_sizes,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = imx390_set_fmt,
+ .set_routing = imx390_set_routing,
+ .get_frame_desc = imx390_get_frame_desc,
+};
+
+static const struct v4l2_subdev_ops imx390_subdev_ops = {
+ .video = &imx390_subdev_video_ops,
+ .pad = &imx390_subdev_pad_ops,
+};
+
+static const struct v4l2_ctrl_ops imx390_ctrl_ops = {
+ .s_ctrl = imx390_set_ctrl,
+};
+
+static int imx390_probe(struct i2c_client *client)
+{
+ struct imx390 *imx390;
+ struct v4l2_subdev *sd;
+ struct v4l2_ctrl_handler *ctrl_hdr;
+ int ret;
+
+ imx390 = devm_kzalloc(&client->dev, sizeof(*imx390), GFP_KERNEL);
+ if (!imx390)
+ return -ENOMEM;
+
+ imx390->dev = &client->dev;
+
+ imx390->regmap = devm_regmap_init_i2c(client, &imx390_regmap_config);
+ if (IS_ERR(imx390->regmap))
+ return PTR_ERR(imx390->regmap);
+
+ imx390->xclr_gpio = devm_gpiod_get_optional(imx390->dev,
+ "xclr", GPIOD_OUT_LOW);
+ if (IS_ERR(imx390->xclr_gpio))
+ return PTR_ERR(imx390->xclr_gpio);
+
+ imx390->clk = devm_clk_get(imx390->dev, "inck");
+ if (IS_ERR(imx390->clk))
+ return PTR_ERR(imx390->clk);
+
+ imx390->clk_rate = clk_get_rate(imx390->clk);
+ dev_info(imx390->dev, "inck rate: %lu Hz\n", imx390->clk_rate);
+
+ if (imx390->clk_rate < 5900000 || imx390->clk_rate > 27100000) {
+ dev_err(imx390->dev, "unsupported inck rate: %lu Hz\n",
+ imx390->clk_rate);
+ return -EINVAL;
+ }
+
+ ret = imx390_power_on(imx390);
+ if (ret < 0)
+ return ret;
+
+ ret = imx390_detect(imx390);
+ if (ret < 0)
+ goto err_power_off;
+
+ /* Initialize the subdev and its controls. */
+ sd = &imx390->subdev;
+ v4l2_i2c_subdev_init(sd, client, &imx390_subdev_ops);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS |
+ V4L2_SUBDEV_FL_MULTIPLEXED;
+
+ /* Initialize the media entity. */
+ imx390->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &imx390->pad);
+ if (ret < 0) {
+ dev_err(imx390->dev,
+ "%s: media entity init failed %d\n", __func__, ret);
+ goto err_power_off;
+ }
+
+ /* Initialize controls */
+ ctrl_hdr = &imx390->ctrl.handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdr, 9);
+ if (ret < 0) {
+ dev_err(imx390->dev,
+ "%s: ctrl handler init failed: %d\n", __func__, ret);
+ goto err_media_cleanup;
+ }
+
+ mutex_init(&imx390->lock);
+ imx390->ctrl.handler.lock = &imx390->lock;
+ imx390->fps = IMX390_FRAMERATE_DEFAULT;
+
+ /* Add new controls */
+ imx390->ctrl.exposure = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0,
+ IMX390_EXPOSURE_MAX(imx390->fps),
+ 1, IMX390_EXPOSURE_DEFAULT);
+
+ imx390->ctrl.again = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, 0,
+ IMX390_ANALOG_GAIN_MAX, 1,
+ IMX390_ANALOG_GAIN_DEFAULT);
+
+ imx390->ctrl.dgain = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_DIGITAL_GAIN, 0,
+ IMX390_DIGITAL_GAIN_MAX, 1,
+ IMX390_DIGITAL_GAIN_DEFAULT);
+
+ imx390->ctrl.r_balance = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0,
+ IMX390_R_B_BALANCE_MAX, 1,
+ IMX390_R_B_BALANCE_DEFAULT);
+
+ imx390->ctrl.b_balance = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0,
+ IMX390_R_B_BALANCE_MAX, 1,
+ IMX390_R_B_BALANCE_DEFAULT);
+
+ imx390->ctrl.wdr = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_WIDE_DYNAMIC_RANGE,
+ 0, 1, 1, 1);
+
+ imx390->ctrl.h_flip = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ imx390->ctrl.v_flip = v4l2_ctrl_new_std(ctrl_hdr, &imx390_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ imx390->ctrl.pg_mode = v4l2_ctrl_new_std_menu_items(ctrl_hdr,
+ &imx390_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx390_ctrl_pg_qmenu) - 1,
+ 0, 0, imx390_ctrl_pg_qmenu);
+
+ imx390->subdev.ctrl_handler = ctrl_hdr;
+ if (imx390->ctrl.handler.error) {
+ ret = imx390->ctrl.handler.error;
+ dev_err(imx390->dev,
+ "%s: failed to add the ctrls: %d\n", __func__, ret);
+ goto err_ctrl_free;
+ }
+
+ pm_runtime_set_active(imx390->dev);
+ pm_runtime_enable(imx390->dev);
+ pm_runtime_set_autosuspend_delay(imx390->dev, IMX390_PM_IDLE_TIMEOUT);
+ pm_runtime_use_autosuspend(imx390->dev);
+ pm_runtime_get_noresume(imx390->dev);
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret < 0)
+ goto err_pm_disable;
+
+ /* Finally, register the subdev. */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0) {
+ dev_err(imx390->dev,
+ "%s: v4l2 subdev register failed %d\n", __func__, ret);
+ goto err_subdev_cleanup;
+ }
+
+ dev_info(imx390->dev, "imx390 probed\n");
+ pm_runtime_mark_last_busy(imx390->dev);
+ pm_runtime_put_autosuspend(imx390->dev);
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(&imx390->subdev);
+
+err_pm_disable:
+ pm_runtime_dont_use_autosuspend(imx390->dev);
+ pm_runtime_put_noidle(imx390->dev);
+ pm_runtime_disable(imx390->dev);
+
+err_ctrl_free:
+ v4l2_ctrl_handler_free(ctrl_hdr);
+ mutex_destroy(&imx390->lock);
+
+err_media_cleanup:
+ media_entity_cleanup(&imx390->subdev.entity);
+
+err_power_off:
+ imx390_power_off(imx390);
+
+ return ret;
+}
+
+static int __maybe_unused imx390_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx390 *imx390 = to_imx390(sd);
+
+ return imx390_power_on(imx390);
+}
+
+static int __maybe_unused imx390_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx390 *imx390 = to_imx390(sd);
+
+ imx390_power_off(imx390);
+
+ return 0;
+}
+
+static int __maybe_unused imx390_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx390 *imx390 = to_imx390(sd);
+ int ret;
+
+ if (imx390->streaming)
+ imx390_stop_stream(imx390);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ dev_warn(dev, "%s: failed to suspend: %i\n", __func__, ret);
+
+ return 0;
+}
+
+static int __maybe_unused imx390_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx390 *imx390 = to_imx390(sd);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to resume: %i\n", __func__, ret);
+ return ret;
+ }
+
+ if (imx390->streaming) {
+ ret = imx390_start_stream(imx390);
+ if (ret < 0) {
+ imx390_stop_stream(imx390);
+ imx390->streaming = 0;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int imx390_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx390 *imx390 = to_imx390(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&imx390->ctrl.handler);
+ v4l2_subdev_cleanup(&imx390->subdev);
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&imx390->lock);
+
+ pm_runtime_disable(imx390->dev);
+ pm_runtime_dont_use_autosuspend(imx390->dev);
+ pm_runtime_set_suspended(imx390->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx390_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx390_runtime_suspend,
+ imx390_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(imx390_suspend, imx390_resume)
+};
+
+static const struct of_device_id imx390_dt_id[] = {
+ { .compatible = "sony,imx390" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, imx390_dt_id);
+
+static struct i2c_driver imx390_i2c_driver = {
+ .driver = {
+ .name = "imx390",
+ .of_match_table = of_match_ptr(imx390_dt_id),
+ .pm = &imx390_pm_ops,
+ },
+ .probe_new = imx390_probe,
+ .remove = imx390_remove,
+};
+
+module_i2c_driver(imx390_i2c_driver);
+
+MODULE_DESCRIPTION("Camera Sensor Driver for Sony IMX390");
+MODULE_AUTHOR("Apurva Nandan <a-nandan@ti.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx390.h b/drivers/media/i2c/imx390.h
new file mode 100644
index 000000000000..30e518afb163
--- /dev/null
+++ b/drivers/media/i2c/imx390.h
@@ -0,0 +1,7158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Sony IMX390 CMOS Image Sensor Driver
+ *
+ * Copyright (c) 2021 Apurva Nandan <a-nandan@ti.com>
+ *
+ * Copyright (c) 2021 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/types.h>
+
+#define IMX390_ACTIVE_AREA_WIDTH 1936
+#define IMX390_ACTIVE_AREA_HEIGHT 1096
+
+/* SMPG cannot be disabled, and its data type is the same as the pixel data */
+#define IMX390_SMPG_HEIGHT 4
+
+#define IMX390_OUT_WIDTH IMX390_ACTIVE_AREA_WIDTH
+#define IMX390_OUT_HEIGHT (IMX390_ACTIVE_AREA_HEIGHT + IMX390_SMPG_HEIGHT)
+
+#define IMX390_FRAMERATE_DEFAULT 30
+#define IMX390_FRAMERATE_MIN 25
+#define IMX390_FRAMERATE_MAX 60
+#define IMX390_FRAMERATE_MAX_LINEAR 30
+
+#define IMX390_MODE_VMAX 0x465
+#define IMX390_MODE_HMAX_DEFAULT 0x1130
+#define IMX390_REG_MODE_HMAX 0x200C
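+/*
+ * The line period (HMAX) scales inversely with the frame rate, e.g.
+ * IMX390_FPS_TO_MODE_HMAX(60) = (0x1130 * 30) / 60 = 2200 (0x0898).
+ */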
+#define IMX390_FPS_TO_MODE_HMAX(_fps) \
+ ((IMX390_MODE_HMAX_DEFAULT * IMX390_FRAMERATE_DEFAULT) / (_fps))
+
+#define IMX390_REG_VERSION_ROM_VERSION 0x0330
+#define IMX390_ROM_VERSION 0x3815
+
+#define IMX390_REG_CAT0_STANDBY 0x0000
+
+/* Exposure control */
+#define IMX390_REG_CAT0_SHS1 0x000C
+#define IMX390_EXPOSURE_LINES_MAX (IMX390_MODE_VMAX - 2)
+#define IMX390_EXPOSURE_MAX(_fps) \
+ ((IMX390_EXPOSURE_LINES_MAX * 1000000) / (IMX390_MODE_VMAX * (_fps)))
+
+#define IMX390_EXPOSURE_SHS_VAL(_exp, _fps) \
+ (IMX390_MODE_VMAX - ((_exp) * (_fps) * IMX390_MODE_VMAX) / 1000000)
+
+#define IMX390_EXPOSURE_DEFAULT 11111
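+
+/*
+ * Worked example at the default 30 fps: a frame is VMAX = 1125 lines, so
+ * IMX390_EXPOSURE_MAX(30) = (1123 * 1000000) / (1125 * 30) = 33274 us, and
+ * the default 11111 us exposure programs SHS1 = 1125 - 374 = 751.
+ */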
+
+/* Analog gain control */
+#define IMX390_REG_CAT0_AGAIN_SP1H 0x0018
+#define IMX390_REG_CAT0_AGAIN_SP1L 0x001A
+#define IMX390_ANALOG_GAIN_MAX 0x3FF
+#define IMX390_ANALOG_GAIN_DEFAULT 0x20
+#define IMX390_AGAIN_CONV_GAIN_RATIO 4
+
+/* Digital gain control */
+#define IMX390_REG_CAT0_PGA_GAIN 0x0024
+#define IMX390_DIGITAL_GAIN_MAX 0x1FF
+#define IMX390_DIGITAL_GAIN_DEFAULT 0x20
+
+/* White Balance control */
+#define IMX390_REG_CAT0_WBGAIN_R 0x0030
+#define IMX390_REG_CAT0_WBGAIN_B 0x0036
+#define IMX390_R_B_BALANCE_MAX 0xFFF
+#define IMX390_R_B_BALANCE_DEFAULT 0x200
+
+/* Vertical and Horizontal Flip control */
+#define IMX390_REG_CAT0_V_H_REVERSE 0x0074
+#define IMX390_V_REV_MASK BIT(0)
+#define IMX390_H_REV_MASK BIT(1)
+#define IMX390_REG_SM_CFG_REVERSE_APL 0x03C0
+#define IMX390_V_REV_APL_MASK BIT(2)
+#define IMX390_H_REV_APL_MASK BIT(3)
+
+/* Test Pattern control */
+#define IMX390_REG_CAT0_PGMODE_PGREGEN 0x01DB
+#define IMX390_SM_CFG_SM_PGREGEN_APL 0x03C0
+#define IMX390_SM_PG_APL_MASK BIT(1)
+
+#define IMX390_PM_IDLE_TIMEOUT 1000 /* ms */
+
+struct imx390_ctrl {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *wdr;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *again;
+ struct v4l2_ctrl *dgain;
+ struct v4l2_ctrl *r_balance;
+ struct v4l2_ctrl *b_balance;
+ struct v4l2_ctrl *h_flip;
+ struct v4l2_ctrl *v_flip;
+ struct v4l2_ctrl *pg_mode;
+};
+
+/**
+ * struct imx390 - imx390 device structure
+ * @dev: Device handle
+ * @clk: Pointer to imx390 clock
+ * @client: Pointer to I2C client
+ * @regmap: Pointer to regmap structure
+ * @xclr_gpio: Pointer to XCLR gpio
+ * @subdev: V4L2 subdevice structure
+ * @format: V4L2 media bus frame format structure
+ * (width and height are in sync with the compose rect)
+ * @pad: Media pad structure
+ * @ctrl: imx390 control structure
+ * @clk_rate: Frequency of imx390 clock
+ * @fps: Frame rate in frames per second
+ * @lock: Mutex protecting the V4L2 ctrl handler and streaming state
+ * @streaming: Flag to store the streaming on/off status
+ */
+struct imx390 {
+ struct device *dev;
+
+ struct clk *clk;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *xclr_gpio;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt format;
+ struct media_pad pad;
+
+ struct imx390_ctrl ctrl;
+
+ unsigned long clk_rate;
+ u32 fps;
+
+ /* mutex for V4L2 ctrl handler */
+ struct mutex lock;
+ bool streaming;
+};
+
+static const struct v4l2_area imx390_framesizes[] = {
+ {
+ .width = IMX390_OUT_WIDTH,
+ .height = IMX390_OUT_HEIGHT,
+ },
+};
+
+static const u32 imx390_mbus_formats[] = {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+};
+
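+/*
+ * 16-bit register addresses with 8-bit register values; wider fields are
+ * handled with bulk accesses in the driver.
+ */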
+static const struct regmap_config imx390_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
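+/* Test pattern register values, indexed like imx390_ctrl_pg_qmenu below. */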
+static const u32 imx390_pg_mode_reg_val[] = {
+ 0x00,
+ 0x32,
+ 0x33,
+ 0x35,
+ 0x3D,
+};
+
+static const char *const imx390_ctrl_pg_qmenu[] = {
+ "Disabled",
+ "Horizontal Color Bars",
+ "Vertical Color Bars",
+ "Gradation",
+ "Checker Pattern",
+};
+
+static const struct reg_sequence imx390_wdr_1936x1096[] = {
+ { 0x000C, 0x7F },
+ { 0x000D, 0x01 },
+ { 0x000E, 0x00 },
+ { 0x0010, 0x7F },
+ { 0x0011, 0x01 },
+ { 0x0012, 0x00 },
+ { 0x0018, 0x20 },
+ { 0x0019, 0x00 },
+ { 0x001A, 0x0C },
+ { 0x001B, 0x00 },
+ { 0x0038, 0x00 },
+ { 0x003C, 0x00 },
+ { 0x003D, 0x00 },
+ { 0x003E, 0x00 },
+ { 0x0040, 0x00 },
+ { 0x0041, 0x00 },
+ { 0x0042, 0x00 },
+ { 0x0044, 0x00 },
+ { 0x0045, 0x00 },
+ { 0x0046, 0x00 },
+ { 0x0048, 0x00 },
+ { 0x0049, 0x00 },
+ { 0x004A, 0x00 },
+ { 0x004C, 0x00 },
+ { 0x004D, 0x00 },
+ { 0x004E, 0x00 },
+ { 0x0050, 0x00 },
+ { 0x0051, 0x00 },
+ { 0x0052, 0x00 },
+ { 0x0054, 0x00 },
+ { 0x0055, 0x00 },
+ { 0x0056, 0x00 },
+ { 0x0058, 0x00 },
+ { 0x0059, 0x00 },
+ { 0x005A, 0x00 },
+ { 0x005C, 0x00 },
+ { 0x005D, 0x00 },
+ { 0x005E, 0x00 },
+ { 0x0060, 0x00 },
+ { 0x0061, 0x00 },
+ { 0x0062, 0x00 },
+ { 0x0064, 0x00 },
+ { 0x0065, 0x00 },
+ { 0x0066, 0x00 },
+ { 0x0068, 0x00 },
+ { 0x0069, 0x00 },
+ { 0x006A, 0x00 },
+ { 0x0078, 0x01 },
+ { 0x007C, 0x08 },
+ { 0x007D, 0x00 },
+ { 0x0080, 0x08 },
+ { 0x0081, 0x00 },
+ { 0x0090, 0x00 },
+ { 0x00F4, 0x1C },
+ { 0x00F5, 0xF8 },
+ { 0x00F6, 0x01 },
+ { 0x00F8, 0x03 },
+ { 0x00F9, 0x01 },
+ { 0x00FA, 0x00 },
+ { 0x00FB, 0x02 },
+ { 0x0114, 0x00 },
+ { 0x0115, 0x01 },
+ { 0x0118, 0x20 },
+ { 0x0119, 0x03 },
+ { 0x011A, 0x00 },
+ { 0x011B, 0x41 },
+ { 0x011C, 0x80 },
+ { 0x011D, 0x00 },
+ { 0x0120, 0x20 },
+ { 0x0121, 0x00 },
+ { 0x0122, 0x00 },
+ { 0x0123, 0x44 },
+ { 0x0124, 0x00 },
+ { 0x0125, 0x01 },
+ { 0x0128, 0xAC },
+ { 0x0129, 0x0D },
+ { 0x012A, 0x00 },
+ { 0x012B, 0xA4 },
+ { 0x012C, 0x00 },
+ { 0x012D, 0x01 },
+ { 0x0130, 0xC4 },
+ { 0x0131, 0x09 },
+ { 0x0132, 0x00 },
+ { 0x0133, 0xDA },
+ { 0x0138, 0x00 }, /* DB_CMN_POST_PEDESTAL (0x0138: 0x0139) = 0 */
+ { 0x0139, 0x00 }, /* DB_CMN_POST_PEDESTAL (0x0138: 0x0139) = 0 */
+ { 0x013A, 0x00 },
+ { 0x013B, 0x00 },
+ { 0x013C, 0x00 },
+ { 0x013D, 0x00 },
+ { 0x013E, 0x00 },
+ { 0x0140, 0x00 },
+ { 0x0141, 0x00 },
+ { 0x0142, 0x00 },
+ /* Start of PWL Registers */
+ { 0x0144, 0x00 }, /* pwl_cp1_x 2048 */
+ { 0x0145, 0x08 }, /* pwl_cp1_x 2048 */
+ { 0x0146, 0x00 }, /* pwl_cp1_x 2048 */
+ { 0x0147, 0x00 }, /* pwl_cp1_x 2048 */
+ { 0x0148, 0x00 }, /* pwl_cp1_y 51 */
+ { 0x0149, 0x02 }, /* pwl_cp1_y 51 */
+ { 0x014A, 0x00 }, /* pwl_cp1_y 51 */
+ { 0x014B, 0x00 }, /* pwl_cp1_y 51 */
+ { 0x014C, 0x00 }, /* pwl_cp2_x 1638 */
+ { 0x014D, 0x40 }, /* pwl_cp2_x 1638 */
+ { 0x014E, 0x00 }, /* pwl_cp2_x 1638 */
+ { 0x014F, 0x00 }, /* pwl_cp2_x 1638 */
+ { 0x0150, 0x80 }, /* pwl_cp2_y 1408 */
+ { 0x0151, 0x05 }, /* pwl_cp2_y 1408 */
+ { 0x0152, 0x00 }, /* pwl_cp2_y 1408 */
+ { 0x0153, 0x00 }, /* pwl_cp2_y 1408 */
+ { 0x0154, 0x00 }, /* pwl_cp3_x 6553 */
+ { 0x0155, 0x00 }, /* pwl_cp3_x 6553 */
+ { 0x0156, 0x01 }, /* pwl_cp3_x 6553 */
+ { 0x0157, 0x00 }, /* pwl_cp3_x 6553 */
+ { 0x0158, 0x80 }, /* pwl_cp3_y 2176 */
+ { 0x0159, 0x08 }, /* pwl_cp3_y 2176 */
+ { 0x015A, 0x00 }, /* pwl_cp3_y 2176 */
+ { 0x015B, 0x00 }, /* pwl_cp3_y 2176 */
+ { 0x015C, 0x00 }, /* pwl_cp4_x 0x100000 */
+ { 0x015D, 0x00 }, /* pwl_cp4_x 0x100000 */
+ { 0x015E, 0x10 }, /* pwl_cp4_x 0x100000 */
+ { 0x015F, 0x00 }, /* pwl_cp4_x 0x100000 */
+ { 0x0160, 0x00 }, /* pwl_cp4_y 0x1000 */
+ { 0x0161, 0x10 }, /* pwl_cp4_y 0x1000 */
+ { 0x0162, 0x00 }, /* pwl_cp4_y 0x1000 */
+ { 0x0163, 0x00 }, /* pwl_cp4_y 0x1000 */
+ { 0x0164, 0x00 }, /* pwl_cp5_x 0x100000 */
+ { 0x0165, 0x00 }, /* pwl_cp5_x 0x100000 */
+ { 0x0166, 0x10 }, /* pwl_cp5_x 0x100000 */
+ { 0x0167, 0x00 }, /* pwl_cp5_x 0x100000 */
+ { 0x0168, 0x00 }, /* pwl_cp5_y 0x1000 */
+ { 0x0169, 0x10 }, /* pwl_cp5_y 0x1000 */
+ { 0x016A, 0x00 }, /* pwl_cp5_y 0x1000 */
+ { 0x016B, 0x00 }, /* pwl_cp5_y 0x1000 */
+ { 0x016C, 0x00 }, /* pwl_cp6_x 0x100000 */
+ { 0x016D, 0x00 }, /* pwl_cp6_x 0x100000 */
+ { 0x016E, 0x10 }, /* pwl_cp6_x 0x100000 */
+ { 0x016F, 0x00 }, /* pwl_cp6_x 0x100000 */
+ { 0x0170, 0x00 }, /* pwl_cp6_y 0x1000 */
+ { 0x0171, 0x10 }, /* pwl_cp6_y 0x1000 */
+ { 0x0172, 0x00 }, /* pwl_cp6_y 0x1000 */
+ { 0x0173, 0x00 }, /* pwl_cp6_y 0x1000 */
+ { 0x0174, 0x00 }, /* pwl_cp7_x 0x100000 */
+ { 0x0175, 0x00 }, /* pwl_cp7_x 0x100000 */
+ { 0x0176, 0x10 }, /* pwl_cp7_x 0x100000 */
+ { 0x0177, 0x00 }, /* pwl_cp7_x 0x100000 */
+ { 0x0178, 0x00 }, /* pwl_cp7_y 0x1000 */
+ { 0x0179, 0x10 }, /* pwl_cp7_y 0x1000 */
+ { 0x017A, 0x00 }, /* pwl_cp7_y 0x1000 */
+ { 0x017B, 0x00 }, /* pwl_cp7_y 0x1000 */
+ { 0x017C, 0x00 }, /* pwl_cp8_x 0x100000 */
+ { 0x017D, 0x00 }, /* pwl_cp8_x 0x100000 */
+ { 0x017E, 0x10 }, /* pwl_cp8_x 0x100000 */
+ { 0x017F, 0x00 }, /* pwl_cp8_x 0x100000 */
+ { 0x0180, 0x00 }, /* pwl_cp8_y 0x1000 */
+ { 0x0181, 0x10 }, /* pwl_cp8_y 0x1000 */
+ { 0x0182, 0x00 }, /* pwl_cp8_y 0x1000 */
+ { 0x0183, 0x00 }, /* pwl_cp8_y 0x1000 */
+ { 0x0184, 0x00 }, /* pwl_cp9_x 0x100000 */
+ { 0x0185, 0x00 }, /* pwl_cp9_x 0x100000 */
+ { 0x0186, 0x10 }, /* pwl_cp9_x 0x100000 */
+ { 0x0187, 0x00 }, /* pwl_cp9_x 0x100000 */
+ { 0x0188, 0x00 }, /* pwl_cp9_y 0x1000 */
+ { 0x0189, 0x10 }, /* pwl_cp9_y 0x1000 */
+ { 0x018A, 0x00 }, /* pwl_cp9_y 0x1000 */
+ { 0x018B, 0x00 }, /* pwl_cp9_y 0x1000 */
+ { 0x018C, 0x00 }, /* pwl_cp10_x 0x10000 */
+ { 0x018D, 0x00 }, /* pwl_cp10_x 0x10000 */
+ { 0x018E, 0x10 }, /* pwl_cp10_x 0x10000 */
+ { 0x018F, 0x00 }, /* pwl_cp10_x 0x10000 */
+ { 0x0190, 0x00 }, /* pwl_cp10_y 0x100 */
+ { 0x0191, 0x10 }, /* pwl_cp10_y 0x100 */
+ { 0x0192, 0x00 }, /* pwl_cp10_y 0x100 */
+ { 0x0193, 0x00 }, /* pwl_cp10_y 0x100 */
+ { 0x0198, 0x00 }, /* pwl gain0 0x040 0000 */
+ { 0x0199, 0x00 },
+ { 0x019A, 0x40 },
+ { 0x019B, 0x00 },
+ { 0x019C, 0x00 }, /* pwl gain1 0x010 0000 */
+ { 0x019D, 0x00 },
+ { 0x019E, 0x10 },
+ { 0x019F, 0x00 },
+ { 0x01A0, 0x00 }, /* pwl gain2 0x004 0000 */
+ { 0x01A1, 0x00 },
+ { 0x01A2, 0x04 },
+ { 0x01A3, 0x00 },
+ { 0x01A4, 0x00 }, /* pwl gain3 0x000 8000 */
+ { 0x01A5, 0x80 },
+ { 0x01A6, 0x00 },
+ { 0x01A7, 0x00 },
+ { 0x01A8, 0x00 },
+ { 0x01A9, 0x00 },
+ { 0x01AA, 0x00 },
+ { 0x01AB, 0x00 },
+ /* End of PWL Registers */
+ { 0x01AC, 0x00 },
+ { 0x01AD, 0x00 },
+ { 0x01AE, 0x00 },
+ { 0x01AF, 0x00 },
+ { 0x01B0, 0x00 },
+ { 0x01B1, 0x00 },
+ { 0x01B2, 0x00 },
+ { 0x01B3, 0x00 },
+ { 0x01B4, 0x00 },
+ { 0x01B5, 0x00 },
+ { 0x01B6, 0x00 },
+ { 0x01B7, 0x00 },
+ { 0x01B8, 0x00 },
+ { 0x01B9, 0x00 },
+ { 0x01BA, 0x00 },
+ { 0x01BB, 0x00 },
+ { 0x01BC, 0x00 },
+ { 0x01BD, 0x00 },
+ { 0x01BE, 0x00 },
+ { 0x01BF, 0x00 },
+ { 0x01C0, 0x00 },
+ { 0x01C1, 0x00 },
+ { 0x01C2, 0x00 },
+ { 0x01C3, 0x00 },
+ { 0x01C4, 0x00 },
+ { 0x01C5, 0x00 },
+ { 0x01CC, 0x01 },
+ { 0x01D0, 0x09 },
+ { 0x01D4, 0x01 },
+ { 0x0332, 0x67 },
+ { 0x0333, 0x02 },
+ { 0x0390, 0x00 },
+ { 0x0391, 0x00 },
+ { 0x0392, 0x00 },
+ { 0x03C0, 0x01 },
+ { 0x2000, 0x55 },
+ { 0x2001, 0x55 },
+ { 0x2002, 0x55 },
+ { 0x2003, 0x05 },
+ { 0x2004, 0x02 },
+ { 0x2008, 0x65 },
+ { 0x2009, 0x04 },
+ { 0x200A, 0x00 },
+ { 0x200C, 0x98 },
+ { 0x200D, 0x08 },
+ { 0x2010, 0x04 },
+ { 0x2014, 0x00 },
+ { 0x2018, 0x02 },
+ { 0x2019, 0x04 },
+ { 0x201A, 0x00 },
+ { 0x201C, 0x21 },
+ { 0x201D, 0x11 },
+ { 0x201E, 0x00 },
+ { 0x201F, 0x00 },
+ { 0x2020, 0xBC },
+ { 0x2021, 0x00 },
+ { 0x2022, 0x7F },
+ { 0x2023, 0x00 },
+ { 0x2024, 0xBA },
+ { 0x2025, 0x00 },
+ { 0x2026, 0x81 },
+ { 0x2027, 0x00 },
+ { 0x2028, 0x7D },
+ { 0x2029, 0x90 },
+ { 0x202A, 0x05 },
+ { 0x202C, 0xFC },
+ { 0x202D, 0x02 },
+ { 0x202E, 0x25 },
+ { 0x202F, 0x03 },
+ { 0x2030, 0x05 },
+ { 0x2031, 0x02 },
+ { 0x2032, 0xCA },
+ { 0x2033, 0x02 },
+ { 0x2034, 0xFC },
+ { 0x2035, 0x02 },
+ { 0x2036, 0x25 },
+ { 0x2037, 0x03 },
+ { 0x2038, 0xF8 },
+ { 0x2039, 0xE4 },
+ { 0x203A, 0xE3 },
+ { 0x203B, 0x01 },
+ { 0x203C, 0xF5 },
+ { 0x203D, 0x8E },
+ { 0x203E, 0x0C },
+ { 0x203F, 0x2D },
+ { 0x2040, 0x69 },
+ { 0x2041, 0x01 },
+ { 0x2042, 0x8E },
+ { 0x2043, 0x01 },
+ { 0x2044, 0x0C },
+ { 0x2045, 0x02 },
+ { 0x2046, 0x31 },
+ { 0x2047, 0x02 },
+ { 0x2048, 0x6A },
+ { 0x2049, 0x01 },
+ { 0x204A, 0x8E },
+ { 0x204B, 0x01 },
+ { 0x204C, 0x0D },
+ { 0x204D, 0x02 },
+ { 0x204E, 0x31 },
+ { 0x204F, 0x02 },
+ { 0x2050, 0x7B },
+ { 0x2051, 0x00 },
+ { 0x2052, 0x7D },
+ { 0x2053, 0x00 },
+ { 0x2054, 0x95 },
+ { 0x2055, 0x00 },
+ { 0x2056, 0x97 },
+ { 0x2057, 0x00 },
+ { 0x2058, 0xAD },
+ { 0x2059, 0x00 },
+ { 0x205A, 0xAF },
+ { 0x205B, 0x00 },
+ { 0x205C, 0x92 },
+ { 0x205D, 0x00 },
+ { 0x205E, 0x94 },
+ { 0x205F, 0x00 },
+ { 0x2060, 0x8E },
+ { 0x2061, 0x00 },
+ { 0x2062, 0x90 },
+ { 0x2063, 0x00 },
+ { 0x2064, 0xB1 },
+ { 0x2065, 0x00 },
+ { 0x2066, 0xB3 },
+ { 0x2067, 0x00 },
+ { 0x2068, 0x08 },
+ { 0x2069, 0x00 },
+ { 0x206A, 0x04 },
+ { 0x206B, 0x00 },
+ { 0x206C, 0x84 },
+ { 0x206D, 0x00 },
+ { 0x206E, 0x80 },
+ { 0x206F, 0x00 },
+ { 0x2070, 0x04 },
+ { 0x2071, 0x00 },
+ { 0x2072, 0x46 },
+ { 0x2073, 0x00 },
+ { 0x2074, 0xE9 },
+ { 0x2075, 0x01 },
+ { 0x2076, 0x74 },
+ { 0x2077, 0x02 },
+ { 0x2078, 0x80 },
+ { 0x2079, 0x00 },
+ { 0x207A, 0xC1 },
+ { 0x207B, 0x00 },
+ { 0x207C, 0xFF },
+ { 0x207D, 0x03 },
+ { 0x207E, 0xFF },
+ { 0x207F, 0x03 },
+ { 0x2080, 0x78 },
+ { 0x2081, 0x00 },
+ { 0x2082, 0x6A },
+ { 0x2083, 0x01 },
+ { 0x2084, 0xE4 },
+ { 0x2085, 0x01 },
+ { 0x2086, 0x2B },
+ { 0x2087, 0x03 },
+ { 0x2088, 0x00 },
+ { 0x2089, 0x00 },
+ { 0x208A, 0xFF },
+ { 0x208B, 0x03 },
+ { 0x208C, 0xFF },
+ { 0x208D, 0x03 },
+ { 0x208E, 0xFF },
+ { 0x208F, 0x03 },
+ { 0x2090, 0x7D },
+ { 0x2091, 0x00 },
+ { 0x2092, 0x62 },
+ { 0x2093, 0x01 },
+ { 0x2094, 0xE9 },
+ { 0x2095, 0x01 },
+ { 0x2096, 0x00 },
+ { 0x2097, 0x00 },
+ { 0x2098, 0x7C },
+ { 0x2099, 0x00 },
+ { 0x209A, 0x21 },
+ { 0x209B, 0x03 },
+ { 0x209C, 0xE9 },
+ { 0x209D, 0x01 },
+ { 0x209E, 0x21 },
+ { 0x209F, 0x03 },
+ { 0x20A0, 0xFF },
+ { 0x20A1, 0x03 },
+ { 0x20A2, 0xFF },
+ { 0x20A3, 0x03 },
+ { 0x20A4, 0xFF },
+ { 0x20A5, 0x03 },
+ { 0x20A6, 0xFF },
+ { 0x20A7, 0x03 },
+ { 0x20A8, 0xFF },
+ { 0x20A9, 0x03 },
+ { 0x20AA, 0xFF },
+ { 0x20AB, 0x03 },
+ { 0x20AC, 0xFF },
+ { 0x20AD, 0x03 },
+ { 0x20AE, 0xFF },
+ { 0x20AF, 0x03 },
+ { 0x20B0, 0xFF },
+ { 0x20B1, 0x03 },
+ { 0x20B2, 0xFF },
+ { 0x20B3, 0x03 },
+ { 0x20B4, 0x87 },
+ { 0x20B5, 0xCC },
+ { 0x20B6, 0x87 },
+ { 0x20B7, 0x08 },
+ { 0x20B8, 0xF4 },
+ { 0x20B9, 0xA5 },
+ { 0x20BA, 0x07 },
+ { 0x20BC, 0x1F },
+ { 0x20BD, 0x01 },
+ { 0x20BE, 0xF6 },
+ { 0x20BF, 0x00 },
+ { 0x20C0, 0x90 },
+ { 0x20C1, 0x01 },
+ { 0x20C2, 0x67 },
+ { 0x20C3, 0x01 },
+ { 0x20C4, 0xFF },
+ { 0x20C5, 0x03 },
+ { 0x20C6, 0xFF },
+ { 0x20C7, 0x03 },
+ { 0x20C8, 0x33 },
+ { 0x20C9, 0x02 },
+ { 0x20CA, 0x0A },
+ { 0x20CB, 0x02 },
+ { 0x20CC, 0x7F },
+ { 0x20CD, 0x00 },
+ { 0x20CE, 0xD2 },
+ { 0x20CF, 0x00 },
+ { 0x20D0, 0x81 },
+ { 0x20D1, 0x00 },
+ { 0x20D2, 0x87 },
+ { 0x20D3, 0x00 },
+ { 0x20D4, 0x09 },
+ { 0x20D5, 0x00 },
+ { 0x20D8, 0x7F },
+ { 0x20D9, 0x00 },
+ { 0x20DA, 0x62 },
+ { 0x20DB, 0x01 },
+ { 0x20DC, 0x7F },
+ { 0x20DD, 0x00 },
+ { 0x20DE, 0x62 },
+ { 0x20DF, 0x01 },
+ { 0x20E0, 0x65 },
+ { 0x20E1, 0x00 },
+ { 0x20E2, 0x75 },
+ { 0x20E3, 0x00 },
+ { 0x20E4, 0xE0 },
+ { 0x20E5, 0x00 },
+ { 0x20E6, 0xF0 },
+ { 0x20E7, 0x00 },
+ { 0x20E8, 0x4C },
+ { 0x20E9, 0x01 },
+ { 0x20EA, 0x5C },
+ { 0x20EB, 0x01 },
+ { 0x20EC, 0xD1 },
+ { 0x20ED, 0x01 },
+ { 0x20EE, 0xE1 },
+ { 0x20EF, 0x01 },
+ { 0x20F0, 0x93 },
+ { 0x20F1, 0x02 },
+ { 0x20F2, 0xA3 },
+ { 0x20F3, 0x02 },
+ { 0x20F4, 0x0D },
+ { 0x20F5, 0x03 },
+ { 0x20F6, 0x1D },
+ { 0x20F7, 0x03 },
+ { 0x20F8, 0x57 },
+ { 0x20F9, 0x00 },
+ { 0x20FA, 0x7B },
+ { 0x20FB, 0x00 },
+ { 0x20FC, 0xD2 },
+ { 0x20FD, 0x00 },
+ { 0x20FE, 0xF6 },
+ { 0x20FF, 0x00 },
+ { 0x2100, 0x3E },
+ { 0x2101, 0x01 },
+ { 0x2102, 0x60 },
+ { 0x2103, 0x01 },
+ { 0x2104, 0xC3 },
+ { 0x2105, 0x01 },
+ { 0x2106, 0xE5 },
+ { 0x2107, 0x01 },
+ { 0x2108, 0x85 },
+ { 0x2109, 0x02 },
+ { 0x210A, 0xA9 },
+ { 0x210B, 0x02 },
+ { 0x210C, 0xFF },
+ { 0x210D, 0x02 },
+ { 0x210E, 0x21 },
+ { 0x210F, 0x03 },
+ { 0x2110, 0xFF },
+ { 0x2111, 0x03 },
+ { 0x2112, 0x00 },
+ { 0x2113, 0x00 },
+ { 0x2114, 0xFF },
+ { 0x2115, 0x03 },
+ { 0x2116, 0xFF },
+ { 0x2117, 0x03 },
+ { 0x2118, 0xFF },
+ { 0x2119, 0x03 },
+ { 0x211A, 0xFF },
+ { 0x211B, 0x03 },
+ { 0x211C, 0xFF },
+ { 0x211D, 0x03 },
+ { 0x211E, 0xFF },
+ { 0x211F, 0x03 },
+ { 0x2120, 0xFF },
+ { 0x2121, 0x03 },
+ { 0x2122, 0xFF },
+ { 0x2123, 0x03 },
+ { 0x2124, 0xFF },
+ { 0x2125, 0x03 },
+ { 0x2126, 0xFF },
+ { 0x2127, 0x03 },
+ { 0x2128, 0x7D },
+ { 0x2129, 0x90 },
+ { 0x212A, 0xD5 },
+ { 0x212B, 0x07 },
+ { 0x212C, 0x64 },
+ { 0x212D, 0x01 },
+ { 0x2130, 0x5F },
+ { 0x2131, 0x7D },
+ { 0x2132, 0x05 },
+ { 0x2134, 0x78 },
+ { 0x2135, 0x00 },
+ { 0x2136, 0x76 },
+ { 0x2137, 0x00 },
+ { 0x2138, 0xF3 },
+ { 0x2139, 0x00 },
+ { 0x213A, 0xF1 },
+ { 0x213B, 0x00 },
+ { 0x213C, 0xA6 },
+ { 0x213D, 0x02 },
+ { 0x213E, 0xA4 },
+ { 0x213F, 0x02 },
+ { 0x2140, 0x7D },
+ { 0x2141, 0x00 },
+ { 0x2142, 0x8D },
+ { 0x2143, 0x00 },
+ { 0x2144, 0xA1 },
+ { 0x2145, 0x01 },
+ { 0x2146, 0xB1 },
+ { 0x2147, 0x01 },
+ { 0x2148, 0xAB },
+ { 0x2149, 0x02 },
+ { 0x214A, 0xBB },
+ { 0x214B, 0x02 },
+ { 0x214C, 0x17 },
+ { 0x214D, 0x5C },
+ { 0x214E, 0x00 },
+ { 0x2150, 0x00 },
+ { 0x2151, 0x00 },
+ { 0x2152, 0xF8 },
+ { 0x2153, 0x00 },
+ { 0x2154, 0xBE },
+ { 0x2155, 0x00 },
+ { 0x2156, 0x7D },
+ { 0x2157, 0x00 },
+ { 0x2158, 0x25 },
+ { 0x2159, 0x00 },
+ { 0x215A, 0x7D },
+ { 0x215B, 0x00 },
+ { 0x215C, 0x62 },
+ { 0x215D, 0x01 },
+ { 0x215E, 0xFF },
+ { 0x215F, 0x03 },
+ { 0x2160, 0x26 },
+ { 0x2161, 0x00 },
+ { 0x2162, 0x7D },
+ { 0x2163, 0x00 },
+ { 0x2164, 0x63 },
+ { 0x2165, 0x01 },
+ { 0x2166, 0xFF },
+ { 0x2167, 0x03 },
+ { 0x2168, 0xCB },
+ { 0x2169, 0x02 },
+ { 0x216A, 0xCF },
+ { 0x216B, 0x02 },
+ { 0x216C, 0xFF },
+ { 0x216D, 0x03 },
+ { 0x216E, 0xFF },
+ { 0x216F, 0x03 },
+ { 0x2170, 0xFF },
+ { 0x2171, 0x03 },
+ { 0x2172, 0xFF },
+ { 0x2173, 0x03 },
+ { 0x2174, 0xFF },
+ { 0x2175, 0x03 },
+ { 0x2176, 0xFF },
+ { 0x2177, 0x03 },
+ { 0x2178, 0x7E },
+ { 0x2179, 0x00 },
+ { 0x217A, 0xBD },
+ { 0x217B, 0x00 },
+ { 0x217C, 0xEC },
+ { 0x217D, 0x01 },
+ { 0x217E, 0x7B },
+ { 0x217F, 0x02 },
+ { 0x2180, 0xD1 },
+ { 0x2181, 0x02 },
+ { 0x2182, 0x25 },
+ { 0x2183, 0x03 },
+ { 0x2184, 0x7F },
+ { 0x2185, 0x00 },
+ { 0x2186, 0xBD },
+ { 0x2187, 0x00 },
+ { 0x2188, 0xED },
+ { 0x2189, 0x01 },
+ { 0x218A, 0x7B },
+ { 0x218B, 0x02 },
+ { 0x218C, 0xD2 },
+ { 0x218D, 0x02 },
+ { 0x218E, 0x25 },
+ { 0x218F, 0x03 },
+ { 0x2190, 0xFF },
+ { 0x2191, 0x03 },
+ { 0x2192, 0xFF },
+ { 0x2193, 0x03 },
+ { 0x2194, 0xE9 },
+ { 0x2195, 0x01 },
+ { 0x2196, 0x21 },
+ { 0x2197, 0x03 },
+ { 0x2198, 0x17 },
+ { 0x2199, 0xFC },
+ { 0x219A, 0x7F },
+ { 0x219B, 0x01 },
+ { 0x219C, 0xFF },
+ { 0x219D, 0x03 },
+ { 0x21A0, 0x1B },
+ { 0x21A1, 0x1B },
+ { 0x21A2, 0x1B },
+ { 0x21A3, 0x1B },
+ { 0x21A4, 0x2B },
+ { 0x21A5, 0x80 },
+ { 0x21A6, 0x00 },
+ { 0x21A8, 0x04 },
+ { 0x21A9, 0x98 },
+ { 0x21AA, 0x60 },
+ { 0x21AB, 0x03 },
+ { 0x21AC, 0x7F },
+ { 0x21AD, 0x80 },
+ { 0x21AE, 0x09 },
+ { 0x21B0, 0x1C },
+ { 0x21B1, 0x00 },
+ { 0x21B2, 0xA0 },
+ { 0x21B3, 0x00 },
+ { 0x21B4, 0x0C },
+ { 0x21B5, 0x00 },
+ { 0x21B6, 0x2D },
+ { 0x21B7, 0x00 },
+ { 0x21B8, 0x20 },
+ { 0x21B9, 0x00 },
+ { 0x21BA, 0x02 },
+ { 0x21BB, 0x00 },
+ { 0x21BC, 0xCC },
+ { 0x21BD, 0x00 },
+ { 0x21BE, 0x4A },
+ { 0x21BF, 0x00 },
+ { 0x21C0, 0xD0 },
+ { 0x21C1, 0x00 },
+ { 0x21C2, 0x44 },
+ { 0x21C3, 0x00 },
+ { 0x21C4, 0x00 },
+ { 0x21C5, 0xE0 },
+ { 0x21C6, 0x00 },
+ { 0x21C8, 0x11 },
+ { 0x21C9, 0x00 },
+ { 0x21CA, 0x02 },
+ { 0x21CC, 0x08 },
+ { 0x21CD, 0xC0 },
+ { 0x21CE, 0x0C },
+ { 0x21D0, 0x44 },
+ { 0x21D1, 0x00 },
+ { 0x21D2, 0x02 },
+ { 0x21D4, 0x02 },
+ { 0x21D5, 0x20 },
+ { 0x21D6, 0x2C },
+ { 0x21D8, 0xFE },
+ { 0x21D9, 0x9D },
+ { 0x21DA, 0xDF },
+ { 0x21DB, 0x03 },
+ { 0x21DC, 0x62 },
+ { 0x21DD, 0x01 },
+ { 0x21DE, 0x7F },
+ { 0x21DF, 0x00 },
+ { 0x21E0, 0xB7 },
+ { 0x21E1, 0x01 },
+ { 0x21E2, 0xB5 },
+ { 0x21E3, 0x01 },
+ { 0x21E4, 0xC1 },
+ { 0x21E5, 0x02 },
+ { 0x21E6, 0xBF },
+ { 0x21E7, 0x02 },
+ { 0x21E8, 0xB3 },
+ { 0x21E9, 0x0D },
+ { 0x21EA, 0x00 },
+ { 0x21EB, 0x04 },
+ { 0x21EC, 0x90 },
+ { 0x21ED, 0x07 },
+ { 0x21EE, 0x58 },
+ { 0x21EF, 0x04 },
+ { 0x21F0, 0x54 },
+ { 0x21F1, 0x04 },
+ { 0x21F4, 0x02 },
+ { 0x21F5, 0x00 },
+ { 0x21F6, 0x00 },
+ { 0x21F8, 0x3C },
+ { 0x21F9, 0x00 },
+ { 0x21FC, 0x28 },
+ { 0x21FD, 0x00 },
+ { 0x21FE, 0x3C },
+ { 0x21FF, 0x00 },
+ { 0x2200, 0x00 },
+ { 0x2204, 0x4C },
+ { 0x2205, 0x04 },
+ { 0x2206, 0x65 },
+ { 0x2207, 0x04 },
+ { 0x2208, 0x0A },
+ { 0x2209, 0x00 },
+ { 0x220C, 0x57 },
+ { 0x220D, 0x00 },
+ { 0x220E, 0x37 },
+ { 0x220F, 0x00 },
+ { 0x2210, 0x1F },
+ { 0x2211, 0x00 },
+ { 0x2212, 0x1F },
+ { 0x2213, 0x00 },
+ { 0x2214, 0x1F },
+ { 0x2215, 0x00 },
+ { 0x2216, 0x77 },
+ { 0x2217, 0x00 },
+ { 0x2218, 0x1F },
+ { 0x2219, 0x00 },
+ { 0x221A, 0x17 },
+ { 0x221B, 0x00 },
+ { 0x221C, 0x03 },
+ { 0x2220, 0x24 },
+ { 0x2221, 0x00 },
+ { 0x2222, 0x00 },
+ { 0x2223, 0x00 },
+ { 0x2224, 0xA7 },
+ { 0x2225, 0xAA },
+ { 0x2226, 0x80 },
+ { 0x2227, 0x08 },
+ { 0x2228, 0x01 },
+ { 0x2260, 0xFF },
+ { 0x2261, 0x1F },
+ { 0x2262, 0x00 },
+ { 0x2263, 0x00 },
+ { 0x2264, 0x00 },
+ { 0x2265, 0x00 },
+ { 0x2266, 0xFF },
+ { 0x2267, 0x1F },
+ { 0x2268, 0x00 },
+ { 0x2269, 0x00 },
+ { 0x226A, 0xFF },
+ { 0x226B, 0x1F },
+ { 0x226C, 0x00 },
+ { 0x226D, 0x00 },
+ { 0x226E, 0xFF },
+ { 0x226F, 0x1F },
+ { 0x227C, 0xB2 },
+ { 0x227D, 0x0C },
+ { 0x227E, 0x6A },
+ { 0x227F, 0x09 },
+ { 0x2280, 0xD2 },
+ { 0x2281, 0x0C },
+ { 0x2282, 0x5A },
+ { 0x2283, 0x09 },
+ { 0x2284, 0xC4 },
+ { 0x2285, 0x0C },
+ { 0x2286, 0x54 },
+ { 0x2287, 0x09 },
+ { 0x22B2, 0x92 },
+ { 0x22B4, 0x20 },
+ { 0x22B5, 0x00 },
+ { 0x22B6, 0x20 },
+ { 0x22B7, 0x00 },
+ { 0x22B8, 0x20 },
+ { 0x22B9, 0x00 },
+ { 0x22BA, 0x20 },
+ { 0x22BB, 0x00 },
+ { 0x22BC, 0x20 },
+ { 0x22BD, 0x00 },
+ { 0x22BE, 0x20 },
+ { 0x22BF, 0x00 },
+ { 0x22C0, 0x20 },
+ { 0x22C1, 0x00 },
+ { 0x22C2, 0x20 },
+ { 0x22C3, 0x00 },
+ { 0x22C4, 0x20 },
+ { 0x22C5, 0x00 },
+ { 0x22C6, 0x20 },
+ { 0x22C7, 0x00 },
+ { 0x22C8, 0x20 },
+ { 0x22C9, 0x00 },
+ { 0x22CA, 0x20 },
+ { 0x22CB, 0x00 },
+ { 0x22CC, 0x20 },
+ { 0x22CD, 0x00 },
+ { 0x22CE, 0x20 },
+ { 0x22CF, 0x00 },
+ { 0x22DA, 0x00 },
+ { 0x22EF, 0x82 },
+ { 0x2308, 0x01 },
+ { 0x2310, 0x4B },
+ { 0x2311, 0x09 },
+ { 0x2318, 0x40 },
+ { 0x2319, 0xCD },
+ { 0x231A, 0x54 },
+ { 0x2324, 0x20 },
+ { 0x2325, 0x00 },
+ { 0x2328, 0x00 },
+ { 0x234A, 0x9F },
+ { 0x234B, 0x07 },
+ { 0x2354, 0x0C },
+ { 0x23C0, 0x5D },
+ { 0x244C, 0xFF },
+ { 0x244D, 0x03 },
+ { 0x244E, 0xFF },
+ { 0x244F, 0x03 },
+ { 0x24A0, 0x00 },
+ { 0x24A4, 0x16 },
+ { 0x24A5, 0x01 },
+ { 0x24A6, 0xA6 },
+ { 0x24A7, 0x02 },
+ { 0x24A8, 0xD5 },
+ { 0x24A9, 0x02 },
+ { 0x24BC, 0x17 },
+ { 0x24BD, 0x01 },
+ { 0x24BE, 0xA7 },
+ { 0x24BF, 0x02 },
+ { 0x24C0, 0xD5 },
+ { 0x24C1, 0x02 },
+ { 0x24DA, 0x6F },
+ { 0x24DB, 0x00 },
+ { 0x24DC, 0x62 },
+ { 0x24DD, 0x01 },
+ { 0x24EA, 0x32 },
+ { 0x24EB, 0x00 },
+ { 0x24EC, 0xDC },
+ { 0x24ED, 0x00 },
+ { 0x24FA, 0x32 },
+ { 0x24FB, 0x00 },
+ { 0x24FC, 0xDD },
+ { 0x24FD, 0x00 },
+ { 0x254A, 0x15 },
+ { 0x254B, 0x01 },
+ { 0x255A, 0x15 },
+ { 0x255B, 0x01 },
+ { 0x2560, 0x01 },
+ { 0x2561, 0x00 },
+ { 0x2562, 0x2A },
+ { 0x2563, 0x00 },
+ { 0x2564, 0xF8 },
+ { 0x2565, 0x00 },
+ { 0x2566, 0x15 },
+ { 0x2567, 0x01 },
+ { 0x2568, 0x0C },
+ { 0x2569, 0x02 },
+ { 0x256A, 0x31 },
+ { 0x256B, 0x02 },
+ { 0x2578, 0x90 },
+ { 0x2579, 0x01 },
+ { 0x257A, 0x92 },
+ { 0x257B, 0x01 },
+ { 0x257C, 0xB8 },
+ { 0x257D, 0x02 },
+ { 0x257E, 0xBA },
+ { 0x257F, 0x02 },
+ { 0x2584, 0x90 },
+ { 0x2585, 0x01 },
+ { 0x2586, 0x92 },
+ { 0x2587, 0x01 },
+ { 0x2588, 0xB8 },
+ { 0x2589, 0x02 },
+ { 0x258A, 0xBA },
+ { 0x258B, 0x02 },
+ { 0x267A, 0xF8 },
+ { 0x267B, 0x00 },
+ { 0x267C, 0x16 },
+ { 0x267D, 0x01 },
+ { 0x267E, 0xA6 },
+ { 0x267F, 0x02 },
+ { 0x2680, 0xD5 },
+ { 0x2681, 0x02 },
+ { 0x2690, 0xF8 },
+ { 0x2691, 0x00 },
+ { 0x2694, 0xA6 },
+ { 0x2695, 0x02 },
+ { 0x2696, 0x16 },
+ { 0x2697, 0x01 },
+ { 0x269A, 0xD5 },
+ { 0x269B, 0x02 },
+ { 0x26B8, 0x10 },
+ { 0x26B9, 0x00 },
+ { 0x26BA, 0x33 },
+ { 0x26BB, 0x00 },
+ { 0x26BC, 0x89 },
+ { 0x26BD, 0x00 },
+ { 0x26BE, 0xB0 },
+ { 0x26BF, 0x00 },
+ { 0x26C4, 0x4E },
+ { 0x26C5, 0x00 },
+ { 0x26C8, 0xC9 },
+ { 0x26C9, 0x00 },
+ { 0x26CC, 0x35 },
+ { 0x26CD, 0x01 },
+ { 0x26D0, 0xBA },
+ { 0x26D1, 0x01 },
+ { 0x26D4, 0x7C },
+ { 0x26D5, 0x02 },
+ { 0x26D8, 0xF6 },
+ { 0x26D9, 0x02 },
+ { 0x26DE, 0x51 },
+ { 0x26DF, 0x00 },
+ { 0x26E0, 0x7F },
+ { 0x26E1, 0x00 },
+ { 0x26E2, 0xCC },
+ { 0x26E3, 0x00 },
+ { 0x26E4, 0xF8 },
+ { 0x26E5, 0x00 },
+ { 0x26E6, 0x38 },
+ { 0x26E7, 0x01 },
+ { 0x26E8, 0x65 },
+ { 0x26E9, 0x01 },
+ { 0x26EA, 0xBD },
+ { 0x26EB, 0x01 },
+ { 0x26EE, 0x7F },
+ { 0x26EF, 0x02 },
+ { 0x26F0, 0xAB },
+ { 0x26F1, 0x02 },
+ { 0x26F2, 0xF9 },
+ { 0x26F3, 0x02 },
+ { 0x2722, 0x59 },
+ { 0x2723, 0x02 },
+ { 0x2938, 0x55 },
+ { 0x2939, 0x00 },
+ { 0x293A, 0x17 },
+ { 0x293B, 0x00 },
+ { 0x293C, 0xD0 },
+ { 0x293D, 0x00 },
+ { 0x293E, 0x91 },
+ { 0x293F, 0x00 },
+ { 0x2940, 0x3C },
+ { 0x2941, 0x01 },
+ { 0x2942, 0x0C },
+ { 0x2943, 0x01 },
+ { 0x2944, 0xC1 },
+ { 0x2945, 0x01 },
+ { 0x2946, 0x76 },
+ { 0x2947, 0x01 },
+ { 0x2948, 0x83 },
+ { 0x2949, 0x02 },
+ { 0x294A, 0xFB },
+ { 0x294B, 0x01 },
+ { 0x294C, 0xFD },
+ { 0x294D, 0x02 },
+ { 0x294E, 0xBF },
+ { 0x294F, 0x02 },
+ { 0x2A06, 0x25 },
+ { 0x2A07, 0x03 },
+ { 0x2A0C, 0x45 },
+ { 0x2A0D, 0x00 },
+ { 0x2A0E, 0x00 },
+ { 0x2A0F, 0x00 },
+ { 0x2A20, 0x00 },
+ { 0x2A21, 0x00 },
+ { 0x2A22, 0x7D },
+ { 0x2A23, 0x00 },
+ { 0x2B11, 0x1A },
+ { 0x2B13, 0x11 },
+ { 0x2B14, 0x11 },
+ { 0x2B15, 0x11 },
+ { 0x2B16, 0x11 },
+ { 0x2B17, 0x10 },
+ { 0x2B18, 0x0F },
+ { 0x2B19, 0x0E },
+ { 0x2B1A, 0x0D },
+ { 0x2B1B, 0x0C },
+ { 0x2B1C, 0x0B },
+ { 0x2B1D, 0x0B },
+ { 0x2B1E, 0x0A },
+ { 0x2B1F, 0x09 },
+ { 0x2B20, 0x08 },
+ { 0x2B21, 0x07 },
+ { 0x2B22, 0x06 },
+ { 0x2B23, 0x05 },
+ { 0x2B24, 0x04 },
+ { 0x2B25, 0x03 },
+ { 0x2B26, 0x03 },
+ { 0x2B38, 0x01 },
+ { 0x2B45, 0xE3 },
+ { 0x2B50, 0x01 },
+ { 0x2B51, 0x00 },
+ { 0x2B62, 0x66 },
+ { 0x2B6D, 0x47 },
+ { 0x2B70, 0x03 },
+ { 0x2B71, 0x02 },
+ { 0x2B72, 0x02 },
+ { 0x2B7B, 0x42 },
+ { 0x2B7F, 0x7F },
+ { 0x2B80, 0x94 },
+ { 0x2B81, 0x06 },
+ { 0x2B87, 0x1B },
+ { 0x2B88, 0x1A },
+ { 0x2B89, 0x17 },
+ { 0x2B8A, 0x17 },
+ { 0x2B8B, 0x12 },
+ { 0x2B8D, 0x2B },
+ { 0x2B8E, 0x2B },
+ { 0x2B8F, 0x2B },
+ { 0x2B90, 0x7F },
+ { 0x2B91, 0x0F },
+ { 0x2B92, 0x31 },
+ { 0x2B93, 0x07 },
+ { 0x2B94, 0xFE },
+ { 0x2B95, 0x26 },
+ { 0x2B96, 0x84 },
+ { 0x2B97, 0x0C },
+ { 0x2B98, 0xFE },
+ { 0x2B99, 0x56 },
+ { 0x2B9B, 0x2A },
+ { 0x2BA8, 0xBC },
+ { 0x2BA9, 0x62 },
+ { 0x2BC1, 0x70 },
+ { 0x2BC5, 0x80 },
+ { 0x2BD5, 0x30 },
+ { 0x2BD6, 0xF0 },
+ { 0x2BD8, 0xDB },
+ { 0x2BD9, 0xF6 },
+ { 0x2BDA, 0x21 },
+ { 0x2BDB, 0x06 },
+ { 0x2BDC, 0x57 },
+ { 0x2BFE, 0x00 },
+ { 0x2BFF, 0x00 },
+ { 0x2C98, 0xE1 },
+ { 0x2C99, 0x2E },
+ { 0x2C9B, 0x80 },
+ { 0x2CA9, 0x80 },
+ { 0x2CAA, 0x01 },
+ { 0x2CBF, 0x08 },
+ { 0x2D39, 0x0E },
+ { 0x2D50, 0x80 },
+ { 0x2D54, 0x00 },
+ { 0x2D5B, 0x58 },
+ { 0x2DFD, 0x01 },
+ { 0x2D64, 0x64 },
+ { 0x2D65, 0x80 },
+ { 0x3000, 0x00 },
+ { 0x3001, 0x00 },
+ { 0x3002, 0x23 },
+ { 0x3003, 0xA1 },
+ { 0x3004, 0x00 },
+ { 0x3005, 0x20 },
+ { 0x3006, 0x58 },
+ { 0x3007, 0x00 },
+ { 0x3008, 0x06 },
+ { 0x3009, 0xB4 },
+ { 0x300A, 0x1F },
+ { 0x300B, 0x00 },
+ { 0x300C, 0x00 },
+ { 0x300D, 0x1B },
+ { 0x300E, 0x90 },
+ { 0x300F, 0x97 },
+ { 0x3010, 0x00 },
+ { 0x3011, 0x00 },
+ { 0x3012, 0x20 },
+ { 0x3013, 0x21 },
+ { 0x3014, 0x00 },
+ { 0x3015, 0x20 },
+ { 0x3016, 0x84 },
+ { 0x3017, 0x00 },
+ { 0x3018, 0x30 },
+ { 0x3019, 0x09 },
+ { 0x301A, 0x46 },
+ { 0x301B, 0x00 },
+ { 0x3070, 0xC1 },
+ { 0x3071, 0x81 },
+ { 0x3072, 0x29 },
+ { 0x3073, 0x81 },
+ { 0x3080, 0xC4 },
+ { 0x3081, 0x0C },
+ { 0x3082, 0xD2 },
+ { 0x3083, 0x0C },
+ { 0x3084, 0x5C },
+ { 0x3085, 0x00 },
+ { 0x3086, 0x90 },
+ { 0x3087, 0x00 },
+ { 0x3088, 0x07 },
+ { 0x3089, 0x0A },
+ { 0x308A, 0x52 },
+ { 0x308B, 0x09 },
+ { 0x308C, 0x44 },
+ { 0x308D, 0x03 },
+ { 0x308E, 0x70 },
+ { 0x308F, 0x03 },
+ { 0x3090, 0x54 },
+ { 0x3091, 0x09 },
+ { 0x3092, 0x5A },
+ { 0x3093, 0x09 },
+ { 0x3094, 0x1C },
+ { 0x3095, 0x00 },
+ { 0x3096, 0x10 },
+ { 0x3097, 0x00 },
+ { 0x3098, 0x70 },
+ { 0x3099, 0x03 },
+ { 0x309A, 0xF8 },
+ { 0x309B, 0x04 },
+ { 0x309C, 0x74 },
+ { 0x309D, 0x01 },
+ { 0x309E, 0x60 },
+ { 0x309F, 0x01 },
+ { 0x3370, 0x01 },
+ { 0x3374, 0xF0 },
+ { 0x3375, 0x00 },
+ { 0x3376, 0x01 },
+ { 0x3377, 0x00 },
+ { 0x3410, (IMX390_OUT_WIDTH & 0xFF) },
+ { 0x3411, (IMX390_OUT_WIDTH >> 8) },
+ { 0x3418, (IMX390_OUT_HEIGHT & 0xFF) },
+ { 0x3419, (IMX390_OUT_HEIGHT >> 8) },
+ { 0x34A0, 0x30 },
+ { 0x34BE, 0x6A },
+ { 0x34BF, 0x01 },
+ { 0x34C0, 0x40 },
+ { 0x34C1, 0x00 },
+ { 0x34C2, 0x40 },
+ { 0x34C3, 0x00 },
+ { 0x34C4, 0x40 },
+ { 0x34C5, 0x00 },
+ { 0x34C6, 0x40 },
+ { 0x34C7, 0x00 },
+ { 0x34C8, 0x40 },
+ { 0x34C9, 0x00 },
+ { 0x34CA, 0x40 },
+ { 0x34CB, 0x00 },
+ { 0x34CC, 0x40 },
+ { 0x34CD, 0x00 },
+ { 0x34CE, 0x40 },
+ { 0x34CF, 0x00 },
+ { 0x3584, 0x00 },
+ { 0x3586, 0x00 },
+ { 0x3587, 0x01 },
+ { 0x3588, 0xE6 },
+ { 0x3589, 0x00 },
+ { 0x3590, 0x00 },
+ { 0x3591, 0x00 },
+ { 0x3594, 0x40 },
+ { 0x3598, 0x03 },
+ { 0x3599, 0x00 },
+ { 0x359A, 0x80 },
+ { 0x359B, 0x00 },
+ { 0x359C, 0x00 },
+ { 0x359D, 0x01 },
+ { 0x359E, 0x00 },
+ { 0x359F, 0x02 },
+ { 0x35A0, 0x00 },
+ { 0x35A1, 0x04 },
+ { 0x35A2, 0x20 },
+ { 0x35A3, 0x00 },
+ { 0x35A4, 0x40 },
+ { 0x35A5, 0x00 },
+ { 0x35A6, 0x80 },
+ { 0x35A7, 0x00 },
+ { 0x35A8, 0x00 },
+ { 0x35A9, 0x01 },
+ { 0x35AC, 0x80 },
+ { 0x35AD, 0x00 },
+ { 0x35AE, 0x00 },
+ { 0x35AF, 0x01 },
+ { 0x35B0, 0x00 },
+ { 0x35B1, 0x02 },
+ { 0x35B2, 0x00 },
+ { 0x35B3, 0x04 },
+ { 0x35B4, 0x02 },
+ { 0x35B5, 0x00 },
+ { 0x35B6, 0x04 },
+ { 0x35B7, 0x00 },
+ { 0x35B8, 0x08 },
+ { 0x35B9, 0x00 },
+ { 0x35BA, 0x10 },
+ { 0x35BB, 0x00 },
+ { 0x35C8, 0x00 },
+ { 0x35C9, 0x01 },
+ { 0x35CA, 0x00 },
+ { 0x35CB, 0x04 },
+ { 0x35CC, 0x00 },
+ { 0x35CD, 0x10 },
+ { 0x35CE, 0x00 },
+ { 0x35CF, 0x40 },
+ { 0x35D0, 0x00 },
+ { 0x35D1, 0x0C },
+ { 0x35D2, 0x00 },
+ { 0x35D3, 0x0C },
+ { 0x35D4, 0x00 },
+ { 0x35D5, 0x0C },
+ { 0x35D6, 0x00 },
+ { 0x35D7, 0x0C },
+ { 0x35D8, 0x00 },
+ { 0x35D9, 0x00 },
+ { 0x35DA, 0x08 },
+ { 0x35DB, 0x00 },
+ { 0x35DC, 0xD8 },
+ { 0x35DD, 0x0E },
+ { 0x35F0, 0x00 },
+ { 0x35F1, 0x10 },
+ { 0x35F2, 0x00 },
+ { 0x35F3, 0x10 },
+ { 0x35F4, 0x00 },
+ { 0x35F5, 0x10 },
+ { 0x35F6, 0x00 },
+ { 0x35F7, 0x03 },
+ { 0x35F8, 0x00 },
+ { 0x35F9, 0x02 },
+ { 0x35FA, 0x38 },
+ { 0x35FB, 0x00 },
+ { 0x35FC, 0xB3 },
+ { 0x35FD, 0x01 },
+ { 0x35FE, 0x00 },
+ { 0x35FF, 0x00 },
+ { 0x3600, 0x05 },
+ { 0x3601, 0x06 },
+ { 0x3604, 0x03 },
+ { 0x3605, 0x00 },
+ { 0x3608, 0x03 },
+ { 0x3609, 0x00 },
+ { 0x360C, 0x00 },
+ { 0x360D, 0x00 },
+ { 0x3610, 0x10 },
+ { 0x3611, 0x01 },
+ { 0x3612, 0x00 },
+ { 0x3613, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3615, 0x00 },
+ { 0x361C, 0x00 },
+ { 0x361D, 0x01 },
+ { 0x361E, 0x00 },
+ { 0x361F, 0x01 },
+ { 0x3620, 0x00 },
+ { 0x3621, 0x00 },
+ { 0x3622, 0xB0 },
+ { 0x3623, 0x04 },
+ { 0x3624, 0xDC },
+ { 0x3625, 0x05 },
+ { 0x3626, 0x00 },
+ { 0x3627, 0x01 },
+ { 0x3628, 0xFF },
+ { 0x3629, 0x0F },
+ { 0x362A, 0x00 },
+ { 0x362B, 0x10 },
+ { 0x362C, 0x00 },
+ { 0x362D, 0x01 },
+ { 0x3630, 0x41 },
+ { 0x3631, 0x00 },
+ { 0x3632, 0x41 },
+ { 0x3633, 0x00 },
+ { 0x3634, 0x41 },
+ { 0x3635, 0x00 },
+ { 0x3636, 0x41 },
+ { 0x3637, 0x00 },
+ { 0x3638, 0x44 },
+ { 0x3639, 0x00 },
+ { 0x363A, 0x47 },
+ { 0x363B, 0x00 },
+ { 0x363C, 0x47 },
+ { 0x363D, 0x00 },
+ { 0x363E, 0x44 },
+ { 0x363F, 0x00 },
+ { 0x36C4, 0xFF },
+ { 0x36C5, 0x0F },
+ { 0x36C6, 0xFF },
+ { 0x36C7, 0x0F },
+ { 0x36C8, 0xFF },
+ { 0x36C9, 0x0F },
+ { 0x36CC, 0x00 },
+ { 0x36CD, 0x00 },
+ { 0x36CE, 0x00 },
+ { 0x36CF, 0x00 },
+ { 0x36D0, 0x00 },
+ { 0x36D1, 0x00 },
+ { 0x36D4, 0xFF },
+ { 0x36D5, 0x0F },
+ { 0x36D6, 0xFF },
+ { 0x36D7, 0x0F },
+ { 0x36D8, 0xFF },
+ { 0x36D9, 0x0F },
+ { 0x36DC, 0xFF },
+ { 0x36DD, 0x0F },
+ { 0x36DE, 0xFF },
+ { 0x36DF, 0x0F },
+ { 0x36E0, 0xFF },
+ { 0x36E1, 0x0F },
+ { 0x36E4, 0xFF },
+ { 0x36E5, 0x0F },
+ { 0x36E6, 0xFF },
+ { 0x36E7, 0x0F },
+ { 0x36E8, 0xFF },
+ { 0x36E9, 0x0F },
+ { 0x36EE, 0x00 },
+ { 0x36EF, 0x00 },
+ { 0x36F0, 0x00 },
+ { 0x36F1, 0x80 },
+ { 0x36F8, 0x01 },
+ { 0x3700, 0x03 },
+ { 0x3701, 0x05 },
+ { 0x3702, 0x03 },
+ { 0x3703, 0x04 },
+ { 0x3704, 0x08 },
+ { 0x3705, 0x03 },
+ { 0x3706, 0x03 },
+ { 0x3707, 0x03 },
+ { 0x3708, 0x03 },
+ { 0x3709, 0x03 },
+ { 0x370A, 0x03 },
+ { 0x370B, 0x03 },
+ { 0x370C, 0x03 },
+ { 0x370D, 0x03 },
+ { 0x370E, 0x0E },
+ { 0x3718, 0x64 },
+ { 0x3719, 0x47 },
+ { 0x371A, 0x36 },
+ { 0x371B, 0x1E },
+ { 0x371C, 0x50 },
+ { 0x371D, 0x41 },
+ { 0x371E, 0x2F },
+ { 0x371F, 0x1A },
+ { 0x3720, 0x95 },
+ { 0x3721, 0x9D },
+ { 0x3722, 0xA5 },
+ { 0x3723, 0xAD },
+ { 0x3748, 0xA8 },
+ { 0x3749, 0x9E },
+ { 0x374A, 0x94 },
+ { 0x374B, 0x80 },
+ { 0x37C0, 0x00 },
+ { 0x37C1, 0x00 },
+ { 0x37C2, 0x00 },
+ { 0x37C4, 0x00 },
+ { 0x37C5, 0x00 },
+ { 0x37C6, 0x00 },
+ { 0x37C8, 0x00 },
+ { 0x37C9, 0x00 },
+ { 0x37CA, 0x00 },
+ { 0x37CC, 0x00 },
+ { 0x37CD, 0x00 },
+ { 0x37CE, 0x00 },
+ { 0x37D0, 0x00 },
+ { 0x37D1, 0x00 },
+ { 0x37D2, 0x00 },
+ { 0x37D4, 0x00 },
+ { 0x37D5, 0x00 },
+ { 0x37D6, 0x00 },
+ { 0x37D8, 0x00 },
+ { 0x37D9, 0x00 },
+ { 0x37DA, 0x00 },
+ { 0x37DC, 0x00 },
+ { 0x37DD, 0x00 },
+ { 0x37DE, 0x00 },
+ { 0x37E0, 0x00 },
+ { 0x37E1, 0x00 },
+ { 0x37E2, 0x00 },
+ { 0x37E4, 0x00 },
+ { 0x37E5, 0x00 },
+ { 0x37E6, 0x00 },
+ { 0x37E8, 0x00 },
+ { 0x37E9, 0x00 },
+ { 0x37EA, 0x00 },
+ { 0x37EC, 0x00 },
+ { 0x37ED, 0x00 },
+ { 0x37EE, 0x00 },
+ { 0x37F0, 0x00 },
+ { 0x37F4, 0x00 },
+ { 0x37F5, 0x1E },
+ { 0x37F6, 0x34 },
+ { 0x37F7, 0x00 },
+ { 0x37F8, 0xFF },
+ { 0x37F9, 0xFF },
+ { 0x37FA, 0x03 },
+ { 0x37FC, 0x00 },
+ { 0x37FD, 0x00 },
+ { 0x37FE, 0x04 },
+ { 0x3800, 0xFF },
+ { 0x3801, 0xFF },
+ { 0x3802, 0x03 },
+ { 0x3804, 0x00 },
+ { 0x3805, 0x00 },
+ { 0x3806, 0x04 },
+ { 0x3808, 0x00 },
+ { 0x3809, 0x00 },
+ { 0x380A, 0x00 },
+ { 0x380C, 0x00 },
+ { 0x380D, 0x00 },
+ { 0x380E, 0x00 },
+ { 0x3810, 0x00 },
+ { 0x3811, 0x00 },
+ { 0x3812, 0x00 },
+ { 0x3814, 0x00 },
+ { 0x3815, 0x00 },
+ { 0x3816, 0x00 },
+ { 0x3818, 0x00 },
+ { 0x3819, 0x00 },
+ { 0x381A, 0x00 },
+ { 0x381C, 0x00 },
+ { 0x381D, 0x00 },
+ { 0x381E, 0x00 },
+ { 0x3820, 0x00 },
+ { 0x3821, 0x00 },
+ { 0x3822, 0x00 },
+ { 0x3824, 0x00 },
+ { 0x3825, 0x00 },
+ { 0x3826, 0x00 },
+ { 0x3828, 0x00 },
+ { 0x3829, 0x00 },
+ { 0x382A, 0x00 },
+ { 0x382C, 0x00 },
+ { 0x382D, 0x00 },
+ { 0x382E, 0x00 },
+ { 0x3830, 0x00 },
+ { 0x3831, 0x00 },
+ { 0x3832, 0x00 },
+ { 0x3834, 0x00 },
+ { 0x3835, 0x00 },
+ { 0x3836, 0x00 },
+ { 0x3838, 0x47 },
+ { 0x3839, 0x00 },
+ { 0x383A, 0x34 },
+ { 0x383B, 0x00 },
+ { 0x383C, 0x48 },
+ { 0x383D, 0x00 },
+ { 0x383E, 0x39 },
+ { 0x383F, 0x00 },
+ { 0x3840, 0x13 },
+ { 0x3841, 0x00 },
+ { 0x3842, 0x13 },
+ { 0x3843, 0x00 },
+ { 0x3844, 0x1D },
+ { 0x3845, 0x00 },
+ { 0x3846, 0x1D },
+ { 0x3847, 0x00 },
+ { 0x3848, 0x08 },
+ { 0x3849, 0x00 },
+ { 0x384A, 0x07 },
+ { 0x384B, 0x00 },
+ { 0x384C, 0x05 },
+ { 0x384D, 0x00 },
+ { 0x384E, 0x00 },
+ { 0x384F, 0x00 },
+ { 0x3850, 0xFF },
+ { 0x3851, 0x0F },
+ { 0x3852, 0x00 },
+ { 0x3853, 0x10 },
+ { 0x3854, 0xFF },
+ { 0x3855, 0x0F },
+ { 0x3856, 0x00 },
+ { 0x3857, 0x10 },
+ { 0x3858, 0xFF },
+ { 0x3859, 0x0F },
+ { 0x385A, 0x00 },
+ { 0x385B, 0x10 },
+ { 0x385C, 0x02 },
+ { 0x385D, 0x00 },
+ { 0x385E, 0x06 },
+ { 0x385F, 0x00 },
+ { 0x3860, 0x06 },
+ { 0x3861, 0x00 },
+ { 0x3862, 0x08 },
+ { 0x3863, 0x00 },
+ { 0x3864, 0x02 },
+ { 0x3865, 0x00 },
+ { 0x3870, 0x00 },
+ { 0x3871, 0x01 },
+ { 0x38A0, 0x01 },
+ { 0x38A1, 0x01 },
+ { 0x38A2, 0x00 },
+ { 0x38A3, 0x01 },
+ { 0x38A4, 0x07 },
+ { 0x38A5, 0x00 },
+ { 0x38A6, 0x04 },
+ { 0x38A7, 0x04 },
+ { 0x38A8, 0x00 },
+ { 0x38A9, 0x00 },
+ { 0x38AC, 0x00 },
+ { 0x38AD, 0x00 },
+ { 0x38AE, 0x01 },
+ { 0x38B0, 0x02 },
+ { 0x38B2, 0x43 },
+ { 0x38B3, 0x00 },
+ { 0x38B4, 0x10 },
+ { 0x38B5, 0x00 },
+ { 0x38B6, 0x09 },
+ { 0x38B7, 0x00 },
+ { 0x38B8, 0x09 },
+ { 0x38B9, 0x00 },
+ { 0x38BA, 0x47 },
+ { 0x38BB, 0x00 },
+ { 0x38BC, 0x16 },
+ { 0x38BD, 0x00 },
+ { 0x38BE, 0x0E },
+ { 0x38BF, 0x00 },
+ { 0x38C0, 0x0B },
+ { 0x38C1, 0x00 },
+ { 0x38C2, 0x4A },
+ { 0x38C3, 0x00 },
+ { 0x38C4, 0x1C },
+ { 0x38C5, 0x00 },
+ { 0x38C6, 0x12 },
+ { 0x38C7, 0x00 },
+ { 0x38C8, 0x0D },
+ { 0x38C9, 0x00 },
+ { 0x38CA, 0x51 },
+ { 0x38CB, 0x00 },
+ { 0x38CC, 0x24 },
+ { 0x38CD, 0x00 },
+ { 0x38CE, 0x19 },
+ { 0x38CF, 0x00 },
+ { 0x38D0, 0x10 },
+ { 0x38D1, 0x00 },
+ { 0x38D2, 0x5D },
+ { 0x38D3, 0x00 },
+ { 0x38D4, 0x30 },
+ { 0x38D5, 0x00 },
+ { 0x38D6, 0x23 },
+ { 0x38D7, 0x00 },
+ { 0x38D8, 0x17 },
+ { 0x38D9, 0x00 },
+ { 0x38DA, 0x72 },
+ { 0x38DB, 0x00 },
+ { 0x38DC, 0x43 },
+ { 0x38DD, 0x00 },
+ { 0x38DE, 0x31 },
+ { 0x38DF, 0x00 },
+ { 0x38E0, 0x20 },
+ { 0x38E1, 0x00 },
+ { 0x38E2, 0x96 },
+ { 0x38E3, 0x00 },
+ { 0x38E4, 0x5E },
+ { 0x38E5, 0x00 },
+ { 0x38E6, 0x46 },
+ { 0x38E7, 0x00 },
+ { 0x38E8, 0x2E },
+ { 0x38E9, 0x00 },
+ { 0x38EA, 0xD4 },
+ { 0x38EB, 0x00 },
+ { 0x38EC, 0x87 },
+ { 0x38ED, 0x00 },
+ { 0x38EE, 0x65 },
+ { 0x38EF, 0x00 },
+ { 0x38F0, 0x43 },
+ { 0x38F1, 0x00 },
+ { 0x38F2, 0x3F },
+ { 0x38F3, 0x01 },
+ { 0x38F4, 0xC4 },
+ { 0x38F5, 0x00 },
+ { 0x38F6, 0x94 },
+ { 0x38F7, 0x00 },
+ { 0x38F8, 0x64 },
+ { 0x38F9, 0x00 },
+ { 0x38FA, 0x00 },
+ { 0x38FB, 0x01 },
+ { 0x38FC, 0x00 },
+ { 0x38FD, 0x01 },
+ { 0x38FE, 0x00 },
+ { 0x38FF, 0x01 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x01 },
+ { 0x3902, 0x60 },
+ { 0x3903, 0x00 },
+ { 0x3904, 0x25 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x18 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x10 },
+ { 0x3909, 0x00 },
+ { 0x390A, 0xE6 },
+ { 0x390B, 0x00 },
+ { 0x390C, 0xD5 },
+ { 0x390D, 0x00 },
+ { 0x390E, 0xAA },
+ { 0x390F, 0x00 },
+ { 0x3910, 0x85 },
+ { 0x3911, 0x00 },
+ { 0x3912, 0xE6 },
+ { 0x3913, 0x00 },
+ { 0x3914, 0xD5 },
+ { 0x3915, 0x00 },
+ { 0x3916, 0xAA },
+ { 0x3917, 0x00 },
+ { 0x3918, 0x85 },
+ { 0x3919, 0x00 },
+ { 0x391A, 0xE6 },
+ { 0x391B, 0x00 },
+ { 0x391C, 0xD5 },
+ { 0x391D, 0x00 },
+ { 0x391E, 0xAA },
+ { 0x391F, 0x00 },
+ { 0x3920, 0x85 },
+ { 0x3921, 0x00 },
+ { 0x3922, 0x40 },
+ { 0x3923, 0x00 },
+ { 0x3924, 0x40 },
+ { 0x3925, 0x00 },
+ { 0x3926, 0x40 },
+ { 0x3927, 0x00 },
+ { 0x3928, 0x40 },
+ { 0x3929, 0x00 },
+ { 0x392A, 0x80 },
+ { 0x392B, 0x00 },
+ { 0x392C, 0x80 },
+ { 0x392D, 0x00 },
+ { 0x392E, 0x80 },
+ { 0x392F, 0x00 },
+ { 0x3930, 0x80 },
+ { 0x3931, 0x00 },
+ { 0x3932, 0x4C },
+ { 0x3933, 0x4C },
+ { 0x3934, 0x4C },
+ { 0x3940, 0x01 },
+ { 0x3941, 0x01 },
+ { 0x3942, 0x00 },
+ { 0x3943, 0x01 },
+ { 0x3944, 0x07 },
+ { 0x3945, 0x00 },
+ { 0x3946, 0x04 },
+ { 0x3947, 0x04 },
+ { 0x3948, 0x00 },
+ { 0x3949, 0x00 },
+ { 0x394C, 0x00 },
+ { 0x394D, 0x00 },
+ { 0x394E, 0x01 },
+ { 0x3950, 0x03 },
+ { 0x3952, 0x1B },
+ { 0x3953, 0x00 },
+ { 0x3954, 0x0C },
+ { 0x3955, 0x00 },
+ { 0x3956, 0x09 },
+ { 0x3957, 0x00 },
+ { 0x3958, 0x07 },
+ { 0x3959, 0x00 },
+ { 0x395A, 0x1D },
+ { 0x395B, 0x00 },
+ { 0x395C, 0x0E },
+ { 0x395D, 0x00 },
+ { 0x395E, 0x0B },
+ { 0x395F, 0x00 },
+ { 0x3960, 0x08 },
+ { 0x3961, 0x00 },
+ { 0x3962, 0x1E },
+ { 0x3963, 0x00 },
+ { 0x3964, 0x10 },
+ { 0x3965, 0x00 },
+ { 0x3966, 0x0C },
+ { 0x3967, 0x00 },
+ { 0x3968, 0x09 },
+ { 0x3969, 0x00 },
+ { 0x396A, 0x21 },
+ { 0x396B, 0x00 },
+ { 0x396C, 0x13 },
+ { 0x396D, 0x00 },
+ { 0x396E, 0x0E },
+ { 0x396F, 0x00 },
+ { 0x3970, 0x0A },
+ { 0x3971, 0x00 },
+ { 0x3972, 0x25 },
+ { 0x3973, 0x00 },
+ { 0x3974, 0x19 },
+ { 0x3975, 0x00 },
+ { 0x3976, 0x12 },
+ { 0x3977, 0x00 },
+ { 0x3978, 0x0D },
+ { 0x3979, 0x00 },
+ { 0x397A, 0x2E },
+ { 0x397B, 0x00 },
+ { 0x397C, 0x21 },
+ { 0x397D, 0x00 },
+ { 0x397E, 0x19 },
+ { 0x397F, 0x00 },
+ { 0x3980, 0x11 },
+ { 0x3981, 0x00 },
+ { 0x3982, 0x3C },
+ { 0x3983, 0x00 },
+ { 0x3984, 0x2F },
+ { 0x3985, 0x00 },
+ { 0x3986, 0x23 },
+ { 0x3987, 0x00 },
+ { 0x3988, 0x19 },
+ { 0x3989, 0x00 },
+ { 0x398A, 0x56 },
+ { 0x398B, 0x00 },
+ { 0x398C, 0x44 },
+ { 0x398D, 0x00 },
+ { 0x398E, 0x35 },
+ { 0x398F, 0x00 },
+ { 0x3990, 0x27 },
+ { 0x3991, 0x00 },
+ { 0x3992, 0x84 },
+ { 0x3993, 0x00 },
+ { 0x3994, 0x68 },
+ { 0x3995, 0x00 },
+ { 0x3996, 0x53 },
+ { 0x3997, 0x00 },
+ { 0x3998, 0x40 },
+ { 0x3999, 0x00 },
+ { 0x399A, 0x00 },
+ { 0x399B, 0x01 },
+ { 0x399C, 0x00 },
+ { 0x399D, 0x01 },
+ { 0x399E, 0x00 },
+ { 0x399F, 0x01 },
+ { 0x39A0, 0x00 },
+ { 0x39A1, 0x01 },
+ { 0x39A2, 0x60 },
+ { 0x39A3, 0x00 },
+ { 0x39A4, 0x20 },
+ { 0x39A5, 0x00 },
+ { 0x39A6, 0x15 },
+ { 0x39A7, 0x00 },
+ { 0x39A8, 0x10 },
+ { 0x39A9, 0x00 },
+ { 0x39AA, 0xE6 },
+ { 0x39AB, 0x00 },
+ { 0x39AC, 0xD5 },
+ { 0x39AD, 0x00 },
+ { 0x39AE, 0xAA },
+ { 0x39AF, 0x00 },
+ { 0x39B0, 0x85 },
+ { 0x39B1, 0x00 },
+ { 0x39B2, 0xE6 },
+ { 0x39B3, 0x00 },
+ { 0x39B4, 0xD5 },
+ { 0x39B5, 0x00 },
+ { 0x39B6, 0xAA },
+ { 0x39B7, 0x00 },
+ { 0x39B8, 0x85 },
+ { 0x39B9, 0x00 },
+ { 0x39BA, 0xE6 },
+ { 0x39BB, 0x00 },
+ { 0x39BC, 0xD5 },
+ { 0x39BD, 0x00 },
+ { 0x39BE, 0xAA },
+ { 0x39BF, 0x00 },
+ { 0x39C0, 0x85 },
+ { 0x39C1, 0x00 },
+ { 0x39C2, 0x40 },
+ { 0x39C3, 0x00 },
+ { 0x39C4, 0x40 },
+ { 0x39C5, 0x00 },
+ { 0x39C6, 0x40 },
+ { 0x39C7, 0x00 },
+ { 0x39C8, 0x40 },
+ { 0x39C9, 0x00 },
+ { 0x39CA, 0x80 },
+ { 0x39CB, 0x00 },
+ { 0x39CC, 0x80 },
+ { 0x39CD, 0x00 },
+ { 0x39CE, 0x80 },
+ { 0x39CF, 0x00 },
+ { 0x39D0, 0x80 },
+ { 0x39D1, 0x00 },
+ { 0x39D2, 0x4C },
+ { 0x39D3, 0x4C },
+ { 0x39D4, 0x4C },
+ { 0x39E0, 0x01 },
+ { 0x39E1, 0x00 },
+ { 0x39E4, 0x40 },
+ { 0x39E5, 0x01 },
+ { 0x39E6, 0x01 },
+ { 0x39E8, 0x00 },
+ { 0x39E9, 0x01 },
+ { 0x39EA, 0x00 },
+ { 0x39EB, 0x00 },
+ { 0x39EC, 0x01 },
+ { 0x39ED, 0x00 },
+ { 0x39EE, 0x01 },
+ { 0x39F0, 0x03 },
+ { 0x39F1, 0x04 },
+ { 0x39F2, 0x0E },
+ { 0x39F4, 0x19 },
+ { 0x39F5, 0x00 },
+ { 0x39F6, 0x12 },
+ { 0x39F7, 0x00 },
+ { 0x39F8, 0x0D },
+ { 0x39F9, 0x00 },
+ { 0x39FA, 0x07 },
+ { 0x39FB, 0x00 },
+ { 0x39FC, 0x2B },
+ { 0x39FD, 0x00 },
+ { 0x39FE, 0x1B },
+ { 0x39FF, 0x00 },
+ { 0x3A00, 0x11 },
+ { 0x3A01, 0x00 },
+ { 0x3A02, 0x08 },
+ { 0x3A03, 0x00 },
+ { 0x3A04, 0x37 },
+ { 0x3A05, 0x00 },
+ { 0x3A06, 0x21 },
+ { 0x3A07, 0x00 },
+ { 0x3A08, 0x14 },
+ { 0x3A09, 0x00 },
+ { 0x3A0A, 0x09 },
+ { 0x3A0B, 0x00 },
+ { 0x3A0C, 0x4A },
+ { 0x3A0D, 0x00 },
+ { 0x3A0E, 0x2C },
+ { 0x3A0F, 0x00 },
+ { 0x3A10, 0x18 },
+ { 0x3A11, 0x00 },
+ { 0x3A12, 0x0B },
+ { 0x3A13, 0x00 },
+ { 0x3A14, 0x66 },
+ { 0x3A15, 0x00 },
+ { 0x3A16, 0x3B },
+ { 0x3A17, 0x00 },
+ { 0x3A18, 0x20 },
+ { 0x3A19, 0x00 },
+ { 0x3A1A, 0x0F },
+ { 0x3A1B, 0x00 },
+ { 0x3A1C, 0x8E },
+ { 0x3A1D, 0x00 },
+ { 0x3A1E, 0x51 },
+ { 0x3A1F, 0x00 },
+ { 0x3A20, 0x2B },
+ { 0x3A21, 0x00 },
+ { 0x3A22, 0x14 },
+ { 0x3A23, 0x00 },
+ { 0x3A24, 0xC8 },
+ { 0x3A25, 0x00 },
+ { 0x3A26, 0x72 },
+ { 0x3A27, 0x00 },
+ { 0x3A28, 0x3C },
+ { 0x3A29, 0x00 },
+ { 0x3A2A, 0x1B },
+ { 0x3A2B, 0x00 },
+ { 0x3A2C, 0x19 },
+ { 0x3A2D, 0x01 },
+ { 0x3A2E, 0xA0 },
+ { 0x3A2F, 0x00 },
+ { 0x3A30, 0x54 },
+ { 0x3A31, 0x00 },
+ { 0x3A32, 0x25 },
+ { 0x3A33, 0x00 },
+ { 0x3A34, 0x8D },
+ { 0x3A35, 0x01 },
+ { 0x3A36, 0xE1 },
+ { 0x3A37, 0x00 },
+ { 0x3A38, 0x76 },
+ { 0x3A39, 0x00 },
+ { 0x3A3A, 0x35 },
+ { 0x3A3B, 0x00 },
+ { 0x3A3C, 0x00 },
+ { 0x3A3D, 0x01 },
+ { 0x3A3E, 0x00 },
+ { 0x3A3F, 0x01 },
+ { 0x3A40, 0x00 },
+ { 0x3A41, 0x01 },
+ { 0x3A42, 0x00 },
+ { 0x3A43, 0x01 },
+ { 0x3A44, 0x70 },
+ { 0x3A45, 0x00 },
+ { 0x3A46, 0x25 },
+ { 0x3A47, 0x00 },
+ { 0x3A48, 0x18 },
+ { 0x3A49, 0x00 },
+ { 0x3A4A, 0x10 },
+ { 0x3A4B, 0x00 },
+ { 0x3A4C, 0xE6 },
+ { 0x3A4D, 0x00 },
+ { 0x3A4E, 0xD5 },
+ { 0x3A4F, 0x00 },
+ { 0x3A50, 0xAA },
+ { 0x3A51, 0x00 },
+ { 0x3A52, 0x85 },
+ { 0x3A53, 0x00 },
+ { 0x3A54, 0xE6 },
+ { 0x3A55, 0x00 },
+ { 0x3A56, 0xD5 },
+ { 0x3A57, 0x00 },
+ { 0x3A58, 0xAA },
+ { 0x3A59, 0x00 },
+ { 0x3A5A, 0x85 },
+ { 0x3A5B, 0x00 },
+ { 0x3A5C, 0xE6 },
+ { 0x3A5D, 0x00 },
+ { 0x3A5E, 0xD5 },
+ { 0x3A5F, 0x00 },
+ { 0x3A60, 0xAA },
+ { 0x3A61, 0x00 },
+ { 0x3A62, 0x85 },
+ { 0x3A63, 0x00 },
+ { 0x3A64, 0x19 },
+ { 0x3A65, 0x00 },
+ { 0x3A66, 0x12 },
+ { 0x3A67, 0x00 },
+ { 0x3A68, 0x0D },
+ { 0x3A69, 0x00 },
+ { 0x3A6A, 0x07 },
+ { 0x3A6B, 0x00 },
+ { 0x3A6C, 0x0C },
+ { 0x3A6D, 0x00 },
+ { 0x3A6E, 0x07 },
+ { 0x3A6F, 0x00 },
+ { 0x3A70, 0x05 },
+ { 0x3A71, 0x00 },
+ { 0x3A72, 0x04 },
+ { 0x3A73, 0x00 },
+ { 0x3A74, 0x1B },
+ { 0x3A75, 0x00 },
+ { 0x3A76, 0x15 },
+ { 0x3A77, 0x00 },
+ { 0x3A78, 0x0C },
+ { 0x3A79, 0x00 },
+ { 0x3A7A, 0x08 },
+ { 0x3A7B, 0x00 },
+ { 0x3A7C, 0x80 },
+ { 0x3A7D, 0x00 },
+ { 0x3A7E, 0x80 },
+ { 0x3A7F, 0x00 },
+ { 0x3A80, 0x80 },
+ { 0x3A81, 0x00 },
+ { 0x3A82, 0x80 },
+ { 0x3A83, 0x00 },
+ { 0x3A84, 0x09 },
+ { 0x3A85, 0x00 },
+ { 0x3A86, 0x06 },
+ { 0x3A87, 0x00 },
+ { 0x3A88, 0x04 },
+ { 0x3A89, 0x00 },
+ { 0x3A8A, 0x03 },
+ { 0x3A8B, 0x00 },
+ { 0x3A8C, 0xFA },
+ { 0x3A8D, 0x00 },
+ { 0x3A8E, 0xC8 },
+ { 0x3A8F, 0x00 },
+ { 0x3A90, 0x96 },
+ { 0x3A91, 0x00 },
+ { 0x3A92, 0x64 },
+ { 0x3A93, 0x00 },
+ { 0x3A94, 0xE1 },
+ { 0x3A95, 0x00 },
+ { 0x3A96, 0xC8 },
+ { 0x3A97, 0x00 },
+ { 0x3A98, 0x96 },
+ { 0x3A99, 0x00 },
+ { 0x3A9A, 0x64 },
+ { 0x3A9B, 0x00 },
+ { 0x3A9C, 0x08 },
+ { 0x3A9D, 0x10 },
+ { 0x3A9E, 0x4C },
+ { 0x3A9F, 0x4C },
+ { 0x3AA0, 0x4C },
+ { 0x3AA1, 0x04 },
+ { 0x3AA2, 0x04 },
+ { 0x3AC0, 0x01 },
+ { 0x3AC4, 0x81 },
+ { 0x3AC5, 0x00 },
+ { 0x3AC6, 0x00 },
+ { 0x3AC7, 0x00 },
+ { 0x3AC8, 0x00 },
+ { 0x3AC9, 0x00 },
+ { 0x3ACA, 0x00 },
+ { 0x3ACB, 0x00 },
+ { 0x3ACC, 0x02 },
+ { 0x3ACD, 0x00 },
+ { 0x3ACE, 0x81 },
+ { 0x3ACF, 0x00 },
+ { 0x3AD0, 0x00 },
+ { 0x3AD1, 0x00 },
+ { 0x3AD2, 0xFD },
+ { 0x3AD3, 0x03 },
+ { 0x3AD4, 0x02 },
+ { 0x3AD5, 0x00 },
+ { 0x3AD6, 0x00 },
+ { 0x3AD7, 0x00 },
+ { 0x3AD8, 0x81 },
+ { 0x3AD9, 0x00 },
+ { 0x3ADA, 0xFD },
+ { 0x3ADB, 0x03 },
+ { 0x3ADC, 0xFF },
+ { 0x3ADD, 0x03 },
+ { 0x3ADE, 0x01 },
+ { 0x3ADF, 0x00 },
+ { 0x3AE0, 0x01 },
+ { 0x3AE1, 0x00 },
+ { 0x3AE2, 0x7E },
+ { 0x3AE3, 0x00 },
+ { 0x3AF4, 0x00 },
+ { 0x3AF6, 0x40 },
+ { 0x3AF7, 0x1E },
+ { 0x3AF8, 0x00 },
+ { 0x3AFA, 0x00 },
+ { 0x3AFB, 0x00 },
+ { 0x3AFC, 0x00 },
+ { 0x3AFD, 0x00 },
+ { 0x3AFE, 0x00 },
+ { 0x3AFF, 0x00 },
+ { 0x3B00, 0x00 },
+ { 0x3B01, 0x00 },
+ { 0x3B02, 0x00 },
+ { 0x3B03, 0x00 },
+ { 0x3B04, 0x00 },
+ { 0x3B05, 0x00 },
+ { 0x3B06, 0x00 },
+ { 0x3B07, 0x00 },
+ { 0x3B08, 0x00 },
+ { 0x3B09, 0x00 },
+ { 0x3B0A, 0x00 },
+ { 0x3B0B, 0x00 },
+ { 0x3B0C, 0x00 },
+ { 0x3B0D, 0x00 },
+ { 0x3B0E, 0x00 },
+ { 0x3B0F, 0x00 },
+ { 0x3B10, 0x00 },
+ { 0x3B11, 0x00 },
+ { 0x3B12, 0x00 },
+ { 0x3B13, 0x00 },
+ { 0x3B14, 0x00 },
+ { 0x3B15, 0x00 },
+ { 0x3B16, 0x00 },
+ { 0x3B17, 0x00 },
+ { 0x3B18, 0x00 },
+ { 0x3B19, 0x00 },
+ { 0x3B1A, 0x00 },
+ { 0x3B1B, 0x00 },
+ { 0x3B1C, 0x00 },
+ { 0x3B1D, 0x00 },
+ { 0x3B1E, 0x00 },
+ { 0x3B1F, 0x00 },
+ { 0x3B20, 0x00 },
+ { 0x3B21, 0x00 },
+ { 0x3B22, 0x00 },
+ { 0x3B23, 0x00 },
+ { 0x3B24, 0x00 },
+ { 0x3B25, 0x00 },
+ { 0x3B26, 0x00 },
+ { 0x3B27, 0x00 },
+ { 0x3B28, 0x00 },
+ { 0x3B29, 0x00 },
+ { 0x3B2A, 0x00 },
+ { 0x3B2C, 0x00 },
+ { 0x3B2E, 0x00 },
+ { 0x3B30, 0x00 },
+ { 0x3B32, 0x0C },
+ { 0x4000, 0xAF },
+ { 0x4001, 0xA7 },
+ { 0x4002, 0xA8 },
+ { 0x4003, 0xA5 },
+ { 0x4004, 0x98 },
+ { 0x4005, 0x93 },
+ { 0x4006, 0x94 },
+ { 0x4007, 0x93 },
+ { 0x4008, 0x8E },
+ { 0x4009, 0x8C },
+ { 0x400A, 0x8C },
+ { 0x400B, 0x8C },
+ { 0x400C, 0x89 },
+ { 0x400D, 0x88 },
+ { 0x400E, 0x89 },
+ { 0x400F, 0x89 },
+ { 0x4010, 0x87 },
+ { 0x4011, 0x87 },
+ { 0x4012, 0x87 },
+ { 0x4013, 0x86 },
+ { 0x4014, 0x88 },
+ { 0x4015, 0x87 },
+ { 0x4016, 0x87 },
+ { 0x4017, 0x87 },
+ { 0x4018, 0x8B },
+ { 0x4019, 0x89 },
+ { 0x401A, 0x89 },
+ { 0x401B, 0x8A },
+ { 0x401C, 0x92 },
+ { 0x401D, 0x8F },
+ { 0x401E, 0x8F },
+ { 0x401F, 0x8F },
+ { 0x4020, 0xA2 },
+ { 0x4021, 0x9C },
+ { 0x4022, 0x9B },
+ { 0x4023, 0x9C },
+ { 0x4024, 0xA1 },
+ { 0x4025, 0x9A },
+ { 0x4026, 0x9B },
+ { 0x4027, 0x99 },
+ { 0x4028, 0x94 },
+ { 0x4029, 0x90 },
+ { 0x402A, 0x90 },
+ { 0x402B, 0x90 },
+ { 0x402C, 0x8B },
+ { 0x402D, 0x89 },
+ { 0x402E, 0x89 },
+ { 0x402F, 0x89 },
+ { 0x4030, 0x86 },
+ { 0x4031, 0x85 },
+ { 0x4032, 0x86 },
+ { 0x4033, 0x85 },
+ { 0x4034, 0x84 },
+ { 0x4035, 0x84 },
+ { 0x4036, 0x84 },
+ { 0x4037, 0x84 },
+ { 0x4038, 0x85 },
+ { 0x4039, 0x85 },
+ { 0x403A, 0x85 },
+ { 0x403B, 0x85 },
+ { 0x403C, 0x88 },
+ { 0x403D, 0x87 },
+ { 0x403E, 0x87 },
+ { 0x403F, 0x87 },
+ { 0x4040, 0x8E },
+ { 0x4041, 0x8C },
+ { 0x4042, 0x8C },
+ { 0x4043, 0x8C },
+ { 0x4044, 0x98 },
+ { 0x4045, 0x93 },
+ { 0x4046, 0x93 },
+ { 0x4047, 0x94 },
+ { 0x4048, 0x9D },
+ { 0x4049, 0x96 },
+ { 0x404A, 0x97 },
+ { 0x404B, 0x96 },
+ { 0x404C, 0x91 },
+ { 0x404D, 0x8C },
+ { 0x404E, 0x8D },
+ { 0x404F, 0x8C },
+ { 0x4050, 0x89 },
+ { 0x4051, 0x86 },
+ { 0x4052, 0x87 },
+ { 0x4053, 0x86 },
+ { 0x4054, 0x83 },
+ { 0x4055, 0x82 },
+ { 0x4056, 0x82 },
+ { 0x4057, 0x82 },
+ { 0x4058, 0x80 },
+ { 0x4059, 0x80 },
+ { 0x405A, 0x80 },
+ { 0x405B, 0x80 },
+ { 0x405C, 0x82 },
+ { 0x405D, 0x82 },
+ { 0x405E, 0x82 },
+ { 0x405F, 0x82 },
+ { 0x4060, 0x86 },
+ { 0x4061, 0x85 },
+ { 0x4062, 0x85 },
+ { 0x4063, 0x85 },
+ { 0x4064, 0x8B },
+ { 0x4065, 0x8A },
+ { 0x4066, 0x89 },
+ { 0x4067, 0x89 },
+ { 0x4068, 0x94 },
+ { 0x4069, 0x91 },
+ { 0x406A, 0x90 },
+ { 0x406B, 0x91 },
+ { 0x406C, 0x9E },
+ { 0x406D, 0x95 },
+ { 0x406E, 0x96 },
+ { 0x406F, 0x95 },
+ { 0x4070, 0x91 },
+ { 0x4071, 0x8C },
+ { 0x4072, 0x8C },
+ { 0x4073, 0x8C },
+ { 0x4074, 0x89 },
+ { 0x4075, 0x86 },
+ { 0x4076, 0x86 },
+ { 0x4077, 0x86 },
+ { 0x4078, 0x83 },
+ { 0x4079, 0x82 },
+ { 0x407A, 0x82 },
+ { 0x407B, 0x82 },
+ { 0x407C, 0x80 },
+ { 0x407D, 0x80 },
+ { 0x407E, 0x80 },
+ { 0x407F, 0x80 },
+ { 0x4080, 0x82 },
+ { 0x4081, 0x81 },
+ { 0x4082, 0x81 },
+ { 0x4083, 0x81 },
+ { 0x4084, 0x85 },
+ { 0x4085, 0x85 },
+ { 0x4086, 0x85 },
+ { 0x4087, 0x84 },
+ { 0x4088, 0x8B },
+ { 0x4089, 0x8A },
+ { 0x408A, 0x89 },
+ { 0x408B, 0x89 },
+ { 0x408C, 0x93 },
+ { 0x408D, 0x90 },
+ { 0x408E, 0x8F },
+ { 0x408F, 0x8F },
+ { 0x4090, 0xA3 },
+ { 0x4091, 0x99 },
+ { 0x4092, 0x9A },
+ { 0x4093, 0x99 },
+ { 0x4094, 0x95 },
+ { 0x4095, 0x8F },
+ { 0x4096, 0x8F },
+ { 0x4097, 0x8F },
+ { 0x4098, 0x8B },
+ { 0x4099, 0x87 },
+ { 0x409A, 0x87 },
+ { 0x409B, 0x87 },
+ { 0x409C, 0x86 },
+ { 0x409D, 0x84 },
+ { 0x409E, 0x84 },
+ { 0x409F, 0x84 },
+ { 0x40A0, 0x84 },
+ { 0x40A1, 0x83 },
+ { 0x40A2, 0x83 },
+ { 0x40A3, 0x82 },
+ { 0x40A4, 0x84 },
+ { 0x40A5, 0x84 },
+ { 0x40A6, 0x83 },
+ { 0x40A7, 0x83 },
+ { 0x40A8, 0x88 },
+ { 0x40A9, 0x87 },
+ { 0x40AA, 0x86 },
+ { 0x40AB, 0x86 },
+ { 0x40AC, 0x8E },
+ { 0x40AD, 0x8C },
+ { 0x40AE, 0x8C },
+ { 0x40AF, 0x8B },
+ { 0x40B0, 0x9A },
+ { 0x40B1, 0x96 },
+ { 0x40B2, 0x96 },
+ { 0x40B3, 0x95 },
+ { 0x40B4, 0xBA },
+ { 0x40B5, 0xAC },
+ { 0x40B6, 0xAD },
+ { 0x40B7, 0xAC },
+ { 0x40B8, 0x99 },
+ { 0x40B9, 0x90 },
+ { 0x40BA, 0x91 },
+ { 0x40BB, 0x90 },
+ { 0x40BC, 0x90 },
+ { 0x40BD, 0x8A },
+ { 0x40BE, 0x8A },
+ { 0x40BF, 0x8A },
+ { 0x40C0, 0x89 },
+ { 0x40C1, 0x86 },
+ { 0x40C2, 0x86 },
+ { 0x40C3, 0x87 },
+ { 0x40C4, 0x87 },
+ { 0x40C5, 0x85 },
+ { 0x40C6, 0x85 },
+ { 0x40C7, 0x85 },
+ { 0x40C8, 0x87 },
+ { 0x40C9, 0x86 },
+ { 0x40CA, 0x85 },
+ { 0x40CB, 0x85 },
+ { 0x40CC, 0x8A },
+ { 0x40CD, 0x88 },
+ { 0x40CE, 0x88 },
+ { 0x40CF, 0x87 },
+ { 0x40D0, 0x92 },
+ { 0x40D1, 0x8F },
+ { 0x40D2, 0x8E },
+ { 0x40D3, 0x8E },
+ { 0x40D4, 0xA2 },
+ { 0x40D5, 0x9D },
+ { 0x40D6, 0x9D },
+ { 0x40D7, 0x9B },
+ { 0x4100, 0x80 },
+ { 0x4101, 0x80 },
+ { 0x4102, 0x80 },
+ { 0x4103, 0x80 },
+ { 0x4104, 0x80 },
+ { 0x4105, 0x80 },
+ { 0x4106, 0x80 },
+ { 0x4107, 0x80 },
+ { 0x4108, 0x80 },
+ { 0x4109, 0x80 },
+ { 0x410A, 0x80 },
+ { 0x410B, 0x80 },
+ { 0x410C, 0x80 },
+ { 0x410D, 0x80 },
+ { 0x410E, 0x80 },
+ { 0x410F, 0x80 },
+ { 0x4110, 0x80 },
+ { 0x4111, 0x80 },
+ { 0x4112, 0x80 },
+ { 0x4113, 0x80 },
+ { 0x4114, 0x80 },
+ { 0x4115, 0x80 },
+ { 0x4116, 0x80 },
+ { 0x4117, 0x80 },
+ { 0x4118, 0x80 },
+ { 0x4119, 0x80 },
+ { 0x411A, 0x80 },
+ { 0x411B, 0x80 },
+ { 0x411C, 0x80 },
+ { 0x411D, 0x80 },
+ { 0x411E, 0x80 },
+ { 0x411F, 0x80 },
+ { 0x4120, 0x80 },
+ { 0x4121, 0x80 },
+ { 0x4122, 0x80 },
+ { 0x4123, 0x80 },
+ { 0x4124, 0x80 },
+ { 0x4125, 0x80 },
+ { 0x4126, 0x80 },
+ { 0x4127, 0x80 },
+ { 0x4128, 0x80 },
+ { 0x4129, 0x80 },
+ { 0x412A, 0x80 },
+ { 0x412B, 0x80 },
+ { 0x412C, 0x80 },
+ { 0x412D, 0x80 },
+ { 0x412E, 0x80 },
+ { 0x412F, 0x80 },
+ { 0x4130, 0x80 },
+ { 0x4131, 0x80 },
+ { 0x4132, 0x80 },
+ { 0x4133, 0x80 },
+ { 0x4134, 0x80 },
+ { 0x4135, 0x80 },
+ { 0x4136, 0x80 },
+ { 0x4137, 0x80 },
+ { 0x4138, 0x80 },
+ { 0x4139, 0x80 },
+ { 0x413A, 0x80 },
+ { 0x413B, 0x80 },
+ { 0x413C, 0x80 },
+ { 0x413D, 0x80 },
+ { 0x413E, 0x80 },
+ { 0x413F, 0x80 },
+ { 0x4140, 0x80 },
+ { 0x4141, 0x80 },
+ { 0x4142, 0x80 },
+ { 0x4143, 0x80 },
+ { 0x4144, 0x80 },
+ { 0x4145, 0x80 },
+ { 0x4146, 0x80 },
+ { 0x4147, 0x80 },
+ { 0x4148, 0x80 },
+ { 0x4149, 0x80 },
+ { 0x414A, 0x80 },
+ { 0x414B, 0x80 },
+ { 0x414C, 0x80 },
+ { 0x414D, 0x80 },
+ { 0x414E, 0x80 },
+ { 0x414F, 0x80 },
+ { 0x4150, 0x80 },
+ { 0x4151, 0x80 },
+ { 0x4152, 0x80 },
+ { 0x4153, 0x80 },
+ { 0x4154, 0x80 },
+ { 0x4155, 0x80 },
+ { 0x4156, 0x80 },
+ { 0x4157, 0x80 },
+ { 0x4158, 0x80 },
+ { 0x4159, 0x80 },
+ { 0x415A, 0x80 },
+ { 0x415B, 0x80 },
+ { 0x415C, 0x80 },
+ { 0x415D, 0x80 },
+ { 0x415E, 0x80 },
+ { 0x415F, 0x80 },
+ { 0x4160, 0x80 },
+ { 0x4161, 0x80 },
+ { 0x4162, 0x80 },
+ { 0x4163, 0x80 },
+ { 0x4164, 0x80 },
+ { 0x4165, 0x80 },
+ { 0x4166, 0x80 },
+ { 0x4167, 0x80 },
+ { 0x4168, 0x80 },
+ { 0x4169, 0x80 },
+ { 0x416A, 0x80 },
+ { 0x416B, 0x80 },
+ { 0x416C, 0x80 },
+ { 0x416D, 0x80 },
+ { 0x416E, 0x80 },
+ { 0x416F, 0x80 },
+ { 0x4170, 0x80 },
+ { 0x4171, 0x80 },
+ { 0x4172, 0x80 },
+ { 0x4173, 0x80 },
+ { 0x4174, 0x80 },
+ { 0x4175, 0x80 },
+ { 0x4176, 0x80 },
+ { 0x4177, 0x80 },
+ { 0x4178, 0x80 },
+ { 0x4179, 0x80 },
+ { 0x417A, 0x80 },
+ { 0x417B, 0x80 },
+ { 0x417C, 0x80 },
+ { 0x417D, 0x80 },
+ { 0x417E, 0x80 },
+ { 0x417F, 0x80 },
+ { 0x4180, 0x80 },
+ { 0x4181, 0x80 },
+ { 0x4182, 0x80 },
+ { 0x4183, 0x80 },
+ { 0x4184, 0x80 },
+ { 0x4185, 0x80 },
+ { 0x4186, 0x80 },
+ { 0x4187, 0x80 },
+ { 0x4188, 0x80 },
+ { 0x4189, 0x80 },
+ { 0x418A, 0x80 },
+ { 0x418B, 0x80 },
+ { 0x418C, 0x80 },
+ { 0x418D, 0x80 },
+ { 0x418E, 0x80 },
+ { 0x418F, 0x80 },
+ { 0x4190, 0x80 },
+ { 0x4191, 0x80 },
+ { 0x4192, 0x80 },
+ { 0x4193, 0x80 },
+ { 0x4194, 0x80 },
+ { 0x4195, 0x80 },
+ { 0x4196, 0x80 },
+ { 0x4197, 0x80 },
+ { 0x4198, 0x80 },
+ { 0x4199, 0x80 },
+ { 0x419A, 0x80 },
+ { 0x419B, 0x80 },
+ { 0x419C, 0x80 },
+ { 0x419D, 0x80 },
+ { 0x419E, 0x80 },
+ { 0x419F, 0x80 },
+ { 0x41A0, 0x80 },
+ { 0x41A1, 0x80 },
+ { 0x41A2, 0x80 },
+ { 0x41A3, 0x80 },
+ { 0x41A4, 0x80 },
+ { 0x41A5, 0x80 },
+ { 0x41A6, 0x80 },
+ { 0x41A7, 0x80 },
+ { 0x41A8, 0x80 },
+ { 0x41A9, 0x80 },
+ { 0x41AA, 0x80 },
+ { 0x41AB, 0x80 },
+ { 0x41AC, 0x80 },
+ { 0x41AD, 0x80 },
+ { 0x41AE, 0x80 },
+ { 0x41AF, 0x80 },
+ { 0x41B0, 0x80 },
+ { 0x41B1, 0x80 },
+ { 0x41B2, 0x80 },
+ { 0x41B3, 0x80 },
+ { 0x41B4, 0x80 },
+ { 0x41B5, 0x80 },
+ { 0x41B6, 0x80 },
+ { 0x41B7, 0x80 },
+ { 0x41B8, 0x80 },
+ { 0x41B9, 0x80 },
+ { 0x41BA, 0x80 },
+ { 0x41BB, 0x80 },
+ { 0x41BC, 0x80 },
+ { 0x41BD, 0x80 },
+ { 0x41BE, 0x80 },
+ { 0x41BF, 0x80 },
+ { 0x41C0, 0x80 },
+ { 0x41C1, 0x80 },
+ { 0x41C2, 0x80 },
+ { 0x41C3, 0x80 },
+ { 0x41C4, 0x80 },
+ { 0x41C5, 0x80 },
+ { 0x41C6, 0x80 },
+ { 0x41C7, 0x80 },
+ { 0x41C8, 0x80 },
+ { 0x41C9, 0x80 },
+ { 0x41CA, 0x80 },
+ { 0x41CB, 0x80 },
+ { 0x41CC, 0x80 },
+ { 0x41CD, 0x80 },
+ { 0x41CE, 0x80 },
+ { 0x41CF, 0x80 },
+ { 0x41D0, 0x80 },
+ { 0x41D1, 0x80 },
+ { 0x41D2, 0x80 },
+ { 0x41D3, 0x80 },
+ { 0x41D4, 0x80 },
+ { 0x41D5, 0x80 },
+ { 0x41D6, 0x80 },
+ { 0x41D7, 0x80 },
+ { 0x4200, 0x80 },
+ { 0x4201, 0x80 },
+ { 0x4202, 0x80 },
+ { 0x4203, 0x80 },
+ { 0x4204, 0x80 },
+ { 0x4205, 0x80 },
+ { 0x4206, 0x80 },
+ { 0x4207, 0x80 },
+ { 0x4208, 0x80 },
+ { 0x4209, 0x80 },
+ { 0x420A, 0x80 },
+ { 0x420B, 0x80 },
+ { 0x420C, 0x80 },
+ { 0x420D, 0x80 },
+ { 0x420E, 0x80 },
+ { 0x420F, 0x80 },
+ { 0x4210, 0x80 },
+ { 0x4211, 0x80 },
+ { 0x4212, 0x80 },
+ { 0x4213, 0x80 },
+ { 0x4214, 0x80 },
+ { 0x4215, 0x80 },
+ { 0x4216, 0x80 },
+ { 0x4217, 0x80 },
+ { 0x4218, 0x80 },
+ { 0x4219, 0x80 },
+ { 0x421A, 0x80 },
+ { 0x421B, 0x80 },
+ { 0x421C, 0x80 },
+ { 0x421D, 0x80 },
+ { 0x421E, 0x80 },
+ { 0x421F, 0x80 },
+ { 0x4220, 0x80 },
+ { 0x4221, 0x80 },
+ { 0x4222, 0x80 },
+ { 0x4223, 0x80 },
+ { 0x4224, 0x80 },
+ { 0x4225, 0x80 },
+ { 0x4226, 0x80 },
+ { 0x4227, 0x80 },
+ { 0x4228, 0x80 },
+ { 0x4229, 0x80 },
+ { 0x422A, 0x80 },
+ { 0x422B, 0x80 },
+ { 0x422C, 0x80 },
+ { 0x422D, 0x80 },
+ { 0x422E, 0x80 },
+ { 0x422F, 0x80 },
+ { 0x4230, 0x80 },
+ { 0x4231, 0x80 },
+ { 0x4232, 0x80 },
+ { 0x4233, 0x80 },
+ { 0x4234, 0x80 },
+ { 0x4235, 0x80 },
+ { 0x4236, 0x80 },
+ { 0x4237, 0x80 },
+ { 0x4238, 0x80 },
+ { 0x4239, 0x80 },
+ { 0x423A, 0x80 },
+ { 0x423B, 0x80 },
+ { 0x423C, 0x80 },
+ { 0x423D, 0x80 },
+ { 0x423E, 0x80 },
+ { 0x423F, 0x80 },
+ { 0x4240, 0x80 },
+ { 0x4241, 0x80 },
+ { 0x4242, 0x80 },
+ { 0x4243, 0x80 },
+ { 0x4244, 0x80 },
+ { 0x4245, 0x80 },
+ { 0x4246, 0x80 },
+ { 0x4247, 0x80 },
+ { 0x4248, 0x80 },
+ { 0x4249, 0x80 },
+ { 0x424A, 0x80 },
+ { 0x424B, 0x80 },
+ { 0x424C, 0x80 },
+ { 0x424D, 0x80 },
+ { 0x424E, 0x80 },
+ { 0x424F, 0x80 },
+ { 0x4250, 0x80 },
+ { 0x4251, 0x80 },
+ { 0x4252, 0x80 },
+ { 0x4253, 0x80 },
+ { 0x4254, 0x80 },
+ { 0x4255, 0x80 },
+ { 0x4256, 0x80 },
+ { 0x4257, 0x80 },
+ { 0x4258, 0x80 },
+ { 0x4259, 0x80 },
+ { 0x425A, 0x80 },
+ { 0x425B, 0x80 },
+ { 0x425C, 0x80 },
+ { 0x425D, 0x80 },
+ { 0x425E, 0x80 },
+ { 0x425F, 0x80 },
+ { 0x4260, 0x80 },
+ { 0x4261, 0x80 },
+ { 0x4262, 0x80 },
+ { 0x4263, 0x80 },
+ { 0x4264, 0x80 },
+ { 0x4265, 0x80 },
+ { 0x4266, 0x80 },
+ { 0x4267, 0x80 },
+ { 0x4268, 0x80 },
+ { 0x4269, 0x80 },
+ { 0x426A, 0x80 },
+ { 0x426B, 0x80 },
+ { 0x426C, 0x80 },
+ { 0x426D, 0x80 },
+ { 0x426E, 0x80 },
+ { 0x426F, 0x80 },
+ { 0x4270, 0x80 },
+ { 0x4271, 0x80 },
+ { 0x4272, 0x80 },
+ { 0x4273, 0x80 },
+ { 0x4274, 0x80 },
+ { 0x4275, 0x80 },
+ { 0x4276, 0x80 },
+ { 0x4277, 0x80 },
+ { 0x4278, 0x80 },
+ { 0x4279, 0x80 },
+ { 0x427A, 0x80 },
+ { 0x427B, 0x80 },
+ { 0x427C, 0x80 },
+ { 0x427D, 0x80 },
+ { 0x427E, 0x80 },
+ { 0x427F, 0x80 },
+ { 0x4280, 0x80 },
+ { 0x4281, 0x80 },
+ { 0x4282, 0x80 },
+ { 0x4283, 0x80 },
+ { 0x4284, 0x80 },
+ { 0x4285, 0x80 },
+ { 0x4286, 0x80 },
+ { 0x4287, 0x80 },
+ { 0x4288, 0x80 },
+ { 0x4289, 0x80 },
+ { 0x428A, 0x80 },
+ { 0x428B, 0x80 },
+ { 0x428C, 0x80 },
+ { 0x428D, 0x80 },
+ { 0x428E, 0x80 },
+ { 0x428F, 0x80 },
+ { 0x4290, 0x80 },
+ { 0x4291, 0x80 },
+ { 0x4292, 0x80 },
+ { 0x4293, 0x80 },
+ { 0x4294, 0x80 },
+ { 0x4295, 0x80 },
+ { 0x4296, 0x80 },
+ { 0x4297, 0x80 },
+ { 0x4298, 0x80 },
+ { 0x4299, 0x80 },
+ { 0x429A, 0x80 },
+ { 0x429B, 0x80 },
+ { 0x429C, 0x80 },
+ { 0x429D, 0x80 },
+ { 0x429E, 0x80 },
+ { 0x429F, 0x80 },
+ { 0x42A0, 0x80 },
+ { 0x42A1, 0x80 },
+ { 0x42A2, 0x80 },
+ { 0x42A3, 0x80 },
+ { 0x42A4, 0x80 },
+ { 0x42A5, 0x80 },
+ { 0x42A6, 0x80 },
+ { 0x42A7, 0x80 },
+ { 0x42A8, 0x80 },
+ { 0x42A9, 0x80 },
+ { 0x42AA, 0x80 },
+ { 0x42AB, 0x80 },
+ { 0x42AC, 0x80 },
+ { 0x42AD, 0x80 },
+ { 0x42AE, 0x80 },
+ { 0x42AF, 0x80 },
+ { 0x42B0, 0x80 },
+ { 0x42B1, 0x80 },
+ { 0x42B2, 0x80 },
+ { 0x42B3, 0x80 },
+ { 0x42B4, 0x80 },
+ { 0x42B5, 0x80 },
+ { 0x42B6, 0x80 },
+ { 0x42B7, 0x80 },
+ { 0x42B8, 0x80 },
+ { 0x42B9, 0x80 },
+ { 0x42BA, 0x80 },
+ { 0x42BB, 0x80 },
+ { 0x42BC, 0x80 },
+ { 0x42BD, 0x80 },
+ { 0x42BE, 0x80 },
+ { 0x42BF, 0x80 },
+ { 0x42C0, 0x80 },
+ { 0x42C1, 0x80 },
+ { 0x42C2, 0x80 },
+ { 0x42C3, 0x80 },
+ { 0x42C4, 0x80 },
+ { 0x42C5, 0x80 },
+ { 0x42C6, 0x80 },
+ { 0x42C7, 0x80 },
+ { 0x42C8, 0x80 },
+ { 0x42C9, 0x80 },
+ { 0x42CA, 0x80 },
+ { 0x42CB, 0x80 },
+ { 0x42CC, 0x80 },
+ { 0x42CD, 0x80 },
+ { 0x42CE, 0x80 },
+ { 0x42CF, 0x80 },
+ { 0x42D0, 0x80 },
+ { 0x42D1, 0x80 },
+ { 0x42D2, 0x80 },
+ { 0x42D3, 0x80 },
+ { 0x42D4, 0x80 },
+ { 0x42D5, 0x80 },
+ { 0x42D6, 0x80 },
+ { 0x42D7, 0x80 },
+ { 0x42D8, 0x00 },
+ { 0x42D9, 0x00 },
+ { 0x4300, 0xA2 },
+ { 0x4301, 0xAA },
+ { 0x4302, 0xA7 },
+ { 0x4303, 0xAD },
+ { 0x4304, 0x8E },
+ { 0x4305, 0x92 },
+ { 0x4306, 0x90 },
+ { 0x4307, 0x93 },
+ { 0x4308, 0x86 },
+ { 0x4309, 0x89 },
+ { 0x430A, 0x87 },
+ { 0x430B, 0x88 },
+ { 0x430C, 0x82 },
+ { 0x430D, 0x84 },
+ { 0x430E, 0x83 },
+ { 0x430F, 0x84 },
+ { 0x4310, 0x80 },
+ { 0x4311, 0x82 },
+ { 0x4312, 0x82 },
+ { 0x4313, 0x82 },
+ { 0x4314, 0x83 },
+ { 0x4315, 0x85 },
+ { 0x4316, 0x84 },
+ { 0x4317, 0x85 },
+ { 0x4318, 0x8D },
+ { 0x4319, 0x8D },
+ { 0x431A, 0x8D },
+ { 0x431B, 0x8D },
+ { 0x431C, 0x99 },
+ { 0x431D, 0x9A },
+ { 0x431E, 0x9A },
+ { 0x431F, 0x9A },
+ { 0x4320, 0xAE },
+ { 0x4321, 0xB4 },
+ { 0x4322, 0xB4 },
+ { 0x4323, 0xB5 },
+ { 0x4324, 0x9A },
+ { 0x4325, 0x9D },
+ { 0x4326, 0x9B },
+ { 0x4327, 0x9E },
+ { 0x4328, 0x8C },
+ { 0x4329, 0x8F },
+ { 0x432A, 0x8D },
+ { 0x432B, 0x8F },
+ { 0x432C, 0x83 },
+ { 0x432D, 0x85 },
+ { 0x432E, 0x85 },
+ { 0x432F, 0x85 },
+ { 0x4330, 0x80 },
+ { 0x4331, 0x81 },
+ { 0x4332, 0x81 },
+ { 0x4333, 0x81 },
+ { 0x4334, 0x80 },
+ { 0x4335, 0x80 },
+ { 0x4336, 0x80 },
+ { 0x4337, 0x81 },
+ { 0x4338, 0x83 },
+ { 0x4339, 0x83 },
+ { 0x433A, 0x83 },
+ { 0x433B, 0x83 },
+ { 0x433C, 0x88 },
+ { 0x433D, 0x88 },
+ { 0x433E, 0x88 },
+ { 0x433F, 0x88 },
+ { 0x4340, 0x93 },
+ { 0x4341, 0x93 },
+ { 0x4342, 0x93 },
+ { 0x4343, 0x93 },
+ { 0x4344, 0xA2 },
+ { 0x4345, 0xA4 },
+ { 0x4346, 0xA4 },
+ { 0x4347, 0xA4 },
+ { 0x4348, 0x97 },
+ { 0x4349, 0x99 },
+ { 0x434A, 0x97 },
+ { 0x434B, 0x97 },
+ { 0x434C, 0x89 },
+ { 0x434D, 0x8C },
+ { 0x434E, 0x8B },
+ { 0x434F, 0x8A },
+ { 0x4350, 0x81 },
+ { 0x4351, 0x83 },
+ { 0x4352, 0x83 },
+ { 0x4353, 0x83 },
+ { 0x4354, 0x7F },
+ { 0x4355, 0x80 },
+ { 0x4356, 0x80 },
+ { 0x4357, 0x80 },
+ { 0x4358, 0x7F },
+ { 0x4359, 0x7F },
+ { 0x435A, 0x7F },
+ { 0x435B, 0x7F },
+ { 0x435C, 0x82 },
+ { 0x435D, 0x81 },
+ { 0x435E, 0x81 },
+ { 0x435F, 0x82 },
+ { 0x4360, 0x86 },
+ { 0x4361, 0x86 },
+ { 0x4362, 0x86 },
+ { 0x4363, 0x87 },
+ { 0x4364, 0x8F },
+ { 0x4365, 0x8F },
+ { 0x4366, 0x8F },
+ { 0x4367, 0x90 },
+ { 0x4368, 0x9E },
+ { 0x4369, 0x9E },
+ { 0x436A, 0x9E },
+ { 0x436B, 0x9F },
+ { 0x436C, 0x99 },
+ { 0x436D, 0x9B },
+ { 0x436E, 0x9A },
+ { 0x436F, 0x98 },
+ { 0x4370, 0x8B },
+ { 0x4371, 0x8D },
+ { 0x4372, 0x8D },
+ { 0x4373, 0x8B },
+ { 0x4374, 0x83 },
+ { 0x4375, 0x84 },
+ { 0x4376, 0x84 },
+ { 0x4377, 0x83 },
+ { 0x4378, 0x80 },
+ { 0x4379, 0x81 },
+ { 0x437A, 0x81 },
+ { 0x437B, 0x80 },
+ { 0x437C, 0x81 },
+ { 0x437D, 0x80 },
+ { 0x437E, 0x80 },
+ { 0x437F, 0x80 },
+ { 0x4380, 0x83 },
+ { 0x4381, 0x83 },
+ { 0x4382, 0x83 },
+ { 0x4383, 0x83 },
+ { 0x4384, 0x88 },
+ { 0x4385, 0x87 },
+ { 0x4386, 0x87 },
+ { 0x4387, 0x88 },
+ { 0x4388, 0x91 },
+ { 0x4389, 0x90 },
+ { 0x438A, 0x90 },
+ { 0x438B, 0x91 },
+ { 0x438C, 0x9E },
+ { 0x438D, 0x9E },
+ { 0x438E, 0x9E },
+ { 0x438F, 0xA0 },
+ { 0x4390, 0xA0 },
+ { 0x4391, 0xA2 },
+ { 0x4392, 0xA2 },
+ { 0x4393, 0xA0 },
+ { 0x4394, 0x92 },
+ { 0x4395, 0x94 },
+ { 0x4396, 0x94 },
+ { 0x4397, 0x91 },
+ { 0x4398, 0x89 },
+ { 0x4399, 0x8A },
+ { 0x439A, 0x89 },
+ { 0x439B, 0x88 },
+ { 0x439C, 0x85 },
+ { 0x439D, 0x85 },
+ { 0x439E, 0x85 },
+ { 0x439F, 0x84 },
+ { 0x43A0, 0x85 },
+ { 0x43A1, 0x84 },
+ { 0x43A2, 0x84 },
+ { 0x43A3, 0x84 },
+ { 0x43A4, 0x88 },
+ { 0x43A5, 0x86 },
+ { 0x43A6, 0x86 },
+ { 0x43A7, 0x87 },
+ { 0x43A8, 0x8E },
+ { 0x43A9, 0x8B },
+ { 0x43AA, 0x8B },
+ { 0x43AB, 0x8D },
+ { 0x43AC, 0x97 },
+ { 0x43AD, 0x96 },
+ { 0x43AE, 0x96 },
+ { 0x43AF, 0x98 },
+ { 0x43B0, 0xA5 },
+ { 0x43B1, 0xA5 },
+ { 0x43B2, 0xA5 },
+ { 0x43B3, 0xA9 },
+ { 0x43B4, 0xB7 },
+ { 0x43B5, 0xBB },
+ { 0x43B6, 0xBB },
+ { 0x43B7, 0xB8 },
+ { 0x43B8, 0x9C },
+ { 0x43B9, 0x9C },
+ { 0x43BA, 0x9C },
+ { 0x43BB, 0x9A },
+ { 0x43BC, 0x92 },
+ { 0x43BD, 0x92 },
+ { 0x43BE, 0x92 },
+ { 0x43BF, 0x8F },
+ { 0x43C0, 0x8B },
+ { 0x43C1, 0x8B },
+ { 0x43C2, 0x8B },
+ { 0x43C3, 0x89 },
+ { 0x43C4, 0x8A },
+ { 0x43C5, 0x89 },
+ { 0x43C6, 0x89 },
+ { 0x43C7, 0x88 },
+ { 0x43C8, 0x8D },
+ { 0x43C9, 0x8B },
+ { 0x43CA, 0x8B },
+ { 0x43CB, 0x8C },
+ { 0x43CC, 0x94 },
+ { 0x43CD, 0x91 },
+ { 0x43CE, 0x92 },
+ { 0x43CF, 0x93 },
+ { 0x43D0, 0x9E },
+ { 0x43D1, 0x9D },
+ { 0x43D2, 0x9D },
+ { 0x43D3, 0xA1 },
+ { 0x43D4, 0xB2 },
+ { 0x43D5, 0xB4 },
+ { 0x43D6, 0xB5 },
+ { 0x43D7, 0xBD },
+ { 0x4400, 0x80 },
+ { 0x4401, 0x80 },
+ { 0x4402, 0x80 },
+ { 0x4403, 0x80 },
+ { 0x4404, 0x80 },
+ { 0x4405, 0x80 },
+ { 0x4406, 0x80 },
+ { 0x4407, 0x80 },
+ { 0x4408, 0x80 },
+ { 0x4409, 0x80 },
+ { 0x440A, 0x80 },
+ { 0x440B, 0x80 },
+ { 0x440C, 0x80 },
+ { 0x440D, 0x80 },
+ { 0x440E, 0x80 },
+ { 0x440F, 0x80 },
+ { 0x4410, 0x80 },
+ { 0x4411, 0x80 },
+ { 0x4412, 0x80 },
+ { 0x4413, 0x80 },
+ { 0x4414, 0x80 },
+ { 0x4415, 0x80 },
+ { 0x4416, 0x80 },
+ { 0x4417, 0x80 },
+ { 0x4418, 0x80 },
+ { 0x4419, 0x80 },
+ { 0x441A, 0x80 },
+ { 0x441B, 0x80 },
+ { 0x441C, 0x80 },
+ { 0x441D, 0x80 },
+ { 0x441E, 0x80 },
+ { 0x441F, 0x80 },
+ { 0x4420, 0x80 },
+ { 0x4421, 0x80 },
+ { 0x4422, 0x80 },
+ { 0x4423, 0x80 },
+ { 0x4424, 0x80 },
+ { 0x4425, 0x80 },
+ { 0x4426, 0x80 },
+ { 0x4427, 0x80 },
+ { 0x4428, 0x80 },
+ { 0x4429, 0x80 },
+ { 0x442A, 0x80 },
+ { 0x442B, 0x80 },
+ { 0x442C, 0x80 },
+ { 0x442D, 0x80 },
+ { 0x442E, 0x80 },
+ { 0x442F, 0x80 },
+ { 0x4430, 0x80 },
+ { 0x4431, 0x80 },
+ { 0x4432, 0x80 },
+ { 0x4433, 0x80 },
+ { 0x4434, 0x80 },
+ { 0x4435, 0x80 },
+ { 0x4436, 0x80 },
+ { 0x4437, 0x80 },
+ { 0x4438, 0x80 },
+ { 0x4439, 0x80 },
+ { 0x443A, 0x80 },
+ { 0x443B, 0x80 },
+ { 0x443C, 0x80 },
+ { 0x443D, 0x80 },
+ { 0x443E, 0x80 },
+ { 0x443F, 0x80 },
+ { 0x4440, 0x80 },
+ { 0x4441, 0x80 },
+ { 0x4442, 0x80 },
+ { 0x4443, 0x80 },
+ { 0x4444, 0x80 },
+ { 0x4445, 0x80 },
+ { 0x4446, 0x80 },
+ { 0x4447, 0x80 },
+ { 0x4448, 0x80 },
+ { 0x4449, 0x80 },
+ { 0x444A, 0x80 },
+ { 0x444B, 0x80 },
+ { 0x444C, 0x80 },
+ { 0x444D, 0x80 },
+ { 0x444E, 0x80 },
+ { 0x444F, 0x80 },
+ { 0x4450, 0x80 },
+ { 0x4451, 0x80 },
+ { 0x4452, 0x80 },
+ { 0x4453, 0x80 },
+ { 0x4454, 0x80 },
+ { 0x4455, 0x80 },
+ { 0x4456, 0x80 },
+ { 0x4457, 0x80 },
+ { 0x4458, 0x80 },
+ { 0x4459, 0x80 },
+ { 0x445A, 0x80 },
+ { 0x445B, 0x80 },
+ { 0x445C, 0x80 },
+ { 0x445D, 0x80 },
+ { 0x445E, 0x80 },
+ { 0x445F, 0x80 },
+ { 0x4460, 0x80 },
+ { 0x4461, 0x80 },
+ { 0x4462, 0x80 },
+ { 0x4463, 0x80 },
+ { 0x4464, 0x80 },
+ { 0x4465, 0x80 },
+ { 0x4466, 0x80 },
+ { 0x4467, 0x80 },
+ { 0x4468, 0x80 },
+ { 0x4469, 0x80 },
+ { 0x446A, 0x80 },
+ { 0x446B, 0x80 },
+ { 0x446C, 0x80 },
+ { 0x446D, 0x80 },
+ { 0x446E, 0x80 },
+ { 0x446F, 0x80 },
+ { 0x4470, 0x80 },
+ { 0x4471, 0x80 },
+ { 0x4472, 0x80 },
+ { 0x4473, 0x80 },
+ { 0x4474, 0x80 },
+ { 0x4475, 0x80 },
+ { 0x4476, 0x80 },
+ { 0x4477, 0x80 },
+ { 0x4478, 0x80 },
+ { 0x4479, 0x80 },
+ { 0x447A, 0x80 },
+ { 0x447B, 0x80 },
+ { 0x447C, 0x80 },
+ { 0x447D, 0x80 },
+ { 0x447E, 0x80 },
+ { 0x447F, 0x80 },
+ { 0x4480, 0x80 },
+ { 0x4481, 0x80 },
+ { 0x4482, 0x80 },
+ { 0x4483, 0x80 },
+ { 0x4484, 0x80 },
+ { 0x4485, 0x80 },
+ { 0x4486, 0x80 },
+ { 0x4487, 0x80 },
+ { 0x4488, 0x80 },
+ { 0x4489, 0x80 },
+ { 0x448A, 0x80 },
+ { 0x448B, 0x80 },
+ { 0x448C, 0x80 },
+ { 0x448D, 0x80 },
+ { 0x448E, 0x80 },
+ { 0x448F, 0x80 },
+ { 0x4490, 0x80 },
+ { 0x4491, 0x80 },
+ { 0x4492, 0x80 },
+ { 0x4493, 0x80 },
+ { 0x4494, 0x80 },
+ { 0x4495, 0x80 },
+ { 0x4496, 0x80 },
+ { 0x4497, 0x80 },
+ { 0x4498, 0x80 },
+ { 0x4499, 0x80 },
+ { 0x449A, 0x80 },
+ { 0x449B, 0x80 },
+ { 0x449C, 0x80 },
+ { 0x449D, 0x80 },
+ { 0x449E, 0x80 },
+ { 0x449F, 0x80 },
+ { 0x44A0, 0x80 },
+ { 0x44A1, 0x80 },
+ { 0x44A2, 0x80 },
+ { 0x44A3, 0x80 },
+ { 0x44A4, 0x80 },
+ { 0x44A5, 0x80 },
+ { 0x44A6, 0x80 },
+ { 0x44A7, 0x80 },
+ { 0x44A8, 0x80 },
+ { 0x44A9, 0x80 },
+ { 0x44AA, 0x80 },
+ { 0x44AB, 0x80 },
+ { 0x44AC, 0x80 },
+ { 0x44AD, 0x80 },
+ { 0x44AE, 0x80 },
+ { 0x44AF, 0x80 },
+ { 0x44B0, 0x80 },
+ { 0x44B1, 0x80 },
+ { 0x44B2, 0x80 },
+ { 0x44B3, 0x80 },
+ { 0x44B4, 0x80 },
+ { 0x44B5, 0x80 },
+ { 0x44B6, 0x80 },
+ { 0x44B7, 0x80 },
+ { 0x44B8, 0x80 },
+ { 0x44B9, 0x80 },
+ { 0x44BA, 0x80 },
+ { 0x44BB, 0x80 },
+ { 0x44BC, 0x80 },
+ { 0x44BD, 0x80 },
+ { 0x44BE, 0x80 },
+ { 0x44BF, 0x80 },
+ { 0x44C0, 0x80 },
+ { 0x44C1, 0x80 },
+ { 0x44C2, 0x80 },
+ { 0x44C3, 0x80 },
+ { 0x44C4, 0x80 },
+ { 0x44C5, 0x80 },
+ { 0x44C6, 0x80 },
+ { 0x44C7, 0x80 },
+ { 0x44C8, 0x80 },
+ { 0x44C9, 0x80 },
+ { 0x44CA, 0x80 },
+ { 0x44CB, 0x80 },
+ { 0x44CC, 0x80 },
+ { 0x44CD, 0x80 },
+ { 0x44CE, 0x80 },
+ { 0x44CF, 0x80 },
+ { 0x44D0, 0x80 },
+ { 0x44D1, 0x80 },
+ { 0x44D2, 0x80 },
+ { 0x44D3, 0x80 },
+ { 0x44D4, 0x80 },
+ { 0x44D5, 0x80 },
+ { 0x44D6, 0x80 },
+ { 0x44D7, 0x80 },
+ { 0x4500, 0x80 },
+ { 0x4501, 0x80 },
+ { 0x4502, 0x80 },
+ { 0x4503, 0x80 },
+ { 0x4504, 0x80 },
+ { 0x4505, 0x80 },
+ { 0x4506, 0x80 },
+ { 0x4507, 0x80 },
+ { 0x4508, 0x80 },
+ { 0x4509, 0x80 },
+ { 0x450A, 0x80 },
+ { 0x450B, 0x80 },
+ { 0x450C, 0x80 },
+ { 0x450D, 0x80 },
+ { 0x450E, 0x80 },
+ { 0x450F, 0x80 },
+ { 0x4510, 0x80 },
+ { 0x4511, 0x80 },
+ { 0x4512, 0x80 },
+ { 0x4513, 0x80 },
+ { 0x4514, 0x80 },
+ { 0x4515, 0x80 },
+ { 0x4516, 0x80 },
+ { 0x4517, 0x80 },
+ { 0x4518, 0x80 },
+ { 0x4519, 0x80 },
+ { 0x451A, 0x80 },
+ { 0x451B, 0x80 },
+ { 0x451C, 0x80 },
+ { 0x451D, 0x80 },
+ { 0x451E, 0x80 },
+ { 0x451F, 0x80 },
+ { 0x4520, 0x80 },
+ { 0x4521, 0x80 },
+ { 0x4522, 0x80 },
+ { 0x4523, 0x80 },
+ { 0x4524, 0x80 },
+ { 0x4525, 0x80 },
+ { 0x4526, 0x80 },
+ { 0x4527, 0x80 },
+ { 0x4528, 0x80 },
+ { 0x4529, 0x80 },
+ { 0x452A, 0x80 },
+ { 0x452B, 0x80 },
+ { 0x452C, 0x80 },
+ { 0x452D, 0x80 },
+ { 0x452E, 0x80 },
+ { 0x452F, 0x80 },
+ { 0x4530, 0x80 },
+ { 0x4531, 0x80 },
+ { 0x4532, 0x80 },
+ { 0x4533, 0x80 },
+ { 0x4534, 0x80 },
+ { 0x4535, 0x80 },
+ { 0x4536, 0x80 },
+ { 0x4537, 0x80 },
+ { 0x4538, 0x80 },
+ { 0x4539, 0x80 },
+ { 0x453A, 0x80 },
+ { 0x453B, 0x80 },
+ { 0x453C, 0x80 },
+ { 0x453D, 0x80 },
+ { 0x453E, 0x80 },
+ { 0x453F, 0x80 },
+ { 0x4540, 0x80 },
+ { 0x4541, 0x80 },
+ { 0x4542, 0x80 },
+ { 0x4543, 0x80 },
+ { 0x4544, 0x80 },
+ { 0x4545, 0x80 },
+ { 0x4546, 0x80 },
+ { 0x4547, 0x80 },
+ { 0x4548, 0x80 },
+ { 0x4549, 0x80 },
+ { 0x454A, 0x80 },
+ { 0x454B, 0x80 },
+ { 0x454C, 0x80 },
+ { 0x454D, 0x80 },
+ { 0x454E, 0x80 },
+ { 0x454F, 0x80 },
+ { 0x4550, 0x80 },
+ { 0x4551, 0x80 },
+ { 0x4552, 0x80 },
+ { 0x4553, 0x80 },
+ { 0x4554, 0x80 },
+ { 0x4555, 0x80 },
+ { 0x4556, 0x80 },
+ { 0x4557, 0x80 },
+ { 0x4558, 0x80 },
+ { 0x4559, 0x80 },
+ { 0x455A, 0x80 },
+ { 0x455B, 0x80 },
+ { 0x455C, 0x80 },
+ { 0x455D, 0x80 },
+ { 0x455E, 0x80 },
+ { 0x455F, 0x80 },
+ { 0x4560, 0x80 },
+ { 0x4561, 0x80 },
+ { 0x4562, 0x80 },
+ { 0x4563, 0x80 },
+ { 0x4564, 0x80 },
+ { 0x4565, 0x80 },
+ { 0x4566, 0x80 },
+ { 0x4567, 0x80 },
+ { 0x4568, 0x80 },
+ { 0x4569, 0x80 },
+ { 0x456A, 0x80 },
+ { 0x456B, 0x80 },
+ { 0x456C, 0x80 },
+ { 0x456D, 0x80 },
+ { 0x456E, 0x80 },
+ { 0x456F, 0x80 },
+ { 0x4570, 0x80 },
+ { 0x4571, 0x80 },
+ { 0x4572, 0x80 },
+ { 0x4573, 0x80 },
+ { 0x4574, 0x80 },
+ { 0x4575, 0x80 },
+ { 0x4576, 0x80 },
+ { 0x4577, 0x80 },
+ { 0x4578, 0x80 },
+ { 0x4579, 0x80 },
+ { 0x457A, 0x80 },
+ { 0x457B, 0x80 },
+ { 0x457C, 0x80 },
+ { 0x457D, 0x80 },
+ { 0x457E, 0x80 },
+ { 0x457F, 0x80 },
+ { 0x4580, 0x80 },
+ { 0x4581, 0x80 },
+ { 0x4582, 0x80 },
+ { 0x4583, 0x80 },
+ { 0x4584, 0x80 },
+ { 0x4585, 0x80 },
+ { 0x4586, 0x80 },
+ { 0x4587, 0x80 },
+ { 0x4588, 0x80 },
+ { 0x4589, 0x80 },
+ { 0x458A, 0x80 },
+ { 0x458B, 0x80 },
+ { 0x458C, 0x80 },
+ { 0x458D, 0x80 },
+ { 0x458E, 0x80 },
+ { 0x458F, 0x80 },
+ { 0x4590, 0x80 },
+ { 0x4591, 0x80 },
+ { 0x4592, 0x80 },
+ { 0x4593, 0x80 },
+ { 0x4594, 0x80 },
+ { 0x4595, 0x80 },
+ { 0x4596, 0x80 },
+ { 0x4597, 0x80 },
+ { 0x4598, 0x80 },
+ { 0x4599, 0x80 },
+ { 0x459A, 0x80 },
+ { 0x459B, 0x80 },
+ { 0x459C, 0x80 },
+ { 0x459D, 0x80 },
+ { 0x459E, 0x80 },
+ { 0x459F, 0x80 },
+ { 0x45A0, 0x80 },
+ { 0x45A1, 0x80 },
+ { 0x45A2, 0x80 },
+ { 0x45A3, 0x80 },
+ { 0x45A4, 0x80 },
+ { 0x45A5, 0x80 },
+ { 0x45A6, 0x80 },
+ { 0x45A7, 0x80 },
+ { 0x45A8, 0x80 },
+ { 0x45A9, 0x80 },
+ { 0x45AA, 0x80 },
+ { 0x45AB, 0x80 },
+ { 0x45AC, 0x80 },
+ { 0x45AD, 0x80 },
+ { 0x45AE, 0x80 },
+ { 0x45AF, 0x80 },
+ { 0x45B0, 0x80 },
+ { 0x45B1, 0x80 },
+ { 0x45B2, 0x80 },
+ { 0x45B3, 0x80 },
+ { 0x45B4, 0x80 },
+ { 0x45B5, 0x80 },
+ { 0x45B6, 0x80 },
+ { 0x45B7, 0x80 },
+ { 0x45B8, 0x80 },
+ { 0x45B9, 0x80 },
+ { 0x45BA, 0x80 },
+ { 0x45BB, 0x80 },
+ { 0x45BC, 0x80 },
+ { 0x45BD, 0x80 },
+ { 0x45BE, 0x80 },
+ { 0x45BF, 0x80 },
+ { 0x45C0, 0x80 },
+ { 0x45C1, 0x80 },
+ { 0x45C2, 0x80 },
+ { 0x45C3, 0x80 },
+ { 0x45C4, 0x80 },
+ { 0x45C5, 0x80 },
+ { 0x45C6, 0x80 },
+ { 0x45C7, 0x80 },
+ { 0x45C8, 0x80 },
+ { 0x45C9, 0x80 },
+ { 0x45CA, 0x80 },
+ { 0x45CB, 0x80 },
+ { 0x45CC, 0x80 },
+ { 0x45CD, 0x80 },
+ { 0x45CE, 0x80 },
+ { 0x45CF, 0x80 },
+ { 0x45D0, 0x80 },
+ { 0x45D1, 0x80 },
+ { 0x45D2, 0x80 },
+ { 0x45D3, 0x80 },
+ { 0x45D4, 0x80 },
+ { 0x45D5, 0x80 },
+ { 0x45D6, 0x80 },
+ { 0x45D7, 0x80 },
+ { 0x7000, 0xAB },
+ { 0x7001, 0xBA },
+ { 0x7002, 0x40 },
+ { 0x7003, 0x02 },
+ { 0x7004, 0x00 },
+ { 0x7005, 0x00 },
+ { 0x7006, 0x00 },
+ { 0x7007, 0x00 },
+ { 0x7008, 0x00 },
+ { 0x7009, 0x00 },
+ { 0x700A, 0x00 },
+ { 0x700B, 0x00 },
+ { 0x700C, 0x00 },
+ { 0x700D, 0x00 },
+ { 0x700E, 0x00 },
+ { 0x700F, 0x00 },
+ { 0x7010, 0x55 },
+ { 0x7011, 0x88 },
+ { 0x7012, 0x40 },
+ { 0x7013, 0x01 },
+ { 0x7014, 0x72 },
+ { 0x7015, 0xF1 },
+ { 0x7016, 0x02 },
+ { 0x7017, 0xF8 },
+ { 0x7018, 0x00 },
+ { 0x7019, 0x00 },
+ { 0x701A, 0x00 },
+ { 0x701B, 0x00 },
+ { 0x701C, 0x00 },
+ { 0x701D, 0x00 },
+ { 0x701E, 0x00 },
+ { 0x701F, 0x00 },
+ { 0x7020, 0x00 },
+ { 0x7021, 0x00 },
+ { 0x7022, 0x00 },
+ { 0x7023, 0x00 },
+ { 0x7024, 0x00 },
+ { 0x7025, 0x00 },
+ { 0x7026, 0x00 },
+ { 0x7027, 0x00 },
+ { 0x7028, 0x00 },
+ { 0x7029, 0x00 },
+ { 0x702A, 0x00 },
+ { 0x702B, 0x00 },
+ { 0x702C, 0x00 },
+ { 0x702D, 0x00 },
+ { 0x702E, 0x00 },
+ { 0x702F, 0x00 },
+ { 0x7030, 0x00 },
+ { 0x7031, 0x00 },
+ { 0x7032, 0x00 },
+ { 0x7033, 0x00 },
+ { 0x7034, 0x00 },
+ { 0x7035, 0x00 },
+ { 0x7036, 0x00 },
+ { 0x7037, 0x00 },
+ { 0x7038, 0x00 },
+ { 0x7039, 0x00 },
+ { 0x703A, 0x00 },
+ { 0x703B, 0x00 },
+ { 0x703C, 0x00 },
+ { 0x703D, 0x00 },
+ { 0x703E, 0x00 },
+ { 0x703F, 0x00 },
+ { 0x7040, 0x00 },
+ { 0x7041, 0x00 },
+ { 0x7042, 0x00 },
+ { 0x7043, 0x00 },
+ { 0x7044, 0x00 },
+ { 0x7045, 0x00 },
+ { 0x7046, 0x00 },
+ { 0x7047, 0x00 },
+ { 0x7048, 0x00 },
+ { 0x7049, 0x00 },
+ { 0x704A, 0x00 },
+ { 0x704B, 0x00 },
+ { 0x704C, 0x00 },
+ { 0x704D, 0x00 },
+ { 0x704E, 0x00 },
+ { 0x704F, 0x00 },
+ { 0x7050, 0x00 },
+ { 0x7051, 0x00 },
+ { 0x7052, 0x00 },
+ { 0x7053, 0x00 },
+ { 0x7054, 0x00 },
+ { 0x7055, 0x00 },
+ { 0x7056, 0x00 },
+ { 0x7057, 0x00 },
+ { 0x7058, 0x00 },
+ { 0x7059, 0x00 },
+ { 0x705A, 0x00 },
+ { 0x705B, 0x00 },
+ { 0x705C, 0x00 },
+ { 0x705D, 0x00 },
+ { 0x705E, 0x00 },
+ { 0x705F, 0x00 },
+ { 0x7060, 0x00 },
+ { 0x7061, 0x00 },
+ { 0x7062, 0x00 },
+ { 0x7063, 0x00 },
+ { 0x7064, 0x00 },
+ { 0x7065, 0x00 },
+ { 0x7066, 0x00 },
+ { 0x7067, 0x00 },
+ { 0x7068, 0x00 },
+ { 0x7069, 0x00 },
+ { 0x706A, 0x00 },
+ { 0x706B, 0x00 },
+ { 0x706C, 0x00 },
+ { 0x706D, 0x00 },
+ { 0x706E, 0x00 },
+ { 0x706F, 0x00 },
+ { 0x7070, 0x00 },
+ { 0x7071, 0x00 },
+ { 0x7072, 0x00 },
+ { 0x7073, 0x00 },
+ { 0x7074, 0x00 },
+ { 0x7075, 0x00 },
+ { 0x7076, 0x00 },
+ { 0x7077, 0x00 },
+ { 0x7078, 0x00 },
+ { 0x7079, 0x00 },
+ { 0x707A, 0x00 },
+ { 0x707B, 0x00 },
+ { 0x707C, 0x00 },
+ { 0x707D, 0x00 },
+ { 0x707E, 0x00 },
+ { 0x707F, 0x00 },
+ { 0x7080, 0x00 },
+ { 0x7081, 0x00 },
+ { 0x7082, 0x00 },
+ { 0x7083, 0x00 },
+ { 0x7084, 0x00 },
+ { 0x7085, 0x00 },
+ { 0x7086, 0x00 },
+ { 0x7087, 0x00 },
+ { 0x7088, 0x00 },
+ { 0x7089, 0x00 },
+ { 0x708A, 0x00 },
+ { 0x708B, 0x00 },
+ { 0x708C, 0x00 },
+ { 0x708D, 0x00 },
+ { 0x708E, 0x00 },
+ { 0x708F, 0x00 },
+ { 0x7090, 0x00 },
+ { 0x7091, 0xF0 },
+ { 0x7092, 0x02 },
+ { 0x7093, 0xF8 },
+ { 0x7094, 0x8D },
+ { 0x7095, 0xF6 },
+ { 0x7096, 0xFA },
+ { 0x7097, 0xFF },
+ { 0x7098, 0xF0 },
+ { 0x7099, 0xB5 },
+ { 0x709A, 0x04 },
+ { 0x709B, 0x46 },
+ { 0x709C, 0x8F },
+ { 0x709D, 0xB0 },
+ { 0x709E, 0x5F },
+ { 0x709F, 0x48 },
+ { 0x70A0, 0x0C },
+ { 0x70A1, 0x90 },
+ { 0x70A2, 0x5F },
+ { 0x70A3, 0x48 },
+ { 0x70A4, 0x06 },
+ { 0x70A5, 0x90 },
+ { 0x70A6, 0x20 },
+ { 0x70A7, 0x46 },
+ { 0x70A8, 0x34 },
+ { 0x70A9, 0x30 },
+ { 0x70AA, 0x0B },
+ { 0x70AB, 0x90 },
+ { 0x70AC, 0x5B },
+ { 0x70AD, 0x48 },
+ { 0x70AE, 0x5A },
+ { 0x70AF, 0x49 },
+ { 0x70B0, 0x26 },
+ { 0x70B1, 0x46 },
+ { 0x70B2, 0x66 },
+ { 0x70B3, 0x30 },
+ { 0x70B4, 0x3A },
+ { 0x70B5, 0x31 },
+ { 0x70B6, 0x3C },
+ { 0x70B7, 0x36 },
+ { 0x70B8, 0x05 },
+ { 0x70B9, 0x90 },
+ { 0x70BA, 0x0A },
+ { 0x70BB, 0x30 },
+ { 0x70BC, 0x04 },
+ { 0x70BD, 0x90 },
+ { 0x70BE, 0x59 },
+ { 0x70BF, 0x48 },
+ { 0x70C0, 0x55 },
+ { 0x70C1, 0x4A },
+ { 0x70C2, 0x40 },
+ { 0x70C3, 0x6E },
+ { 0x70C4, 0xC0 },
+ { 0x70C5, 0x07 },
+ { 0x70C6, 0x7D },
+ { 0x70C7, 0xD1 },
+ { 0x70C8, 0x17 },
+ { 0x70C9, 0x88 },
+ { 0x70CA, 0x0A },
+ { 0x70CB, 0x5E },
+ { 0x70CC, 0x0D },
+ { 0x70CD, 0x92 },
+ { 0x70CE, 0x53 },
+ { 0x70CF, 0x49 },
+ { 0x70D0, 0x55 },
+ { 0x70D1, 0x48 },
+ { 0x70D2, 0x94 },
+ { 0x70D3, 0x31 },
+ { 0x70D4, 0x89 },
+ { 0x70D5, 0x6B },
+ { 0x70D6, 0x80 },
+ { 0x70D7, 0x68 },
+ { 0x70D8, 0x09 },
+ { 0x70D9, 0x02 },
+ { 0x70DA, 0x00 },
+ { 0x70DB, 0x03 },
+ { 0x70DC, 0x09 },
+ { 0x70DD, 0x0E },
+ { 0x70DE, 0x00 },
+ { 0x70DF, 0x0B },
+ { 0x70E0, 0x49 },
+ { 0x70E1, 0x1C },
+ { 0x70E2, 0x48 },
+ { 0x70E3, 0x43 },
+ { 0x70E4, 0x4D },
+ { 0x70E5, 0x49 },
+ { 0x70E6, 0x6C },
+ { 0x70E7, 0x39 },
+ { 0x70E8, 0x8A },
+ { 0x70E9, 0x6A },
+ { 0x70EA, 0x07 },
+ { 0x70EB, 0x92 },
+ { 0x70EC, 0xCA },
+ { 0x70ED, 0x6A },
+ { 0x70EE, 0x00 },
+ { 0x70EF, 0x21 },
+ { 0x70F0, 0xC9 },
+ { 0x70F1, 0x43 },
+ { 0x70F2, 0x03 },
+ { 0x70F3, 0x92 },
+ { 0x70F4, 0x00 },
+ { 0x70F5, 0x22 },
+ { 0x70F6, 0x00 },
+ { 0x70F7, 0x91 },
+ { 0x70F8, 0x01 },
+ { 0x70F9, 0x92 },
+ { 0x70FA, 0x39 },
+ { 0x70FB, 0x46 },
+ { 0x70FC, 0x8F },
+ { 0x70FD, 0xF6 },
+ { 0x70FE, 0xCE },
+ { 0x70FF, 0xFB },
+ { 0x7100, 0x01 },
+ { 0x7101, 0x22 },
+ { 0x7102, 0x00 },
+ { 0x7103, 0x23 },
+ { 0x7104, 0x8C },
+ { 0x7105, 0xF6 },
+ { 0x7106, 0x02 },
+ { 0x7107, 0xFA },
+ { 0x7108, 0x00 },
+ { 0x7109, 0x21 },
+ { 0x710A, 0x05 },
+ { 0x710B, 0x46 },
+ { 0x710C, 0x01 },
+ { 0x710D, 0x91 },
+ { 0x710E, 0x00 },
+ { 0x710F, 0x90 },
+ { 0x7110, 0x39 },
+ { 0x7111, 0x46 },
+ { 0x7112, 0x07 },
+ { 0x7113, 0x98 },
+ { 0x7114, 0x8F },
+ { 0x7115, 0xF6 },
+ { 0x7116, 0xC2 },
+ { 0x7117, 0xFB },
+ { 0x7118, 0x0D },
+ { 0x7119, 0x9A },
+ { 0x711A, 0xD3 },
+ { 0x711B, 0x17 },
+ { 0x711C, 0x80 },
+ { 0x711D, 0x18 },
+ { 0x711E, 0x59 },
+ { 0x711F, 0x41 },
+ { 0x7120, 0x01 },
+ { 0x7121, 0x22 },
+ { 0x7122, 0x00 },
+ { 0x7123, 0x23 },
+ { 0x7124, 0x8C },
+ { 0x7125, 0xF6 },
+ { 0x7126, 0xCD },
+ { 0x7127, 0xF9 },
+ { 0x7128, 0x07 },
+ { 0x7129, 0x90 },
+ { 0x712A, 0x00 },
+ { 0x712B, 0x20 },
+ { 0x712C, 0x01 },
+ { 0x712D, 0x90 },
+ { 0x712E, 0x00 },
+ { 0x712F, 0x95 },
+ { 0x7130, 0x39 },
+ { 0x7131, 0x46 },
+ { 0x7132, 0x03 },
+ { 0x7133, 0x98 },
+ { 0x7134, 0x8F },
+ { 0x7135, 0xF6 },
+ { 0x7136, 0xB2 },
+ { 0x7137, 0xFB },
+ { 0x7138, 0x01 },
+ { 0x7139, 0x22 },
+ { 0x713A, 0x00 },
+ { 0x713B, 0x23 },
+ { 0x713C, 0x8C },
+ { 0x713D, 0xF6 },
+ { 0x713E, 0xE6 },
+ { 0x713F, 0xF9 },
+ { 0x7140, 0x02 },
+ { 0x7141, 0x46 },
+ { 0x7142, 0x07 },
+ { 0x7143, 0x98 },
+ { 0x7144, 0x00 },
+ { 0x7145, 0x23 },
+ { 0x7146, 0x81 },
+ { 0x7147, 0x0B },
+ { 0x7148, 0x80 },
+ { 0x7149, 0x04 },
+ { 0x714A, 0x7A },
+ { 0x714B, 0xF6 },
+ { 0x714C, 0x54 },
+ { 0x714D, 0xF8 },
+ { 0x714E, 0x37 },
+ { 0x714F, 0x4A },
+ { 0x7150, 0x00 },
+ { 0x7151, 0x23 },
+ { 0x7152, 0x00 },
+ { 0x7153, 0x92 },
+ { 0x7154, 0x01 },
+ { 0x7155, 0x93 },
+ { 0x7156, 0x01 },
+ { 0x7157, 0x22 },
+ { 0x7158, 0x8C },
+ { 0x7159, 0xF6 },
+ { 0x715A, 0xD8 },
+ { 0x715B, 0xF9 },
+ { 0x715C, 0x05 },
+ { 0x715D, 0x46 },
+ { 0x715E, 0x60 },
+ { 0x715F, 0x68 },
+ { 0x7160, 0x00 },
+ { 0x7161, 0x23 },
+ { 0x7162, 0x01 },
+ { 0x7163, 0x0C },
+ { 0x7164, 0x00 },
+ { 0x7165, 0x04 },
+ { 0x7166, 0xE2 },
+ { 0x7167, 0x68 },
+ { 0x7168, 0x7A },
+ { 0x7169, 0xF6 },
+ { 0x716A, 0x45 },
+ { 0x716B, 0xF8 },
+ { 0x716C, 0x00 },
+ { 0x716D, 0x22 },
+ { 0x716E, 0xD2 },
+ { 0x716F, 0x43 },
+ { 0x7170, 0x00 },
+ { 0x7171, 0x23 },
+ { 0x7172, 0x00 },
+ { 0x7173, 0x92 },
+ { 0x7174, 0x01 },
+ { 0x7175, 0x93 },
+ { 0x7176, 0x1A },
+ { 0x7177, 0x46 },
+ { 0x7178, 0x8C },
+ { 0x7179, 0xF6 },
+ { 0x717A, 0xC8 },
+ { 0x717B, 0xF9 },
+ { 0x717C, 0x29 },
+ { 0x717D, 0x46 },
+ { 0x717E, 0x8F },
+ { 0x717F, 0xF6 },
+ { 0x7180, 0x8D },
+ { 0x7181, 0xFB },
+ { 0x7182, 0x8A },
+ { 0x7183, 0x03 },
+ { 0x7184, 0x80 },
+ { 0x7185, 0x0C },
+ { 0x7186, 0x10 },
+ { 0x7187, 0x43 },
+ { 0x7188, 0x00 },
+ { 0x7189, 0x22 },
+ { 0x718A, 0xD2 },
+ { 0x718B, 0x43 },
+ { 0x718C, 0x00 },
+ { 0x718D, 0x23 },
+ { 0x718E, 0x00 },
+ { 0x718F, 0x92 },
+ { 0x7190, 0x89 },
+ { 0x7191, 0x0C },
+ { 0x7192, 0x01 },
+ { 0x7193, 0x93 },
+ { 0x7194, 0x1A },
+ { 0x7195, 0x46 },
+ { 0x7196, 0x8C },
+ { 0x7197, 0xF6 },
+ { 0x7198, 0xB9 },
+ { 0x7199, 0xF9 },
+ { 0x719A, 0x00 },
+ { 0x719B, 0x24 },
+ { 0x719C, 0x03 },
+ { 0x719D, 0x90 },
+ { 0x719E, 0x0C },
+ { 0x719F, 0x98 },
+ { 0x71A0, 0x61 },
+ { 0x71A1, 0x00 },
+ { 0x71A2, 0x45 },
+ { 0x71A3, 0x5A },
+ { 0x71A4, 0x06 },
+ { 0x71A5, 0x98 },
+ { 0x71A6, 0x22 },
+ { 0x71A7, 0x4A },
+ { 0x71A8, 0x40 },
+ { 0x71A9, 0x5A },
+ { 0x71AA, 0x00 },
+ { 0x71AB, 0x21 },
+ { 0x71AC, 0x8C },
+ { 0x71AD, 0xF6 },
+ { 0x71AE, 0xBE },
+ { 0x71AF, 0xF9 },
+ { 0x71B0, 0x07 },
+ { 0x71B1, 0x46 },
+ { 0x71B2, 0x28 },
+ { 0x71B3, 0x46 },
+ { 0x71B4, 0x03 },
+ { 0x71B5, 0x99 },
+ { 0x71B6, 0x8F },
+ { 0x71B7, 0xF6 },
+ { 0x71B8, 0x71 },
+ { 0x71B9, 0xFB },
+ { 0x71BA, 0x3A },
+ { 0x71BB, 0x46 },
+ { 0x71BC, 0x00 },
+ { 0x71BD, 0x23 },
+ { 0x71BE, 0x79 },
+ { 0x71BF, 0xF6 },
+ { 0x71C0, 0xCA },
+ { 0x71C1, 0xFF },
+ { 0x71C2, 0x00 },
+ { 0x71C3, 0xE0 },
+ { 0x71C4, 0x0F },
+ { 0x71C5, 0xE0 },
+ { 0x71C6, 0x8A },
+ { 0x71C7, 0x02 },
+ { 0x71C8, 0x80 },
+ { 0x71C9, 0x0D },
+ { 0x71CA, 0x10 },
+ { 0x71CB, 0x43 },
+ { 0x71CC, 0x19 },
+ { 0x71CD, 0x4A },
+ { 0x71CE, 0x00 },
+ { 0x71CF, 0x23 },
+ { 0x71D0, 0x00 },
+ { 0x71D1, 0x92 },
+ { 0x71D2, 0x89 },
+ { 0x71D3, 0x0D },
+ { 0x71D4, 0x01 },
+ { 0x71D5, 0x93 },
+ { 0x71D6, 0x40 },
+ { 0x71D7, 0x22 },
+ { 0x71D8, 0x8C },
+ { 0x71D9, 0xF6 },
+ { 0x71DA, 0x98 },
+ { 0x71DB, 0xF9 },
+ { 0x71DC, 0xA1 },
+ { 0x71DD, 0x00 },
+ { 0x71DE, 0x64 },
+ { 0x71DF, 0x1C },
+ { 0x71E0, 0x70 },
+ { 0x71E1, 0x50 },
+ { 0x71E2, 0x04 },
+ { 0x71E3, 0x2C },
+ { 0x71E4, 0xDB },
+ { 0x71E5, 0xD3 },
+ { 0x71E6, 0x14 },
+ { 0x71E7, 0x4D },
+ { 0x71E8, 0x00 },
+ { 0x71E9, 0x24 },
+ { 0x71EA, 0x0B },
+ { 0x71EB, 0x98 },
+ { 0x71EC, 0x67 },
+ { 0x71ED, 0x00 },
+ { 0x71EE, 0xC0 },
+ { 0x71EF, 0x5B },
+ { 0x71F0, 0x2A },
+ { 0x71F1, 0x46 },
+ { 0x71F2, 0x40 },
+ { 0x71F3, 0x21 },
+ { 0x71F4, 0x8C },
+ { 0x71F5, 0xF6 },
+ { 0x71F6, 0x9A },
+ { 0x71F7, 0xF9 },
+ { 0x71F8, 0x05 },
+ { 0x71F9, 0x99 },
+ { 0x71FA, 0x0E },
+ { 0x71FB, 0x4A },
+ { 0x71FC, 0xC8 },
+ { 0x71FD, 0x53 },
+ { 0x71FE, 0xA7 },
+ { 0x71FF, 0x00 },
+ { 0x7200, 0xF0 },
+ { 0x7201, 0x59 },
+ { 0x7202, 0x40 },
+ { 0x7203, 0x21 },
+ { 0x7204, 0x8C },
+ { 0x7205, 0xF6 },
+ { 0x7206, 0x7B },
+ { 0x7207, 0xF9 },
+ { 0x7208, 0x04 },
+ { 0x7209, 0x99 },
+ { 0x720A, 0x64 },
+ { 0x720B, 0x1C },
+ { 0x720C, 0xC8 },
+ { 0x720D, 0x51 },
+ { 0x720E, 0x04 },
+ { 0x720F, 0x2C },
+ { 0x7210, 0xEB },
+ { 0x7211, 0xD3 },
+ { 0x7212, 0x0F },
+ { 0x7213, 0xB0 },
+ { 0x7214, 0xF0 },
+ { 0x7215, 0xBD },
+ { 0x7216, 0x00 },
+ { 0x7217, 0x00 },
+ { 0x7218, 0x76 },
+ { 0x7219, 0x69 },
+ { 0x721A, 0x18 },
+ { 0x721B, 0x00 },
+ { 0x721C, 0xEC },
+ { 0x721D, 0x58 },
+ { 0x721E, 0x18 },
+ { 0x721F, 0x00 },
+ { 0x7220, 0x38 },
+ { 0x7221, 0x36 },
+ { 0x7222, 0x18 },
+ { 0x7223, 0x00 },
+ { 0x7224, 0x00 },
+ { 0x7225, 0x35 },
+ { 0x7226, 0x18 },
+ { 0x7227, 0x00 },
+ { 0x7228, 0x00 },
+ { 0x7229, 0x20 },
+ { 0x722A, 0x18 },
+ { 0x722B, 0x00 },
+ { 0x722C, 0xFF },
+ { 0x722D, 0xFF },
+ { 0x722E, 0xFF },
+ { 0x722F, 0x3F },
+ { 0x7230, 0xFF },
+ { 0x7231, 0x07 },
+ { 0x7232, 0x00 },
+ { 0x7233, 0x00 },
+ { 0x7234, 0xFF },
+ { 0x7235, 0xFF },
+ { 0x7236, 0x07 },
+ { 0x7237, 0x00 },
+ { 0x7238, 0xFF },
+ { 0x7239, 0x1F },
+ { 0x723A, 0x00 },
+ { 0x723B, 0x00 },
+ { 0x723C, 0x01 },
+ { 0x723D, 0xF6 },
+ { 0x723E, 0x45 },
+ { 0x723F, 0x12 },
+};
+
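For context, a brief sketch of how a mode table like the one above is consumed: each entry is a { register, value } pair in the kernel's struct reg_sequence format, and drivers typically push the whole table to the sensor in a single burst through the regmap API. The wrapper name imx390_apply_table below is illustrative and not taken from this patch; regmap_multi_reg_write() and ARRAY_SIZE() are the standard kernel interfaces for this pattern.

#include <linux/regmap.h>

/*
 * Illustrative helper (not part of this patch): writes every
 * { reg, def } pair of a reg_sequence table in order, honouring
 * any per-entry delay_us, via the standard regmap interface.
 */
static int imx390_apply_table(struct regmap *map,
			      const struct reg_sequence *regs,
			      int num_regs)
{
	return regmap_multi_reg_write(map, regs, num_regs);
}

/*
 * Example use with the table declared below:
 *	imx390_apply_table(map, imx390_linear_1936x1096,
 *			   ARRAY_SIZE(imx390_linear_1936x1096));
 */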
+static const struct reg_sequence imx390_linear_1936x1096[] = {
+ { 0x000C, 0xF2 },
+ { 0x000D, 0x02 },
+ { 0x000E, 0x00 },
+ { 0x0010, 0xF2 },
+ { 0x0011, 0x02 },
+ { 0x0012, 0x00 },
+ { 0x0018, 0x0F },
+ { 0x0019, 0x00 },
+ { 0x001A, 0x0C },
+ { 0x001B, 0x00 },
+ { 0x0038, 0x00 },
+ { 0x003C, 0x00 }, /* OBB_CLAMP_R_SP1H */
+ { 0x003D, 0x00 }, /* OBB_CLAMP_R_SP1H */
+ { 0x003E, 0x00 },
+ { 0x0040, 0x00 }, /* OBB_CLAMP_GR_SP1H */
+ { 0x0041, 0x00 }, /* OBB_CLAMP_GR_SP1H */
+ { 0x0042, 0x00 },
+ { 0x0044, 0x00 }, /* OBB_CLAMP_GB_SP1H */
+ { 0x0045, 0x00 }, /* OBB_CLAMP_GB_SP1H */
+ { 0x0046, 0x00 },
+ { 0x0048, 0x00 },
+ { 0x0049, 0x00 },
+ { 0x004A, 0x00 },
+ { 0x004C, 0x00 },
+ { 0x004D, 0x00 },
+ { 0x004E, 0x00 },
+ { 0x0050, 0x00 },
+ { 0x0051, 0x00 },
+ { 0x0052, 0x00 },
+ { 0x0054, 0x00 },
+ { 0x0055, 0x00 },
+ { 0x0056, 0x00 },
+ { 0x0058, 0x00 },
+ { 0x0059, 0x00 },
+ { 0x005A, 0x00 },
+ { 0x005C, 0x00 },
+ { 0x005D, 0x00 },
+ { 0x005E, 0x00 },
+ { 0x0060, 0x00 },
+ { 0x0061, 0x00 },
+ { 0x0062, 0x00 },
+ { 0x0064, 0x00 },
+ { 0x0065, 0x00 },
+ { 0x0066, 0x00 },
+ { 0x0068, 0x00 },
+ { 0x0069, 0x00 },
+ { 0x006A, 0x00 },
+ { 0x0074, 0x00 },
+ { 0x0078, 0x00 },
+ { 0x007C, 0x00 },
+ { 0x007D, 0x00 },
+ { 0x0080, 0x00 },
+ { 0x0081, 0x00 },
+ { 0x00F4, 0x1C },
+ { 0x00F5, 0xF8 },
+ { 0x00F6, 0x01 },
+ { 0x00F8, 0x03 },
+ { 0x00F9, 0x00 },
+ { 0x00FA, 0x00 },
+ { 0x00FB, 0x00 },
+ { 0x0114, 0x00 },
+ { 0x0115, 0x01 },
+ { 0x0118, 0x20 },
+ { 0x0119, 0x03 },
+ { 0x011A, 0x00 },
+ { 0x011B, 0x41 },
+ { 0x011C, 0x80 },
+ { 0x011D, 0x00 },
+ { 0x0120, 0x20 },
+ { 0x0121, 0x00 },
+ { 0x0122, 0x00 },
+ { 0x0123, 0x44 },
+ { 0x0124, 0x00 },
+ { 0x0125, 0x01 },
+ { 0x0128, 0xAC },
+ { 0x0129, 0x0D },
+ { 0x012A, 0x00 },
+ { 0x012B, 0xA4 },
+ { 0x012C, 0x00 },
+ { 0x012D, 0x01 },
+ { 0x0130, 0xC4 },
+ { 0x0131, 0x09 },
+ { 0x0132, 0x00 },
+ { 0x0133, 0xDA },
+ { 0x013B, 0x01 },
+ { 0x01C4, 0x00 },
+ { 0x01C5, 0x00 },
+ { 0x01CC, 0x01 },
+ { 0x01D0, 0x09 },
+ { 0x01D4, 0x01 },
+ { 0x0232, 0x18 },
+ { 0x0233, 0x00 },
+ { 0x0390, 0x00 },
+ { 0x0391, 0x00 },
+ { 0x0392, 0x00 },
+ { 0x03C0, 0x04 },
+ { 0x2000, 0x55 },
+ { 0x2001, 0x55 },
+ { 0x2002, 0x55 },
+ { 0x2003, 0x05 },
+ { 0x2004, 0x02 },
+ { 0x2008, 0x65 },
+ { 0x2009, 0x04 },
+ { 0x200A, 0x00 },
+ { 0x200C, 0x30 },
+ { 0x200D, 0x11 },
+ { 0x2010, 0x04 },
+ { 0x2014, 0x01 },
+ { 0x2018, 0x02 },
+ { 0x2019, 0x04 },
+ { 0x201A, 0x00 },
+ { 0x201C, 0x21 },
+ { 0x201D, 0x11 },
+ { 0x201E, 0x00 },
+ { 0x201F, 0x00 },
+ { 0x2020, 0xBC },
+ { 0x2021, 0x00 },
+ { 0x2022, 0x7F },
+ { 0x2023, 0x00 },
+ { 0x2024, 0xBA },
+ { 0x2025, 0x00 },
+ { 0x2026, 0x81 },
+ { 0x2027, 0x00 },
+ { 0x2028, 0x7D },
+ { 0x2029, 0x90 },
+ { 0x202A, 0x05 },
+ { 0x202C, 0xFC },
+ { 0x202D, 0x02 },
+ { 0x202E, 0x25 },
+ { 0x202F, 0x03 },
+ { 0x2030, 0x05 },
+ { 0x2031, 0x02 },
+ { 0x2032, 0xCA },
+ { 0x2033, 0x02 },
+ { 0x2034, 0xFC },
+ { 0x2035, 0x02 },
+ { 0x2036, 0x25 },
+ { 0x2037, 0x03 },
+ { 0x2038, 0x25 },
+ { 0x2039, 0x97 },
+ { 0x203A, 0xEC },
+ { 0x203B, 0x01 },
+ { 0x203C, 0xF5 },
+ { 0x203D, 0x8E },
+ { 0x203E, 0x0C },
+ { 0x203F, 0x2D },
+ { 0x2040, 0x69 },
+ { 0x2041, 0x01 },
+ { 0x2042, 0x8E },
+ { 0x2043, 0x01 },
+ { 0x2044, 0x0C },
+ { 0x2045, 0x02 },
+ { 0x2046, 0x31 },
+ { 0x2047, 0x02 },
+ { 0x2048, 0x6A },
+ { 0x2049, 0x01 },
+ { 0x204A, 0x8E },
+ { 0x204B, 0x01 },
+ { 0x204C, 0x0D },
+ { 0x204D, 0x02 },
+ { 0x204E, 0x31 },
+ { 0x204F, 0x02 },
+ { 0x2050, 0x7B },
+ { 0x2051, 0x00 },
+ { 0x2052, 0x7D },
+ { 0x2053, 0x00 },
+ { 0x2054, 0x95 },
+ { 0x2055, 0x00 },
+ { 0x2056, 0x97 },
+ { 0x2057, 0x00 },
+ { 0x2058, 0xAD },
+ { 0x2059, 0x00 },
+ { 0x205A, 0xAF },
+ { 0x205B, 0x00 },
+ { 0x205C, 0x92 },
+ { 0x205D, 0x00 },
+ { 0x205E, 0x94 },
+ { 0x205F, 0x00 },
+ { 0x2060, 0x8E },
+ { 0x2061, 0x00 },
+ { 0x2062, 0x90 },
+ { 0x2063, 0x00 },
+ { 0x2064, 0xB1 },
+ { 0x2065, 0x00 },
+ { 0x2066, 0xB3 },
+ { 0x2067, 0x00 },
+ { 0x2068, 0x08 },
+ { 0x2069, 0x00 },
+ { 0x206A, 0x04 },
+ { 0x206B, 0x00 },
+ { 0x206C, 0x84 },
+ { 0x206D, 0x00 },
+ { 0x206E, 0x80 },
+ { 0x206F, 0x00 },
+ { 0x2070, 0x04 },
+ { 0x2071, 0x00 },
+ { 0x2072, 0x46 },
+ { 0x2073, 0x00 },
+ { 0x2074, 0xE9 },
+ { 0x2075, 0x01 },
+ { 0x2076, 0x74 },
+ { 0x2077, 0x02 },
+ { 0x2078, 0x80 },
+ { 0x2079, 0x00 },
+ { 0x207A, 0xC1 },
+ { 0x207B, 0x00 },
+ { 0x207C, 0xFF },
+ { 0x207D, 0x03 },
+ { 0x207E, 0xFF },
+ { 0x207F, 0x03 },
+ { 0x2080, 0x78 },
+ { 0x2081, 0x00 },
+ { 0x2082, 0x6A },
+ { 0x2083, 0x01 },
+ { 0x2084, 0xE4 },
+ { 0x2085, 0x01 },
+ { 0x2086, 0x2B },
+ { 0x2087, 0x03 },
+ { 0x2088, 0x00 },
+ { 0x2089, 0x00 },
+ { 0x208A, 0xFF },
+ { 0x208B, 0x03 },
+ { 0x208C, 0xFF },
+ { 0x208D, 0x03 },
+ { 0x208E, 0xFF },
+ { 0x208F, 0x03 },
+ { 0x2090, 0x7D },
+ { 0x2091, 0x00 },
+ { 0x2092, 0x62 },
+ { 0x2093, 0x01 },
+ { 0x2094, 0xE9 },
+ { 0x2095, 0x01 },
+ { 0x2096, 0x00 },
+ { 0x2097, 0x00 },
+ { 0x2098, 0x7C },
+ { 0x2099, 0x00 },
+ { 0x209A, 0x21 },
+ { 0x209B, 0x03 },
+ { 0x209C, 0xE9 },
+ { 0x209D, 0x01 },
+ { 0x209E, 0x21 },
+ { 0x209F, 0x03 },
+ { 0x20A0, 0xFF },
+ { 0x20A1, 0x03 },
+ { 0x20A2, 0xFF },
+ { 0x20A3, 0x03 },
+ { 0x20A4, 0xFF },
+ { 0x20A5, 0x03 },
+ { 0x20A6, 0xFF },
+ { 0x20A7, 0x03 },
+ { 0x20A8, 0xFF },
+ { 0x20A9, 0x03 },
+ { 0x20AA, 0xFF },
+ { 0x20AB, 0x03 },
+ { 0x20AC, 0xFF },
+ { 0x20AD, 0x03 },
+ { 0x20AE, 0xFF },
+ { 0x20AF, 0x03 },
+ { 0x20B0, 0xFF },
+ { 0x20B1, 0x03 },
+ { 0x20B2, 0xFF },
+ { 0x20B3, 0x03 },
+ { 0x20B4, 0x87 },
+ { 0x20B5, 0xCC },
+ { 0x20B6, 0x87 },
+ { 0x20B7, 0x08 },
+ { 0x20B8, 0xF4 },
+ { 0x20B9, 0xA5 },
+ { 0x20BA, 0x07 },
+ { 0x20BC, 0x1F },
+ { 0x20BD, 0x01 },
+ { 0x20BE, 0xF6 },
+ { 0x20BF, 0x00 },
+ { 0x20C0, 0x90 },
+ { 0x20C1, 0x01 },
+ { 0x20C2, 0x67 },
+ { 0x20C3, 0x01 },
+ { 0x20C4, 0xFF },
+ { 0x20C5, 0x03 },
+ { 0x20C6, 0xFF },
+ { 0x20C7, 0x03 },
+ { 0x20C8, 0x33 },
+ { 0x20C9, 0x02 },
+ { 0x20CA, 0x0A },
+ { 0x20CB, 0x02 },
+ { 0x20CC, 0x7F },
+ { 0x20CD, 0x00 },
+ { 0x20CE, 0xD2 },
+ { 0x20CF, 0x00 },
+ { 0x20D0, 0x81 },
+ { 0x20D1, 0x00 },
+ { 0x20D2, 0x87 },
+ { 0x20D3, 0x00 },
+ { 0x20D4, 0x09 },
+ { 0x20D5, 0x00 },
+ { 0x20D8, 0x7F },
+ { 0x20D9, 0x00 },
+ { 0x20DA, 0x62 },
+ { 0x20DB, 0x01 },
+ { 0x20DC, 0x7F },
+ { 0x20DD, 0x00 },
+ { 0x20DE, 0x62 },
+ { 0x20DF, 0x01 },
+ { 0x20E0, 0x65 },
+ { 0x20E1, 0x00 },
+ { 0x20E2, 0x75 },
+ { 0x20E3, 0x00 },
+ { 0x20E4, 0xE0 },
+ { 0x20E5, 0x00 },
+ { 0x20E6, 0xF0 },
+ { 0x20E7, 0x00 },
+ { 0x20E8, 0x4C },
+ { 0x20E9, 0x01 },
+ { 0x20EA, 0x5C },
+ { 0x20EB, 0x01 },
+ { 0x20EC, 0xD1 },
+ { 0x20ED, 0x01 },
+ { 0x20EE, 0xE1 },
+ { 0x20EF, 0x01 },
+ { 0x20F0, 0x93 },
+ { 0x20F1, 0x02 },
+ { 0x20F2, 0xA3 },
+ { 0x20F3, 0x02 },
+ { 0x20F4, 0x0D },
+ { 0x20F5, 0x03 },
+ { 0x20F6, 0x1D },
+ { 0x20F7, 0x03 },
+ { 0x20F8, 0x57 },
+ { 0x20F9, 0x00 },
+ { 0x20FA, 0x7B },
+ { 0x20FB, 0x00 },
+ { 0x20FC, 0xD2 },
+ { 0x20FD, 0x00 },
+ { 0x20FE, 0xF6 },
+ { 0x20FF, 0x00 },
+ { 0x2100, 0x3E },
+ { 0x2101, 0x01 },
+ { 0x2102, 0x60 },
+ { 0x2103, 0x01 },
+ { 0x2104, 0xC3 },
+ { 0x2105, 0x01 },
+ { 0x2106, 0xE5 },
+ { 0x2107, 0x01 },
+ { 0x2108, 0x85 },
+ { 0x2109, 0x02 },
+ { 0x210A, 0xA9 },
+ { 0x210B, 0x02 },
+ { 0x210C, 0xFF },
+ { 0x210D, 0x02 },
+ { 0x210E, 0x21 },
+ { 0x210F, 0x03 },
+ { 0x2110, 0xFF },
+ { 0x2111, 0x03 },
+ { 0x2112, 0x00 },
+ { 0x2113, 0x00 },
+ { 0x2114, 0xFF },
+ { 0x2115, 0x03 },
+ { 0x2116, 0xFF },
+ { 0x2117, 0x03 },
+ { 0x2118, 0xFF },
+ { 0x2119, 0x03 },
+ { 0x211A, 0xFF },
+ { 0x211B, 0x03 },
+ { 0x211C, 0xFF },
+ { 0x211D, 0x03 },
+ { 0x211E, 0xFF },
+ { 0x211F, 0x03 },
+ { 0x2120, 0xFF },
+ { 0x2121, 0x03 },
+ { 0x2122, 0xFF },
+ { 0x2123, 0x03 },
+ { 0x2124, 0xFF },
+ { 0x2125, 0x03 },
+ { 0x2126, 0xFF },
+ { 0x2127, 0x03 },
+ { 0x2128, 0x7D },
+ { 0x2129, 0x90 },
+ { 0x212A, 0xD5 },
+ { 0x212B, 0x07 },
+ { 0x212C, 0x64 },
+ { 0x212D, 0x01 },
+ { 0x2130, 0x5F },
+ { 0x2131, 0x7D },
+ { 0x2132, 0x05 },
+ { 0x2134, 0x78 },
+ { 0x2135, 0x00 },
+ { 0x2136, 0x76 },
+ { 0x2137, 0x00 },
+ { 0x2138, 0xF3 },
+ { 0x2139, 0x00 },
+ { 0x213A, 0xF1 },
+ { 0x213B, 0x00 },
+ { 0x213C, 0xA6 },
+ { 0x213D, 0x02 },
+ { 0x213E, 0xA4 },
+ { 0x213F, 0x02 },
+ { 0x2140, 0x7D },
+ { 0x2141, 0x00 },
+ { 0x2142, 0x8D },
+ { 0x2143, 0x00 },
+ { 0x2144, 0xA1 },
+ { 0x2145, 0x01 },
+ { 0x2146, 0xB1 },
+ { 0x2147, 0x01 },
+ { 0x2148, 0xAB },
+ { 0x2149, 0x02 },
+ { 0x214A, 0xBB },
+ { 0x214B, 0x02 },
+ { 0x214C, 0x17 },
+ { 0x214D, 0x5C },
+ { 0x214E, 0x00 },
+ { 0x2150, 0x00 },
+ { 0x2151, 0x00 },
+ { 0x2152, 0xF8 },
+ { 0x2153, 0x00 },
+ { 0x2154, 0xBE },
+ { 0x2155, 0x00 },
+ { 0x2156, 0x7D },
+ { 0x2157, 0x00 },
+ { 0x2158, 0x25 },
+ { 0x2159, 0x00 },
+ { 0x215A, 0x7D },
+ { 0x215B, 0x00 },
+ { 0x215C, 0x62 },
+ { 0x215D, 0x01 },
+ { 0x215E, 0xFF },
+ { 0x215F, 0x03 },
+ { 0x2160, 0x26 },
+ { 0x2161, 0x00 },
+ { 0x2162, 0x7D },
+ { 0x2163, 0x00 },
+ { 0x2164, 0x63 },
+ { 0x2165, 0x01 },
+ { 0x2166, 0xFF },
+ { 0x2167, 0x03 },
+ { 0x2168, 0xCB },
+ { 0x2169, 0x02 },
+ { 0x216A, 0xCF },
+ { 0x216B, 0x02 },
+ { 0x216C, 0xFF },
+ { 0x216D, 0x03 },
+ { 0x216E, 0xFF },
+ { 0x216F, 0x03 },
+ { 0x2170, 0xFF },
+ { 0x2171, 0x03 },
+ { 0x2172, 0xFF },
+ { 0x2173, 0x03 },
+ { 0x2174, 0xFF },
+ { 0x2175, 0x03 },
+ { 0x2176, 0xFF },
+ { 0x2177, 0x03 },
+ { 0x2178, 0x7E },
+ { 0x2179, 0x00 },
+ { 0x217A, 0xBD },
+ { 0x217B, 0x00 },
+ { 0x217C, 0xEC },
+ { 0x217D, 0x01 },
+ { 0x217E, 0x7B },
+ { 0x217F, 0x02 },
+ { 0x2180, 0xD1 },
+ { 0x2181, 0x02 },
+ { 0x2182, 0x25 },
+ { 0x2183, 0x03 },
+ { 0x2184, 0x7F },
+ { 0x2185, 0x00 },
+ { 0x2186, 0xBD },
+ { 0x2187, 0x00 },
+ { 0x2188, 0xED },
+ { 0x2189, 0x01 },
+ { 0x218A, 0x7B },
+ { 0x218B, 0x02 },
+ { 0x218C, 0xD2 },
+ { 0x218D, 0x02 },
+ { 0x218E, 0x25 },
+ { 0x218F, 0x03 },
+ { 0x2190, 0xFF },
+ { 0x2191, 0x03 },
+ { 0x2192, 0xFF },
+ { 0x2193, 0x03 },
+ { 0x2194, 0xE9 },
+ { 0x2195, 0x01 },
+ { 0x2196, 0x21 },
+ { 0x2197, 0x03 },
+ { 0x2198, 0x17 },
+ { 0x2199, 0xFC },
+ { 0x219A, 0x7F },
+ { 0x219B, 0x01 },
+ { 0x219C, 0xFF },
+ { 0x219D, 0x03 },
+ { 0x21A0, 0x1B },
+ { 0x21A1, 0x1B },
+ { 0x21A2, 0x1B },
+ { 0x21A3, 0x1B },
+ { 0x21A4, 0x2E },
+ { 0x21A5, 0x80 },
+ { 0x21A6, 0x00 },
+ { 0x21A8, 0x04 },
+ { 0x21A9, 0x98 },
+ { 0x21AA, 0x60 },
+ { 0x21AB, 0x03 },
+ { 0x21AC, 0x7F },
+ { 0x21AD, 0x80 },
+ { 0x21AE, 0x09 },
+ { 0x21B0, 0x1C },
+ { 0x21B1, 0x00 },
+ { 0x21B2, 0xA0 },
+ { 0x21B3, 0x00 },
+ { 0x21B4, 0x0C },
+ { 0x21B5, 0x00 },
+ { 0x21B6, 0x2D },
+ { 0x21B7, 0x00 },
+ { 0x21B8, 0x20 },
+ { 0x21B9, 0x00 },
+ { 0x21BA, 0x02 },
+ { 0x21BB, 0x00 },
+ { 0x21BC, 0xCC },
+ { 0x21BD, 0x00 },
+ { 0x21BE, 0x4A },
+ { 0x21BF, 0x00 },
+ { 0x21C0, 0xD0 },
+ { 0x21C1, 0x00 },
+ { 0x21C2, 0x44 },
+ { 0x21C3, 0x00 },
+ { 0x21C4, 0x00 },
+ { 0x21C5, 0xE0 },
+ { 0x21C6, 0x00 },
+ { 0x21C8, 0x11 },
+ { 0x21C9, 0x00 },
+ { 0x21CA, 0x02 },
+ { 0x21CC, 0x08 },
+ { 0x21CD, 0xC0 },
+ { 0x21CE, 0x0C },
+ { 0x21D0, 0x44 },
+ { 0x21D1, 0x00 },
+ { 0x21D2, 0x02 },
+ { 0x21D4, 0x02 },
+ { 0x21D5, 0x20 },
+ { 0x21D6, 0x2C },
+ { 0x21D8, 0xFE },
+ { 0x21D9, 0x9D },
+ { 0x21DA, 0xDF },
+ { 0x21DB, 0x03 },
+ { 0x21DC, 0x62 },
+ { 0x21DD, 0x01 },
+ { 0x21DE, 0x7F },
+ { 0x21DF, 0x00 },
+ { 0x21E0, 0xB7 },
+ { 0x21E1, 0x01 },
+ { 0x21E2, 0xB5 },
+ { 0x21E3, 0x01 },
+ { 0x21E4, 0xC1 },
+ { 0x21E5, 0x02 },
+ { 0x21E6, 0xBF },
+ { 0x21E7, 0x02 },
+ { 0x21E8, 0xB3 },
+ { 0x21E9, 0x0D },
+ { 0x21EA, 0x00 },
+ { 0x21EB, 0x04 },
+ { 0x21EC, 0x90 },
+ { 0x21ED, 0x07 },
+ { 0x21EE, 0x58 },
+ { 0x21EF, 0x04 },
+ { 0x21F0, 0x54 },
+ { 0x21F1, 0x04 },
+ { 0x21F4, 0x02 },
+ { 0x21F5, 0x00 },
+ { 0x21F6, 0x00 },
+ { 0x21F8, 0x3C },
+ { 0x21F9, 0x00 },
+ { 0x21FC, 0x28 },
+ { 0x21FD, 0x00 },
+ { 0x21FE, 0x3C },
+ { 0x21FF, 0x00 },
+ { 0x2200, 0x00 },
+ { 0x2204, 0x4C },
+ { 0x2205, 0x04 },
+ { 0x2206, 0x65 },
+ { 0x2207, 0x04 },
+ { 0x2208, 0x0A },
+ { 0x2209, 0x00 },
+ { 0x220C, 0x47 },
+ { 0x220D, 0x00 },
+ { 0x220E, 0x1F },
+ { 0x220F, 0x00 },
+ { 0x2210, 0x17 },
+ { 0x2211, 0x00 },
+ { 0x2212, 0x0F },
+ { 0x2213, 0x00 },
+ { 0x2214, 0x17 },
+ { 0x2215, 0x00 },
+ { 0x2216, 0x47 },
+ { 0x2217, 0x00 },
+ { 0x2218, 0x0F },
+ { 0x2219, 0x00 },
+ { 0x221A, 0x0F },
+ { 0x221B, 0x00 },
+ { 0x221C, 0x03 },
+ { 0x2220, 0x20 },
+ { 0x2221, 0x20 },
+ { 0x2222, 0x22 },
+ { 0x2223, 0x02 },
+ { 0x2224, 0xA7 },
+ { 0x2225, 0xAA },
+ { 0x2226, 0x80 },
+ { 0x2227, 0x08 },
+ { 0x2228, 0x01 },
+ { 0x22B2, 0x92 },
+ { 0x22B4, 0x20 },
+ { 0x22B5, 0x00 },
+ { 0x22B6, 0x20 },
+ { 0x22B7, 0x00 },
+ { 0x22B8, 0x20 },
+ { 0x22B9, 0x00 },
+ { 0x22BA, 0x20 },
+ { 0x22BB, 0x00 },
+ { 0x22BC, 0x20 },
+ { 0x22BD, 0x00 },
+ { 0x22BE, 0x20 },
+ { 0x22BF, 0x00 },
+ { 0x22C0, 0x20 },
+ { 0x22C1, 0x00 },
+ { 0x22C2, 0x20 },
+ { 0x22C3, 0x00 },
+ { 0x22C4, 0x20 },
+ { 0x22C5, 0x00 },
+ { 0x22C6, 0x20 },
+ { 0x22C7, 0x00 },
+ { 0x22C8, 0x20 },
+ { 0x22C9, 0x00 },
+ { 0x22CA, 0x20 },
+ { 0x22CB, 0x00 },
+ { 0x22CC, 0x20 },
+ { 0x22CD, 0x00 },
+ { 0x22CE, 0x20 },
+ { 0x22CF, 0x00 },
+ { 0x22DA, 0x00 },
+ { 0x2308, 0x01 },
+ { 0x2311, 0x09 },
+ { 0x2318, 0x40 },
+ { 0x2319, 0xCD },
+ { 0x231A, 0x54 },
+ { 0x2324, 0x10 },
+ { 0x2325, 0x00 },
+ { 0x2328, 0x00 },
+ { 0x2354, 0x0C },
+ { 0x23C0, 0x5D },
+ { 0x244C, 0x00 },
+ { 0x244D, 0x02 },
+ { 0x244E, 0x54 },
+ { 0x244F, 0x02 },
+ { 0x24A0, 0x00 },
+ { 0x24DA, 0x6F },
+ { 0x24DB, 0x00 },
+ { 0x24DC, 0x62 },
+ { 0x24DD, 0x01 },
+ { 0x24EA, 0x32 },
+ { 0x24EB, 0x00 },
+ { 0x24EC, 0xDC },
+ { 0x24ED, 0x00 },
+ { 0x24FA, 0x32 },
+ { 0x24FB, 0x00 },
+ { 0x24FC, 0xDD },
+ { 0x24FD, 0x00 },
+ { 0x254A, 0x15 },
+ { 0x254B, 0x01 },
+ { 0x255A, 0x15 },
+ { 0x255B, 0x01 },
+ { 0x2560, 0x01 },
+ { 0x2561, 0x00 },
+ { 0x2562, 0x2A },
+ { 0x2563, 0x00 },
+ { 0x2564, 0xF8 },
+ { 0x2565, 0x00 },
+ { 0x2566, 0x15 },
+ { 0x2567, 0x01 },
+ { 0x2568, 0x0C },
+ { 0x2569, 0x02 },
+ { 0x256A, 0x31 },
+ { 0x256B, 0x02 },
+ { 0x2578, 0x90 },
+ { 0x2579, 0x01 },
+ { 0x257A, 0x92 },
+ { 0x257B, 0x01 },
+ { 0x257C, 0xB8 },
+ { 0x257D, 0x02 },
+ { 0x257E, 0xBA },
+ { 0x257F, 0x02 },
+ { 0x2584, 0x90 },
+ { 0x2585, 0x01 },
+ { 0x2586, 0x92 },
+ { 0x2587, 0x01 },
+ { 0x2588, 0xB8 },
+ { 0x2589, 0x02 },
+ { 0x258A, 0xBA },
+ { 0x258B, 0x02 },
+ { 0x26B8, 0x10 },
+ { 0x26B9, 0x00 },
+ { 0x26BA, 0x33 },
+ { 0x26BB, 0x00 },
+ { 0x26BC, 0x89 },
+ { 0x26BD, 0x00 },
+ { 0x26BE, 0xB0 },
+ { 0x26BF, 0x00 },
+ { 0x26C4, 0x4E },
+ { 0x26C5, 0x00 },
+ { 0x26C8, 0xC9 },
+ { 0x26C9, 0x00 },
+ { 0x26CC, 0x35 },
+ { 0x26CD, 0x01 },
+ { 0x26D0, 0xBA },
+ { 0x26D1, 0x01 },
+ { 0x26D4, 0x7C },
+ { 0x26D5, 0x02 },
+ { 0x26D8, 0xF6 },
+ { 0x26D9, 0x02 },
+ { 0x26DE, 0x51 },
+ { 0x26DF, 0x00 },
+ { 0x26E0, 0x7F },
+ { 0x26E1, 0x00 },
+ { 0x26E2, 0xCC },
+ { 0x26E3, 0x00 },
+ { 0x26E4, 0xF8 },
+ { 0x26E5, 0x00 },
+ { 0x26E6, 0x38 },
+ { 0x26E7, 0x01 },
+ { 0x26E8, 0x65 },
+ { 0x26E9, 0x01 },
+ { 0x26EA, 0xBD },
+ { 0x26EB, 0x01 },
+ { 0x26EE, 0x7F },
+ { 0x26EF, 0x02 },
+ { 0x26F0, 0xAB },
+ { 0x26F1, 0x02 },
+ { 0x26F2, 0xF9 },
+ { 0x26F3, 0x02 },
+ { 0x2722, 0x59 },
+ { 0x2723, 0x02 },
+ { 0x2938, 0x55 },
+ { 0x2939, 0x00 },
+ { 0x293A, 0x17 },
+ { 0x293B, 0x00 },
+ { 0x293C, 0xD0 },
+ { 0x293D, 0x00 },
+ { 0x293E, 0x91 },
+ { 0x293F, 0x00 },
+ { 0x2940, 0x3C },
+ { 0x2941, 0x01 },
+ { 0x2942, 0x0C },
+ { 0x2943, 0x01 },
+ { 0x2944, 0xC1 },
+ { 0x2945, 0x01 },
+ { 0x2946, 0x76 },
+ { 0x2947, 0x01 },
+ { 0x2948, 0x83 },
+ { 0x2949, 0x02 },
+ { 0x294A, 0xFB },
+ { 0x294B, 0x01 },
+ { 0x294C, 0xFD },
+ { 0x294D, 0x02 },
+ { 0x294E, 0xBF },
+ { 0x294F, 0x02 },
+ { 0x2A06, 0xFF },
+ { 0x2A07, 0x03 },
+ { 0x2A20, 0x00 },
+ { 0x2A21, 0x00 },
+ { 0x2A22, 0x7D },
+ { 0x2A23, 0x00 },
+ { 0x2B11, 0x19 },
+ { 0x2B13, 0x15 },
+ { 0x2B14, 0x14 },
+ { 0x2B15, 0x13 },
+ { 0x2B16, 0x12 },
+ { 0x2B17, 0x11 },
+ { 0x2B18, 0x10 },
+ { 0x2B19, 0x0F },
+ { 0x2B1A, 0x0E },
+ { 0x2B1B, 0x0D },
+ { 0x2B1C, 0x0C },
+ { 0x2B1D, 0x0B },
+ { 0x2B1E, 0x0A },
+ { 0x2B1F, 0x09 },
+ { 0x2B20, 0x08 },
+ { 0x2B21, 0x07 },
+ { 0x2B22, 0x06 },
+ { 0x2B23, 0x05 },
+ { 0x2B24, 0x04 },
+ { 0x2B25, 0x03 },
+ { 0x2B26, 0x03 },
+ { 0x2B38, 0x01 },
+ { 0x2B45, 0xE3 },
+ { 0x2B50, 0x01 },
+ { 0x2B51, 0x00 },
+ { 0x2B6D, 0x47 },
+ { 0x2B70, 0x02 },
+ { 0x2B71, 0x02 },
+ { 0x2B72, 0x02 },
+ { 0x2B7F, 0x7F },
+ { 0x2B80, 0x94 },
+ { 0x2B81, 0x06 },
+ { 0x2B87, 0x1B },
+ { 0x2B88, 0x1B },
+ { 0x2B89, 0x17 },
+ { 0x2B8A, 0x12 },
+ { 0x2B8B, 0x12 },
+ { 0x2B8D, 0x2B },
+ { 0x2B8E, 0x2B },
+ { 0x2B8F, 0x2B },
+ { 0x2B90, 0x7F },
+ { 0x2B91, 0x1F },
+ { 0x2B94, 0x7F },
+ { 0x2B95, 0x27 },
+ { 0x2B98, 0x7F },
+ { 0x2B99, 0x57 },
+ { 0x2BA8, 0xBC },
+ { 0x2BA9, 0x62 },
+ { 0x2BC1, 0x70 },
+ { 0x2BC5, 0x80 },
+ { 0x2BD5, 0x30 },
+ { 0x2BD6, 0xF0 },
+ { 0x2BD8, 0xDB },
+ { 0x2BD9, 0xF6 },
+ { 0x2BDA, 0x63 },
+ { 0x2BDB, 0x0C },
+ { 0x2BDC, 0x5C },
+ { 0x2C98, 0xE1 },
+ { 0x2C99, 0x2E },
+ { 0x2C9B, 0x86 },
+ { 0x2CA9, 0x80 },
+ { 0x2CAA, 0x01 },
+ { 0x2D39, 0x0E },
+ { 0x2D54, 0x00 },
+ { 0x2D5B, 0x58 },
+ { 0x2D64, 0x64 },
+ { 0x2D65, 0x80 },
+ { 0x3000, 0x00 },
+ { 0x3001, 0x00 },
+ { 0x3002, 0x23 },
+ { 0x3003, 0xA1 },
+ { 0x3004, 0x00 },
+ { 0x3005, 0x20 },
+ { 0x3006, 0x84 },
+ { 0x3007, 0x00 },
+ { 0x3008, 0x06 },
+ { 0x3009, 0xB4 },
+ { 0x300A, 0x1F },
+ { 0x300B, 0x00 },
+ { 0x300C, 0x00 },
+ { 0x300D, 0x1B },
+ { 0x300E, 0x90 },
+ { 0x300F, 0x97 },
+ { 0x3010, 0x00 },
+ { 0x3011, 0x00 },
+ { 0x3012, 0x21 },
+ { 0x3013, 0x21 },
+ { 0x3014, 0x00 },
+ { 0x3015, 0x20 },
+ { 0x3016, 0x84 },
+ { 0x3017, 0x00 },
+ { 0x3018, 0x30 },
+ { 0x3019, 0x09 },
+ { 0x301A, 0x46 },
+ { 0x301B, 0x00 },
+ { 0x3070, 0xC1 },
+ { 0x3071, 0x81 },
+ { 0x3072, 0x29 },
+ { 0x3073, 0x81 },
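+ /* 16-bit output size as low/high byte pairs: 0x3410/0x3411 = IMX390_OUT_WIDTH, 0x3418/0x3419 = IMX390_OUT_HEIGHT */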
+ { 0x3410, (IMX390_OUT_WIDTH & 0xFF) },
+ { 0x3411, (IMX390_OUT_WIDTH >> 8) },
+ { 0x3418, (IMX390_OUT_HEIGHT & 0xFF) },
+ { 0x3419, (IMX390_OUT_HEIGHT >> 8) },
+ { 0x34A0, 0x30 },
+ { 0x34C0, 0xD3 },
+ { 0x34C1, 0x00 },
+ { 0x34C2, 0xD3 },
+ { 0x34C3, 0x00 },
+ { 0x34C4, 0xD3 },
+ { 0x34C5, 0x00 },
+ { 0x34C6, 0xD3 },
+ { 0x34C7, 0x00 },
+ { 0x34C8, 0xE2 },
+ { 0x34C9, 0x21 },
+ { 0x34CA, 0xE0 },
+ { 0x34CB, 0x1F },
+ { 0x34CC, 0x06 },
+ { 0x34CD, 0x20 },
+ { 0x34CE, 0x28 },
+ { 0x34CF, 0x1F },
+ { 0x3584, 0x00 },
+ { 0x3586, 0x00 },
+ { 0x3587, 0x01 },
+ { 0x3588, 0xE6 },
+ { 0x3589, 0x00 },
+ { 0x3590, 0x00 },
+ { 0x3591, 0x00 },
+ { 0x3594, 0x40 },
+ { 0x3598, 0x03 },
+ { 0x3599, 0x00 },
+ { 0x359A, 0x80 },
+ { 0x359B, 0x00 },
+ { 0x359C, 0x00 },
+ { 0x359D, 0x01 },
+ { 0x359E, 0x00 },
+ { 0x359F, 0x02 },
+ { 0x35A0, 0x00 },
+ { 0x35A1, 0x04 },
+ { 0x35A2, 0x20 },
+ { 0x35A3, 0x00 },
+ { 0x35A4, 0x40 },
+ { 0x35A5, 0x00 },
+ { 0x35A6, 0x80 },
+ { 0x35A7, 0x00 },
+ { 0x35A8, 0x00 },
+ { 0x35A9, 0x01 },
+ { 0x35AA, 0x3A },
+ { 0x35AB, 0x00 },
+ { 0x35AC, 0x80 },
+ { 0x35AD, 0x00 },
+ { 0x35AE, 0x00 },
+ { 0x35AF, 0x01 },
+ { 0x35B0, 0x00 },
+ { 0x35B1, 0x02 },
+ { 0x35B2, 0x00 },
+ { 0x35B3, 0x04 },
+ { 0x35B4, 0x02 },
+ { 0x35B5, 0x00 },
+ { 0x35B6, 0x04 },
+ { 0x35B7, 0x00 },
+ { 0x35B8, 0x08 },
+ { 0x35B9, 0x00 },
+ { 0x35BA, 0x10 },
+ { 0x35BB, 0x00 },
+ { 0x35BC, 0x03 },
+ { 0x35BD, 0x00 },
+ { 0x35C8, 0x00 },
+ { 0x35C9, 0x01 },
+ { 0x35CA, 0x00 },
+ { 0x35CB, 0x04 },
+ { 0x35CC, 0x00 },
+ { 0x35CD, 0x10 },
+ { 0x35CE, 0x00 },
+ { 0x35CF, 0x40 },
+ { 0x35D0, 0x00 },
+ { 0x35D1, 0x0C },
+ { 0x35D2, 0x00 },
+ { 0x35D3, 0x0C },
+ { 0x35D4, 0x00 },
+ { 0x35D5, 0x0C },
+ { 0x35D6, 0x00 },
+ { 0x35D7, 0x0C },
+ { 0x35D8, 0x00 },
+ { 0x35D9, 0x00 },
+ { 0x35DA, 0x08 },
+ { 0x35DB, 0x00 },
+ { 0x35DC, 0xD8 },
+ { 0x35DD, 0x0E },
+ { 0x35F0, 0x00 },
+ { 0x35F1, 0x10 },
+ { 0x35F2, 0x00 },
+ { 0x35F3, 0x10 },
+ { 0x35F4, 0x00 },
+ { 0x35F5, 0x10 },
+ { 0x35F6, 0x00 },
+ { 0x35F7, 0x03 },
+ { 0x35F8, 0x00 },
+ { 0x35F9, 0x01 },
+ { 0x35FA, 0x38 },
+ { 0x35FB, 0x00 },
+ { 0x35FC, 0xB3 },
+ { 0x35FD, 0x01 },
+ { 0x35FE, 0x00 },
+ { 0x35FF, 0x00 },
+ { 0x3600, 0x04 },
+ { 0x3601, 0x06 },
+ { 0x3604, 0x03 },
+ { 0x3605, 0x00 },
+ { 0x3608, 0x03 },
+ { 0x3609, 0x00 },
+ { 0x360C, 0x00 },
+ { 0x360D, 0x00 },
+ { 0x3610, 0x10 },
+ { 0x3611, 0x01 },
+ { 0x3612, 0x00 },
+ { 0x3613, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3615, 0x00 },
+ { 0x361C, 0x00 },
+ { 0x361D, 0x01 },
+ { 0x361E, 0x00 },
+ { 0x361F, 0x01 },
+ { 0x3620, 0x01 },
+ { 0x3621, 0x00 },
+ { 0x3622, 0xB0 },
+ { 0x3623, 0x04 },
+ { 0x3624, 0xDC },
+ { 0x3625, 0x05 },
+ { 0x3626, 0x00 },
+ { 0x3627, 0x01 },
+ { 0x3628, 0xFF },
+ { 0x3629, 0x0F },
+ { 0x362A, 0x00 },
+ { 0x362B, 0x10 },
+ { 0x362C, 0x00 },
+ { 0x362D, 0x01 },
+ { 0x3630, 0x40 },
+ { 0x3631, 0x00 },
+ { 0x3632, 0x40 },
+ { 0x3633, 0x00 },
+ { 0x3634, 0x40 },
+ { 0x3635, 0x00 },
+ { 0x3636, 0x40 },
+ { 0x3637, 0x00 },
+ { 0x3638, 0x40 },
+ { 0x3639, 0x00 },
+ { 0x363A, 0x40 },
+ { 0x363B, 0x00 },
+ { 0x363C, 0x40 },
+ { 0x363D, 0x00 },
+ { 0x363E, 0x40 },
+ { 0x363F, 0x00 },
+ { 0x36C4, 0x99 },
+ { 0x36C5, 0x09 },
+ { 0x36C6, 0x18 },
+ { 0x36C7, 0x07 },
+ { 0x36C8, 0x65 },
+ { 0x36C9, 0x0E },
+ { 0x36CC, 0x99 },
+ { 0x36CD, 0x01 },
+ { 0x36CE, 0x47 },
+ { 0x36CF, 0x00 },
+ { 0x36D0, 0x04 },
+ { 0x36D1, 0x00 },
+ { 0x36D4, 0x65 },
+ { 0x36D5, 0x0E },
+ { 0x36D6, 0xA4 },
+ { 0x36D7, 0x0A },
+ { 0x36D8, 0x65 },
+ { 0x36D9, 0x0E },
+ { 0x36DC, 0x65 },
+ { 0x36DD, 0x0E },
+ { 0x36DE, 0xA4 },
+ { 0x36DF, 0x0A },
+ { 0x36E0, 0x65 },
+ { 0x36E1, 0x0E },
+ { 0x36E4, 0x65 },
+ { 0x36E5, 0x0E },
+ { 0x36E6, 0xA4 },
+ { 0x36E7, 0x0A },
+ { 0x36E8, 0x65 },
+ { 0x36E9, 0x0E },
+ { 0x36EE, 0x00 },
+ { 0x36EF, 0x00 },
+ { 0x36F0, 0x00 },
+ { 0x36F1, 0x80 },
+ { 0x36F8, 0x00 },
+ { 0x3702, 0x03 },
+ { 0x3703, 0x04 },
+ { 0x3704, 0x08 },
+ { 0x370E, 0x0E },
+ { 0x3718, 0x62 },
+ { 0x3719, 0x4A },
+ { 0x371A, 0x38 },
+ { 0x371B, 0x20 },
+ { 0x371C, 0x64 },
+ { 0x371D, 0x42 },
+ { 0x371E, 0x32 },
+ { 0x371F, 0x1B },
+ { 0x3720, 0x9C },
+ { 0x3721, 0xA4 },
+ { 0x3722, 0xAC },
+ { 0x3723, 0xB4 },
+ { 0x3748, 0xAA },
+ { 0x3749, 0x96 },
+ { 0x374A, 0x7D },
+ { 0x374B, 0x69 },
+ { 0x37C0, 0x00 },
+ { 0x37C1, 0x00 },
+ { 0x37C2, 0x00 },
+ { 0x37C4, 0x00 },
+ { 0x37C5, 0x00 },
+ { 0x37C6, 0x00 },
+ { 0x37C8, 0x00 },
+ { 0x37C9, 0x00 },
+ { 0x37CA, 0x00 },
+ { 0x37CC, 0x00 },
+ { 0x37CD, 0x00 },
+ { 0x37CE, 0x00 },
+ { 0x37D0, 0x00 },
+ { 0x37D1, 0x00 },
+ { 0x37D2, 0x00 },
+ { 0x37D4, 0x00 },
+ { 0x37D5, 0x00 },
+ { 0x37D6, 0x00 },
+ { 0x37D8, 0x00 },
+ { 0x37D9, 0x00 },
+ { 0x37DA, 0x00 },
+ { 0x37DC, 0x00 },
+ { 0x37DD, 0x00 },
+ { 0x37DE, 0x00 },
+ { 0x37E0, 0x00 },
+ { 0x37E1, 0x00 },
+ { 0x37E2, 0x00 },
+ { 0x37E4, 0x00 },
+ { 0x37E5, 0x00 },
+ { 0x37E6, 0x00 },
+ { 0x37E8, 0x00 },
+ { 0x37E9, 0x00 },
+ { 0x37EA, 0x00 },
+ { 0x37EC, 0x00 },
+ { 0x37ED, 0x00 },
+ { 0x37EE, 0x00 },
+ { 0x37F0, 0x00 },
+ { 0x37F4, 0x00 },
+ { 0x37F5, 0x1E },
+ { 0x37F6, 0x34 },
+ { 0x37F7, 0x00 },
+ { 0x37F8, 0xFF },
+ { 0x37F9, 0xFF },
+ { 0x37FA, 0x03 },
+ { 0x37FC, 0x00 },
+ { 0x37FD, 0x00 },
+ { 0x37FE, 0x04 },
+ { 0x3800, 0xFF },
+ { 0x3801, 0xFF },
+ { 0x3802, 0x03 },
+ { 0x3804, 0x00 },
+ { 0x3805, 0x00 },
+ { 0x3806, 0x04 },
+ { 0x3808, 0x00 },
+ { 0x3809, 0x00 },
+ { 0x380A, 0x00 },
+ { 0x380C, 0x00 },
+ { 0x380D, 0x00 },
+ { 0x380E, 0x00 },
+ { 0x3810, 0x00 },
+ { 0x3811, 0x00 },
+ { 0x3812, 0x00 },
+ { 0x3814, 0x00 },
+ { 0x3815, 0x00 },
+ { 0x3816, 0x00 },
+ { 0x3818, 0x00 },
+ { 0x3819, 0x00 },
+ { 0x381A, 0x00 },
+ { 0x381C, 0x00 },
+ { 0x381D, 0x00 },
+ { 0x381E, 0x00 },
+ { 0x3820, 0x00 },
+ { 0x3821, 0x00 },
+ { 0x3822, 0x00 },
+ { 0x3824, 0x00 },
+ { 0x3825, 0x00 },
+ { 0x3826, 0x00 },
+ { 0x3828, 0x00 },
+ { 0x3829, 0x00 },
+ { 0x382A, 0x00 },
+ { 0x382C, 0x00 },
+ { 0x382D, 0x00 },
+ { 0x382E, 0x00 },
+ { 0x3830, 0x00 },
+ { 0x3831, 0x00 },
+ { 0x3832, 0x00 },
+ { 0x3834, 0x00 },
+ { 0x3835, 0x00 },
+ { 0x3836, 0x00 },
+ { 0x3838, 0x00 },
+ { 0x3839, 0x00 },
+ { 0x383A, 0x00 },
+ { 0x383B, 0x00 },
+ { 0x383C, 0x00 },
+ { 0x383D, 0x00 },
+ { 0x383E, 0x00 },
+ { 0x383F, 0x00 },
+ { 0x3840, 0x00 },
+ { 0x3841, 0x00 },
+ { 0x3842, 0x00 },
+ { 0x3843, 0x00 },
+ { 0x3844, 0x00 },
+ { 0x3845, 0x00 },
+ { 0x3846, 0x00 },
+ { 0x3847, 0x00 },
+ { 0x3848, 0x00 },
+ { 0x3849, 0x00 },
+ { 0x384A, 0x00 },
+ { 0x384B, 0x00 },
+ { 0x384C, 0x00 },
+ { 0x384D, 0x00 },
+ { 0x384E, 0x00 },
+ { 0x384F, 0x00 },
+ { 0x3850, 0xFF },
+ { 0x3851, 0x0F },
+ { 0x3852, 0x00 },
+ { 0x3853, 0x10 },
+ { 0x3854, 0xFF },
+ { 0x3855, 0x0F },
+ { 0x3856, 0x00 },
+ { 0x3857, 0x10 },
+ { 0x3858, 0xFF },
+ { 0x3859, 0x0F },
+ { 0x385A, 0x00 },
+ { 0x385B, 0x10 },
+ { 0x385C, 0x02 },
+ { 0x385D, 0x00 },
+ { 0x385E, 0x06 },
+ { 0x385F, 0x00 },
+ { 0x3860, 0x06 },
+ { 0x3861, 0x00 },
+ { 0x3862, 0x08 },
+ { 0x3863, 0x00 },
+ { 0x3864, 0x02 },
+ { 0x3865, 0x00 },
+ { 0x38A0, 0x01 },
+ { 0x38A1, 0x01 },
+ { 0x38A2, 0x00 },
+ { 0x38A3, 0x01 },
+ { 0x38A4, 0x07 },
+ { 0x38A5, 0x00 },
+ { 0x38A6, 0x04 },
+ { 0x38A7, 0x05 },
+ { 0x38A8, 0x00 },
+ { 0x38A9, 0x00 },
+ { 0x38AC, 0x00 },
+ { 0x38AD, 0x00 },
+ { 0x38AE, 0x01 },
+ { 0x38B0, 0x02 },
+ { 0x38B2, 0x22 },
+ { 0x38B3, 0x00 },
+ { 0x38B4, 0x17 },
+ { 0x38B5, 0x00 },
+ { 0x38B6, 0x11 },
+ { 0x38B7, 0x00 },
+ { 0x38B8, 0x0E },
+ { 0x38B9, 0x00 },
+ { 0x38BA, 0x2A },
+ { 0x38BB, 0x00 },
+ { 0x38BC, 0x1C },
+ { 0x38BD, 0x00 },
+ { 0x38BE, 0x14 },
+ { 0x38BF, 0x00 },
+ { 0x38C0, 0x10 },
+ { 0x38C1, 0x00 },
+ { 0x38C2, 0x31 },
+ { 0x38C3, 0x00 },
+ { 0x38C4, 0x21 },
+ { 0x38C5, 0x00 },
+ { 0x38C6, 0x18 },
+ { 0x38C7, 0x00 },
+ { 0x38C8, 0x12 },
+ { 0x38C9, 0x00 },
+ { 0x38CA, 0x3C },
+ { 0x38CB, 0x00 },
+ { 0x38CC, 0x29 },
+ { 0x38CD, 0x00 },
+ { 0x38CE, 0x1D },
+ { 0x38CF, 0x00 },
+ { 0x38D0, 0x15 },
+ { 0x38D1, 0x00 },
+ { 0x38D2, 0x4E },
+ { 0x38D3, 0x00 },
+ { 0x38D4, 0x35 },
+ { 0x38D5, 0x00 },
+ { 0x38D6, 0x26 },
+ { 0x38D7, 0x00 },
+ { 0x38D8, 0x1A },
+ { 0x38D9, 0x00 },
+ { 0x38DA, 0x69 },
+ { 0x38DB, 0x00 },
+ { 0x38DC, 0x48 },
+ { 0x38DD, 0x00 },
+ { 0x38DE, 0x33 },
+ { 0x38DF, 0x00 },
+ { 0x38E0, 0x22 },
+ { 0x38E1, 0x00 },
+ { 0x38E2, 0x93 },
+ { 0x38E3, 0x00 },
+ { 0x38E4, 0x64 },
+ { 0x38E5, 0x00 },
+ { 0x38E6, 0x48 },
+ { 0x38E7, 0x00 },
+ { 0x38E8, 0x30 },
+ { 0x38E9, 0x00 },
+ { 0x38EA, 0xD3 },
+ { 0x38EB, 0x00 },
+ { 0x38EC, 0x90 },
+ { 0x38ED, 0x00 },
+ { 0x38EE, 0x69 },
+ { 0x38EF, 0x00 },
+ { 0x38F0, 0x49 },
+ { 0x38F1, 0x00 },
+ { 0x38F2, 0x39 },
+ { 0x38F3, 0x01 },
+ { 0x38F4, 0xD5 },
+ { 0x38F5, 0x00 },
+ { 0x38F6, 0x9F },
+ { 0x38F7, 0x00 },
+ { 0x38F8, 0x75 },
+ { 0x38F9, 0x00 },
+ { 0x38FA, 0x00 },
+ { 0x38FB, 0x01 },
+ { 0x38FC, 0x00 },
+ { 0x38FD, 0x01 },
+ { 0x38FE, 0x00 },
+ { 0x38FF, 0x01 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x01 },
+ { 0x3902, 0x70 },
+ { 0x3903, 0x00 },
+ { 0x3904, 0x30 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x25 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x20 },
+ { 0x3909, 0x00 },
+ { 0x390A, 0xB2 },
+ { 0x390B, 0x00 },
+ { 0x390C, 0x80 },
+ { 0x390D, 0x00 },
+ { 0x390E, 0x70 },
+ { 0x390F, 0x00 },
+ { 0x3910, 0x50 },
+ { 0x3911, 0x00 },
+ { 0x3912, 0xB2 },
+ { 0x3913, 0x00 },
+ { 0x3914, 0x80 },
+ { 0x3915, 0x00 },
+ { 0x3916, 0x70 },
+ { 0x3917, 0x00 },
+ { 0x3918, 0x50 },
+ { 0x3919, 0x00 },
+ { 0x391A, 0xB2 },
+ { 0x391B, 0x00 },
+ { 0x391C, 0x80 },
+ { 0x391D, 0x00 },
+ { 0x391E, 0x70 },
+ { 0x391F, 0x00 },
+ { 0x3920, 0x50 },
+ { 0x3921, 0x00 },
+ { 0x3922, 0x40 },
+ { 0x3923, 0x00 },
+ { 0x3924, 0x40 },
+ { 0x3925, 0x00 },
+ { 0x3926, 0x40 },
+ { 0x3927, 0x00 },
+ { 0x3928, 0x40 },
+ { 0x3929, 0x00 },
+ { 0x392A, 0x80 },
+ { 0x392B, 0x00 },
+ { 0x392C, 0x80 },
+ { 0x392D, 0x00 },
+ { 0x392E, 0x80 },
+ { 0x392F, 0x00 },
+ { 0x3930, 0x80 },
+ { 0x3931, 0x00 },
+ { 0x3932, 0x80 },
+ { 0x3933, 0x80 },
+ { 0x3934, 0x80 },
+ { 0x3940, 0x01 },
+ { 0x3941, 0x01 },
+ { 0x3942, 0x00 },
+ { 0x3943, 0x01 },
+ { 0x3944, 0x07 },
+ { 0x3945, 0x00 },
+ { 0x3946, 0x04 },
+ { 0x3947, 0x05 },
+ { 0x3948, 0x00 },
+ { 0x3949, 0x00 },
+ { 0x394C, 0x00 },
+ { 0x394D, 0x00 },
+ { 0x394E, 0x01 },
+ { 0x3950, 0x03 },
+ { 0x3952, 0x14 },
+ { 0x3953, 0x00 },
+ { 0x3954, 0x0F },
+ { 0x3955, 0x00 },
+ { 0x3956, 0x0E },
+ { 0x3957, 0x00 },
+ { 0x3958, 0x0E },
+ { 0x3959, 0x00 },
+ { 0x395A, 0x19 },
+ { 0x395B, 0x00 },
+ { 0x395C, 0x11 },
+ { 0x395D, 0x00 },
+ { 0x395E, 0x0F },
+ { 0x395F, 0x00 },
+ { 0x3960, 0x0E },
+ { 0x3961, 0x00 },
+ { 0x3962, 0x1C },
+ { 0x3963, 0x00 },
+ { 0x3964, 0x13 },
+ { 0x3965, 0x00 },
+ { 0x3966, 0x0F },
+ { 0x3967, 0x00 },
+ { 0x3968, 0x0E },
+ { 0x3969, 0x00 },
+ { 0x396A, 0x23 },
+ { 0x396B, 0x00 },
+ { 0x396C, 0x15 },
+ { 0x396D, 0x00 },
+ { 0x396E, 0x11 },
+ { 0x396F, 0x00 },
+ { 0x3970, 0x0E },
+ { 0x3971, 0x00 },
+ { 0x3972, 0x2E },
+ { 0x3973, 0x00 },
+ { 0x3974, 0x1A },
+ { 0x3975, 0x00 },
+ { 0x3976, 0x14 },
+ { 0x3977, 0x00 },
+ { 0x3978, 0x0F },
+ { 0x3979, 0x00 },
+ { 0x397A, 0x3E },
+ { 0x397B, 0x00 },
+ { 0x397C, 0x23 },
+ { 0x397D, 0x00 },
+ { 0x397E, 0x1A },
+ { 0x397F, 0x00 },
+ { 0x3980, 0x12 },
+ { 0x3981, 0x00 },
+ { 0x3982, 0x56 },
+ { 0x3983, 0x00 },
+ { 0x3984, 0x31 },
+ { 0x3985, 0x00 },
+ { 0x3986, 0x25 },
+ { 0x3987, 0x00 },
+ { 0x3988, 0x1A },
+ { 0x3989, 0x00 },
+ { 0x398A, 0x7B },
+ { 0x398B, 0x00 },
+ { 0x398C, 0x49 },
+ { 0x398D, 0x00 },
+ { 0x398E, 0x39 },
+ { 0x398F, 0x00 },
+ { 0x3990, 0x2C },
+ { 0x3991, 0x00 },
+ { 0x3992, 0xB4 },
+ { 0x3993, 0x00 },
+ { 0x3994, 0x75 },
+ { 0x3995, 0x00 },
+ { 0x3996, 0x61 },
+ { 0x3997, 0x00 },
+ { 0x3998, 0x53 },
+ { 0x3999, 0x00 },
+ { 0x399A, 0x00 },
+ { 0x399B, 0x01 },
+ { 0x399C, 0x00 },
+ { 0x399D, 0x01 },
+ { 0x399E, 0x00 },
+ { 0x399F, 0x01 },
+ { 0x39A0, 0x00 },
+ { 0x39A1, 0x01 },
+ { 0x39A2, 0x70 },
+ { 0x39A3, 0x00 },
+ { 0x39A4, 0x30 },
+ { 0x39A5, 0x00 },
+ { 0x39A6, 0x25 },
+ { 0x39A7, 0x00 },
+ { 0x39A8, 0x20 },
+ { 0x39A9, 0x00 },
+ { 0x39AA, 0xB2 },
+ { 0x39AB, 0x00 },
+ { 0x39AC, 0x80 },
+ { 0x39AD, 0x00 },
+ { 0x39AE, 0x70 },
+ { 0x39AF, 0x00 },
+ { 0x39B0, 0x80 },
+ { 0x39B1, 0x00 },
+ { 0x39B2, 0xB2 },
+ { 0x39B3, 0x00 },
+ { 0x39B4, 0x80 },
+ { 0x39B5, 0x00 },
+ { 0x39B6, 0x70 },
+ { 0x39B7, 0x00 },
+ { 0x39B8, 0x80 },
+ { 0x39B9, 0x00 },
+ { 0x39BA, 0xB2 },
+ { 0x39BB, 0x00 },
+ { 0x39BC, 0x80 },
+ { 0x39BD, 0x00 },
+ { 0x39BE, 0x70 },
+ { 0x39BF, 0x00 },
+ { 0x39C0, 0x80 },
+ { 0x39C1, 0x00 },
+ { 0x39C2, 0x40 },
+ { 0x39C3, 0x00 },
+ { 0x39C4, 0x40 },
+ { 0x39C5, 0x00 },
+ { 0x39C6, 0x40 },
+ { 0x39C7, 0x00 },
+ { 0x39C8, 0x40 },
+ { 0x39C9, 0x00 },
+ { 0x39CA, 0x80 },
+ { 0x39CB, 0x00 },
+ { 0x39CC, 0x80 },
+ { 0x39CD, 0x00 },
+ { 0x39CE, 0x80 },
+ { 0x39CF, 0x00 },
+ { 0x39D0, 0x80 },
+ { 0x39D1, 0x00 },
+ { 0x39D2, 0x80 },
+ { 0x39D3, 0x80 },
+ { 0x39D4, 0x80 },
+ { 0x39E0, 0x01 },
+ { 0x39E1, 0x00 },
+ { 0x39E4, 0x40 },
+ { 0x39E5, 0x01 },
+ { 0x39E6, 0x01 },
+ { 0x39E8, 0x00 },
+ { 0x39E9, 0x01 },
+ { 0x39EA, 0x00 },
+ { 0x39EB, 0x00 },
+ { 0x39EC, 0x01 },
+ { 0x39ED, 0x00 },
+ { 0x39EE, 0x01 },
+ { 0x39F0, 0x03 },
+ { 0x39F1, 0x04 },
+ { 0x39F2, 0x0E },
+ { 0x39F4, 0x1C },
+ { 0x39F5, 0x00 },
+ { 0x39F6, 0x13 },
+ { 0x39F7, 0x00 },
+ { 0x39F8, 0x0D },
+ { 0x39F9, 0x00 },
+ { 0x39FA, 0x07 },
+ { 0x39FB, 0x00 },
+ { 0x39FC, 0x38 },
+ { 0x39FD, 0x00 },
+ { 0x39FE, 0x1C },
+ { 0x39FF, 0x00 },
+ { 0x3A00, 0x11 },
+ { 0x3A01, 0x00 },
+ { 0x3A02, 0x08 },
+ { 0x3A03, 0x00 },
+ { 0x3A04, 0x4A },
+ { 0x3A05, 0x00 },
+ { 0x3A06, 0x23 },
+ { 0x3A07, 0x00 },
+ { 0x3A08, 0x15 },
+ { 0x3A09, 0x00 },
+ { 0x3A0A, 0x09 },
+ { 0x3A0B, 0x00 },
+ { 0x3A0C, 0x65 },
+ { 0x3A0D, 0x00 },
+ { 0x3A0E, 0x2D },
+ { 0x3A0F, 0x00 },
+ { 0x3A10, 0x1A },
+ { 0x3A11, 0x00 },
+ { 0x3A12, 0x0B },
+ { 0x3A13, 0x00 },
+ { 0x3A14, 0x8D },
+ { 0x3A15, 0x00 },
+ { 0x3A16, 0x3D },
+ { 0x3A17, 0x00 },
+ { 0x3A18, 0x23 },
+ { 0x3A19, 0x00 },
+ { 0x3A1A, 0x0E },
+ { 0x3A1B, 0x00 },
+ { 0x3A1C, 0xC5 },
+ { 0x3A1D, 0x00 },
+ { 0x3A1E, 0x55 },
+ { 0x3A1F, 0x00 },
+ { 0x3A20, 0x30 },
+ { 0x3A21, 0x00 },
+ { 0x3A22, 0x13 },
+ { 0x3A23, 0x00 },
+ { 0x3A24, 0x16 },
+ { 0x3A25, 0x01 },
+ { 0x3A26, 0x76 },
+ { 0x3A27, 0x00 },
+ { 0x3A28, 0x42 },
+ { 0x3A29, 0x00 },
+ { 0x3A2A, 0x1A },
+ { 0x3A2B, 0x00 },
+ { 0x3A2C, 0x88 },
+ { 0x3A2D, 0x01 },
+ { 0x3A2E, 0xA7 },
+ { 0x3A2F, 0x00 },
+ { 0x3A30, 0x5D },
+ { 0x3A31, 0x00 },
+ { 0x3A32, 0x24 },
+ { 0x3A33, 0x00 },
+ { 0x3A34, 0x2A },
+ { 0x3A35, 0x02 },
+ { 0x3A36, 0xEB },
+ { 0x3A37, 0x00 },
+ { 0x3A38, 0x83 },
+ { 0x3A39, 0x00 },
+ { 0x3A3A, 0x32 },
+ { 0x3A3B, 0x00 },
+ { 0x3A3C, 0x00 },
+ { 0x3A3D, 0x01 },
+ { 0x3A3E, 0x00 },
+ { 0x3A3F, 0x01 },
+ { 0x3A40, 0x00 },
+ { 0x3A41, 0x01 },
+ { 0x3A42, 0x00 },
+ { 0x3A43, 0x01 },
+ { 0x3A44, 0x80 },
+ { 0x3A45, 0x00 },
+ { 0x3A46, 0x50 },
+ { 0x3A47, 0x00 },
+ { 0x3A48, 0x30 },
+ { 0x3A49, 0x00 },
+ { 0x3A4A, 0x20 },
+ { 0x3A4B, 0x00 },
+ { 0x3A4C, 0x99 },
+ { 0x3A4D, 0x00 },
+ { 0x3A4E, 0x80 },
+ { 0x3A4F, 0x00 },
+ { 0x3A50, 0x80 },
+ { 0x3A51, 0x00 },
+ { 0x3A52, 0x80 },
+ { 0x3A53, 0x00 },
+ { 0x3A54, 0x99 },
+ { 0x3A55, 0x00 },
+ { 0x3A56, 0x80 },
+ { 0x3A57, 0x00 },
+ { 0x3A58, 0x80 },
+ { 0x3A59, 0x00 },
+ { 0x3A5A, 0x80 },
+ { 0x3A5B, 0x00 },
+ { 0x3A5C, 0x99 },
+ { 0x3A5D, 0x00 },
+ { 0x3A5E, 0x80 },
+ { 0x3A5F, 0x00 },
+ { 0x3A60, 0x80 },
+ { 0x3A61, 0x00 },
+ { 0x3A62, 0x80 },
+ { 0x3A63, 0x00 },
+ { 0x3A64, 0x1C },
+ { 0x3A65, 0x00 },
+ { 0x3A66, 0x13 },
+ { 0x3A67, 0x00 },
+ { 0x3A68, 0x0D },
+ { 0x3A69, 0x00 },
+ { 0x3A6A, 0x07 },
+ { 0x3A6B, 0x00 },
+ { 0x3A6C, 0x0C },
+ { 0x3A6D, 0x00 },
+ { 0x3A6E, 0x09 },
+ { 0x3A6F, 0x00 },
+ { 0x3A70, 0x06 },
+ { 0x3A71, 0x00 },
+ { 0x3A72, 0x03 },
+ { 0x3A73, 0x00 },
+ { 0x3A74, 0x1F },
+ { 0x3A75, 0x00 },
+ { 0x3A76, 0x1B },
+ { 0x3A77, 0x00 },
+ { 0x3A78, 0x0F },
+ { 0x3A79, 0x00 },
+ { 0x3A7A, 0x08 },
+ { 0x3A7B, 0x00 },
+ { 0x3A7C, 0x80 },
+ { 0x3A7D, 0x00 },
+ { 0x3A7E, 0x80 },
+ { 0x3A7F, 0x00 },
+ { 0x3A80, 0x80 },
+ { 0x3A81, 0x00 },
+ { 0x3A82, 0x80 },
+ { 0x3A83, 0x00 },
+ { 0x3A84, 0x09 },
+ { 0x3A85, 0x00 },
+ { 0x3A86, 0x04 },
+ { 0x3A87, 0x00 },
+ { 0x3A88, 0x03 },
+ { 0x3A89, 0x00 },
+ { 0x3A8A, 0x01 },
+ { 0x3A8B, 0x00 },
+ { 0x3A8C, 0x19 },
+ { 0x3A8D, 0x01 },
+ { 0x3A8E, 0xD2 },
+ { 0x3A8F, 0x00 },
+ { 0x3A90, 0x8C },
+ { 0x3A91, 0x00 },
+ { 0x3A92, 0x64 },
+ { 0x3A93, 0x00 },
+ { 0x3A94, 0xFF },
+ { 0x3A95, 0x00 },
+ { 0x3A96, 0xD2 },
+ { 0x3A97, 0x00 },
+ { 0x3A98, 0x8C },
+ { 0x3A99, 0x00 },
+ { 0x3A9A, 0x64 },
+ { 0x3A9B, 0x00 },
+ { 0x3A9C, 0x08 },
+ { 0x3A9D, 0x10 },
+ { 0x3A9E, 0x80 },
+ { 0x3A9F, 0x80 },
+ { 0x3AA0, 0x80 },
+ { 0x3AA1, 0x04 },
+ { 0x3AA2, 0x05 },
+ { 0x3AC0, 0x01 },
+ { 0x3AC4, 0x81 },
+ { 0x3AC5, 0x00 },
+ { 0x3AC6, 0x00 },
+ { 0x3AC7, 0x00 },
+ { 0x3AC8, 0x00 },
+ { 0x3AC9, 0x00 },
+ { 0x3ACA, 0x00 },
+ { 0x3ACB, 0x00 },
+ { 0x3ACC, 0x02 },
+ { 0x3ACD, 0x00 },
+ { 0x3ACE, 0x81 },
+ { 0x3ACF, 0x00 },
+ { 0x3AD0, 0x00 },
+ { 0x3AD1, 0x00 },
+ { 0x3AD2, 0xFD },
+ { 0x3AD3, 0x03 },
+ { 0x3AD4, 0x02 },
+ { 0x3AD5, 0x00 },
+ { 0x3AD6, 0x00 },
+ { 0x3AD7, 0x00 },
+ { 0x3AD8, 0x81 },
+ { 0x3AD9, 0x00 },
+ { 0x3ADA, 0xFD },
+ { 0x3ADB, 0x03 },
+ { 0x3ADC, 0xFF },
+ { 0x3ADD, 0x03 },
+ { 0x3ADE, 0x01 },
+ { 0x3ADF, 0x00 },
+ { 0x3AE0, 0x01 },
+ { 0x3AE1, 0x00 },
+ { 0x3AE2, 0x7E },
+ { 0x3AE3, 0x00 },
+ { 0x3AF4, 0x00 },
+ { 0x3AF6, 0x40 },
+ { 0x3AF7, 0x1E },
+ { 0x3AF8, 0x01 },
+ { 0x3AFA, 0x63 },
+ { 0x3AFB, 0x09 },
+ { 0x3AFC, 0x11 },
+ { 0x3AFD, 0x09 },
+ { 0x3AFE, 0x00 },
+ { 0x3AFF, 0x00 },
+ { 0x3B00, 0x00 },
+ { 0x3B01, 0x00 },
+ { 0x3B02, 0x84 },
+ { 0x3B03, 0x06 },
+ { 0x3B04, 0x30 },
+ { 0x3B05, 0x06 },
+ { 0x3B06, 0x00 },
+ { 0x3B07, 0x00 },
+ { 0x3B08, 0x00 },
+ { 0x3B09, 0x00 },
+ { 0x3B0A, 0x00 },
+ { 0x3B0B, 0x00 },
+ { 0x3B0C, 0x00 },
+ { 0x3B0D, 0x00 },
+ { 0x3B0E, 0x00 },
+ { 0x3B0F, 0x00 },
+ { 0x3B10, 0x00 },
+ { 0x3B11, 0x00 },
+ { 0x3B12, 0x00 },
+ { 0x3B13, 0x00 },
+ { 0x3B14, 0x00 },
+ { 0x3B15, 0x00 },
+ { 0x3B16, 0x00 },
+ { 0x3B17, 0x00 },
+ { 0x3B18, 0x00 },
+ { 0x3B19, 0x00 },
+ { 0x3B1A, 0x00 },
+ { 0x3B1B, 0x00 },
+ { 0x3B1C, 0x00 },
+ { 0x3B1D, 0x00 },
+ { 0x3B1E, 0x00 },
+ { 0x3B1F, 0x00 },
+ { 0x3B20, 0x00 },
+ { 0x3B21, 0x00 },
+ { 0x3B22, 0x00 },
+ { 0x3B23, 0x00 },
+ { 0x3B24, 0x00 },
+ { 0x3B25, 0x00 },
+ { 0x3B26, 0x00 },
+ { 0x3B27, 0x00 },
+ { 0x3B28, 0x00 },
+ { 0x3B29, 0x00 },
+ { 0x3B2A, 0x00 },
+ { 0x3B2C, 0x00 },
+ { 0x3B2E, 0x00 },
+ { 0x3B30, 0x00 },
+ { 0x3B32, 0x0C },
+ { 0x4000, 0xD1 },
+ { 0x4001, 0xC0 },
+ { 0x4002, 0xC0 },
+ { 0x4003, 0xB8 },
+ { 0x4004, 0xC0 },
+ { 0x4005, 0xB8 },
+ { 0x4006, 0xB9 },
+ { 0x4007, 0xB7 },
+ { 0x4008, 0xB0 },
+ { 0x4009, 0xAB },
+ { 0x400A, 0xAC },
+ { 0x400B, 0xAB },
+ { 0x400C, 0xA8 },
+ { 0x400D, 0xA6 },
+ { 0x400E, 0xA6 },
+ { 0x400F, 0xA5 },
+ { 0x4010, 0xA2 },
+ { 0x4011, 0xA0 },
+ { 0x4012, 0xA0 },
+ { 0x4013, 0x9F },
+ { 0x4014, 0xA4 },
+ { 0x4015, 0xA2 },
+ { 0x4016, 0xA2 },
+ { 0x4017, 0x9C },
+ { 0x4018, 0xA8 },
+ { 0x4019, 0xA6 },
+ { 0x401A, 0xA8 },
+ { 0x401B, 0xAA },
+ { 0x401C, 0xB0 },
+ { 0x401D, 0xAE },
+ { 0x401E, 0xAE },
+ { 0x401F, 0xAE },
+ { 0x4020, 0xBA },
+ { 0x4021, 0xAE },
+ { 0x4022, 0xAF },
+ { 0x4023, 0xAE },
+ { 0x4024, 0xC6 },
+ { 0x4025, 0xBD },
+ { 0x4026, 0xBD },
+ { 0x4027, 0xBA },
+ { 0x4028, 0xB0 },
+ { 0x4029, 0xA9 },
+ { 0x402A, 0xAA },
+ { 0x402B, 0xA8 },
+ { 0x402C, 0x9F },
+ { 0x402D, 0x9C },
+ { 0x402E, 0x9C },
+ { 0x402F, 0x9B },
+ { 0x4030, 0x93 },
+ { 0x4031, 0x91 },
+ { 0x4032, 0x92 },
+ { 0x4033, 0x91 },
+ { 0x4034, 0x8D },
+ { 0x4035, 0x8C },
+ { 0x4036, 0x8C },
+ { 0x4037, 0x8C },
+ { 0x4038, 0x8F },
+ { 0x4039, 0x8E },
+ { 0x403A, 0x8E },
+ { 0x403B, 0x8E },
+ { 0x403C, 0x98 },
+ { 0x403D, 0x96 },
+ { 0x403E, 0x96 },
+ { 0x403F, 0x95 },
+ { 0x4040, 0xA4 },
+ { 0x4041, 0xA0 },
+ { 0x4042, 0xA0 },
+ { 0x4043, 0x9E },
+ { 0x4044, 0xB3 },
+ { 0x4045, 0xAE },
+ { 0x4046, 0xAF },
+ { 0x4047, 0xAB },
+ { 0x4048, 0xC2 },
+ { 0x4049, 0xB7 },
+ { 0x404A, 0xB8 },
+ { 0x404B, 0xB5 },
+ { 0x404C, 0xAB },
+ { 0x404D, 0xA4 },
+ { 0x404E, 0xA5 },
+ { 0x404F, 0xA3 },
+ { 0x4050, 0x99 },
+ { 0x4051, 0x96 },
+ { 0x4052, 0x96 },
+ { 0x4053, 0x96 },
+ { 0x4054, 0x8B },
+ { 0x4055, 0x8A },
+ { 0x4056, 0x8A },
+ { 0x4057, 0x8A },
+ { 0x4058, 0x82 },
+ { 0x4059, 0x81 },
+ { 0x405A, 0x81 },
+ { 0x405B, 0x81 },
+ { 0x405C, 0x85 },
+ { 0x405D, 0x86 },
+ { 0x405E, 0x85 },
+ { 0x405F, 0x85 },
+ { 0x4060, 0x90 },
+ { 0x4061, 0x90 },
+ { 0x4062, 0x8F },
+ { 0x4063, 0x8F },
+ { 0x4064, 0x9D },
+ { 0x4065, 0x9B },
+ { 0x4066, 0x9B },
+ { 0x4067, 0x9A },
+ { 0x4068, 0xAF },
+ { 0x4069, 0xAA },
+ { 0x406A, 0xAC },
+ { 0x406B, 0xAA },
+ { 0x406C, 0xC2 },
+ { 0x406D, 0xB7 },
+ { 0x406E, 0xB8 },
+ { 0x406F, 0xB5 },
+ { 0x4070, 0xAB },
+ { 0x4071, 0xA4 },
+ { 0x4072, 0xA4 },
+ { 0x4073, 0xA3 },
+ { 0x4074, 0x99 },
+ { 0x4075, 0x96 },
+ { 0x4076, 0x96 },
+ { 0x4077, 0x96 },
+ { 0x4078, 0x8B },
+ { 0x4079, 0x8A },
+ { 0x407A, 0x8A },
+ { 0x407B, 0x8A },
+ { 0x407C, 0x82 },
+ { 0x407D, 0x82 },
+ { 0x407E, 0x82 },
+ { 0x407F, 0x82 },
+ { 0x4080, 0x85 },
+ { 0x4081, 0x86 },
+ { 0x4082, 0x86 },
+ { 0x4083, 0x86 },
+ { 0x4084, 0x90 },
+ { 0x4085, 0x90 },
+ { 0x4086, 0x8F },
+ { 0x4087, 0x8F },
+ { 0x4088, 0x9D },
+ { 0x4089, 0x9B },
+ { 0x408A, 0x9B },
+ { 0x408B, 0x99 },
+ { 0x408C, 0xAE },
+ { 0x408D, 0xAA },
+ { 0x408E, 0xAA },
+ { 0x408F, 0xA7 },
+ { 0x4090, 0xC7 },
+ { 0x4091, 0xBA },
+ { 0x4092, 0xBC },
+ { 0x4093, 0xB9 },
+ { 0x4094, 0xB1 },
+ { 0x4095, 0xA8 },
+ { 0x4096, 0xA8 },
+ { 0x4097, 0xA7 },
+ { 0x4098, 0x9F },
+ { 0x4099, 0x9B },
+ { 0x409A, 0x9B },
+ { 0x409B, 0x9B },
+ { 0x409C, 0x93 },
+ { 0x409D, 0x91 },
+ { 0x409E, 0x91 },
+ { 0x409F, 0x91 },
+ { 0x40A0, 0x8D },
+ { 0x40A1, 0x8C },
+ { 0x40A2, 0x8C },
+ { 0x40A3, 0x8C },
+ { 0x40A4, 0x8E },
+ { 0x40A5, 0x8E },
+ { 0x40A6, 0x8D },
+ { 0x40A7, 0x8D },
+ { 0x40A8, 0x96 },
+ { 0x40A9, 0x95 },
+ { 0x40AA, 0x95 },
+ { 0x40AB, 0x94 },
+ { 0x40AC, 0xA2 },
+ { 0x40AD, 0x9F },
+ { 0x40AE, 0x9F },
+ { 0x40AF, 0x9D },
+ { 0x40B0, 0xB1 },
+ { 0x40B1, 0xAC },
+ { 0x40B2, 0xAB },
+ { 0x40B3, 0xAA },
+ { 0x40B4, 0xD3 },
+ { 0x40B5, 0xBC },
+ { 0x40B6, 0xBD },
+ { 0x40B7, 0xBC },
+ { 0x40B8, 0xC1 },
+ { 0x40B9, 0xB7 },
+ { 0x40BA, 0xB7 },
+ { 0x40BB, 0xB5 },
+ { 0x40BC, 0xB0 },
+ { 0x40BD, 0xAA },
+ { 0x40BE, 0xAA },
+ { 0x40BF, 0xAA },
+ { 0x40C0, 0xA8 },
+ { 0x40C1, 0xA4 },
+ { 0x40C2, 0xA4 },
+ { 0x40C3, 0xA4 },
+ { 0x40C4, 0xA2 },
+ { 0x40C5, 0x9F },
+ { 0x40C6, 0x9F },
+ { 0x40C7, 0x9F },
+ { 0x40C8, 0xA3 },
+ { 0x40C9, 0xA0 },
+ { 0x40CA, 0xA0 },
+ { 0x40CB, 0xA0 },
+ { 0x40CC, 0xA6 },
+ { 0x40CD, 0xA3 },
+ { 0x40CE, 0xA3 },
+ { 0x40CF, 0xA2 },
+ { 0x40D0, 0xAF },
+ { 0x40D1, 0xAB },
+ { 0x40D2, 0xAA },
+ { 0x40D3, 0xA8 },
+ { 0x40D4, 0xBA },
+ { 0x40D5, 0xAE },
+ { 0x40D6, 0xAE },
+ { 0x40D7, 0xAB },
+ { 0x4100, 0xBD },
+ { 0x4101, 0xBA },
+ { 0x4102, 0xBD },
+ { 0x4103, 0xB7 },
+ { 0x4104, 0xB7 },
+ { 0x4105, 0xB7 },
+ { 0x4106, 0xB8 },
+ { 0x4107, 0xB5 },
+ { 0x4108, 0xAB },
+ { 0x4109, 0xAA },
+ { 0x410A, 0xAC },
+ { 0x410B, 0xAB },
+ { 0x410C, 0xA4 },
+ { 0x410D, 0xA5 },
+ { 0x410E, 0xA5 },
+ { 0x410F, 0xA4 },
+ { 0x4110, 0x9F },
+ { 0x4111, 0xA0 },
+ { 0x4112, 0xA0 },
+ { 0x4113, 0x9F },
+ { 0x4114, 0xA0 },
+ { 0x4115, 0xA0 },
+ { 0x4116, 0xA0 },
+ { 0x4117, 0x9F },
+ { 0x4118, 0xA1 },
+ { 0x4119, 0xA1 },
+ { 0x411A, 0xA1 },
+ { 0x411B, 0xA0 },
+ { 0x411C, 0xA7 },
+ { 0x411D, 0xA6 },
+ { 0x411E, 0xA6 },
+ { 0x411F, 0xA6 },
+ { 0x4120, 0xA7 },
+ { 0x4121, 0xA6 },
+ { 0x4122, 0xA6 },
+ { 0x4123, 0xA3 },
+ { 0x4124, 0xB9 },
+ { 0x4125, 0xB9 },
+ { 0x4126, 0xBA },
+ { 0x4127, 0xB8 },
+ { 0x4128, 0xA6 },
+ { 0x4129, 0xA7 },
+ { 0x412A, 0xA7 },
+ { 0x412B, 0xA6 },
+ { 0x412C, 0x9B },
+ { 0x412D, 0x9B },
+ { 0x412E, 0x9B },
+ { 0x412F, 0x9B },
+ { 0x4130, 0x91 },
+ { 0x4131, 0x92 },
+ { 0x4132, 0x92 },
+ { 0x4133, 0x91 },
+ { 0x4134, 0x8C },
+ { 0x4135, 0x8C },
+ { 0x4136, 0x8C },
+ { 0x4137, 0x8C },
+ { 0x4138, 0x8D },
+ { 0x4139, 0x8D },
+ { 0x413A, 0x8D },
+ { 0x413B, 0x8D },
+ { 0x413C, 0x93 },
+ { 0x413D, 0x93 },
+ { 0x413E, 0x93 },
+ { 0x413F, 0x92 },
+ { 0x4140, 0x9A },
+ { 0x4141, 0x9A },
+ { 0x4142, 0x9A },
+ { 0x4143, 0x99 },
+ { 0x4144, 0xA7 },
+ { 0x4145, 0xA5 },
+ { 0x4146, 0xA6 },
+ { 0x4147, 0xA6 },
+ { 0x4148, 0xB8 },
+ { 0x4149, 0xB4 },
+ { 0x414A, 0xB4 },
+ { 0x414B, 0xB3 },
+ { 0x414C, 0xA3 },
+ { 0x414D, 0xA2 },
+ { 0x414E, 0xA3 },
+ { 0x414F, 0xA2 },
+ { 0x4150, 0x96 },
+ { 0x4151, 0x96 },
+ { 0x4152, 0x96 },
+ { 0x4153, 0x96 },
+ { 0x4154, 0x8A },
+ { 0x4155, 0x8A },
+ { 0x4156, 0x8A },
+ { 0x4157, 0x8A },
+ { 0x4158, 0x82 },
+ { 0x4159, 0x82 },
+ { 0x415A, 0x82 },
+ { 0x415B, 0x82 },
+ { 0x415C, 0x84 },
+ { 0x415D, 0x85 },
+ { 0x415E, 0x84 },
+ { 0x415F, 0x84 },
+ { 0x4160, 0x8D },
+ { 0x4161, 0x8D },
+ { 0x4162, 0x8D },
+ { 0x4163, 0x8D },
+ { 0x4164, 0x96 },
+ { 0x4165, 0x96 },
+ { 0x4166, 0x96 },
+ { 0x4167, 0x95 },
+ { 0x4168, 0xA5 },
+ { 0x4169, 0xA2 },
+ { 0x416A, 0xA3 },
+ { 0x416B, 0xA2 },
+ { 0x416C, 0xB7 },
+ { 0x416D, 0xB3 },
+ { 0x416E, 0xB5 },
+ { 0x416F, 0xB4 },
+ { 0x4170, 0xA4 },
+ { 0x4171, 0xA2 },
+ { 0x4172, 0xA3 },
+ { 0x4173, 0xA2 },
+ { 0x4174, 0x97 },
+ { 0x4175, 0x96 },
+ { 0x4176, 0x96 },
+ { 0x4177, 0x96 },
+ { 0x4178, 0x8B },
+ { 0x4179, 0x8A },
+ { 0x417A, 0x8A },
+ { 0x417B, 0x8A },
+ { 0x417C, 0x81 },
+ { 0x417D, 0x81 },
+ { 0x417E, 0x81 },
+ { 0x417F, 0x81 },
+ { 0x4180, 0x84 },
+ { 0x4181, 0x84 },
+ { 0x4182, 0x84 },
+ { 0x4183, 0x84 },
+ { 0x4184, 0x8C },
+ { 0x4185, 0x8D },
+ { 0x4186, 0x8D },
+ { 0x4187, 0x8D },
+ { 0x4188, 0x95 },
+ { 0x4189, 0x96 },
+ { 0x418A, 0x96 },
+ { 0x418B, 0x95 },
+ { 0x418C, 0xA1 },
+ { 0x418D, 0xA1 },
+ { 0x418E, 0xA1 },
+ { 0x418F, 0xA0 },
+ { 0x4190, 0xBC },
+ { 0x4191, 0xB8 },
+ { 0x4192, 0xB8 },
+ { 0x4193, 0xB9 },
+ { 0x4194, 0xA8 },
+ { 0x4195, 0xA5 },
+ { 0x4196, 0xA6 },
+ { 0x4197, 0xA5 },
+ { 0x4198, 0x9C },
+ { 0x4199, 0x9A },
+ { 0x419A, 0x9A },
+ { 0x419B, 0x9A },
+ { 0x419C, 0x91 },
+ { 0x419D, 0x91 },
+ { 0x419E, 0x91 },
+ { 0x419F, 0x91 },
+ { 0x41A0, 0x8B },
+ { 0x41A1, 0x8B },
+ { 0x41A2, 0x8B },
+ { 0x41A3, 0x8B },
+ { 0x41A4, 0x8C },
+ { 0x41A5, 0x8C },
+ { 0x41A6, 0x8C },
+ { 0x41A7, 0x8C },
+ { 0x41A8, 0x91 },
+ { 0x41A9, 0x92 },
+ { 0x41AA, 0x91 },
+ { 0x41AB, 0x91 },
+ { 0x41AC, 0x98 },
+ { 0x41AD, 0x99 },
+ { 0x41AE, 0x99 },
+ { 0x41AF, 0x98 },
+ { 0x41B0, 0xA3 },
+ { 0x41B1, 0xA3 },
+ { 0x41B2, 0xA3 },
+ { 0x41B3, 0xA2 },
+ { 0x41B4, 0xC1 },
+ { 0x41B5, 0xB8 },
+ { 0x41B6, 0xB9 },
+ { 0x41B7, 0xBA },
+ { 0x41B8, 0xB8 },
+ { 0x41B9, 0xB4 },
+ { 0x41BA, 0xB4 },
+ { 0x41BB, 0xB4 },
+ { 0x41BC, 0xAA },
+ { 0x41BD, 0xA7 },
+ { 0x41BE, 0xA7 },
+ { 0x41BF, 0xA8 },
+ { 0x41C0, 0xA4 },
+ { 0x41C1, 0xA2 },
+ { 0x41C2, 0xA2 },
+ { 0x41C3, 0xA3 },
+ { 0x41C4, 0x9E },
+ { 0x41C5, 0x9D },
+ { 0x41C6, 0x9D },
+ { 0x41C7, 0x9D },
+ { 0x41C8, 0x9E },
+ { 0x41C9, 0x9D },
+ { 0x41CA, 0x9D },
+ { 0x41CB, 0x9D },
+ { 0x41CC, 0x9E },
+ { 0x41CD, 0x9E },
+ { 0x41CE, 0x9E },
+ { 0x41CF, 0x9E },
+ { 0x41D0, 0xA3 },
+ { 0x41D1, 0xA3 },
+ { 0x41D2, 0xA2 },
+ { 0x41D3, 0xA1 },
+ { 0x41D4, 0xA7 },
+ { 0x41D5, 0xA7 },
+ { 0x41D6, 0xA7 },
+ { 0x41D7, 0xA3 },
+ { 0x4200, 0xCE },
+ { 0x4201, 0xC0 },
+ { 0x4202, 0xC1 },
+ { 0x4203, 0xB9 },
+ { 0x4204, 0xC3 },
+ { 0x4205, 0xB9 },
+ { 0x4206, 0xBC },
+ { 0x4207, 0xBD },
+ { 0x4208, 0xB3 },
+ { 0x4209, 0xAE },
+ { 0x420A, 0xAF },
+ { 0x420B, 0xAE },
+ { 0x420C, 0xAA },
+ { 0x420D, 0xA8 },
+ { 0x420E, 0xA8 },
+ { 0x420F, 0xA6 },
+ { 0x4210, 0xA4 },
+ { 0x4211, 0xA2 },
+ { 0x4212, 0xA2 },
+ { 0x4213, 0xA0 },
+ { 0x4214, 0xA4 },
+ { 0x4215, 0xA3 },
+ { 0x4216, 0xA2 },
+ { 0x4217, 0xA0 },
+ { 0x4218, 0xA7 },
+ { 0x4219, 0xA5 },
+ { 0x421A, 0xA3 },
+ { 0x421B, 0xA1 },
+ { 0x421C, 0xB0 },
+ { 0x421D, 0xA8 },
+ { 0x421E, 0xA8 },
+ { 0x421F, 0xA6 },
+ { 0x4220, 0xB4 },
+ { 0x4221, 0xAA },
+ { 0x4222, 0xA5 },
+ { 0x4223, 0xA3 },
+ { 0x4224, 0xC7 },
+ { 0x4225, 0xBC },
+ { 0x4226, 0xBE },
+ { 0x4227, 0xBC },
+ { 0x4228, 0xB0 },
+ { 0x4229, 0xA9 },
+ { 0x422A, 0xA9 },
+ { 0x422B, 0xA8 },
+ { 0x422C, 0xA0 },
+ { 0x422D, 0x9D },
+ { 0x422E, 0x9D },
+ { 0x422F, 0x9C },
+ { 0x4230, 0x94 },
+ { 0x4231, 0x93 },
+ { 0x4232, 0x93 },
+ { 0x4233, 0x92 },
+ { 0x4234, 0x8E },
+ { 0x4235, 0x8D },
+ { 0x4236, 0x8D },
+ { 0x4237, 0x8C },
+ { 0x4238, 0x8F },
+ { 0x4239, 0x8E },
+ { 0x423A, 0x8E },
+ { 0x423B, 0x8D },
+ { 0x423C, 0x96 },
+ { 0x423D, 0x94 },
+ { 0x423E, 0x94 },
+ { 0x423F, 0x92 },
+ { 0x4240, 0xA1 },
+ { 0x4241, 0x9C },
+ { 0x4242, 0x9C },
+ { 0x4243, 0x99 },
+ { 0x4244, 0xB0 },
+ { 0x4245, 0xA8 },
+ { 0x4246, 0xAB },
+ { 0x4247, 0xA7 },
+ { 0x4248, 0xC3 },
+ { 0x4249, 0xB7 },
+ { 0x424A, 0xB7 },
+ { 0x424B, 0xBC },
+ { 0x424C, 0xAB },
+ { 0x424D, 0xA4 },
+ { 0x424E, 0xA5 },
+ { 0x424F, 0xA5 },
+ { 0x4250, 0x9A },
+ { 0x4251, 0x97 },
+ { 0x4252, 0x97 },
+ { 0x4253, 0x98 },
+ { 0x4254, 0x8C },
+ { 0x4255, 0x8B },
+ { 0x4256, 0x8B },
+ { 0x4257, 0x8B },
+ { 0x4258, 0x82 },
+ { 0x4259, 0x82 },
+ { 0x425A, 0x82 },
+ { 0x425B, 0x82 },
+ { 0x425C, 0x85 },
+ { 0x425D, 0x85 },
+ { 0x425E, 0x85 },
+ { 0x425F, 0x84 },
+ { 0x4260, 0x8F },
+ { 0x4261, 0x8E },
+ { 0x4262, 0x8E },
+ { 0x4263, 0x8D },
+ { 0x4264, 0x9B },
+ { 0x4265, 0x98 },
+ { 0x4266, 0x98 },
+ { 0x4267, 0x95 },
+ { 0x4268, 0xAE },
+ { 0x4269, 0xA5 },
+ { 0x426A, 0xA7 },
+ { 0x426B, 0xA2 },
+ { 0x426C, 0xC2 },
+ { 0x426D, 0xB7 },
+ { 0x426E, 0xB8 },
+ { 0x426F, 0xB9 },
+ { 0x4270, 0xAA },
+ { 0x4271, 0xA4 },
+ { 0x4272, 0xA4 },
+ { 0x4273, 0xA5 },
+ { 0x4274, 0x99 },
+ { 0x4275, 0x96 },
+ { 0x4276, 0x97 },
+ { 0x4277, 0x98 },
+ { 0x4278, 0x8B },
+ { 0x4279, 0x8A },
+ { 0x427A, 0x8A },
+ { 0x427B, 0x8B },
+ { 0x427C, 0x81 },
+ { 0x427D, 0x81 },
+ { 0x427E, 0x81 },
+ { 0x427F, 0x82 },
+ { 0x4280, 0x84 },
+ { 0x4281, 0x84 },
+ { 0x4282, 0x84 },
+ { 0x4283, 0x84 },
+ { 0x4284, 0x8E },
+ { 0x4285, 0x8E },
+ { 0x4286, 0x8D },
+ { 0x4287, 0x8C },
+ { 0x4288, 0x9A },
+ { 0x4289, 0x97 },
+ { 0x428A, 0x97 },
+ { 0x428B, 0x95 },
+ { 0x428C, 0xAA },
+ { 0x428D, 0xA3 },
+ { 0x428E, 0xA3 },
+ { 0x428F, 0xA2 },
+ { 0x4290, 0xC7 },
+ { 0x4291, 0xBA },
+ { 0x4292, 0xC0 },
+ { 0x4293, 0xC3 },
+ { 0x4294, 0xB0 },
+ { 0x4295, 0xA7 },
+ { 0x4296, 0xA7 },
+ { 0x4297, 0xA9 },
+ { 0x4298, 0x9F },
+ { 0x4299, 0x9B },
+ { 0x429A, 0x9B },
+ { 0x429B, 0x9D },
+ { 0x429C, 0x93 },
+ { 0x429D, 0x91 },
+ { 0x429E, 0x91 },
+ { 0x429F, 0x92 },
+ { 0x42A0, 0x8C },
+ { 0x42A1, 0x8B },
+ { 0x42A2, 0x8B },
+ { 0x42A3, 0x8C },
+ { 0x42A4, 0x8D },
+ { 0x42A5, 0x8C },
+ { 0x42A6, 0x8C },
+ { 0x42A7, 0x8C },
+ { 0x42A8, 0x94 },
+ { 0x42A9, 0x93 },
+ { 0x42AA, 0x92 },
+ { 0x42AB, 0x91 },
+ { 0x42AC, 0x9E },
+ { 0x42AD, 0x9B },
+ { 0x42AE, 0x9B },
+ { 0x42AF, 0x98 },
+ { 0x42B0, 0xAC },
+ { 0x42B1, 0xA6 },
+ { 0x42B2, 0xA6 },
+ { 0x42B3, 0xA2 },
+ { 0x42B4, 0xCE },
+ { 0x42B5, 0xBA },
+ { 0x42B6, 0xBC },
+ { 0x42B7, 0xB7 },
+ { 0x42B8, 0xC5 },
+ { 0x42B9, 0xB5 },
+ { 0x42BA, 0xBA },
+ { 0x42BB, 0xC0 },
+ { 0x42BC, 0xB1 },
+ { 0x42BD, 0xA8 },
+ { 0x42BE, 0xAE },
+ { 0x42BF, 0xAF },
+ { 0x42C0, 0xA7 },
+ { 0x42C1, 0xA3 },
+ { 0x42C2, 0xA3 },
+ { 0x42C3, 0xA5 },
+ { 0x42C4, 0xA0 },
+ { 0x42C5, 0x9D },
+ { 0x42C6, 0x9D },
+ { 0x42C7, 0x9F },
+ { 0x42C8, 0xA0 },
+ { 0x42C9, 0x9E },
+ { 0x42CA, 0x9E },
+ { 0x42CB, 0x9F },
+ { 0x42CC, 0xA2 },
+ { 0x42CD, 0xA0 },
+ { 0x42CE, 0xA0 },
+ { 0x42CF, 0xA0 },
+ { 0x42D0, 0xA8 },
+ { 0x42D1, 0xA5 },
+ { 0x42D2, 0xA5 },
+ { 0x42D3, 0xA2 },
+ { 0x42D4, 0xB3 },
+ { 0x42D5, 0xAA },
+ { 0x42D6, 0xAB },
+ { 0x42D7, 0xA3 },
+ { 0x42D8, 0x00 },
+ { 0x42D9, 0x00 },
+ { 0x4300, 0xA2 },
+ { 0x4301, 0xAE },
+ { 0x4302, 0xAD },
+ { 0x4303, 0xB5 },
+ { 0x4304, 0x95 },
+ { 0x4305, 0x9A },
+ { 0x4306, 0x98 },
+ { 0x4307, 0x9B },
+ { 0x4308, 0x8D },
+ { 0x4309, 0x90 },
+ { 0x430A, 0x8F },
+ { 0x430B, 0x91 },
+ { 0x430C, 0x86 },
+ { 0x430D, 0x88 },
+ { 0x430E, 0x87 },
+ { 0x430F, 0x89 },
+ { 0x4310, 0x86 },
+ { 0x4311, 0x87 },
+ { 0x4312, 0x86 },
+ { 0x4313, 0x88 },
+ { 0x4314, 0x89 },
+ { 0x4315, 0x88 },
+ { 0x4316, 0x88 },
+ { 0x4317, 0x8E },
+ { 0x4318, 0x90 },
+ { 0x4319, 0x8F },
+ { 0x431A, 0x8C },
+ { 0x431B, 0x8C },
+ { 0x431C, 0x9C },
+ { 0x431D, 0x99 },
+ { 0x431E, 0x98 },
+ { 0x431F, 0x99 },
+ { 0x4320, 0xAB },
+ { 0x4321, 0xB0 },
+ { 0x4322, 0xAD },
+ { 0x4323, 0xAF },
+ { 0x4324, 0x9B },
+ { 0x4325, 0x9F },
+ { 0x4326, 0x9E },
+ { 0x4327, 0xA1 },
+ { 0x4328, 0x8E },
+ { 0x4329, 0x91 },
+ { 0x432A, 0x90 },
+ { 0x432B, 0x93 },
+ { 0x432C, 0x86 },
+ { 0x432D, 0x88 },
+ { 0x432E, 0x87 },
+ { 0x432F, 0x89 },
+ { 0x4330, 0x82 },
+ { 0x4331, 0x84 },
+ { 0x4332, 0x83 },
+ { 0x4333, 0x84 },
+ { 0x4334, 0x82 },
+ { 0x4335, 0x82 },
+ { 0x4336, 0x82 },
+ { 0x4337, 0x83 },
+ { 0x4338, 0x85 },
+ { 0x4339, 0x84 },
+ { 0x433A, 0x84 },
+ { 0x433B, 0x85 },
+ { 0x433C, 0x8A },
+ { 0x433D, 0x89 },
+ { 0x433E, 0x88 },
+ { 0x433F, 0x89 },
+ { 0x4340, 0x93 },
+ { 0x4341, 0x91 },
+ { 0x4342, 0x91 },
+ { 0x4343, 0x93 },
+ { 0x4344, 0xA0 },
+ { 0x4345, 0x9E },
+ { 0x4346, 0x9D },
+ { 0x4347, 0xA1 },
+ { 0x4348, 0x95 },
+ { 0x4349, 0x9B },
+ { 0x434A, 0x9A },
+ { 0x434B, 0x9C },
+ { 0x434C, 0x8A },
+ { 0x434D, 0x8D },
+ { 0x434E, 0x8C },
+ { 0x434F, 0x8D },
+ { 0x4350, 0x83 },
+ { 0x4351, 0x85 },
+ { 0x4352, 0x84 },
+ { 0x4353, 0x85 },
+ { 0x4354, 0x80 },
+ { 0x4355, 0x81 },
+ { 0x4356, 0x81 },
+ { 0x4357, 0x81 },
+ { 0x4358, 0x80 },
+ { 0x4359, 0x80 },
+ { 0x435A, 0x80 },
+ { 0x435B, 0x80 },
+ { 0x435C, 0x82 },
+ { 0x435D, 0x81 },
+ { 0x435E, 0x81 },
+ { 0x435F, 0x81 },
+ { 0x4360, 0x85 },
+ { 0x4361, 0x84 },
+ { 0x4362, 0x84 },
+ { 0x4363, 0x85 },
+ { 0x4364, 0x8D },
+ { 0x4365, 0x8B },
+ { 0x4366, 0x8B },
+ { 0x4367, 0x8D },
+ { 0x4368, 0x98 },
+ { 0x4369, 0x98 },
+ { 0x436A, 0x95 },
+ { 0x436B, 0x98 },
+ { 0x436C, 0x95 },
+ { 0x436D, 0x9A },
+ { 0x436E, 0x99 },
+ { 0x436F, 0x9A },
+ { 0x4370, 0x8A },
+ { 0x4371, 0x8D },
+ { 0x4372, 0x8C },
+ { 0x4373, 0x8C },
+ { 0x4374, 0x83 },
+ { 0x4375, 0x85 },
+ { 0x4376, 0x84 },
+ { 0x4377, 0x84 },
+ { 0x4378, 0x80 },
+ { 0x4379, 0x80 },
+ { 0x437A, 0x80 },
+ { 0x437B, 0x80 },
+ { 0x437C, 0x7F },
+ { 0x437D, 0x7F },
+ { 0x437E, 0x7F },
+ { 0x437F, 0x7F },
+ { 0x4380, 0x81 },
+ { 0x4381, 0x80 },
+ { 0x4382, 0x80 },
+ { 0x4383, 0x81 },
+ { 0x4384, 0x84 },
+ { 0x4385, 0x83 },
+ { 0x4386, 0x83 },
+ { 0x4387, 0x84 },
+ { 0x4388, 0x8B },
+ { 0x4389, 0x8A },
+ { 0x438A, 0x8A },
+ { 0x438B, 0x8C },
+ { 0x438C, 0x97 },
+ { 0x438D, 0x96 },
+ { 0x438E, 0x96 },
+ { 0x438F, 0x99 },
+ { 0x4390, 0x99 },
+ { 0x4391, 0x9F },
+ { 0x4392, 0x9E },
+ { 0x4393, 0x9D },
+ { 0x4394, 0x8D },
+ { 0x4395, 0x90 },
+ { 0x4396, 0x90 },
+ { 0x4397, 0x8F },
+ { 0x4398, 0x85 },
+ { 0x4399, 0x87 },
+ { 0x439A, 0x87 },
+ { 0x439B, 0x86 },
+ { 0x439C, 0x81 },
+ { 0x439D, 0x83 },
+ { 0x439E, 0x82 },
+ { 0x439F, 0x82 },
+ { 0x43A0, 0x80 },
+ { 0x43A1, 0x81 },
+ { 0x43A2, 0x81 },
+ { 0x43A3, 0x81 },
+ { 0x43A4, 0x82 },
+ { 0x43A5, 0x82 },
+ { 0x43A6, 0x82 },
+ { 0x43A7, 0x82 },
+ { 0x43A8, 0x86 },
+ { 0x43A9, 0x85 },
+ { 0x43AA, 0x85 },
+ { 0x43AB, 0x87 },
+ { 0x43AC, 0x8D },
+ { 0x43AD, 0x8D },
+ { 0x43AE, 0x8D },
+ { 0x43AF, 0x90 },
+ { 0x43B0, 0x9A },
+ { 0x43B1, 0x9A },
+ { 0x43B2, 0x9B },
+ { 0x43B3, 0x9D },
+ { 0x43B4, 0xA0 },
+ { 0x43B5, 0xAD },
+ { 0x43B6, 0xAC },
+ { 0x43B7, 0xAA },
+ { 0x43B8, 0x93 },
+ { 0x43B9, 0x97 },
+ { 0x43BA, 0x97 },
+ { 0x43BB, 0x96 },
+ { 0x43BC, 0x8B },
+ { 0x43BD, 0x8E },
+ { 0x43BE, 0x8E },
+ { 0x43BF, 0x8C },
+ { 0x43C0, 0x83 },
+ { 0x43C1, 0x85 },
+ { 0x43C2, 0x85 },
+ { 0x43C3, 0x84 },
+ { 0x43C4, 0x82 },
+ { 0x43C5, 0x84 },
+ { 0x43C6, 0x83 },
+ { 0x43C7, 0x83 },
+ { 0x43C8, 0x83 },
+ { 0x43C9, 0x84 },
+ { 0x43CA, 0x84 },
+ { 0x43CB, 0x85 },
+ { 0x43CC, 0x8A },
+ { 0x43CD, 0x8A },
+ { 0x43CE, 0x8A },
+ { 0x43CF, 0x8C },
+ { 0x43D0, 0x92 },
+ { 0x43D1, 0x93 },
+ { 0x43D2, 0x93 },
+ { 0x43D3, 0x96 },
+ { 0x43D4, 0x9F },
+ { 0x43D5, 0xA6 },
+ { 0x43D6, 0xA5 },
+ { 0x43D7, 0xAA },
+ { 0x4400, 0xA1 },
+ { 0x4401, 0xAB },
+ { 0x4402, 0xA7 },
+ { 0x4403, 0xB0 },
+ { 0x4404, 0x91 },
+ { 0x4405, 0x96 },
+ { 0x4406, 0x94 },
+ { 0x4407, 0x99 },
+ { 0x4408, 0x8A },
+ { 0x4409, 0x8E },
+ { 0x440A, 0x8C },
+ { 0x440B, 0x8F },
+ { 0x440C, 0x85 },
+ { 0x440D, 0x86 },
+ { 0x440E, 0x86 },
+ { 0x440F, 0x88 },
+ { 0x4410, 0x85 },
+ { 0x4411, 0x86 },
+ { 0x4412, 0x85 },
+ { 0x4413, 0x87 },
+ { 0x4414, 0x88 },
+ { 0x4415, 0x87 },
+ { 0x4416, 0x87 },
+ { 0x4417, 0x89 },
+ { 0x4418, 0x91 },
+ { 0x4419, 0x8F },
+ { 0x441A, 0x8F },
+ { 0x441B, 0x90 },
+ { 0x441C, 0x9C },
+ { 0x441D, 0x9B },
+ { 0x441E, 0x9A },
+ { 0x441F, 0x9A },
+ { 0x4420, 0xB3 },
+ { 0x4421, 0xB1 },
+ { 0x4422, 0xB0 },
+ { 0x4423, 0xB2 },
+ { 0x4424, 0x96 },
+ { 0x4425, 0x9C },
+ { 0x4426, 0x9A },
+ { 0x4427, 0x9E },
+ { 0x4428, 0x8B },
+ { 0x4429, 0x8F },
+ { 0x442A, 0x8E },
+ { 0x442B, 0x91 },
+ { 0x442C, 0x84 },
+ { 0x442D, 0x87 },
+ { 0x442E, 0x86 },
+ { 0x442F, 0x88 },
+ { 0x4430, 0x82 },
+ { 0x4431, 0x83 },
+ { 0x4432, 0x82 },
+ { 0x4433, 0x84 },
+ { 0x4434, 0x82 },
+ { 0x4435, 0x82 },
+ { 0x4436, 0x82 },
+ { 0x4437, 0x83 },
+ { 0x4438, 0x84 },
+ { 0x4439, 0x84 },
+ { 0x443A, 0x84 },
+ { 0x443B, 0x84 },
+ { 0x443C, 0x8B },
+ { 0x443D, 0x89 },
+ { 0x443E, 0x89 },
+ { 0x443F, 0x89 },
+ { 0x4440, 0x95 },
+ { 0x4441, 0x93 },
+ { 0x4442, 0x93 },
+ { 0x4443, 0x93 },
+ { 0x4444, 0xA2 },
+ { 0x4445, 0xA2 },
+ { 0x4446, 0xA1 },
+ { 0x4447, 0xA0 },
+ { 0x4448, 0x8F },
+ { 0x4449, 0x97 },
+ { 0x444A, 0x97 },
+ { 0x444B, 0x98 },
+ { 0x444C, 0x87 },
+ { 0x444D, 0x8B },
+ { 0x444E, 0x8A },
+ { 0x444F, 0x8B },
+ { 0x4450, 0x81 },
+ { 0x4451, 0x83 },
+ { 0x4452, 0x83 },
+ { 0x4453, 0x84 },
+ { 0x4454, 0x7F },
+ { 0x4455, 0x80 },
+ { 0x4456, 0x80 },
+ { 0x4457, 0x81 },
+ { 0x4458, 0x80 },
+ { 0x4459, 0x80 },
+ { 0x445A, 0x80 },
+ { 0x445B, 0x80 },
+ { 0x445C, 0x82 },
+ { 0x445D, 0x81 },
+ { 0x445E, 0x81 },
+ { 0x445F, 0x81 },
+ { 0x4460, 0x87 },
+ { 0x4461, 0x85 },
+ { 0x4462, 0x85 },
+ { 0x4463, 0x86 },
+ { 0x4464, 0x90 },
+ { 0x4465, 0x8E },
+ { 0x4466, 0x8E },
+ { 0x4467, 0x8E },
+ { 0x4468, 0x9B },
+ { 0x4469, 0x9C },
+ { 0x446A, 0x9A },
+ { 0x446B, 0x9A },
+ { 0x446C, 0x91 },
+ { 0x446D, 0x97 },
+ { 0x446E, 0x95 },
+ { 0x446F, 0x95 },
+ { 0x4470, 0x87 },
+ { 0x4471, 0x8A },
+ { 0x4472, 0x8A },
+ { 0x4473, 0x89 },
+ { 0x4474, 0x81 },
+ { 0x4475, 0x83 },
+ { 0x4476, 0x83 },
+ { 0x4477, 0x83 },
+ { 0x4478, 0x7F },
+ { 0x4479, 0x80 },
+ { 0x447A, 0x80 },
+ { 0x447B, 0x80 },
+ { 0x447C, 0x80 },
+ { 0x447D, 0x80 },
+ { 0x447E, 0x80 },
+ { 0x447F, 0x7F },
+ { 0x4480, 0x81 },
+ { 0x4481, 0x81 },
+ { 0x4482, 0x81 },
+ { 0x4483, 0x81 },
+ { 0x4484, 0x85 },
+ { 0x4485, 0x85 },
+ { 0x4486, 0x85 },
+ { 0x4487, 0x85 },
+ { 0x4488, 0x8E },
+ { 0x4489, 0x8D },
+ { 0x448A, 0x8D },
+ { 0x448B, 0x8E },
+ { 0x448C, 0x9D },
+ { 0x448D, 0x9C },
+ { 0x448E, 0x9C },
+ { 0x448F, 0x9C },
+ { 0x4490, 0x94 },
+ { 0x4491, 0x9B },
+ { 0x4492, 0x9A },
+ { 0x4493, 0x97 },
+ { 0x4494, 0x8A },
+ { 0x4495, 0x8E },
+ { 0x4496, 0x8E },
+ { 0x4497, 0x8C },
+ { 0x4498, 0x84 },
+ { 0x4499, 0x86 },
+ { 0x449A, 0x86 },
+ { 0x449B, 0x84 },
+ { 0x449C, 0x81 },
+ { 0x449D, 0x83 },
+ { 0x449E, 0x83 },
+ { 0x449F, 0x81 },
+ { 0x44A0, 0x81 },
+ { 0x44A1, 0x82 },
+ { 0x44A2, 0x82 },
+ { 0x44A3, 0x81 },
+ { 0x44A4, 0x83 },
+ { 0x44A5, 0x83 },
+ { 0x44A6, 0x83 },
+ { 0x44A7, 0x83 },
+ { 0x44A8, 0x88 },
+ { 0x44A9, 0x88 },
+ { 0x44AA, 0x88 },
+ { 0x44AB, 0x88 },
+ { 0x44AC, 0x91 },
+ { 0x44AD, 0x91 },
+ { 0x44AE, 0x91 },
+ { 0x44AF, 0x92 },
+ { 0x44B0, 0xA0 },
+ { 0x44B1, 0xA0 },
+ { 0x44B2, 0xA0 },
+ { 0x44B3, 0xA0 },
+ { 0x44B4, 0x9E },
+ { 0x44B5, 0xA9 },
+ { 0x44B6, 0xA8 },
+ { 0x44B7, 0xA3 },
+ { 0x44B8, 0x90 },
+ { 0x44B9, 0x95 },
+ { 0x44BA, 0x95 },
+ { 0x44BB, 0x92 },
+ { 0x44BC, 0x8A },
+ { 0x44BD, 0x8E },
+ { 0x44BE, 0x8E },
+ { 0x44BF, 0x8B },
+ { 0x44C0, 0x84 },
+ { 0x44C1, 0x86 },
+ { 0x44C2, 0x86 },
+ { 0x44C3, 0x84 },
+ { 0x44C4, 0x84 },
+ { 0x44C5, 0x85 },
+ { 0x44C6, 0x85 },
+ { 0x44C7, 0x84 },
+ { 0x44C8, 0x86 },
+ { 0x44C9, 0x87 },
+ { 0x44CA, 0x87 },
+ { 0x44CB, 0x86 },
+ { 0x44CC, 0x8D },
+ { 0x44CD, 0x8E },
+ { 0x44CE, 0x8E },
+ { 0x44CF, 0x8D },
+ { 0x44D0, 0x98 },
+ { 0x44D1, 0x98 },
+ { 0x44D2, 0x99 },
+ { 0x44D3, 0x9A },
+ { 0x44D4, 0xA9 },
+ { 0x44D5, 0xAA },
+ { 0x44D6, 0xAA },
+ { 0x44D7, 0xAD },
+ { 0x4500, 0x9F },
+ { 0x4501, 0xA8 },
+ { 0x4502, 0xA5 },
+ { 0x4503, 0xAF },
+ { 0x4504, 0x8F },
+ { 0x4505, 0x96 },
+ { 0x4506, 0x92 },
+ { 0x4507, 0x94 },
+ { 0x4508, 0x89 },
+ { 0x4509, 0x8D },
+ { 0x450A, 0x8A },
+ { 0x450B, 0x8E },
+ { 0x450C, 0x84 },
+ { 0x450D, 0x85 },
+ { 0x450E, 0x84 },
+ { 0x450F, 0x87 },
+ { 0x4510, 0x84 },
+ { 0x4511, 0x85 },
+ { 0x4512, 0x84 },
+ { 0x4513, 0x86 },
+ { 0x4514, 0x87 },
+ { 0x4515, 0x86 },
+ { 0x4516, 0x86 },
+ { 0x4517, 0x88 },
+ { 0x4518, 0x8F },
+ { 0x4519, 0x8D },
+ { 0x451A, 0x8D },
+ { 0x451B, 0x8F },
+ { 0x451C, 0x9A },
+ { 0x451D, 0x9A },
+ { 0x451E, 0x98 },
+ { 0x451F, 0x9A },
+ { 0x4520, 0xAF },
+ { 0x4521, 0xAF },
+ { 0x4522, 0xB2 },
+ { 0x4523, 0xB1 },
+ { 0x4524, 0x95 },
+ { 0x4525, 0x9B },
+ { 0x4526, 0x97 },
+ { 0x4527, 0x9C },
+ { 0x4528, 0x8A },
+ { 0x4529, 0x8E },
+ { 0x452A, 0x8D },
+ { 0x452B, 0x90 },
+ { 0x452C, 0x84 },
+ { 0x452D, 0x86 },
+ { 0x452E, 0x85 },
+ { 0x452F, 0x87 },
+ { 0x4530, 0x81 },
+ { 0x4531, 0x82 },
+ { 0x4532, 0x82 },
+ { 0x4533, 0x83 },
+ { 0x4534, 0x81 },
+ { 0x4535, 0x81 },
+ { 0x4536, 0x81 },
+ { 0x4537, 0x82 },
+ { 0x4538, 0x84 },
+ { 0x4539, 0x83 },
+ { 0x453A, 0x83 },
+ { 0x453B, 0x84 },
+ { 0x453C, 0x8A },
+ { 0x453D, 0x88 },
+ { 0x453E, 0x88 },
+ { 0x453F, 0x89 },
+ { 0x4540, 0x94 },
+ { 0x4541, 0x92 },
+ { 0x4542, 0x91 },
+ { 0x4543, 0x92 },
+ { 0x4544, 0xA1 },
+ { 0x4545, 0xA0 },
+ { 0x4546, 0x9C },
+ { 0x4547, 0x9D },
+ { 0x4548, 0x8F },
+ { 0x4549, 0x96 },
+ { 0x454A, 0x95 },
+ { 0x454B, 0x92 },
+ { 0x454C, 0x87 },
+ { 0x454D, 0x8A },
+ { 0x454E, 0x89 },
+ { 0x454F, 0x8A },
+ { 0x4550, 0x81 },
+ { 0x4551, 0x83 },
+ { 0x4552, 0x82 },
+ { 0x4553, 0x83 },
+ { 0x4554, 0x7F },
+ { 0x4555, 0x80 },
+ { 0x4556, 0x80 },
+ { 0x4557, 0x81 },
+ { 0x4558, 0x7F },
+ { 0x4559, 0x80 },
+ { 0x455A, 0x7F },
+ { 0x455B, 0x80 },
+ { 0x455C, 0x81 },
+ { 0x455D, 0x81 },
+ { 0x455E, 0x81 },
+ { 0x455F, 0x81 },
+ { 0x4560, 0x86 },
+ { 0x4561, 0x85 },
+ { 0x4562, 0x85 },
+ { 0x4563, 0x85 },
+ { 0x4564, 0x8F },
+ { 0x4565, 0x8D },
+ { 0x4566, 0x8D },
+ { 0x4567, 0x8D },
+ { 0x4568, 0x99 },
+ { 0x4569, 0x9A },
+ { 0x456A, 0x97 },
+ { 0x456B, 0x99 },
+ { 0x456C, 0x90 },
+ { 0x456D, 0x95 },
+ { 0x456E, 0x93 },
+ { 0x456F, 0x92 },
+ { 0x4570, 0x87 },
+ { 0x4571, 0x8A },
+ { 0x4572, 0x88 },
+ { 0x4573, 0x87 },
+ { 0x4574, 0x81 },
+ { 0x4575, 0x83 },
+ { 0x4576, 0x82 },
+ { 0x4577, 0x82 },
+ { 0x4578, 0x7F },
+ { 0x4579, 0x80 },
+ { 0x457A, 0x80 },
+ { 0x457B, 0x80 },
+ { 0x457C, 0x80 },
+ { 0x457D, 0x80 },
+ { 0x457E, 0x80 },
+ { 0x457F, 0x80 },
+ { 0x4580, 0x81 },
+ { 0x4581, 0x81 },
+ { 0x4582, 0x81 },
+ { 0x4583, 0x81 },
+ { 0x4584, 0x85 },
+ { 0x4585, 0x85 },
+ { 0x4586, 0x84 },
+ { 0x4587, 0x85 },
+ { 0x4588, 0x8E },
+ { 0x4589, 0x8D },
+ { 0x458A, 0x8C },
+ { 0x458B, 0x8D },
+ { 0x458C, 0x9B },
+ { 0x458D, 0x9B },
+ { 0x458E, 0x9A },
+ { 0x458F, 0x98 },
+ { 0x4590, 0x94 },
+ { 0x4591, 0x9A },
+ { 0x4592, 0x94 },
+ { 0x4593, 0x90 },
+ { 0x4594, 0x8A },
+ { 0x4595, 0x8D },
+ { 0x4596, 0x8C },
+ { 0x4597, 0x89 },
+ { 0x4598, 0x84 },
+ { 0x4599, 0x86 },
+ { 0x459A, 0x85 },
+ { 0x459B, 0x83 },
+ { 0x459C, 0x82 },
+ { 0x459D, 0x83 },
+ { 0x459E, 0x82 },
+ { 0x459F, 0x80 },
+ { 0x45A0, 0x81 },
+ { 0x45A1, 0x82 },
+ { 0x45A2, 0x81 },
+ { 0x45A3, 0x80 },
+ { 0x45A4, 0x83 },
+ { 0x45A5, 0x83 },
+ { 0x45A6, 0x83 },
+ { 0x45A7, 0x83 },
+ { 0x45A8, 0x88 },
+ { 0x45A9, 0x87 },
+ { 0x45AA, 0x87 },
+ { 0x45AB, 0x88 },
+ { 0x45AC, 0x91 },
+ { 0x45AD, 0x90 },
+ { 0x45AE, 0x90 },
+ { 0x45AF, 0x91 },
+ { 0x45B0, 0x9F },
+ { 0x45B1, 0x9F },
+ { 0x45B2, 0x9E },
+ { 0x45B3, 0x9F },
+ { 0x45B4, 0x9F },
+ { 0x45B5, 0xA8 },
+ { 0x45B6, 0xA6 },
+ { 0x45B7, 0xA7 },
+ { 0x45B8, 0x8D },
+ { 0x45B9, 0x95 },
+ { 0x45BA, 0x90 },
+ { 0x45BB, 0x8A },
+ { 0x45BC, 0x89 },
+ { 0x45BD, 0x8D },
+ { 0x45BE, 0x88 },
+ { 0x45BF, 0x86 },
+ { 0x45C0, 0x84 },
+ { 0x45C1, 0x86 },
+ { 0x45C2, 0x85 },
+ { 0x45C3, 0x82 },
+ { 0x45C4, 0x84 },
+ { 0x45C5, 0x85 },
+ { 0x45C6, 0x85 },
+ { 0x45C7, 0x83 },
+ { 0x45C8, 0x86 },
+ { 0x45C9, 0x86 },
+ { 0x45CA, 0x86 },
+ { 0x45CB, 0x85 },
+ { 0x45CC, 0x8E },
+ { 0x45CD, 0x8D },
+ { 0x45CE, 0x8D },
+ { 0x45CF, 0x8C },
+ { 0x45D0, 0x99 },
+ { 0x45D1, 0x98 },
+ { 0x45D2, 0x98 },
+ { 0x45D3, 0x98 },
+ { 0x45D4, 0xA6 },
+ { 0x45D5, 0xA9 },
+ { 0x45D6, 0xA7 },
+ { 0x45D7, 0xAC },
+};
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 21666d705e37..e29be0242f07 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -539,17 +539,19 @@ static int __find_resolution(struct v4l2_subdev *sd,
}

static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which,
enum m5mols_restype type)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&info->sd, cfg, 0) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&info->sd,
+ sd_state, 0) : NULL;

return &info->ffmt[type];
}

-static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int m5mols_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -558,7 +560,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
mutex_lock(&info->lock);

- format = __find_format(info, cfg, fmt->which, info->res_type);
+ format = __find_format(info, sd_state, fmt->which, info->res_type);
if (format)
fmt->format = *format;
else
@@ -568,7 +570,8 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return ret;
}

-static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int m5mols_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct m5mols_info *info = to_m5mols(sd);
@@ -582,7 +585,7 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
if (ret < 0)
return ret;

- sfmt = __find_format(info, cfg, fmt->which, type);
+ sfmt = __find_format(info, sd_state, fmt->which, type);
if (!sfmt)
return 0;
@@ -648,7 +651,7 @@ static int m5mols_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code || code->index >= SIZE_DEFAULT_FFMT)
@@ -909,7 +912,9 @@ static const struct v4l2_subdev_core_ops m5mols_core_ops = {
*/
static int m5mols_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);

*format = m5mols_default_ffmt[0];
return 0;
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index b1e2476d3c9e..b858a9b71dee 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -681,7 +681,7 @@ static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
}

static int max9286_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -694,12 +694,12 @@ static int max9286_enum_mbus_code(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *
max9286_get_pad_format(struct max9286_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &priv->fmt[pad];
default:
@@ -708,7 +708,7 @@ max9286_get_pad_format(struct max9286_priv *priv,
}

static int max9286_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct max9286_priv *priv = sd_to_max9286(sd);
@@ -729,7 +729,8 @@ static int max9286_set_fmt(struct v4l2_subdev *sd,
break;
}

- cfg_fmt = max9286_get_pad_format(priv, cfg, format->pad, format->which);
+ cfg_fmt = max9286_get_pad_format(priv, sd_state, format->pad,
+ format->which);
if (!cfg_fmt)
return -EINVAL;
@@ -741,7 +742,7 @@ static int max9286_set_fmt(struct v4l2_subdev *sd,
}

static int max9286_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct max9286_priv *priv = sd_to_max9286(sd);
@@ -757,7 +758,7 @@ static int max9286_get_fmt(struct v4l2_subdev *sd,
if (pad == MAX9286_SRC_PAD)
pad = __ffs(priv->bound_sources);

- cfg_fmt = max9286_get_pad_format(priv, cfg, pad, format->which);
+ cfg_fmt = max9286_get_pad_format(priv, sd_state, pad, format->which);
if (!cfg_fmt)
return -EINVAL;
@@ -801,7 +802,7 @@ static int max9286_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
unsigned int i;

for (i = 0; i < MAX9286_N_SINKS; i++) {
- format = v4l2_subdev_get_try_format(subdev, fh->pad, i);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, i);
max9286_init_format(format);
}
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index ff212335326a..4a1410ebb4c8 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -188,7 +188,7 @@ static int ml86v7667_g_input_status(struct v4l2_subdev *sd, u32 *status)
}

static int ml86v7667_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -200,7 +200,7 @@ static int ml86v7667_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ml86v7667_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ml86v7667_priv *priv = to_ml86v7667(sd);
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 3b0ba8ed5233..b3448af0876d 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -253,7 +253,7 @@ put_unlock:
}
static int mt9m001_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -294,7 +294,7 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd,
}
static int mt9m001_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -319,7 +319,7 @@ static int mt9m001_get_selection(struct v4l2_subdev *sd,
}
static int mt9m001_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -330,7 +330,7 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mf;
return 0;
}
@@ -376,7 +376,7 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
}
static int mt9m001_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -410,7 +410,7 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9m001_s_fmt(sd, fmt, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -656,12 +656,12 @@ static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
};
static int mt9m001_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
try_fmt->width = MT9M001_MAX_WIDTH;
try_fmt->height = MT9M001_MAX_HEIGHT;
@@ -676,7 +676,7 @@ static int mt9m001_init_cfg(struct v4l2_subdev *sd,
}
static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 5a4c0f9d1eee..8a0741058c98 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -304,7 +304,7 @@ static int mt9m032_setup_pll(struct mt9m032 *sensor)
*/
static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -315,7 +315,7 @@ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index != 0 || fse->code != MEDIA_BUS_FMT_Y8_1X8)
@@ -338,12 +338,13 @@ static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
* Returns a pointer to the current active or fh relative try crop rect
*/
static struct v4l2_rect *
-__mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
+__mt9m032_get_pad_crop(struct mt9m032 *sensor,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&sensor->subdev, cfg, 0);
+ return v4l2_subdev_get_try_crop(&sensor->subdev, sd_state, 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->crop;
default:
@@ -360,12 +361,14 @@ __mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cf
* Returns a pointer to the current active or fh relative try format
*/
static struct v4l2_mbus_framefmt *
-__mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
+__mt9m032_get_pad_format(struct mt9m032 *sensor,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&sensor->subdev, cfg, 0);
+ return v4l2_subdev_get_try_format(&sensor->subdev, sd_state,
+ 0);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -374,20 +377,20 @@ __mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *
}
static int mt9m032_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
mutex_lock(&sensor->lock);
- fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, sd_state, fmt->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -401,7 +404,7 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
}
/* Scaling is not supported, the format is thus fixed. */
- fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
+ fmt->format = *__mt9m032_get_pad_format(sensor, sd_state, fmt->which);
ret = 0;
done:
@@ -410,7 +413,7 @@ done:
}
static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -419,14 +422,14 @@ static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
return -EINVAL;
mutex_lock(&sensor->lock);
- sel->r = *__mt9m032_get_pad_crop(sensor, cfg, sel->which);
+ sel->r = *__mt9m032_get_pad_crop(sensor, sd_state, sel->which);
mutex_unlock(&sensor->lock);
return 0;
}
static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -462,13 +465,14 @@ static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9m032_get_pad_crop(sensor, cfg, sel->which);
+ __crop = __mt9m032_get_pad_crop(sensor, sd_state, sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- format = __mt9m032_get_pad_format(sensor, cfg, sel->which);
+ format = __mt9m032_get_pad_format(sensor, sd_state,
+ sel->which);
format->width = rect.width;
format->height = rect.height;
}
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 69697386ffcd..b7127728b1b9 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -449,7 +449,7 @@ static int mt9m111_reset(struct mt9m111 *mt9m111)
}
static int mt9m111_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -493,7 +493,7 @@ static int mt9m111_set_selection(struct v4l2_subdev *sd,
}
static int mt9m111_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -518,7 +518,7 @@ static int mt9m111_get_selection(struct v4l2_subdev *sd,
}
static int mt9m111_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -529,7 +529,7 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format = *mf;
return 0;
#else
@@ -624,7 +624,7 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
}
static int mt9m111_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -678,7 +678,7 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -1100,7 +1100,7 @@ static int mt9m111_s_frame_interval(struct v4l2_subdev *sd,
}
static int mt9m111_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(mt9m111_colour_fmts))
@@ -1119,11 +1119,11 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
}
static int mt9m111_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
format->width = MT9M111_MAX_WIDTH;
format->height = MT9M111_MAX_HEIGHT;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 18440c5104ad..9563bad20839 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -497,7 +497,7 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -510,7 +510,7 @@ static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -528,12 +528,14 @@ static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_mbus_framefmt *
-__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
+__mt9p031_get_pad_format(struct mt9p031 *mt9p031,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9p031->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9p031->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->format;
default:
@@ -542,12 +544,14 @@ __mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, u32 which)
+__mt9p031_get_pad_crop(struct mt9p031 *mt9p031,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9p031->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9p031->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->crop;
default:
@@ -556,18 +560,18 @@ __mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_pad_config *c
}
static int mt9p031_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
- fmt->format = *__mt9p031_get_pad_format(mt9p031, cfg, fmt->pad,
+ fmt->format = *__mt9p031_get_pad_format(mt9p031, sd_state, fmt->pad,
fmt->which);
return 0;
}
static int mt9p031_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -578,7 +582,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9p031_get_pad_crop(mt9p031, cfg, format->pad,
+ __crop = __mt9p031_get_pad_crop(mt9p031, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -594,7 +598,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9p031_get_pad_format(mt9p031, cfg, format->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -605,7 +609,7 @@ static int mt9p031_set_format(struct v4l2_subdev *subdev,
}
static int mt9p031_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -613,12 +617,13 @@ static int mt9p031_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
+ sel->r = *__mt9p031_get_pad_crop(mt9p031, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9p031_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -648,13 +653,15 @@ static int mt9p031_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9p031_get_pad_crop(mt9p031, cfg, sel->pad, sel->which);
+ __crop = __mt9p031_get_pad_crop(mt9p031, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9p031_get_pad_format(mt9p031, cfg, sel->pad,
+ __format = __mt9p031_get_pad_format(mt9p031, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -969,13 +976,13 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9P031_COLUMN_START_DEF;
crop->top = MT9P031_ROW_START_DEF;
crop->width = MT9P031_WINDOW_WIDTH_DEF;
crop->height = MT9P031_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
format->code = MEDIA_BUS_FMT_Y12_1X12;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 2e96ff5234b4..b651ee4a26e8 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -252,12 +252,14 @@ e_power:
*/
static struct v4l2_mbus_framefmt *
-__mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
+__mt9t001_get_pad_format(struct mt9t001 *mt9t001,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9t001->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9t001->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->format;
default:
@@ -266,12 +268,14 @@ __mt9t001_get_pad_format(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9t001_get_pad_crop(struct mt9t001 *mt9t001, struct v4l2_subdev_pad_config *cfg,
+__mt9t001_get_pad_crop(struct mt9t001 *mt9t001,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9t001->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9t001->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9t001->crop;
default:
@@ -335,7 +339,7 @@ static int mt9t001_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -346,7 +350,7 @@ static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -361,18 +365,19 @@ static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9t001_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
- format->format = *__mt9t001_get_pad_format(mt9t001, cfg, format->pad,
+ format->format = *__mt9t001_get_pad_format(mt9t001, sd_state,
+ format->pad,
format->which);
return 0;
}
static int mt9t001_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -383,7 +388,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9t001_get_pad_crop(mt9t001, cfg, format->pad,
+ __crop = __mt9t001_get_pad_crop(mt9t001, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -399,7 +404,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
hratio = DIV_ROUND_CLOSEST(__crop->width, width);
vratio = DIV_ROUND_CLOSEST(__crop->height, height);
- __format = __mt9t001_get_pad_format(mt9t001, cfg, format->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -410,7 +415,7 @@ static int mt9t001_set_format(struct v4l2_subdev *subdev,
}
static int mt9t001_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -418,12 +423,13 @@ static int mt9t001_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
+ sel->r = *__mt9t001_get_pad_crop(mt9t001, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9t001_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -455,13 +461,15 @@ static int mt9t001_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int, rect.height,
MT9T001_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9t001_get_pad_crop(mt9t001, cfg, sel->pad, sel->which);
+ __crop = __mt9t001_get_pad_crop(mt9t001, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9t001_get_pad_format(mt9t001, cfg, sel->pad,
+ __format = __mt9t001_get_pad_format(mt9t001, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -798,13 +806,13 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9T001_COLUMN_START_DEF;
crop->top = MT9T001_ROW_START_DEF;
crop->width = MT9T001_WINDOW_WIDTH_DEF + 1;
crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index ae3c336eadf5..8d2e3caa9b28 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -872,8 +872,8 @@ static int mt9t112_set_params(struct mt9t112_priv *priv,
}
static int mt9t112_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
@@ -897,7 +897,7 @@ static int mt9t112_get_selection(struct v4l2_subdev *sd,
}
static int mt9t112_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -912,7 +912,7 @@ static int mt9t112_set_selection(struct v4l2_subdev *sd,
}
static int mt9t112_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -953,7 +953,7 @@ static int mt9t112_s_fmt(struct v4l2_subdev *sd,
}
static int mt9t112_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -982,13 +982,13 @@ static int mt9t112_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9t112_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
static int mt9t112_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 46ef74a2ca36..7699e64e1127 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -327,7 +327,7 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
}
static int mt9v011_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -338,7 +338,7 @@ static int mt9v011_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mt9v011_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -358,7 +358,7 @@ static int mt9v011_set_fmt(struct v4l2_subdev *sd,
set_res(sd);
} else {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
}
return 0;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 5bd3ae82992f..4cfdd3dfbd42 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -349,12 +349,14 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
*/
static struct v4l2_mbus_framefmt *
-__mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
+__mt9v032_get_pad_format(struct mt9v032 *mt9v032,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9v032->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9v032->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->format;
default:
@@ -363,12 +365,14 @@ __mt9v032_get_pad_format(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config
}
static struct v4l2_rect *
-__mt9v032_get_pad_crop(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *cfg,
+__mt9v032_get_pad_crop(struct mt9v032 *mt9v032,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9v032->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&mt9v032->subdev, sd_state,
+ pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->crop;
default:
@@ -425,7 +429,7 @@ static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
}
static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -438,7 +442,7 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -457,12 +461,13 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9v032_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
- format->format = *__mt9v032_get_pad_format(mt9v032, cfg, format->pad,
+ format->format = *__mt9v032_get_pad_format(mt9v032, sd_state,
+ format->pad,
format->which);
return 0;
}
@@ -492,7 +497,7 @@ static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
}
static int mt9v032_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -503,7 +508,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
unsigned int hratio;
unsigned int vratio;
- __crop = __mt9v032_get_pad_crop(mt9v032, cfg, format->pad,
+ __crop = __mt9v032_get_pad_crop(mt9v032, sd_state, format->pad,
format->which);
/* Clamp the width and height to avoid dividing by zero. */
@@ -519,7 +524,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
hratio = mt9v032_calc_ratio(__crop->width, width);
vratio = mt9v032_calc_ratio(__crop->height, height);
- __format = __mt9v032_get_pad_format(mt9v032, cfg, format->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, sd_state, format->pad,
format->which);
__format->width = __crop->width / hratio;
__format->height = __crop->height / vratio;
@@ -536,7 +541,7 @@ static int mt9v032_set_format(struct v4l2_subdev *subdev,
}
static int mt9v032_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -544,12 +549,13 @@ static int mt9v032_get_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
+ sel->r = *__mt9v032_get_pad_crop(mt9v032, sd_state, sel->pad,
+ sel->which);
return 0;
}
static int mt9v032_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -581,13 +587,15 @@ static int mt9v032_set_selection(struct v4l2_subdev *subdev,
rect.height = min_t(unsigned int,
rect.height, MT9V032_PIXEL_ARRAY_HEIGHT - rect.top);
- __crop = __mt9v032_get_pad_crop(mt9v032, cfg, sel->pad, sel->which);
+ __crop = __mt9v032_get_pad_crop(mt9v032, sd_state, sel->pad,
+ sel->which);
if (rect.width != __crop->width || rect.height != __crop->height) {
/* Reset the output image size if the crop rectangle size has
* been modified.
*/
- __format = __mt9v032_get_pad_format(mt9v032, cfg, sel->pad,
+ __format = __mt9v032_get_pad_format(mt9v032, sd_state,
+ sel->pad,
sel->which);
__format->width = rect.width;
__format->height = rect.height;
@@ -922,13 +930,13 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->pad, 0);
+ crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
crop->left = MT9V032_COLUMN_START_DEF;
crop->top = MT9V032_ROW_START_DEF;
crop->width = MT9V032_WINDOW_WIDTH_DEF;
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
if (mt9v032->model->color)
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 61ae6a0d5679..9dca6667d7f9 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -791,14 +791,14 @@ static int mt9v111_g_frame_interval(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
struct mt9v111_dev *mt9v111,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
- return v4l2_subdev_get_try_format(&mt9v111->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&mt9v111->sd, sd_state, pad);
#else
- return &cfg->try_fmt;
+ return &sd_state->pads->try_fmt;
#endif
@@ -810,7 +810,7 @@ static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
}
static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > ARRAY_SIZE(mt9v111_formats) - 1)
@@ -822,7 +822,7 @@ static int mt9v111_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
unsigned int i;
@@ -845,7 +845,7 @@ static int mt9v111_enum_frame_interval(struct v4l2_subdev *sd,
}
static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad || fse->index >= ARRAY_SIZE(mt9v111_frame_sizes))
@@ -860,7 +860,7 @@ static int mt9v111_enum_frame_size(struct v4l2_subdev *subdev,
}
static int mt9v111_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
@@ -869,7 +869,8 @@ static int mt9v111_get_format(struct v4l2_subdev *subdev,
return -EINVAL;
mutex_lock(&mt9v111->stream_mutex);
- format->format = *__mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ format->format = *__mt9v111_get_pad_format(mt9v111, sd_state,
+ format->pad,
format->which);
mutex_unlock(&mt9v111->stream_mutex);
@@ -877,7 +878,7 @@ static int mt9v111_get_format(struct v4l2_subdev *subdev,
}
static int mt9v111_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(subdev);
@@ -925,7 +926,7 @@ static int mt9v111_set_format(struct v4l2_subdev *subdev,
new_fmt.height = mt9v111_frame_sizes[idx].height;
/* Update the device (or pad) format if it has changed. */
- __fmt = __mt9v111_get_pad_format(mt9v111, cfg, format->pad,
+ __fmt = __mt9v111_get_pad_format(mt9v111, sd_state, format->pad,
format->which);
/* Format hasn't changed, stop here. */
@@ -954,9 +955,9 @@ done:
}
static int mt9v111_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
- cfg->try_fmt = mt9v111_def_fmt;
+ sd_state->pads->try_fmt = mt9v111_def_fmt;
return 0;
}
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 87d76a7f691a..f3ac379ef34a 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -488,7 +488,7 @@ unlock:
}
static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(noon010_formats))
@@ -499,15 +499,15 @@ static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
}
static int noon010_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
}
return 0;
@@ -539,7 +539,8 @@ static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
return &noon010_formats[i];
}
-static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int noon010_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
@@ -554,8 +555,8 @@ static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = fmt->format;
}
return 0;
@@ -637,7 +638,9 @@ static int noon010_log_status(struct v4l2_subdev *sd)
static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
mf->width = noon010_sizes[0].width;
mf->height = noon010_sizes[0].height;
diff --git a/drivers/media/i2c/ov1063x.c b/drivers/media/i2c/ov1063x.c
new file mode 100644
index 000000000000..38fb41c6dbb1
--- /dev/null
+++ b/drivers/media/i2c/ov1063x.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OmniVision OV1063X Camera Driver
+ *
+ * Based on the original driver written by Phil Edworthy.
+ * Copyright (C) 2013 Phil Edworthy
+ * Copyright (C) 2013 Renesas Electronics
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This driver has been tested at QVGA, VGA, 720p and 1280x800 at up to
+ * 30 fps, and it should work at any resolution in between and at any
+ * frame rate up to 30 fps.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#include "ov1063x_regs.h"
+
+/* Register definitions */
+#define OV1063X_VFLIP 0x381c
+#define OV1063X_VFLIP_ON GENMASK(7, 6)
+#define OV1063X_VFLIP_SUBSAMPLE BIT(0)
+#define OV1063X_HMIRROR 0x381d
+#define OV1063X_HMIRROR_ON GENMASK(1, 0)
+#define OV1063X_HORIZ_COLORCORRECT 0x6900
+#define OV1063X_HORIZ_COLORCORRECT_ON BIT(0)
+#define OV1063X_PID 0x300a
+#define OV1063X_VER 0x300b
+
+#define OV1063X_FORMAT_CTRL00 0x4300
+#define OV1063X_FORMAT_YUYV 0x38
+#define OV1063X_FORMAT_YYYU 0x39
+#define OV1063X_FORMAT_UYVY 0x3A
+#define OV1063X_FORMAT_VYUY 0x3B
+
+/* IDs */
+#define OV10633_VERSION_REG 0xa630
+#define OV10635_VERSION_REG 0xa635
+#define OV1063X_VERSION(pid, ver) (((pid) << 8) | ((ver) & 0xff))
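+/* e.g. OV1063X_VERSION(0xa6, 0x35) == 0xa635 == OV10635_VERSION_REG */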
+
+enum ov1063x_model {
+ SENSOR_OV10633,
+ SENSOR_OV10635,
+};
+
+#define OV1063X_SENSOR_WIDTH 1312
+#define OV1063X_SENSOR_HEIGHT 814
+
+#define OV1063X_MAX_WIDTH 1280
+#define OV1063X_MAX_HEIGHT 800
+
+struct ov1063x_color_format {
+ u32 code;
+ u32 colorspace;
+};
+
+struct ov1063x_framesize {
+ u16 width;
+ u16 height;
+};
+
+struct ov1063x_priv {
+ struct v4l2_subdev subdev;
+ struct v4l2_async_subdev asd;
+ struct v4l2_ctrl_handler hdl;
+ int model;
+ int revision;
+ int xvclk_rate;
+ /* Protects the struct fields below */
+ struct mutex lock;
+
+ int fps_numerator;
+ int fps_denominator;
+ struct v4l2_mbus_framefmt format;
+ int width;
+ int height;
+
+ struct regmap *regmap;
+
+ /* Sensor reference clock */
+ struct clk *xvclk;
+
+ bool power;
+
+ /* GPIOs */
+ struct gpio_desc *resetb_gpio;
+ struct gpio_desc *pwdn_gpio;
+
+ struct v4l2_ctrl *colorbar;
+};
+
+static const struct ov1063x_framesize ov1063x_framesizes[] = {
+ {
+ .width = 1280,
+ .height = 800,
+ }, {
+ .width = 1280,
+ .height = 720,
+ }, {
+ .width = 752,
+ .height = 480,
+ }, {
+ .width = 640,
+ .height = 480,
+ }, {
+ .width = 600,
+ .height = 400,
+ }, {
+ .width = 352,
+ .height = 288,
+ }, {
+ .width = 320,
+ .height = 240,
+ },
+};
+
+/*
+ * supported color format list
+ */
+static const struct ov1063x_color_format ov1063x_cfmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ },
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ },
+};
+
+static struct ov1063x_priv *to_ov1063x(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov1063x_priv,
+ subdev);
+}
+
+/* Helper to write a 16-bit value to two consecutive 8-bit registers */
+static int ov1063x_regmap_write16(struct regmap *map, u16 reg, u16 val)
+{
+ int ret;
+
+ ret = regmap_write(map, reg, val >> 8);
+ if (ret)
+ return ret;
+
+ return regmap_write(map, reg + 1, val & 0xff);
+}
+
+/* Start/Stop streaming from the device */
+static int ov1063x_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct regmap *map = priv->regmap;
+ int ret;
+
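+ /*
+ * 0x0100 is the usual OmniVision streaming on/off control; 0x301c is
+ * one of the system clock reset registers (see the register table
+ * comments in ov1063x_regs.h), toggled here, presumably to gate the
+ * clocks with the stream state.
+ */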
+ ret = regmap_write(map, 0x0100, enable);
+ if (ret)
+ return ret;
+
+ return regmap_write(map, 0x301c, enable ? 0xf0 : 0x70);
+}
+
+static int ov1063x_set_regs(struct i2c_client *client,
+ const struct ov1063x_reg *regs, int nr_regs);
+
+/* Set status of additional camera capabilities */
+static int ov1063x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov1063x_priv *priv = container_of(ctrl->handler,
+ struct ov1063x_priv, hdl);
+ struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev);
+ struct regmap *map = priv->regmap;
+ const struct ov1063x_reg *regs;
+ int n_regs, ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ return regmap_update_bits(map, OV1063X_VFLIP,
+ OV1063X_VFLIP_ON,
+ ctrl->val ? OV1063X_VFLIP_ON : 0);
+ case V4L2_CID_HFLIP:
+ ret = regmap_update_bits(map, OV1063X_HORIZ_COLORCORRECT,
+ OV1063X_HORIZ_COLORCORRECT_ON,
+ ctrl->val ?
+ OV1063X_HORIZ_COLORCORRECT_ON : 0);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(map, OV1063X_HMIRROR,
+ OV1063X_HMIRROR_ON,
+ ctrl->val ? OV1063X_HMIRROR_ON : 0);
+ case V4L2_CID_TEST_PATTERN:
+ if (ctrl->val) {
+ n_regs = ARRAY_SIZE(ov1063x_regs_colorbar_enable);
+ regs = ov1063x_regs_colorbar_enable;
+ } else {
+ n_regs = ARRAY_SIZE(ov1063x_regs_colorbar_disable);
+ regs = ov1063x_regs_colorbar_disable;
+ }
+ return ov1063x_set_regs(client, regs, n_regs);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Get the best pixel clock (pclk) that meets minimum hts/vts requirements.
+ * xvclk_rate => pre-divider => clk1 => multiplier => clk2 => post-divider
+ * => pclk
+ * We try all valid combinations of settings for the 3 blocks to get the pixel
+ * clock, and from that calculate the actual hts/vts to use. The vts is
+ * extended so as to achieve the required frame rate. The function also returns
+ * the PLL register contents needed to set the pixel clock.
+ */
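+/*
+ * Worked example (hypothetical numbers, not from a datasheet): with a 24 MHz
+ * xvclk, a pre-divider of 2 gives clk1 = (24 MHz / 2) * 2 = 24 MHz, a
+ * multiplier of 20 gives clk2 = 480 MHz (inside the 200-500 MHz window
+ * checked below), and post-divider index k = 2 gives
+ * pclk = 480 MHz / (2 * (2 + 1)) = 80 MHz, under the 96 MHz limit.
+ */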
+static int ov1063x_get_pclk(int xvclk_rate, int *htsmin, int *vtsmin,
+ int fps_numerator, int fps_denominator,
+ u8 *r3003, u8 *r3004)
+{
+ static const int pre_divs[] = { 2, 3, 4, 6, 8, 10, 12, 14 };
+ int pclk;
+ int best_pclk = INT_MAX;
+ int best_hts = 0;
+ int i, j, k;
+ int best_i = 0, best_j = 0, best_k = 0;
+ int clk1, clk2;
+ int hts;
+
+ /* Pre-div, reg 0x3004, bits 6:4 */
+ for (i = 0; i < ARRAY_SIZE(pre_divs); i++) {
+ clk1 = (xvclk_rate / pre_divs[i]) * 2;
+
+ if (clk1 < 3000000 || clk1 > 27000000)
+ continue;
+
+ /* Mult = reg 0x3003, bits 5:0 */
+ for (j = 1; j < 32; j++) {
+ clk2 = (clk1 * j);
+
+ if (clk2 < 200000000 || clk2 > 500000000)
+ continue;
+
+ /* Post-div, reg 0x3004, bits 2:0 */
+ for (k = 0; k < 8; k++) {
+ pclk = clk2 / (2 * (k + 1));
+
+ if (pclk > 96000000)
+ continue;
+
+ hts = *htsmin + 210 + pclk / 300000;
+
+ /* 2 clock cycles for every YUV422 pixel */
+ if (pclk < (((hts * *vtsmin) / fps_denominator)
+ * fps_numerator * 2))
+ continue;
+
+ if (pclk < best_pclk) {
+ best_pclk = pclk;
+ best_hts = hts;
+ best_i = i;
+ best_j = j;
+ best_k = k;
+ }
+ }
+ }
+ }
+
+ /* register contents */
+ *r3003 = (u8)best_j;
+ *r3004 = ((u8)best_i << 4) | (u8)best_k;
+
+ /* Did we get a valid PCLK? */
+ if (best_pclk == INT_MAX)
+ return -EINVAL;
+
+ *htsmin = best_hts;
+
+ /* Adjust vts to get as close to the desired frame rate as we can */
+ *vtsmin = best_pclk / ((best_hts / fps_denominator) *
+ fps_numerator * 2);
+
+ return best_pclk;
+}
+
+static int ov1063x_set_regs(struct i2c_client *client,
+ const struct ov1063x_reg *regs, int nr_regs)
+{
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct regmap *map = priv->regmap;
+ int i, ret;
+ u8 val;
+
+ for (i = 0; i < nr_regs; i++) {
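+ /*
+ * Register 0x300c appears to hold the SCCB (I2C) slave ID: program
+ * it from the address the device was actually probed at rather than
+ * the table value; bit 0 presumably enables the programmed ID (an
+ * assumption, the register map is not public).
+ */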
+ if (regs[i].reg == 0x300c) {
+ val = ((client->addr * 2) | 0x1);
+
+ ret = regmap_write(map, regs[i].reg, val);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(map, regs[i].reg, regs[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Setup registers according to resolution and color encoding */
+static int ov1063x_set_params(struct i2c_client *client, u32 width, u32 height)
+{
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct regmap *map = priv->regmap;
+ int ret = -EINVAL;
+ int pclk;
+ int hts, vts;
+ u8 r3003, r3004, r4300;
+ int tmp;
+ u32 height_pre_subsample;
+ u32 width_pre_subsample;
+ u8 horiz_crop_mode;
+ int nr_isp_pixels;
+ int vert_sub_sample = 0;
+ int horiz_sub_sample = 0;
+ int sensor_width;
+ int n_regs;
+
+ if (width > OV1063X_MAX_WIDTH || height > OV1063X_MAX_HEIGHT)
+ return ret;
+
+ priv->width = width;
+ priv->height = height;
+
+ /* Vertical sub-sampling? */
+ height_pre_subsample = priv->height;
+ if (priv->height <= 400) {
+ vert_sub_sample = 1;
+ height_pre_subsample <<= 1;
+ }
+
+ /* Horizontal sub-sampling? */
+ width_pre_subsample = priv->width;
+ if (priv->width <= 640) {
+ horiz_sub_sample = 1;
+ width_pre_subsample <<= 1;
+ }
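+ /*
+ * Example: a 320x240 output is read out as 640x480 on the sensor and
+ * sub-sampled by two in each direction.
+ */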
+
+ /* Horizontal cropping */
+ if (width_pre_subsample > 768) {
+ sensor_width = OV1063X_SENSOR_WIDTH;
+ horiz_crop_mode = 0x63;
+ } else if (width_pre_subsample > 656) {
+ sensor_width = 768;
+ horiz_crop_mode = 0x6b;
+ } else {
+ sensor_width = 656;
+ horiz_crop_mode = 0x73;
+ }
+
+ /* minimum values for hts and vts */
+ hts = sensor_width;
+ vts = height_pre_subsample + 50;
+ dev_dbg(&client->dev, "fps=(%d/%d), hts=%d, vts=%d\n",
+ priv->fps_numerator, priv->fps_denominator, hts, vts);
+
+ /* Get the best PCLK & adjust hts,vts accordingly */
+ pclk = ov1063x_get_pclk(priv->xvclk_rate, &hts, &vts,
+ priv->fps_numerator, priv->fps_denominator,
+ &r3003, &r3004);
+ if (pclk < 0)
+ return ret;
+ dev_dbg(&client->dev, "pclk=%d, hts=%d, vts=%d\n", pclk, hts, vts);
+ dev_dbg(&client->dev, "r3003=0x%X r3004=0x%X\n", r3003, r3004);
+
+ /* Disable ISP & program all registers that we might modify */
+ ret = ov1063x_set_regs(client, ov1063x_regs_change_mode,
+ ARRAY_SIZE(ov1063x_regs_change_mode));
+ if (ret)
+ return ret;
+
+ /* Set PLL */
+ ret = regmap_write(map, 0x3003, r3003);
+ if (ret)
+ return ret;
+ ret = regmap_write(map, 0x3004, r3004);
+ if (ret)
+ return ret;
+
+ /* Set HSYNC */
+ ret = regmap_write(map, 0x4700, 0x00);
+ if (ret)
+ return ret;
+
+ switch (priv->format.code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ r4300 = OV1063X_FORMAT_UYVY;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ r4300 = OV1063X_FORMAT_VYUY;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ r4300 = OV1063X_FORMAT_YUYV;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ r4300 = OV1063X_FORMAT_YYYU;
+ break;
+ default:
+ r4300 = OV1063X_FORMAT_UYVY;
+ break;
+ }
+
+ /* Set the output pixel format */
+ ret = regmap_write(map, OV1063X_FORMAT_CTRL00, r4300);
+ if (ret)
+ return ret;
+
+ dev_dbg(&client->dev, "r4300=0x%X\n", r4300);
+
+ /* Set output to 8-bit yuv */
+ ret = regmap_write(map, 0x4605, 0x08);
+ if (ret)
+ return ret;
+
+ /* Horizontal cropping */
+ ret = regmap_write(map, 0x3621, horiz_crop_mode);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, 0x3702, (pclk + 1500000) / 3000000);
+ if (ret)
+ return ret;
+ ret = regmap_write(map, 0x3703, (pclk + 666666) / 1333333);
+ if (ret)
+ return ret;
+ ret = regmap_write(map, 0x3704, (pclk + 961500) / 1923000);
+ if (ret)
+ return ret;
+
+ /* Vertical cropping */
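+ /* Centre the capture window vertically, aligned to an even row */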
+ tmp = ((OV1063X_SENSOR_HEIGHT - height_pre_subsample) / 2) & ~0x1;
+ ret = ov1063x_regmap_write16(map, 0x3802, tmp);
+ if (ret)
+ return ret;
+ tmp = tmp + height_pre_subsample + 3;
+ ret = ov1063x_regmap_write16(map, 0x3806, tmp);
+ if (ret)
+ return ret;
+
+ dev_dbg(&client->dev, "width x height = %x x %x\n",
+ priv->width, priv->height);
+ /* Output size */
+ ret = ov1063x_regmap_write16(map, 0x3808, priv->width);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0x380a, priv->height);
+ if (ret)
+ return ret;
+
+ dev_dbg(&client->dev, "hts x vts = %x x %x\n", hts, vts);
+
+ ret = ov1063x_regmap_write16(map, 0x380c, hts);
+ if (ret)
+ return ret;
+
+ ret = ov1063x_regmap_write16(map, 0x380e, vts);
+ if (ret)
+ return ret;
+
+ if (vert_sub_sample) {
+ ret = regmap_update_bits(map, OV1063X_VFLIP,
+ OV1063X_VFLIP_SUBSAMPLE,
+ OV1063X_VFLIP_SUBSAMPLE);
+ if (ret)
+ return ret;
+ n_regs = ARRAY_SIZE(ov1063x_regs_vert_sub_sample);
+ ret = ov1063x_set_regs(client, ov1063x_regs_vert_sub_sample,
+ n_regs);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov1063x_regmap_write16(map, 0x4606, 2 * hts);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0x460a,
+ 2 * (hts - width_pre_subsample));
+ if (ret)
+ return ret;
+
+ tmp = (vts - 8) * 16;
+ ret = ov1063x_regmap_write16(map, 0xc488, tmp);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0xc48a, tmp);
+ if (ret)
+ return ret;
+
+ nr_isp_pixels = sensor_width * (priv->height + 4);
+ ret = ov1063x_regmap_write16(map, 0xc4cc, nr_isp_pixels / 256);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0xc4ce, nr_isp_pixels / 256);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0xc512, nr_isp_pixels / 16);
+ if (ret)
+ return ret;
+
+ /* Horizontal sub-sampling */
+ if (horiz_sub_sample) {
+ ret = regmap_write(map, 0x5005, 0x9);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, 0x3007, 0x2);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov1063x_regmap_write16(map, 0xc518, vts);
+ if (ret)
+ return ret;
+ ret = ov1063x_regmap_write16(map, 0xc51a, hts);
+ if (ret)
+ return ret;
+
+ /* Enable ISP blocks */
+ ret = ov1063x_set_regs(client, ov1063x_regs_enable,
+ ARRAY_SIZE(ov1063x_regs_enable));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * V4L2 subdev video and pad level operations
+ */
+
+static void ov1063x_get_default_format(struct v4l2_mbus_framefmt *mf)
+{
+ mf->width = ov1063x_framesizes[0].width;
+ mf->height = ov1063x_framesizes[0].height;
+ mf->colorspace = ov1063x_cfmts[0].colorspace;
+ mf->code = ov1063x_cfmts[0].code;
+
+ mf->field = V4L2_FIELD_NONE;
+}
+
+static int ov1063x_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mutex_lock(&priv->lock);
+ fmt->format = *mf;
+ mutex_unlock(&priv->lock);
+ return 0;
+ }
+
+ mutex_lock(&priv->lock);
+ fmt->format = priv->format;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
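+/*
+ * Round the requested size to the closest supported frame size, using the
+ * sum of absolute width and height differences as the distance metric.
+ */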
+static void __ov1063x_try_frame_size(struct v4l2_mbus_framefmt *mf)
+{
+ const struct ov1063x_framesize *fsize = &ov1063x_framesizes[0];
+ const struct ov1063x_framesize *match = NULL;
+ int i = ARRAY_SIZE(ov1063x_framesizes);
+ unsigned int min_err = UINT_MAX;
+
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+
+ if (!match)
+ match = &ov1063x_framesizes[0];
+
+ mf->width = match->width;
+ mf->height = match->height;
+}
+
+static int ov1063x_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int index = ARRAY_SIZE(ov1063x_cfmts);
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ int ret = 0;
+
+ __ov1063x_try_frame_size(mf);
+
+ while (--index >= 0)
+ if (ov1063x_cfmts[index].code == mf->code)
+ break;
+
+ if (index < 0)
+ return -EINVAL;
+
+ mf->colorspace = ov1063x_cfmts[index].colorspace;
+ mf->code = ov1063x_cfmts[index].code;
+ mf->field = V4L2_FIELD_NONE;
+
+ mutex_lock(&priv->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ *mf = fmt->format;
+ } else {
+ priv->format = fmt->format;
+ ret = ov1063x_set_params(client, mf->width, mf->height);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int ov1063x_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(ov1063x_cfmts))
+ return -EINVAL;
+
+ code->code = ov1063x_cfmts[code->index].code;
+
+ return 0;
+}
+
+static int ov1063x_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int i = ARRAY_SIZE(ov1063x_cfmts);
+
+ if (fse->index >= ARRAY_SIZE(ov1063x_framesizes))
+ return -EINVAL;
+
+ while (--i)
+ if (ov1063x_cfmts[i].code == fse->code)
+ break;
+
+ fse->code = ov1063x_cfmts[i].code;
+
+ fse->min_width = ov1063x_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = ov1063x_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static void ov1063x_set_power(struct i2c_client *client, bool on)
+{
+ struct ov1063x_priv *priv = to_ov1063x(client);
+
+ dev_dbg(&client->dev, "%s: on: %d\n", __func__, on);
+
+ if (priv->power == on)
+ return;
+
+ if (on) {
+ if (priv->pwdn_gpio) {
+ gpiod_set_value_cansleep(priv->pwdn_gpio, 1);
+ usleep_range(1000, 1200);
+ }
+ if (priv->resetb_gpio) {
+ gpiod_set_value_cansleep(priv->resetb_gpio, 1);
+ usleep_range(250000, 260000);
+ }
+ } else {
+ if (priv->pwdn_gpio)
+ gpiod_set_value_cansleep(priv->pwdn_gpio, 0);
+ if (priv->resetb_gpio)
+ gpiod_set_value_cansleep(priv->resetb_gpio, 0);
+ }
+
+ priv->power = on;
+}
+
+static int ov1063x_video_probe(struct i2c_client *client)
+{
+ struct ov1063x_priv *priv = to_ov1063x(client);
+ struct regmap *map = priv->regmap;
+ u32 pid, ver;
+ int ret;
+
+ ov1063x_set_power(client, true);
+
+ ret = ov1063x_set_regs(client, ov1063x_regs_default,
+ ARRAY_SIZE(ov1063x_regs_default));
+ if (ret)
+ return ret;
+
+ usleep_range(500, 510);
+
+ /* Check and show the product ID and version */
+ ret = regmap_read(map, OV1063X_PID, &pid);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, OV1063X_VER, &ver);
+ if (ret)
+ return ret;
+
+ if (OV1063X_VERSION(pid, ver) == OV10635_VERSION_REG) {
+ priv->model = SENSOR_OV10635;
+ priv->revision = 1;
+ } else if (OV1063X_VERSION(pid, ver) == OV10633_VERSION_REG) {
+ priv->model = SENSOR_OV10633;
+ priv->revision = 1;
+ } else {
+ dev_err(&client->dev, "Product ID error %x:%x\n", pid, ver);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "ov1063x Product ID 0x%x Version 0x%x\n",
+ pid, ver);
+
+ /* Apply the default control values */
+
+ return v4l2_ctrl_handler_setup(&priv->hdl);
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+static int ov1063x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ mf = v4l2_subdev_get_try_format(sd, fh->state, 0);
+ ov1063x_get_default_format(mf);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ov1063x_ctrl_ops = {
+ .s_ctrl = ov1063x_s_ctrl,
+};
+
+static const char * const ov1063x_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bars",
+};
+
+static const struct v4l2_subdev_video_ops ov1063x_subdev_video_ops = {
+ .s_stream = ov1063x_s_stream,
+};
+
+static const struct v4l2_subdev_internal_ops ov1063x_sd_internal_ops = {
+ .open = ov1063x_open,
+};
+
+static const struct v4l2_subdev_core_ops ov1063x_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_pad_ops ov1063x_subdev_pad_ops = {
+ .enum_mbus_code = ov1063x_enum_mbus_code,
+ .enum_frame_size = ov1063x_enum_frame_sizes,
+ .get_fmt = ov1063x_get_fmt,
+ .set_fmt = ov1063x_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov1063x_subdev_ops = {
+ .core = &ov1063x_subdev_core_ops,
+ .video = &ov1063x_subdev_video_ops,
+ .pad = &ov1063x_subdev_pad_ops,
+};
+
+static const struct regmap_config ov1063x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
+/*
+ * i2c_driver function
+ */
+
+static int ov1063x_probe(struct i2c_client *client)
+{
+ struct ov1063x_priv *priv;
+ struct v4l2_subdev *sd;
+ struct clk *clk;
+ unsigned int menu_size;
+ int ret = 0;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &ov1063x_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ clk = devm_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "xvclk reference is missing!\n");
+ return PTR_ERR(clk);
+ }
+ priv->xvclk = clk;
+
+ priv->xvclk_rate = clk_get_rate(clk);
+ dev_dbg(&client->dev, "xvclk_rate: %d (Hz)\n", priv->xvclk_rate);
+
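+ /* The driver's PLL setup only supports a 6..27 MHz reference clock */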
+ if (priv->xvclk_rate < 6000000 ||
+ priv->xvclk_rate > 27000000)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(priv->xvclk);
+ if (ret < 0)
+ return ret;
+
+ /* Default framerate */
+ priv->fps_numerator = 30;
+ priv->fps_denominator = 1;
+ ov1063x_get_default_format(&priv->format);
+ priv->width = priv->format.width;
+ priv->height = priv->format.height;
+
+ sd = &priv->subdev;
+ v4l2_i2c_subdev_init(sd, client, &ov1063x_subdev_ops);
+
+ sd->internal_ops = &ov1063x_sd_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ v4l2_ctrl_handler_init(&priv->hdl, 3);
+ v4l2_ctrl_new_std(&priv->hdl, &ov1063x_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov1063x_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ menu_size = ARRAY_SIZE(ov1063x_test_pattern_menu) - 1;
+ priv->colorbar =
+ v4l2_ctrl_new_std_menu_items(&priv->hdl, &ov1063x_ctrl_ops,
+ V4L2_CID_TEST_PATTERN, menu_size,
+ 0, 0, ov1063x_test_pattern_menu);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto err;
+ }
+
+ mutex_init(&priv->lock);
+
+	/* Optional GPIO; do not fail if it is absent */
+	priv->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+						  GPIOD_OUT_LOW);
+	if (IS_ERR(priv->pwdn_gpio)) {
+		ret = PTR_ERR(priv->pwdn_gpio);
+		goto err;
+	}
+
+	/* Optional GPIO; do not fail if it is absent */
+	priv->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(priv->resetb_gpio)) {
+		ret = PTR_ERR(priv->resetb_gpio);
+		goto err;
+	}
+
+ ret = ov1063x_video_probe(client);
+ if (ret) {
+ v4l2_ctrl_handler_free(&priv->hdl);
+ goto err;
+ }
+
+ sd->dev = &client->dev;
+	ret = v4l2_async_register_subdev(sd);
+	if (ret) {
+		v4l2_ctrl_handler_free(&priv->hdl);
+		goto err;
+	}
+
+	dev_info(&client->dev, "%s sensor driver registered\n", sd->name);
+
+	return 0;
+err:
+ clk_disable_unprepare(priv->xvclk);
+ return ret;
+}
+
+static int ov1063x_remove(struct i2c_client *client)
+{
+ struct ov1063x_priv *priv = i2c_get_clientdata(client);
+
+	v4l2_async_unregister_subdev(&priv->subdev);
+	v4l2_ctrl_handler_free(&priv->hdl);
+ ov1063x_set_power(client, false);
+ clk_disable_unprepare(priv->xvclk);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov1063x_id[] = {
+ { "ov10635", 0 },
+ { "ov10633", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov1063x_id);
+
+static const struct of_device_id ov1063x_dt_id[] = {
+ {
+ .compatible = "ovti,ov10635", .data = "ov10635"
+ },
+ {
+ .compatible = "ovti,ov10633", .data = "ov10633"
+ },
+ {
+ }
+};
+MODULE_DEVICE_TABLE(of, ov1063x_dt_id);
+
+static struct i2c_driver ov1063x_i2c_driver = {
+ .driver = {
+ .name = "ov1063x",
+ .of_match_table = of_match_ptr(ov1063x_dt_id),
+ },
+ .probe_new = ov1063x_probe,
+ .remove = ov1063x_remove,
+ .id_table = ov1063x_id,
+};
+
+module_i2c_driver(ov1063x_i2c_driver);
+
+MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV1063X");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov1063x_regs.h b/drivers/media/i2c/ov1063x_regs.h
new file mode 100644
index 000000000000..e63e13663210
--- /dev/null
+++ b/drivers/media/i2c/ov1063x_regs.h
@@ -0,0 +1,699 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * OmniVision OV1063X Camera Driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+struct ov1063x_reg {
+ u16 reg;
+ u8 val;
+};
+
+static const struct ov1063x_reg ov1063x_regs_default[] = {
+	/* Register configuration for full resolution: 1280x720 */
+ {0x103, 0x1}, /** Software Reset */
+ {0x301b, 0xff}, /** System Control Clock Reset #1 */
+ {0x301c, 0xff}, /** System Control Clock Reset #2 */
+ {0x301a, 0xff}, /** System Control Clock Reset #0 */
+ {0x300c, 0x61}, /** Serial Camera Control Bus ID */
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x300c, 0x61},
+ {0x3021, 0x3}, /** System Control Misc */
+ {0x3011, 0x2},
+ {0x6900, 0xc},
+ {0x6901, 0x1},
+ {0x3033, 0x8}, /** System clock/4 */
+ {0x3503, 0x10}, /** AEC Delay enabled */
+ {0x302d, 0x2f}, /** Power Down Control */
+ {0x3025, 0x3}, /** Debug Control enabled */
+	/*
+	 * FPS is computed as:
+	 *   XVCLK = 9 MHz
+	 *   preDivide = {/1, /1.5, /2, /3, /4, /5, /6, /7},
+	 *               selected by 0x3004[6:4]
+	 *   numerator = (0x3003[5:0] * XVCLK) / preDivide
+	 *   denominator = 2 * (1 + 0x3004[2:0])
+	 *   FPS = numerator / denominator
+	 */
+ /* {0x3003, 0x1B}, */ /* fps = 30fps. */
+ /* {0x3004, 0x03}, */
+ {0x3003, 0x20}, /* fps = 15fps. */
+ {0x3004, 0x3},
+ {0x3005, 0x20},
+ {0x3006, 0x91},
+ {0x3600, 0x74},
+ {0x3601, 0x2b},
+ {0x3612, 0x0},
+ {0x3611, 0x67},
+ {0x3633, 0xba},
+ {0x3602, 0x2f},
+ {0x3603, 0x0},
+ {0x3630, 0xa8},
+ {0x3631, 0x16},
+ {0x3714, 0x10},
+ {0x371d, 0x1},
+ {0x4300, 0x3A}, /* UYVY mode */
+ {0x3007, 0x1},
+	/*
+	 * RAW mode and pixel CLK select.
+	 * 0x3024[0] = 1 :: system CLK (0x3003, 0x3004)
+	 * 0x3024[0] = 0 :: secondary CLK (0x3005, 0x3006)
+	 */
+ {0x3024, 0x1},
+ {0x3020, 0xb},
+ {0x3702, 0xd},
+ {0x3703, 0x20},
+ {0x3704, 0x15},
+ {0x3709, 0x28},
+ {0x370d, 0x0},
+ {0x3712, 0x0},
+ {0x3713, 0x20},
+ {0x3715, 0x4},
+ {0x381d, 0x40},
+ {0x381c, 0x0},
+ {0x3824, 0x10},
+ {0x3815, 0x8c},
+ {0x3804, 0x5},
+ {0x3805, 0x1f},
+ {0x3800, 0x0},
+ {0x3801, 0x0},
+ {0x3806, 0x3},
+ {0x3807, 0x1},
+ {0x3802, 0x0},
+ {0x3803, 0x2e},
+ {0x3808, 0x5},
+ {0x3809, 0x0},
+ {0x380a, 0x2},
+ {0x380b, 0xd0},
+ {0x380c, 0x6},
+ {0x380d, 0xf6}, /* 1280x720 */
+ {0x380e, 0x2},
+ {0x380f, 0xec}, /* 1280x720 */
+ {0x3811, 0x8},
+ {0x381f, 0xc},
+ {0x3621, 0x63},
+ {0x5005, 0x8},
+ {0x56d5, 0x0},
+ {0x56d6, 0x80},
+ {0x56d7, 0x0},
+ {0x56d8, 0x0},
+ {0x56d9, 0x0},
+ {0x56da, 0x80},
+ {0x56db, 0x0},
+ {0x56dc, 0x0},
+ {0x56e8, 0x0},
+ {0x56e9, 0x7f},
+ {0x56ea, 0x0},
+ {0x56eb, 0x7f},
+ {0x5100, 0x0},
+ {0x5101, 0x80},
+ {0x5102, 0x0},
+ {0x5103, 0x80},
+ {0x5104, 0x0},
+ {0x5105, 0x80},
+ {0x5106, 0x0},
+ {0x5107, 0x80},
+ {0x5108, 0x0},
+ {0x5109, 0x0},
+ {0x510a, 0x0},
+ {0x510b, 0x0},
+ {0x510c, 0x0},
+ {0x510d, 0x0},
+ {0x510e, 0x0},
+ {0x510f, 0x0},
+ {0x5110, 0x0},
+ {0x5111, 0x80},
+ {0x5112, 0x0},
+ {0x5113, 0x80},
+ {0x5114, 0x0},
+ {0x5115, 0x80},
+ {0x5116, 0x0},
+ {0x5117, 0x80},
+ {0x5118, 0x0},
+ {0x5119, 0x0},
+ {0x511a, 0x0},
+ {0x511b, 0x0},
+ {0x511c, 0x0},
+ {0x511d, 0x0},
+ {0x511e, 0x0},
+ {0x511f, 0x0},
+ {0x56d0, 0x0},
+ {0x5006, 0x24},
+ {0x5608, 0x0},
+ {0x52d7, 0x6},
+ {0x528d, 0x8},
+ {0x5293, 0x12},
+ {0x52d3, 0x12},
+ {0x5288, 0x6},
+ {0x5289, 0x20},
+ {0x52c8, 0x6},
+ {0x52c9, 0x20},
+ {0x52cd, 0x4},
+ {0x5381, 0x0},
+ {0x5382, 0xff},
+ {0x5589, 0x76},
+ {0x558a, 0x47},
+ {0x558b, 0xef},
+ {0x558c, 0xc9},
+ {0x558d, 0x49},
+ {0x558e, 0x30},
+ {0x558f, 0x67},
+ {0x5590, 0x3f},
+ {0x5591, 0xf0},
+ {0x5592, 0x10},
+ {0x55a2, 0x6d},
+ {0x55a3, 0x55},
+ {0x55a4, 0xc3},
+ {0x55a5, 0xb5},
+ {0x55a6, 0x43},
+ {0x55a7, 0x38},
+ {0x55a8, 0x5f},
+ {0x55a9, 0x4b},
+ {0x55aa, 0xf0},
+ {0x55ab, 0x10},
+ {0x5581, 0x52},
+ {0x5300, 0x1},
+ {0x5301, 0x0},
+ {0x5302, 0x0},
+ {0x5303, 0xe},
+ {0x5304, 0x0},
+ {0x5305, 0xe},
+ {0x5306, 0x0},
+ {0x5307, 0x36},
+ {0x5308, 0x0},
+ {0x5309, 0xd9},
+ {0x530a, 0x0},
+ {0x530b, 0xf},
+ {0x530c, 0x0},
+ {0x530d, 0x2c},
+ {0x530e, 0x0},
+ {0x530f, 0x59},
+ {0x5310, 0x0},
+ {0x5311, 0x7b},
+ {0x5312, 0x0},
+ {0x5313, 0x22},
+ {0x5314, 0x0},
+ {0x5315, 0xd5},
+ {0x5316, 0x0},
+ {0x5317, 0x13},
+ {0x5318, 0x0},
+ {0x5319, 0x18},
+ {0x531a, 0x0},
+ {0x531b, 0x26},
+ {0x531c, 0x0},
+ {0x531d, 0xdc},
+ {0x531e, 0x0},
+ {0x531f, 0x2},
+ {0x5320, 0x0},
+ {0x5321, 0x24},
+ {0x5322, 0x0},
+ {0x5323, 0x56},
+ {0x5324, 0x0},
+ {0x5325, 0x85},
+ {0x5326, 0x0},
+ {0x5327, 0x20},
+ {0x5609, 0x1},
+ {0x560a, 0x40},
+ {0x560b, 0x1},
+ {0x560c, 0x40},
+ {0x560d, 0x0},
+ {0x560e, 0xfa},
+ {0x560f, 0x0},
+ {0x5610, 0xfa},
+ {0x5611, 0x2},
+ {0x5612, 0x80},
+ {0x5613, 0x2},
+ {0x5614, 0x80},
+ {0x5615, 0x1},
+ {0x5616, 0x2c},
+ {0x5617, 0x1},
+ {0x5618, 0x2c},
+ {0x563b, 0x1},
+ {0x563c, 0x1},
+ {0x563d, 0x1},
+ {0x563e, 0x1},
+ {0x563f, 0x3},
+ {0x5640, 0x3},
+ {0x5641, 0x3},
+ {0x5642, 0x5},
+ {0x5643, 0x9},
+ {0x5644, 0x5},
+ {0x5645, 0x5},
+ {0x5646, 0x5},
+ {0x5647, 0x5},
+ {0x5651, 0x0},
+ {0x5652, 0x80},
+ {0x521a, 0x1},
+ {0x521b, 0x3},
+ {0x521c, 0x6},
+ {0x521d, 0xa},
+ {0x521e, 0xe},
+ {0x521f, 0x12},
+ {0x5220, 0x16},
+ {0x5223, 0x2},
+ {0x5225, 0x4},
+ {0x5227, 0x8},
+ {0x5229, 0xc},
+ {0x522b, 0x12},
+ {0x522d, 0x18},
+ {0x522f, 0x1e},
+ {0x5241, 0x4},
+ {0x5242, 0x1},
+ {0x5243, 0x3},
+ {0x5244, 0x6},
+ {0x5245, 0xa},
+ {0x5246, 0xe},
+ {0x5247, 0x12},
+ {0x5248, 0x16},
+ {0x524a, 0x3},
+ {0x524c, 0x4},
+ {0x524e, 0x8},
+ {0x5250, 0xc},
+ {0x5252, 0x12},
+ {0x5254, 0x18},
+ {0x5256, 0x1e},
+ {0x4605, 0x00}, /* 8-bit YUV mode. */
+ {0x4606, 0x7},
+ {0x4607, 0x71},
+ {0x460a, 0x2},
+ {0x460b, 0x70},
+ {0x460c, 0x0},
+ {0x4620, 0xe},
+ {0x4700, 0x4},
+ {0x4701, 0x01},
+ /* {0x4702, 0x1}, */
+ {0x4702, 0x00}, /* 01 */
+ {0x4703, 0x00},
+ {0x4704, 0x00}, /* 01 */
+ /*
+ * Non-overlapping HSYNC-VSYNC.
+ * Therefore do not set the VSYNC delay registers.
+ */
+ {0x4705, 0x00}, /* Vsync delay high byte */
+ {0x4706, 0x00}, /* Vsync delay middle byte */
+	{0x4707, 0x00}, /* Vsync delay low byte */
+	/* {0x4708, 0x01}, */
+	/* {0x4709, 0x50}, */
+ {0x4004, 0x8},
+ {0x4005, 0x18},
+ {0x4001, 0x4},
+ {0x4050, 0x20},
+ {0x4051, 0x22},
+ {0x4057, 0x9c},
+ {0x405a, 0x0},
+ {0x4202, 0x2},
+ {0x3023, 0x10},
+ {0x100, 0x1},
+ {0x100, 0x1},
+ {0x6f0e, 0x0},
+ {0x6f0f, 0x0},
+ {0x460e, 0x8},
+ {0x460f, 0x1},
+ {0x4610, 0x0},
+ {0x4611, 0x1},
+ {0x4612, 0x0},
+ {0x4613, 0x1},
+ {0x4605, 0x00},
+ {0x4608, 0x0},
+ {0x4609, 0x8},
+ {0x6804, 0x0},
+ {0x6805, 0x6},
+ {0x6806, 0x0},
+ {0x5120, 0x0},
+ {0x3510, 0x0},
+ {0x3504, 0x0},
+ {0x6800, 0x0},
+ {0x6f0d, 0x0},
+ {0x5000, 0xff},
+ {0x5001, 0xbf},
+ {0x5002, 0xfe},
+ {0x503d, 0x0},
+ /* {0x503e, 0x00}, */
+ {0xc450, 0x1},
+ {0xc452, 0x4},
+ {0xc453, 0x0},
+ {0xc454, 0x0},
+ {0xc455, 0x0},
+ {0xc456, 0x0},
+ {0xc457, 0x0},
+ {0xc458, 0x0},
+ {0xc459, 0x0},
+ {0xc45b, 0x0},
+ {0xc45c, 0x0},
+ {0xc45d, 0x0},
+ {0xc45e, 0x0},
+ {0xc45f, 0x0},
+ {0xc460, 0x0},
+ {0xc461, 0x1},
+ {0xc462, 0x1},
+ {0xc464, 0x88},
+ {0xc465, 0x0},
+ {0xc466, 0x8a},
+ {0xc467, 0x0},
+ {0xc468, 0x86},
+ {0xc469, 0x0},
+ {0xc46a, 0x40},
+ {0xc46b, 0x50},
+ {0xc46c, 0x30},
+ {0xc46d, 0x28},
+ {0xc46e, 0x60},
+ {0xc46f, 0x40},
+ {0xc47c, 0x1},
+ {0xc47d, 0x38},
+ {0xc47e, 0x0},
+ {0xc47f, 0x0},
+ {0xc480, 0x0},
+ {0xc481, 0xff},
+ {0xc482, 0x0},
+ {0xc483, 0x40},
+ {0xc484, 0x0},
+ {0xc485, 0x18},
+ {0xc486, 0x0},
+ {0xc487, 0x18},
+ {0xc488, 0x2e},
+ {0xc489, 0x80},
+ {0xc48a, 0x2e},
+ {0xc48b, 0x80},
+ {0xc48c, 0x0},
+ {0xc48d, 0x4},
+ {0xc48e, 0x0},
+ {0xc48f, 0x4},
+ {0xc490, 0x7},
+ {0xc492, 0x20},
+ {0xc493, 0x8},
+ {0xc498, 0x2},
+ {0xc499, 0x0},
+ {0xc49a, 0x2},
+ {0xc49b, 0x0},
+ {0xc49c, 0x2},
+ {0xc49d, 0x0},
+ {0xc49e, 0x2},
+ {0xc49f, 0x60},
+ {0xc4a0, 0x4},
+ {0xc4a1, 0x0},
+ {0xc4a2, 0x6},
+ {0xc4a3, 0x0},
+ {0xc4a4, 0x0},
+ {0xc4a5, 0x10},
+ {0xc4a6, 0x0},
+ {0xc4a7, 0x40},
+ {0xc4a8, 0x0},
+ {0xc4a9, 0x80},
+ {0xc4aa, 0xd},
+ {0xc4ab, 0x0},
+ {0xc4ac, 0xf},
+ {0xc4ad, 0xc0},
+ {0xc4b4, 0x1},
+ {0xc4b5, 0x1},
+ {0xc4b6, 0x0},
+ {0xc4b7, 0x1},
+ {0xc4b8, 0x0},
+ {0xc4b9, 0x1},
+ {0xc4ba, 0x1},
+ {0xc4bb, 0x0},
+ {0xc4be, 0x2},
+ {0xc4bf, 0x33},
+ {0xc4c8, 0x3},
+ {0xc4c9, 0xd0},
+ {0xc4ca, 0xe},
+ {0xc4cb, 0x0},
+ {0xc4cc, 0xe},
+ {0xc4cd, 0x51},
+ {0xc4ce, 0xe},
+ {0xc4cf, 0x51},
+ {0xc4d0, 0x4},
+ {0xc4d1, 0x80},
+ {0xc4e0, 0x4},
+ {0xc4e1, 0x2},
+ {0xc4e2, 0x1},
+ {0xc4e4, 0x10},
+ {0xc4e5, 0x20},
+ {0xc4e6, 0x30},
+ {0xc4e7, 0x40},
+ {0xc4e8, 0x50},
+ {0xc4e9, 0x60},
+ {0xc4ea, 0x70},
+ {0xc4eb, 0x80},
+ {0xc4ec, 0x90},
+ {0xc4ed, 0xa0},
+ {0xc4ee, 0xb0},
+ {0xc4ef, 0xc0},
+ {0xc4f0, 0xd0},
+ {0xc4f1, 0xe0},
+ {0xc4f2, 0xf0},
+ {0xc4f3, 0x80},
+ {0xc4f4, 0x0},
+ {0xc4f5, 0x20},
+ {0xc4f6, 0x2},
+ {0xc4f7, 0x0},
+ {0xc4f8, 0x4},
+ {0xc4f9, 0xb},
+ {0xc4fa, 0x0},
+ {0xc4fb, 0x1},
+ {0xc4fc, 0x1},
+ {0xc4fd, 0x1},
+ {0xc4fe, 0x4},
+ {0xc4ff, 0x2},
+ {0xc500, 0x68},
+ {0xc501, 0x74},
+ {0xc502, 0x70},
+ {0xc503, 0x80},
+ {0xc504, 0x5},
+ {0xc505, 0x80},
+ {0xc506, 0x3},
+ {0xc507, 0x80},
+ {0xc508, 0x1},
+ {0xc509, 0xc0},
+ {0xc50a, 0x1},
+ {0xc50b, 0xa0},
+ {0xc50c, 0x1},
+ {0xc50d, 0x2c},
+ {0xc50e, 0x1},
+ {0xc50f, 0xa},
+ {0xc510, 0x0},
+ {0xc511, 0x0},
+ {0xc512, 0xe5},
+ {0xc513, 0x14},
+ {0xc514, 0x4},
+ {0xc515, 0x0},
+ {0xc518, 0x3},
+ {0xc519, 0x48},
+ {0xc51a, 0x7},
+ {0xc51b, 0x70},
+ {0xc2e0, 0x0},
+ {0xc2e1, 0x51},
+ {0xc2e2, 0x0},
+ {0xc2e3, 0xd6},
+ {0xc2e4, 0x1},
+ {0xc2e5, 0x5e},
+ {0xc2e9, 0x1},
+ {0xc2ea, 0x7a},
+ {0xc2eb, 0x90},
+ {0xc2ed, 0x1},
+ {0xc2ee, 0x7a},
+ {0xc2ef, 0x64},
+ {0xc308, 0x0},
+ {0xc309, 0x0},
+ {0xc30a, 0x0},
+ {0xc30c, 0x0},
+ {0xc30d, 0x1},
+ {0xc30e, 0x0},
+ {0xc30f, 0x0},
+ {0xc310, 0x1},
+ {0xc311, 0x60},
+ {0xc312, 0xff},
+ {0xc313, 0x8},
+ {0xc314, 0x1},
+ {0xc315, 0x7f},
+ {0xc316, 0xff},
+ {0xc317, 0xb},
+ {0xc318, 0x0},
+ {0xc319, 0xc},
+ {0xc31a, 0x0},
+ {0xc31b, 0xe0},
+ {0xc31c, 0x0},
+ {0xc31d, 0x14},
+ {0xc31e, 0x0},
+ {0xc31f, 0xc5},
+ {0xc320, 0xff},
+ {0xc321, 0x4b},
+ {0xc322, 0xff},
+ {0xc323, 0xf0},
+ {0xc324, 0xff},
+ {0xc325, 0xe8},
+ {0xc326, 0x0},
+ {0xc327, 0x46},
+ {0xc328, 0xff},
+ {0xc329, 0xd2},
+ {0xc32a, 0xff},
+ {0xc32b, 0xe4},
+ {0xc32c, 0xff},
+ {0xc32d, 0xbb},
+ {0xc32e, 0x0},
+ {0xc32f, 0x61},
+ {0xc330, 0xff},
+ {0xc331, 0xf9},
+ {0xc332, 0x0},
+ {0xc333, 0xd9},
+ {0xc334, 0x0},
+ {0xc335, 0x2e},
+ {0xc336, 0x0},
+ {0xc337, 0xb1},
+ {0xc338, 0xff},
+ {0xc339, 0x64},
+ {0xc33a, 0xff},
+ {0xc33b, 0xeb},
+ {0xc33c, 0xff},
+ {0xc33d, 0xe8},
+ {0xc33e, 0x0},
+ {0xc33f, 0x48},
+ {0xc340, 0xff},
+ {0xc341, 0xd0},
+ {0xc342, 0xff},
+ {0xc343, 0xed},
+ {0xc344, 0xff},
+ {0xc345, 0xad},
+ {0xc346, 0x0},
+ {0xc347, 0x66},
+ {0xc348, 0x1},
+ {0xc349, 0x0},
+ {0x6700, 0x4},
+ {0x6701, 0x7b},
+ {0x6702, 0xfd},
+ {0x6703, 0xf9},
+ {0x6704, 0x3d},
+ {0x6705, 0x71},
+ /*
+ * 0x6706[3:0] :: XVCLK
+ * 0x6706[3:0] :: 0 = 6MHz
+ * 0x6706[3:0] :: 1 = 9MHz
+ * 0x6706[3:0] :: 8 = 24MHz
+ * 0x6706[3:0] :: 9 = 27MHz
+ */
+ {0x6706, 0x71},
+ {0x6708, 0x5},
+ {0x3822, 0x50},
+ {0x6f06, 0x6f},
+ {0x6f07, 0x0},
+ {0x6f0a, 0x6f},
+ {0x6f0b, 0x0},
+ {0x6f00, 0x3},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x3042, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301a, 0xf0},
+};
+
+static const struct ov1063x_reg ov1063x_regs_change_mode[] = {
+ { 0x301b, 0xff }, { 0x301c, 0xff }, { 0x301a, 0xff }, { 0x5005, 0x08 },
+ { 0x3007, 0x01 }, { 0x381c, 0x00 }, { 0x381f, 0x0C }, { 0x4001, 0x04 },
+ { 0x4004, 0x08 }, { 0x4050, 0x20 }, { 0x4051, 0x22 }, { 0x6e47, 0x0C },
+ { 0x4610, 0x05 }, { 0x4613, 0x10 },
+};
+
+static const struct ov1063x_reg ov1063x_regs_bt656[] = {
+ { 0x4700, 0x02 }, { 0x4302, 0x03 }, { 0x4303, 0xf8 }, { 0x4304, 0x00 },
+ { 0x4305, 0x08 }, { 0x4306, 0x03 }, { 0x4307, 0xf8 }, { 0x4308, 0x00 },
+ { 0x4309, 0x08 },
+};
+
+static const struct ov1063x_reg ov1063x_regs_bt656_10bit[] = {
+ { 0x4700, 0x02 }, { 0x4302, 0x03 }, { 0x4303, 0xfe }, { 0x4304, 0x00 },
+ { 0x4305, 0x02 }, { 0x4306, 0x03 }, { 0x4307, 0xfe }, { 0x4308, 0x00 },
+ { 0x4309, 0x02 },
+};
+
+static const struct ov1063x_reg ov1063x_regs_vert_sub_sample[] = {
+ { 0x381f, 0x06 }, { 0x4001, 0x02 }, { 0x4004, 0x02 }, { 0x4050, 0x10 },
+ { 0x4051, 0x11 }, { 0x6e47, 0x06 }, { 0x4610, 0x03 }, { 0x4613, 0x0a },
+};
+
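+/*
+ * The long run of identical 0x3042 writes below appears to act as a fixed
+ * settling delay (each write is one I2C transaction) before the clock-reset
+ * registers are released.
+ */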
+static const struct ov1063x_reg ov1063x_regs_enable[] = {
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x3042, 0xf0 },
+ { 0x3042, 0xf0 }, { 0x3042, 0xf0 }, { 0x301b, 0xf0 }, { 0x301c, 0xf0 },
+ { 0x301a, 0xf0 },
+};
+
+/*
+ * The datasheet highlights the following sequences to enable and disable
+ * the test pattern mode, i.e. the colorbar.
+ */
+static const struct ov1063x_reg ov1063x_regs_colorbar_enable[] = {
+ {0x5120, 0x01}, {0x56d0, 0x01}, {0x5300, 0x01}, {0x5301, 0x00},
+ {0x5302, 0x00}, {0x5303, 0x00}, {0x5304, 0x00}, {0x5305, 0x00},
+ {0x5306, 0x00}, {0x5307, 0x00}, {0x5308, 0x01}, {0x5309, 0x00},
+ {0x530a, 0x00}, {0x530b, 0x00}, {0x530c, 0x00}, {0x530d, 0x00},
+ {0x530e, 0x00}, {0x530f, 0x00}, {0x5310, 0x01}, {0x5311, 0x00},
+ {0x5312, 0x00}, {0x5313, 0x00}, {0x5314, 0x01}, {0x5315, 0x00},
+ {0x5316, 0x00}, {0x5317, 0x00}, {0x5318, 0x00}, {0x5319, 0x00},
+ {0x531a, 0x00}, {0x531b, 0x00}, {0x531c, 0x01}, {0x531d, 0x00},
+ {0x531e, 0x00}, {0x531f, 0x00}, {0x5320, 0x00}, {0x5321, 0x00},
+ {0x5322, 0x00}, {0x5323, 0x00}, {0x5324, 0x01}, {0x5325, 0x00},
+ {0x5326, 0x00}, {0x5327, 0x00}, {0xc2ea, 0x80}, {0xc2eb, 0x80},
+ {0x5000, 0x79}, {0x503d, 0x80},
+};
+
+static const struct ov1063x_reg ov1063x_regs_colorbar_disable[] = {
+ {0x503d, 0x00}, {0x5120, 0x00}, {0x56d0, 0x00}, {0x5300, 0x01},
+ {0x5301, 0x00}, {0x5302, 0x00}, {0x5303, 0x0e}, {0x5304, 0x00},
+ {0x5305, 0x0e}, {0x5306, 0x00}, {0x5307, 0x36}, {0x5308, 0x00},
+ {0x5309, 0xd9}, {0x530a, 0x00}, {0x530b, 0x0f}, {0x530c, 0x00},
+ {0x530d, 0x2c}, {0x530e, 0x00}, {0x530f, 0x59}, {0x5310, 0x00},
+ {0x5311, 0x7b}, {0x5312, 0x00}, {0x5313, 0x22}, {0x5314, 0x00},
+ {0x5315, 0xd5}, {0x5316, 0x00}, {0x5317, 0x13}, {0x5318, 0x00},
+ {0x5319, 0x18}, {0x531a, 0x00}, {0x531b, 0x26}, {0x531c, 0x00},
+ {0x531d, 0xdc}, {0x531e, 0x00}, {0x531f, 0x02}, {0x5320, 0x00},
+ {0x5321, 0x24}, {0x5322, 0x00}, {0x5323, 0x56}, {0x5324, 0x00},
+ {0x5325, 0x85}, {0x5326, 0x00}, {0x5327, 0x20}, {0xc2ea, 0x7a},
+ {0xc2eb, 0x90}, {0x5000, 0xff},
+};
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 236ad2c816b7..397fd79b2a2f 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1150,7 +1150,7 @@ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov13858 *ov13858 = to_ov13858(sd);
struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
- fh->pad,
+ fh->state,
0);
mutex_lock(&ov13858->mutex);
@@ -1275,7 +1275,7 @@ static const struct v4l2_ctrl_ops ov13858_ctrl_ops = {
};
static int ov13858_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order(GRBG) is supported */
@@ -1288,7 +1288,7 @@ static int ov13858_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov13858_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1315,14 +1315,14 @@ static void ov13858_update_pad_format(const struct ov13858_mode *mode,
}
static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
struct v4l2_subdev *sd = &ov13858->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov13858_update_pad_format(ov13858->cur_mode, fmt);
@@ -1332,14 +1332,14 @@ static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
}
static int ov13858_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov13858 *ov13858 = to_ov13858(sd);
int ret;
mutex_lock(&ov13858->mutex);
- ret = ov13858_do_get_pad_format(ov13858, cfg, fmt);
+ ret = ov13858_do_get_pad_format(ov13858, sd_state, fmt);
mutex_unlock(&ov13858->mutex);
return ret;
@@ -1347,7 +1347,7 @@ static int ov13858_get_pad_format(struct v4l2_subdev *sd,
static int
ov13858_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov13858 *ov13858 = to_ov13858(sd);
@@ -1371,7 +1371,7 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov13858_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ov13858->cur_mode = mode;
diff --git a/drivers/media/i2c/ov2312.c b/drivers/media/i2c/ov2312.c
new file mode 100644
index 000000000000..b7ff859361b4
--- /dev/null
+++ b/drivers/media/i2c/ov2312.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Omnivision OV2312 RGB-IR Image Sensor driver
+ *
+ * Copyright (c) 2022 Jai Luthra <j-luthra@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+#include "ov2312.h"
+
+struct ov2312 {
+ struct device *dev;
+
+ struct clk *clk;
+ unsigned long clk_rate;
+
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *again;
+ struct v4l2_ctrl *dgain;
+ struct v4l2_ctrl *h_flip;
+ struct v4l2_ctrl *v_flip;
+
+ u32 fps;
+
+ struct mutex lock;
+ bool streaming;
+};
+
+static inline struct ov2312 *to_ov2312(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov2312, sd);
+}
+
+static int ov2312_read(struct ov2312 *ov2312, u16 addr, u32 *val, size_t nbytes)
+{
+ int ret;
+ __le32 val_le = 0;
+
+ ret = regmap_bulk_read(ov2312->regmap, addr, &val_le, nbytes);
+ if (ret < 0) {
+ dev_err(ov2312->dev, "%s: failed to read reg 0x%04x: %d\n",
+ __func__, addr, ret);
+ return ret;
+ }
+
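+	/*
+	 * Note: the bulk read returns the register bytes in bus order (MSB
+	 * first), so multi-byte values still need a byte swap in the caller
+	 * (see ov2312_detect()).
+	 */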
+ *val = le32_to_cpu(val_le);
+ return 0;
+}
+
+static int ov2312_write(struct ov2312 *ov2312, u16 addr, u32 val, size_t nbytes)
+{
+ int ret;
+ __le32 val_le = cpu_to_le32(val);
+
+ ret = regmap_bulk_write(ov2312->regmap, addr, &val_le, nbytes);
+ if (ret < 0)
+ dev_err(ov2312->dev, "%s: failed to write reg 0x%04x: %d\n",
+ __func__, addr, ret);
+ return ret;
+}
+
+static int ov2312_write_table(struct ov2312 *ov2312,
+ const struct reg_sequence *regs,
+ unsigned int nr_regs)
+{
+ int ret, i;
+
+ for (i = 0; i < nr_regs; i++) {
+ ret = regmap_write(ov2312->regmap, regs[i].reg, regs[i].def);
+ if (ret < 0) {
+ dev_err(ov2312->dev,
+ "%s: failed to write reg[%d] 0x%04x = 0x%02x (%d)!\n",
+ __func__, i, regs[i].reg, regs[i].def, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void ov2312_init_formats(struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *format;
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ format = v4l2_state_get_stream_format(state, 0, i);
+ format->code = ov2312_mbus_formats[0];
+ format->width = ov2312_framesizes[0].width;
+ format->height = ov2312_framesizes[0].height;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_DEFAULT;
+ }
+}
+
+static int ov2312_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov2312 *ov2312 = to_ov2312(sd);
+ struct v4l2_mbus_framefmt *format;
+ const struct v4l2_area *fsize;
+ u32 code;
+ int ret = 0;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ if (fmt->stream != 0)
+ return -EINVAL;
+
+ /* Sensor only supports a single format. */
+ code = ov2312_mbus_formats[0];
+
+ /* Find the nearest supported frame size. */
+ fsize = v4l2_find_nearest_size(ov2312_framesizes,
+ ARRAY_SIZE(ov2312_framesizes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ v4l2_subdev_lock_state(state);
+
+ /* Update the stored format and return it. */
+ format = v4l2_state_get_stream_format(state, fmt->pad, fmt->stream);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE && ov2312->streaming) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ format->code = code;
+ format->width = fsize->width;
+ format->height = fsize->height;
+
+ fmt->format = *format;
+
+done:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
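+/*
+ * The OV2312 interleaves IR (VC0) and RGB (VC1) frames, exposed here as two
+ * immutable, always-active streams on the single source pad.
+ */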
+static int _ov2312_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .source_pad = 0,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_IMMUTABLE |
+ V4L2_SUBDEV_ROUTE_FL_SOURCE |
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ {
+ .source_pad = 0,
+ .source_stream = 1,
+ .flags = V4L2_SUBDEV_ROUTE_FL_IMMUTABLE |
+ V4L2_SUBDEV_ROUTE_FL_SOURCE |
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ int ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, &routing);
+ if (ret < 0)
+ return ret;
+
+ ov2312_init_formats(state);
+
+ return 0;
+}
+
+static int ov2312_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+ const struct v4l2_subdev_krouting *routing;
+ u32 bpp;
+ int ret = 0;
+ unsigned int i;
+
+ if (pad != 0)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_active_state(sd);
+ routing = &state->routing;
+
+ fmt = v4l2_state_get_stream_format(state, 0, 0);
+ if (!fmt) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ /* pixel stream - 2 virtual channels */
+
+ bpp = 10;
+
+ for (i = 0; i < 2; ++i) {
+ fd->entry[fd->num_entries].stream = i;
+
+ fd->entry[fd->num_entries].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
+ fd->entry[fd->num_entries].length = fmt->width * fmt->height * bpp / 8;
+ fd->entry[fd->num_entries].pixelcode = fmt->code;
+ fd->entry[fd->num_entries].bus.csi2.vc = i;
+ fd->entry[fd->num_entries].bus.csi2.dt = 0x2b; /* RAW10 */
+
+ fd->num_entries++;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ov2312_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ int ret;
+
+ if (routing->num_routes == 0 || routing->num_routes > 2)
+ return -EINVAL;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = _ov2312_set_routing(sd, state);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ov2312_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ int ret;
+
+ v4l2_subdev_lock_state(state);
+
+ ret = _ov2312_set_routing(sd, state);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ov2312_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(ov2312_mbus_formats))
+ return -EINVAL;
+
+ code->code = ov2312_mbus_formats[code->index];
+
+ return 0;
+}
+
+static int ov2312_enum_frame_sizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov2312_mbus_formats); ++i) {
+ if (ov2312_mbus_formats[i] == fse->code)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ov2312_mbus_formats))
+ return -EINVAL;
+
+ if (fse->index >= ARRAY_SIZE(ov2312_framesizes))
+ return -EINVAL;
+
+ fse->min_width = ov2312_framesizes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->max_height = ov2312_framesizes[fse->index].height;
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int ov2312_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov2312 *ov2312 = to_ov2312(sd);
+
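+	/*
+	 * The sensor alternates IR and RGB frames across the two virtual
+	 * channels, so each stream runs at half the sensor frame rate.
+	 */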
+ fi->interval.numerator = 1;
+	fi->interval.denominator = ov2312->fps / 2;
+
+ return 0;
+}
+
+static int ov2312_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov2312 *ov2312 = to_ov2312(sd);
+
+	dev_dbg(ov2312->dev, "%s: Set framerate %dfps\n", __func__,
+		fi->interval.denominator / fi->interval.numerator);
+	if (fi->interval.denominator / fi->interval.numerator != ov2312->fps) {
+		dev_err(ov2312->dev, "%s: Framerate can only be %dfps\n",
+			__func__, ov2312->fps);
+		return -EINVAL;
+	}
+ return 0;
+}
+
+static int ov2312_detect(struct ov2312 *ov2312)
+{
+ int ret;
+ u32 id;
+
+ ret = ov2312_read(ov2312, OV2312_SC_CHIP_ID_HI, &id, 2);
+ if (ret < 0)
+ return ret;
+
+	id = be16_to_cpu(id);
+
+ if (id != OV2312_CHIP_ID) {
+ dev_err(ov2312->dev,
+ "%s: unknown chip ID 0x%04x\n", __func__, id);
+ return -ENODEV;
+ }
+
+ dev_dbg(ov2312->dev, "%s: detected chip ID 0x%04x\n", __func__, id);
+ return 0;
+}
+
+static int ov2312_set_AB_mode(struct ov2312 *ov2312)
+{
+ int i, ret;
+ u32 exposure = ov2312->exposure->val;
+ u32 again = ov2312->again->val;
+ u32 dgain = ov2312->dgain->val;
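+	/*
+	 * OmniVision group-write sequence: registers written between the
+	 * group start (0x3208 = 0x00) and group end (0x3208 = 0x10) markers
+	 * are held and latched together once the group launch (0x3208 = 0xA0)
+	 * is issued.
+	 */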
+ struct reg_sequence ov2312_groupB[] = {
+ {0x3208, 0x00},/* Group B (RGB Dominant VC1) */
+ {OV2312_AEC_PK_EXPO_HI, (exposure >> 8) & 0xff},
+ {OV2312_AEC_PK_EXPO_LO, exposure & 0xff},
+ {OV2312_AEC_PK_AGAIN_HI, (again >> 4) & 0xff},
+ {OV2312_AEC_PK_AGAIN_LO, (again & 0x0f) << 4},
+ {OV2312_AEC_PK_DGAIN_HI, (dgain >> 8) & 0xff},
+ {OV2312_AEC_PK_DGAIN_LO, dgain & 0xff},
+ {0x3920, 0x00},
+ {0x4813, 0x00},/* VC=0. This register takes effect from next frame */
+ {0x3208, 0x10},
+		/* Auto-switch between group 0 and group 1 every frame */
+		{0x320D, 0x00},
+		{0x320D, 0x31},
+ {0x3208, 0xA0},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ov2312_groupB); i++) {
+ ret = regmap_write(ov2312->regmap, ov2312_groupB[i].reg, ov2312_groupB[i].def);
+ if (ret < 0) {
+ dev_err(ov2312->dev,
+ "%s: failed to write reg[%d] 0x%04x = 0x%02x (%d)!\n",
+ __func__, i, ov2312_groupB[i].reg, ov2312_groupB[i].def, ret);
+ return ret;
+ }
+ }
+
+ msleep(33);
+ return 0;
+}
+
+static int ov2312_set_orientation(struct ov2312 *ov2312)
+{
+ bool v_flip = ov2312->v_flip->val;
+ bool h_flip = ov2312->h_flip->val;
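+	/*
+	 * The 16-bit value is written big-endian starting at 0x3820: byte 0
+	 * carries the vflip bits (0x3820), byte 1 the hflip bit (0x3821).
+	 */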
+ u32 reg = (v_flip ? 0x4400 : 0) | (h_flip ? 0x0004 : 0);
+
+ return ov2312_write(ov2312, OV2312_TIMING_VFLIP, be16_to_cpu(reg), 2);
+}
+
+static int ov2312_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2312 *ov2312 = container_of(ctrl->handler,
+ struct ov2312, ctrls);
+ int ret;
+
+ dev_dbg(ov2312->dev, "%s: %s, value: %d\n", __func__,
+ ctrl->name, ctrl->val);
+
+ /*
+ * If the device is not powered up by the host driver do
+ * not apply any controls to H/W at this time. Instead
+ * the controls will be restored right after power-up.
+ */
+ if (pm_runtime_suspended(ov2312->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_ANALOGUE_GAIN:
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov2312_set_AB_mode(ov2312);
+ break;
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ ret = ov2312_set_orientation(ov2312);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ov2312_power_on(struct ov2312 *ov2312)
+{
+	int ret;
+
+	ret = clk_prepare_enable(ov2312->clk);
+ if (ret < 0)
+ return ret;
+
+ if (ov2312->reset_gpio) {
+ gpiod_set_value_cansleep(ov2312->reset_gpio, 0);
+ msleep(10);
+ gpiod_set_value_cansleep(ov2312->reset_gpio, 1);
+ msleep(30);
+ }
+ return 0;
+}
+
+static int ov2312_power_off(struct ov2312 *ov2312)
+{
+ if (ov2312->reset_gpio) {
+ gpiod_set_value_cansleep(ov2312->reset_gpio, 0);
+ msleep(10);
+ }
+
+ clk_disable_unprepare(ov2312->clk);
+
+ return 0;
+}
+
+static int ov2312_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2312 *ov2312 = to_ov2312(sd);
+
+ return ov2312_power_on(ov2312);
+}
+
+static int ov2312_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2312 *ov2312 = to_ov2312(sd);
+
+ return ov2312_power_off(ov2312);
+}
+
+static int ov2312_start_stream(struct ov2312 *ov2312)
+{
+	int ret;
+
+	ret = ov2312_write_table(ov2312, ov2312_1600x1300_60fps_AB,
+ ARRAY_SIZE(ov2312_1600x1300_60fps_AB));
+ if (ret < 0)
+ return ret;
+
+ /* Update controls on wake up */
+ ret = ov2312_set_orientation(ov2312);
+ if (ret < 0)
+ return ret;
+
+ ret = ov2312_set_AB_mode(ov2312);
+ if (ret < 0)
+ return ret;
+
+ msleep(100);
+
+ /* Set active */
+ ret = ov2312_write(ov2312, OV2312_SYS_MODE_SEL, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ /* No communication is possible for a while after exiting standby */
+ msleep(20);
+ return 0;
+}
+
+static int ov2312_stop_stream(struct ov2312 *ov2312)
+{
+ int ret;
+
+ /* Set standby */
+ ret = ov2312_write(ov2312, OV2312_SYS_MODE_SEL, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ /* No communication is possible for a while after entering standby */
+ usleep_range(10000, 20000);
+ return 0;
+}
+
+static int ov2312_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2312 *ov2312 = to_ov2312(sd);
+ int ret;
+
+ mutex_lock(&ov2312->lock);
+ if (ov2312->streaming == enable) {
+ mutex_unlock(&ov2312->lock);
+ return 0;
+ }
+
+	if (enable) {
+		ret = pm_runtime_resume_and_get(ov2312->dev);
+		if (ret < 0)
+			goto err_unlock;
+
+ ret = ov2312_start_stream(ov2312);
+ if (ret < 0)
+ goto err_runtime_put;
+ } else {
+ ret = ov2312_stop_stream(ov2312);
+ if (ret < 0)
+ goto err_runtime_put;
+ pm_runtime_put(ov2312->dev);
+ }
+
+ ov2312->streaming = enable;
+ mutex_unlock(&ov2312->lock);
+ return 0;
+
+err_runtime_put:
+ pm_runtime_put(ov2312->dev);
+
+err_unlock:
+ mutex_unlock(&ov2312->lock);
+	dev_err(ov2312->dev,
+		"%s: failed to set up streaming: %d\n", __func__, ret);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops ov2312_subdev_video_ops = {
+ .s_stream = ov2312_set_stream,
+ .g_frame_interval = ov2312_get_frame_interval,
+ .s_frame_interval = ov2312_set_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops ov2312_subdev_pad_ops = {
+ .init_cfg = ov2312_init_cfg,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ov2312_set_fmt,
+ .enum_mbus_code = ov2312_enum_mbus_code,
+ .enum_frame_size = ov2312_enum_frame_sizes,
+ .set_routing = ov2312_set_routing,
+ .get_frame_desc = ov2312_get_frame_desc,
+};
+
+static const struct v4l2_subdev_ops ov2312_subdev_ops = {
+ .video = &ov2312_subdev_video_ops,
+ .pad = &ov2312_subdev_pad_ops,
+};
+
+static const struct v4l2_ctrl_ops ov2312_ctrl_ops = {
+ .s_ctrl = ov2312_set_ctrl,
+};
+
+static const struct dev_pm_ops ov2312_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov2312_suspend, ov2312_resume, NULL)
+};
+
+static int ov2312_probe(struct i2c_client *client)
+{
+ struct ov2312 *ov2312;
+ struct v4l2_subdev *sd;
+ struct v4l2_ctrl_handler *ctrl_hdr;
+ int ret;
+
+ /* Allocate internal struct */
+ ov2312 = devm_kzalloc(&client->dev, sizeof(*ov2312), GFP_KERNEL);
+ if (!ov2312)
+ return -ENOMEM;
+
+ ov2312->dev = &client->dev;
+ ov2312->client = client;
+
+ /* Initialize I2C Regmap */
+ ov2312->regmap = devm_regmap_init_i2c(client, &ov2312_regmap_config);
+ if (IS_ERR(ov2312->regmap))
+ return PTR_ERR(ov2312->regmap);
+
+ /* Initialize Shutdown GPIO */
+ ov2312->reset_gpio = devm_gpiod_get_optional(ov2312->dev,
+ "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2312->reset_gpio))
+ return PTR_ERR(ov2312->reset_gpio);
+
+ ov2312->clk = devm_clk_get(ov2312->dev, "xvclk");
+ if (IS_ERR(ov2312->clk))
+ return PTR_ERR(ov2312->clk);
+
+ ov2312->clk_rate = clk_get_rate(ov2312->clk);
+ dev_info(ov2312->dev, "xvclk rate: %lu Hz\n", ov2312->clk_rate);
+
+ if (ov2312->clk_rate < 6000000 || ov2312->clk_rate > 27000000)
+ return -EINVAL;
+
+ /* Power on */
+ ret = ov2312_power_on(ov2312);
+ if (ret < 0)
+ return ret;
+
+ /* Detect sensor */
+ ret = ov2312_detect(ov2312);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the subdev and its controls. */
+ sd = &ov2312->sd;
+ v4l2_i2c_subdev_init(sd, client, &ov2312_subdev_ops);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_MULTIPLEXED;
+
+ /* Initialize the media entity. */
+ ov2312->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ov2312->pad.stream_count = 2;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &ov2312->pad);
+ if (ret < 0) {
+ dev_err(ov2312->dev,
+ "%s: media entity init failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ov2312->fps = OV2312_FRAMERATE_DEFAULT;
+ mutex_init(&ov2312->lock);
+
+ /* Initialize controls */
+ ctrl_hdr = &ov2312->ctrls;
+ ret = v4l2_ctrl_handler_init(ctrl_hdr, 5);
+ if (ret < 0) {
+ dev_err(ov2312->dev,
+ "%s: ctrl handler init failed: %d\n", __func__, ret);
+ goto err_media_cleanup;
+ }
+
+ ov2312->ctrls.lock = &ov2312->lock;
+
+ /* Add new controls */
+ ov2312->exposure = v4l2_ctrl_new_std(ctrl_hdr, &ov2312_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1,
+ OV2312_EXPOSURE_MAX,
+ 1, OV2312_EXPOSURE_DEFAULT);
+
+ ov2312->again = v4l2_ctrl_new_std(ctrl_hdr, &ov2312_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, 0,
+ OV2312_AGAIN_MAX, 1,
+ OV2312_AGAIN_DEFAULT);
+
+ ov2312->dgain = v4l2_ctrl_new_std(ctrl_hdr, &ov2312_ctrl_ops,
+ V4L2_CID_DIGITAL_GAIN, 0,
+ OV2312_DGAIN_MAX, 1,
+ OV2312_DGAIN_DEFAULT);
+
+ ov2312->h_flip = v4l2_ctrl_new_std(ctrl_hdr, &ov2312_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ ov2312->v_flip = v4l2_ctrl_new_std(ctrl_hdr, &ov2312_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ ov2312->sd.ctrl_handler = ctrl_hdr;
+ if (ov2312->ctrls.error) {
+ ret = ov2312->ctrls.error;
+ dev_err(ov2312->dev,
+ "%s: failed to add the ctrls: %d\n", __func__, ret);
+ goto err_ctrl_free;
+ }
+
+	/* Runtime PM: the status must be set before runtime PM is enabled */
+	pm_runtime_set_suspended(ov2312->dev);
+	pm_runtime_enable(ov2312->dev);
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret < 0)
+ goto err_pm_disable;
+
+ /* Finally, register the subdev. */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0) {
+ dev_err(ov2312->dev,
+ "%s: v4l2 subdev register failed %d\n", __func__, ret);
+ goto err_subdev_cleanup;
+ }
+
+ dev_info(ov2312->dev, "ov2312 probed\n");
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov2312->sd);
+
+err_pm_disable:
+ pm_runtime_disable(ov2312->dev);
+
+err_ctrl_free:
+ v4l2_ctrl_handler_free(ctrl_hdr);
+ mutex_destroy(&ov2312->lock);
+
+err_media_cleanup:
+ media_entity_cleanup(&ov2312->sd.entity);
+
+ return ret;
+}
+
+static int ov2312_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2312 *ov2312 = to_ov2312(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&ov2312->ctrls);
+ v4l2_subdev_cleanup(&ov2312->sd);
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&ov2312->lock);
+
+ pm_runtime_disable(ov2312->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov2312_id[] = {
+ { "ov2312", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(i2c, ov2312_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ov2312_of_match[] = {
+ { .compatible = "ovti,ov2312", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov2312_of_match);
+#endif
+
+static struct i2c_driver ov2312_i2c_driver = {
+ .driver = {
+ .name = "ov2312",
+ .pm = &ov2312_pm_ops,
+ .of_match_table = of_match_ptr(ov2312_of_match),
+ },
+ .probe_new = ov2312_probe,
+ .remove = ov2312_remove,
+ .id_table = ov2312_id,
+};
+
+module_i2c_driver(ov2312_i2c_driver);
+
+MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
+MODULE_DESCRIPTION("OV2312 RGB-IR Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2312.h b/drivers/media/i2c/ov2312.h
new file mode 100644
index 000000000000..329bd8cfb979
--- /dev/null
+++ b/drivers/media/i2c/ov2312.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Omnivision OV2312 RGB-IR Image Sensor driver
+ *
+ * Copyright (c) 2022 Jai Luthra <j-luthra@ti.com>
+ */
+
+#include <linux/types.h>
+#include <linux/media-bus-format.h>
+
+#define OV2312_CHIP_ID 0x2311
+#define OV2312_FRAMERATE_DEFAULT 60
+
+#define OV2312_OUT_WIDTH 1600
+#define OV2312_OUT_HEIGHT 1300
+#define OV2312_VTS 0x0588
+
+#define OV2312_SYS_MODE_SEL 0x0100
+#define OV2312_SC_CHIP_ID_HI 0x300a
+#define OV2312_SC_CHIP_ID_LO 0x300b
+#define OV2312_AEC_PK_EXPO_HI 0x3501
+#define OV2312_AEC_PK_EXPO_LO 0x3502
+#define OV2312_AEC_PK_AGAIN_HI 0x3508
+#define OV2312_AEC_PK_AGAIN_LO 0x3509
+#define OV2312_AEC_PK_DGAIN_HI 0x350a
+#define OV2312_AEC_PK_DGAIN_LO 0x350b
+#define OV2312_TIMING_VFLIP 0x3820
+#define OV2312_TIMING_HFLIP 0x3821
+
+/* Exposure control */
+#define OV2312_EXPOSURE_MAX (OV2312_VTS - 12)
+#define OV2312_EXPOSURE_DEFAULT 0x057c
+#define OV2312_IR_EXPOSURE 0x0090
+#define OV2312_IR_STROBE OV2312_IR_EXPOSURE
+#define OV2312_IR_STROBE_START (OV2312_VTS - OV2312_IR_EXPOSURE - 7)
+
+/* Analog gain control */
+#define OV2312_AGAIN_MAX 0x1FF
+#define OV2312_AGAIN_DEFAULT 0x010
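+/* Gain code in 1/16 steps (0x010 = 1.0x); see the packing in ov2312_set_AB_mode() */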
+
+/* Digital gain control */
+#define OV2312_DGAIN_MAX 0x0FFF
+#define OV2312_DGAIN_DEFAULT 0x0100
+
+static const struct v4l2_area ov2312_framesizes[] = {
+ {
+ .width = OV2312_OUT_WIDTH,
+ .height = OV2312_OUT_HEIGHT,
+ },
+};
+
+static const u32 ov2312_mbus_formats[] = {
+ MEDIA_BUS_FMT_SBGGI10_1X10,
+};
+
+static const struct regmap_config ov2312_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
+static const struct reg_sequence ov2312_1600x1300_60fps_AB[] = {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x010c, 0x02},
+ {0x010b, 0x01},
+ {0x0300, 0x01},
+ {0x0302, 0x32},
+ {0x0303, 0x00},
+ {0x0304, 0x03},
+ {0x0305, 0x02},
+ {0x0306, 0x01},
+ {0x030d, 0x5a},
+ {0x030e, 0x04},
+ {0x3001, 0x02},
+ {0x3004, 0x00},
+ {0x3005, 0x00},
+ {0x3006, 0x0a},
+ {0x3011, 0x0d},
+ {0x3014, 0x04},
+ {0x301c, 0xf0},
+ {0x3020, 0x20},
+ {0x302c, 0x00},
+ {0x302d, 0x00},
+ {0x302e, 0x00},
+ {0x302f, 0x03},
+ {0x3030, 0x10},
+ {0x303f, 0x03},
+ {0x3103, 0x00},
+ {0x3106, 0x08},
+ {0x31ff, 0x01},
+ {0x3501, 0x05},
+ {0x3502, 0x7c},
+ {0x3506, 0x00},
+ {0x3507, 0x00},
+ {0x3620, 0x67},
+ {0x3633, 0x78},
+ {0x3662, 0x65},
+ {0x3664, 0xb0},
+ {0x3666, 0x70},
+ {0x3670, 0x68},
+ {0x3674, 0x10},
+ {0x3675, 0x00},
+ {0x367e, 0x90},
+ {0x3680, 0x84},
+ {0x36a2, 0x04},
+ {0x36a3, 0x80},
+ {0x36b0, 0x00},
+ {0x3700, 0x35},
+ {0x3704, 0x39},
+ {0x370a, 0x50},
+ {0x3712, 0x00},
+ {0x3713, 0x02},
+ {0x3778, 0x00},
+ {0x379b, 0x01},
+ {0x379c, 0x10},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x06},
+ {0x3805, 0x4f},
+ {0x3806, 0x05},
+ {0x3807, 0x23},
+ {0x3808, 0x06},
+ {0x3809, 0x40},
+ {0x380a, 0x05},
+ {0x380b, 0x14},
+ {0x380c, 0x03},
+ {0x380d, 0xa8},
+ {0x380e, (OV2312_VTS >> 8) & 0xff},
+ {0x380f, OV2312_VTS & 0xff},
+ {0x3810, 0x00},
+ {0x3811, 0x08},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3816, 0x00},
+ {0x3817, 0x01},
+ {0x3818, 0x00},
+ {0x3819, 0x05},
+ {0x382b, 0x5a},
+ {0x382c, 0x0a},
+ {0x382d, 0xf8},
+ {0x3881, 0x44},
+ {0x3882, 0x02},
+ {0x3883, 0x8c},
+ {0x3885, 0x07},
+ {0x389d, 0x03},
+ {0x38a6, 0x00},
+ {0x38a7, 0x01},
+ {0x38b3, 0x07},
+ {0x38b1, 0x00},
+ {0x38e5, 0x02},
+ {0x38e7, 0x00},
+ {0x38e8, 0x00},
+ {0x3910, 0xff},
+ {0x3911, 0xff},
+ {0x3912, 0x08},
+ {0x3913, 0x00},
+ {0x3914, 0x00},
+ {0x3915, 0x00},
+ {0x391c, 0x00},
+ {0x3920, 0xff},
+ {0x3921, 0x80},
+ {0x3922, 0x00},
+ {0x3923, 0x00},
+ {0x3924, 0x05},
+ {0x3925, 0x00},
+ {0x3926, 0x00},
+ {0x3927, 0x00},
+ {0x3928, 0x1a},
+ {0x392d, 0x03},
+ {0x392e, 0xa8},
+ {0x392f, 0x08},
+ {0x4001, 0x00},
+ {0x4003, 0x40},
+ {0x4008, 0x04},
+ {0x4009, 0x1b},
+ {0x400c, 0x04},
+ {0x400d, 0x1b},
+ {0x4010, 0xf4},
+ {0x4011, 0x00},
+ {0x4016, 0x00},
+ {0x4017, 0x04},
+ {0x4042, 0x11},
+ {0x4043, 0x70},
+ {0x4045, 0x00},
+ {0x4409, 0x5f},
+ {0x4509, 0x00},
+ {0x450b, 0x00},
+ {0x4600, 0x00},
+ {0x4601, 0x80},
+ {0x4708, 0x09},
+ {0x470c, 0x81},
+ {0x4710, 0x06},
+ {0x4711, 0x00},
+ {0x4800, 0x00},
+ {0x481f, 0x30},
+ {0x4837, 0x14},
+ {0x4f00, 0x00},
+ {0x4f07, 0x00},
+ {0x4f08, 0x03},
+ {0x4f09, 0x08},
+ {0x4f0c, 0x05},
+ {0x4f0d, 0xb4},
+ {0x4f10, 0x00},
+ {0x4f11, 0x00},
+ {0x4f12, 0x07},
+ {0x4f13, 0xe2},
+ {0x5000, 0x9f},
+ {0x5001, 0x20},
+ {0x5026, 0x00},
+ {0x5c00, 0x00},
+ {0x5c01, 0x2c},
+ {0x5c02, 0x00},
+ {0x5c03, 0x7f},
+ {0x5e00, 0x00},
+ {0x5e01, 0x41},
+ {0x38b1, 0x02},
+ {0x0100, 0x01},
+ {0x3006, 0x08},/* Strobe control */
+ {0x3004, 0x02},
+ {0x3007, 0x02},
+ {0x301c, 0x20},
+ {0x3020, 0x20},
+ {0x3025, 0x02},
+ {0x382c, 0x0a},
+ {0x382d, 0xf8},
+ {0x3920, 0xff},
+ {0x3921, 0x00},
+ {0x3923, 0x00},
+ {0x3924, 0x00},
+ {0x3925, 0x00},
+ {0x3926, 0x00},
+ {0x3927, 0x00},
+ {0x3928, 0x80},
+ {0x392b, 0x00},
+ {0x392c, 0x00},
+ {0x392d, 0x03},
+ {0x392e, 0xa8},
+ {0x392f, 0x0b},
+ {0x38b3, 0x07},
+ {0x3885, 0x07},
+ {0x382b, 0x3a},
+ {0x3670, 0x68},
+ {0x301C, 0xF0},/* AB mode - Group auto switch example setting */
+ {0x3209, 0x01},/* Stay in Group A for 1 Frame */
+ {0x320A, 0x01},/* Stay in Group B for 1 Frame */
+ {0x320B, 0x00},
+ {0x320C, 0x00},
+ {0x3208, 0x01},/* Group A (IR Dominant VC0) */
+ {OV2312_AEC_PK_EXPO_HI, (OV2312_IR_EXPOSURE >> 8) & 0xff},
+ {OV2312_AEC_PK_EXPO_LO, OV2312_IR_EXPOSURE & 0xff},
+ {OV2312_AEC_PK_AGAIN_HI, 0x01},
+ {OV2312_AEC_PK_AGAIN_LO, 0x00},
+ {OV2312_AEC_PK_DGAIN_HI, 0x01},
+ {OV2312_AEC_PK_DGAIN_LO, 0x00},
+ {0x3920, 0xff},/* IR Strobe duty cycle */
+ {0x3927, (OV2312_IR_STROBE >> 8) & 0xff},
+ {0x3928, OV2312_IR_STROBE & 0xff},
+ {0x3929, (OV2312_IR_STROBE_START >> 8) & 0xff},
+ {0x392a, OV2312_IR_STROBE_START & 0xff},
+ {0x4813, 0x01},/* VC=1. This register takes effect from next frame */
+ {0x3208, 0x11},
+};
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 4a4bd5b665a1..4b75da55b260 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -913,7 +913,7 @@ err:
}
static int ov2640_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -925,7 +925,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mf;
return 0;
#else
@@ -946,7 +946,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
}
static int ov2640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -996,7 +996,7 @@ static int ov2640_set_fmt(struct v4l2_subdev *sd,
/* select format */
priv->cfmt_code = mf->code;
} else {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
}
out:
mutex_unlock(&priv->lock);
@@ -1005,11 +1005,11 @@ out:
}
static int ov2640_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ v4l2_subdev_get_try_format(sd, sd_state, 0);
const struct ov2640_win_size *win =
ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
@@ -1026,7 +1026,7 @@ static int ov2640_init_cfg(struct v4l2_subdev *sd,
}
static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov2640_codes))
@@ -1037,7 +1037,7 @@ static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2640_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index fb78a1cedc03..486a341c0bc6 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -980,7 +980,7 @@ static int ov2659_init(struct v4l2_subdev *sd, u32 val)
*/
static int ov2659_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -996,7 +996,7 @@ static int ov2659_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1022,7 +1022,7 @@ static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd,
}
static int ov2659_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1034,7 +1034,7 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
mutex_lock(&ov2659->lock);
fmt->format = *mf;
mutex_unlock(&ov2659->lock);
@@ -1084,7 +1084,7 @@ static void __ov2659_try_frame_size(struct v4l2_mbus_framefmt *mf,
}
static int ov2659_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1114,7 +1114,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
#endif
} else {
@@ -1313,7 +1313,7 @@ static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
dev_dbg(&client->dev, "%s:\n", __func__);
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 59cdbc33658c..0162ff328902 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -645,7 +645,7 @@ unlock:
}
static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -659,7 +659,7 @@ static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2680_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -673,7 +673,8 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
+ format->pad);
#else
ret = -EINVAL;
#endif
@@ -690,7 +691,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -721,7 +722,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- try_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *try_fmt;
#endif
goto unlock;
@@ -743,22 +744,22 @@ unlock:
}
static int ov2680_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.width = 800,
.height = 600,
}
};
- return ov2680_set_fmt(sd, cfg, &fmt);
+ return ov2680_set_fmt(sd, sd_state, &fmt);
}
static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
@@ -775,7 +776,7 @@ static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
}
static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct v4l2_fract tpf;
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 6814583d9606..ab9953067238 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -328,7 +328,7 @@ static void ov2685_fill_fmt(const struct ov2685_mode *mode,
}
static int ov2685_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -341,7 +341,7 @@ static int ov2685_set_fmt(struct v4l2_subdev *sd,
}
static int ov2685_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -353,7 +353,7 @@ static int ov2685_get_fmt(struct v4l2_subdev *sd,
}
static int ov2685_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(supported_modes))
@@ -365,7 +365,7 @@ static int ov2685_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2685_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
@@ -494,7 +494,7 @@ static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov2685->mutex);
- try_fmt = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0);
/* Initialize try_fmt */
ov2685_fill_fmt(&supported_modes[0], try_fmt);
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index bd0d45b0d43f..22cb4d08e038 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -710,7 +710,7 @@ exit:
}
static int ov2740_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2740 *ov2740 = to_ov2740(sd);
@@ -725,7 +725,7 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov2740->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
@@ -750,14 +750,15 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
}
static int ov2740_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov2740 *ov2740 = to_ov2740(sd);
mutex_lock(&ov2740->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd,
+ sd_state,
fmt->pad);
else
ov2740_update_pad_format(ov2740->cur_mode, &fmt->format);
@@ -768,7 +769,7 @@ static int ov2740_get_format(struct v4l2_subdev *sd,
}
static int ov2740_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -780,7 +781,7 @@ static int ov2740_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -803,7 +804,7 @@ static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov2740->mutex);
ov2740_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov2740->mutex);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 8f0812e85901..ae1118f0f0f3 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -29,13 +30,27 @@
#define OV5640_XCLK_MIN 6000000
#define OV5640_XCLK_MAX 54000000
+#define OV5640_NATIVE_WIDTH 2624
+#define OV5640_NATIVE_HEIGHT 1964
+#define OV5640_PIXEL_ARRAY_TOP 14
+#define OV5640_PIXEL_ARRAY_LEFT 16
+#define OV5640_PIXEL_ARRAY_WIDTH 2592
+#define OV5640_PIXEL_ARRAY_HEIGHT 1944
+
+/* FIXME: not documented. */
+#define OV5640_MIN_VBLANK 24
+#define OV5640_MAX_VTS 3375
+
#define OV5640_DEFAULT_SLAVE_ID 0x3c
+#define OV5640_LINK_RATE_MAX 490000000U
+
#define OV5640_REG_SYS_RESET02 0x3002
#define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
#define OV5640_REG_SYS_CTRL0 0x3008
#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
+#define OV5640_REG_SYS_CTRL0_SW_RST 0x82
#define OV5640_REG_CHIP_ID 0x300a
#define OV5640_REG_IO_MIPI_CTRL00 0x300e
#define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
@@ -59,10 +74,16 @@
#define OV5640_REG_AEC_PK_MANUAL 0x3503
#define OV5640_REG_AEC_PK_REAL_GAIN 0x350a
#define OV5640_REG_AEC_PK_VTS 0x350c
+#define OV5640_REG_TIMING_HS 0x3800
+#define OV5640_REG_TIMING_VS 0x3802
+#define OV5640_REG_TIMING_HW 0x3804
+#define OV5640_REG_TIMING_VH 0x3806
#define OV5640_REG_TIMING_DVPHO 0x3808
#define OV5640_REG_TIMING_DVPVO 0x380a
#define OV5640_REG_TIMING_HTS 0x380c
#define OV5640_REG_TIMING_VTS 0x380e
+#define OV5640_REG_TIMING_HOFFS 0x3810
+#define OV5640_REG_TIMING_VOFFS 0x3812
#define OV5640_REG_TIMING_TC_REG20 0x3820
#define OV5640_REG_TIMING_TC_REG21 0x3821
#define OV5640_REG_AEC_CTRL00 0x3a00
@@ -98,7 +119,8 @@
#define OV5640_REG_AVG_READOUT 0x56a1
enum ov5640_mode_id {
- OV5640_MODE_QCIF_176_144 = 0,
+ OV5640_MODE_QQVGA_160_120 = 0,
+ OV5640_MODE_QCIF_176_144,
OV5640_MODE_QVGA_320_240,
OV5640_MODE_VGA_640_480,
OV5640_MODE_NTSC_720_480,
@@ -117,6 +139,47 @@ enum ov5640_frame_rate {
OV5640_NUM_FRAMERATES,
};
+enum ov5640_pixel_rate_id {
+ OV5640_PIXEL_RATE_168M,
+ OV5640_PIXEL_RATE_148M,
+ OV5640_PIXEL_RATE_124M,
+ OV5640_PIXEL_RATE_96M,
+ OV5640_PIXEL_RATE_48M,
+ OV5640_NUM_PIXEL_RATES,
+};
+
+/*
+ * The chip manual suggests 24/48/96/192 MHz pixel clocks.
+ *
+ * 192MHz exceeds the sysclk limits; use 168MHz as maximum pixel rate for
+ * full resolution mode @15 FPS.
+ */
+static const u32 ov5640_pixel_rates[] = {
+ [OV5640_PIXEL_RATE_168M] = 168000000,
+ [OV5640_PIXEL_RATE_148M] = 148000000,
+ [OV5640_PIXEL_RATE_124M] = 124000000,
+ [OV5640_PIXEL_RATE_96M] = 96000000,
+ [OV5640_PIXEL_RATE_48M] = 48000000,
+};
+
+/*
+ * MIPI CSI-2 link frequencies.
+ *
+ * Derived from the pixel rates defined above for bpp = (8, 16, 24) and
+ * data_lanes = (1, 2):
+ *
+ * link_freq = (pixel_rate * bpp) / (2 * data_lanes)
+ */
+static const s64 ov5640_csi2_link_freqs[] = {
+ 992000000, 888000000, 768000000, 744000000, 672000000, 672000000,
+ 592000000, 592000000, 576000000, 576000000, 496000000, 496000000,
+ 384000000, 384000000, 384000000, 336000000, 296000000, 288000000,
+ 248000000, 192000000, 192000000, 192000000, 96000000,
+};
+
+/* Link freq for default mode: UYVY 16 bpp, 2 data lanes. */
+#define OV5640_DEFAULT_LINK_FREQ 13
+
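/*
 * Illustrative sketch (hypothetical helper, not part of the patch) of the
 * link frequency formula above; assumes <linux/math64.h> for div_u64().
 * E.g. a 96 MHz pixel rate at 16 bpp over 2 data lanes gives
 * (96000000 * 16) / (2 * 2) = 384000000, which is the value indexed by
 * OV5640_DEFAULT_LINK_FREQ (entry 13) in ov5640_csi2_link_freqs[].
 */
static inline s64 ov5640_link_freq_sketch(u32 pixel_rate, u32 bpp,
 u32 num_data_lanes)
{
 return div_u64((u64)pixel_rate * bpp, 2 * num_data_lanes);
}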
enum ov5640_format_mux {
OV5640_FMT_MUX_YUV422 = 0,
OV5640_FMT_MUX_RGB,
@@ -129,18 +192,89 @@ enum ov5640_format_mux {
struct ov5640_pixfmt {
u32 code;
u32 colorspace;
+ u8 bpp;
};
-static const struct ov5640_pixfmt ov5640_formats[] = {
- { MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
- { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_COLORSPACE_SRGB, },
- { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_COLORSPACE_SRGB, },
+static const struct ov5640_pixfmt ov5640_dvp_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ },
+ { /* sentinel */ }
+};
+
+static const struct ov5640_pixfmt ov5640_csi2_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bpp = 8,
+ },
+ { /* sentinel */ }
};
/*
@@ -183,21 +317,42 @@ struct reg_value {
u32 delay_ms;
};
+struct ov5640_timings {
+ /* Analog crop rectangle. */
+ struct v4l2_rect analog_crop;
+ /* Visible crop: from analog crop top-left corner. */
+ struct v4l2_rect crop;
+ /* Total pixels per line: width + fixed hblank. */
+ u32 htot;
+ /* Default vertical blanking: frame height = height + vblank. */
+ u32 vblank_def;
+};
+
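/*
 * Illustrative sketch (hypothetical helper, not part of the patch): the
 * timings determine the pixel rate needed for a given frame rate, since
 * the total frame size is htot * (height + vblank). This mirrors
 * ov5640_calc_pixel_rate() further down; e.g. the VGA DVP timings give
 * 1896 * (480 + 600) * 30 = 61430400 pixels per second at 30 fps.
 */
static u64 ov5640_timings_rate_sketch(const struct ov5640_timings *t, u32 fps)
{
 return (u64)t->htot * (t->crop.height + t->vblank_def) * fps;
}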
struct ov5640_mode_info {
enum ov5640_mode_id id;
enum ov5640_downsize_mode dn_mode;
- u32 hact;
- u32 htot;
- u32 vact;
- u32 vtot;
+ enum ov5640_pixel_rate_id pixel_rate;
+
+ unsigned int width;
+ unsigned int height;
+
+ struct ov5640_timings dvp_timings;
+ struct ov5640_timings csi2_timings;
+
const struct reg_value *reg_data;
u32 reg_data_size;
+
+ /* Used by s_frame_interval only. */
u32 max_fps;
+ u32 def_fps;
};
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -237,8 +392,6 @@ struct ov5640_dev {
/* lock to protect all members below */
struct mutex lock;
- int power_count;
-
struct v4l2_mbus_framefmt fmt;
bool pending_fmt_change;
@@ -246,6 +399,7 @@ struct ov5640_dev {
const struct ov5640_mode_info *last_mode;
enum ov5640_frame_rate current_fr;
struct v4l2_fract frame_interval;
+ s64 current_link_freq;
struct ov5640_ctrls ctrls;
@@ -267,6 +421,40 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
ctrls.handler)->sd;
}
+static inline bool ov5640_is_csi2(const struct ov5640_dev *sensor)
+{
+ return sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY;
+}
+
+static inline const struct ov5640_pixfmt *
+ov5640_formats(struct ov5640_dev *sensor)
+{
+ return ov5640_is_csi2(sensor) ? ov5640_csi2_formats
+ : ov5640_dvp_formats;
+}
+
+static const struct ov5640_pixfmt *
+ov5640_code_to_pixfmt(struct ov5640_dev *sensor, u32 code)
+{
+ const struct ov5640_pixfmt *formats = ov5640_formats(sensor);
+ unsigned int i;
+
+ for (i = 0; formats[i].code; ++i) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return &formats[0];
+}
+
+static u32 ov5640_code_to_bpp(struct ov5640_dev *sensor, u32 code)
+{
+ const struct ov5640_pixfmt *format = ov5640_code_to_pixfmt(sensor,
+ code);
+
+ return format->bpp;
+}
+
/*
* FIXME: all of these register tables are likely filled with
* entries that set the register to their power-on default values,
@@ -275,8 +463,31 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
* over i2c.
*/
/* YUV422 UYVY VGA@30fps */
-static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
- {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
+
+static const struct v4l2_mbus_framefmt ov5640_csi2_default_fmt = {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .width = 640,
+ .height = 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .quantization = V4L2_QUANTIZATION_FULL_RANGE,
+ .xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .field = V4L2_FIELD_NONE,
+};
+
+static const struct v4l2_mbus_framefmt ov5640_dvp_default_fmt = {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .width = 640,
+ .height = 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .quantization = V4L2_QUANTIZATION_FULL_RANGE,
+ .xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .field = V4L2_FIELD_NONE,
+};
+
+static const struct reg_value ov5640_init_setting[] = {
+ {0x3103, 0x11, 0, 0},
{0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
@@ -291,11 +502,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -359,129 +566,25 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
};
-static const struct reg_value ov5640_setting_VGA_640_480[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_XGA_1024_768[] = {
+static const struct reg_value ov5640_setting_low_res[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_QVGA_320_240[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_QCIF_176_144[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_NTSC_720_480[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_PAL_720_576[] = {
- {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x5001, 0xa3, 0, 0},
};
static const struct reg_value ov5640_setting_720P_1280_720[] = {
{0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x31, 0, 0},
{0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
{0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
@@ -496,11 +599,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
- {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x11, 0, 0},
{0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
{0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -511,9 +610,6 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0},
{0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
- {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
- {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0},
{0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
{0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
{0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
@@ -527,11 +623,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
- {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3815, 0x11, 0, 0},
{0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
{0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
{0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
@@ -542,62 +634,462 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
};
-/* power-on sensor init reg table */
-static const struct ov5640_mode_info ov5640_mode_init_data = {
- 0, SUBSAMPLING, 640, 1896, 480, 984,
- ov5640_init_setting_30fps_VGA,
- ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
- OV5640_30_FPS,
+static const struct ov5640_mode_info ov5640_mode_data[OV5640_NUM_MODES] = {
+ {
+ /* 160x120 */
+ .id = OV5640_MODE_QQVGA_160_120,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 160,
+ .height = 120,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 160,
+ .height = 120,
+ },
+ .htot = 1896,
+ .vblank_def = 864,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 160,
+ .height = 120,
+ },
+ .htot = 1896,
+ .vblank_def = 864,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 176x144 */
+ .id = OV5640_MODE_QCIF_176_144,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 176,
+ .height = 144,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 176,
+ .height = 144,
+ },
+ .htot = 1896,
+ .vblank_def = 840,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 176,
+ .height = 144,
+ },
+ .htot = 1896,
+ .vblank_def = 840,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 320x240 */
+ .id = OV5640_MODE_QVGA_320_240,
+ .dn_mode = SUBSAMPLING,
+ .width = 320,
+ .height = 240,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 320,
+ .height = 240,
+ },
+ .htot = 1896,
+ .vblank_def = 744,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 320,
+ .height = 240,
+ },
+ .htot = 1896,
+ .vblank_def = 744,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 640x480 */
+ .id = OV5640_MODE_VGA_640_480,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_48M,
+ .width = 640,
+ .height = 480,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 640,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 600,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ /* Maintain a minimum processing margin. */
+ .crop = {
+ .left = 2,
+ .top = 4,
+ .width = 640,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 600,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_60_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 720x480 */
+ .id = OV5640_MODE_NTSC_720_480,
+ .dn_mode = SUBSAMPLING,
+ .width = 720,
+ .height = 480,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 56,
+ .top = 60,
+ .width = 720,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 504,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 56,
+ .top = 60,
+ .width = 720,
+ .height = 480,
+ },
+ .htot = 1896,
+ .vblank_def = 504,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 720x576 */
+ .id = OV5640_MODE_PAL_720_576,
+ .dn_mode = SUBSAMPLING,
+ .width = 720,
+ .height = 576,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 56,
+ .top = 6,
+ .width = 720,
+ .height = 576,
+ },
+ .htot = 1896,
+ .vblank_def = 408,
+ },
+ .csi2_timings = {
+ /* Feed the full valid pixel array to the ISP. */
+ .analog_crop = {
+ .left = OV5640_PIXEL_ARRAY_LEFT,
+ .top = OV5640_PIXEL_ARRAY_TOP,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 56,
+ .top = 6,
+ .width = 720,
+ .height = 576,
+ },
+ .htot = 1896,
+ .vblank_def = 408,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1024x768 */
+ .id = OV5640_MODE_XGA_1024_768,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_96M,
+ .width = 1024,
+ .height = 768,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = 2624,
+ .height = 1944,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 1024,
+ .height = 768,
+ },
+ .htot = 1896,
+ .vblank_def = 312,
+ },
+ .csi2_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 4,
+ .width = OV5640_NATIVE_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ },
+ .crop = {
+ .left = 16,
+ .top = 6,
+ .width = 1024,
+ .height = 768,
+ },
+ .htot = 1896,
+ .vblank_def = 312,
+ },
+ .reg_data = ov5640_setting_low_res,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_low_res),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1280x720 */
+ .id = OV5640_MODE_720P_1280_720,
+ .dn_mode = SUBSAMPLING,
+ .pixel_rate = OV5640_PIXEL_RATE_124M,
+ .width = 1280,
+ .height = 720,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 250,
+ .width = 2624,
+ .height = 1456,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1280,
+ .height = 720,
+ },
+ .htot = 1892,
+ .vblank_def = 20,
+ },
+ .csi2_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 250,
+ .width = 2624,
+ .height = 1456,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1280,
+ .height = 720,
+ },
+ .htot = 1892,
+ .vblank_def = 20,
+ },
+ .reg_data = ov5640_setting_720P_1280_720,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_720P_1280_720),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 1920x1080 */
+ .id = OV5640_MODE_1080P_1920_1080,
+ .dn_mode = SCALING,
+ .pixel_rate = OV5640_PIXEL_RATE_148M,
+ .width = 1920,
+ .height = 1080,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 336,
+ .top = 434,
+ .width = 1952,
+ .height = 1088,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1920,
+ .height = 1080,
+ },
+ .htot = 2500,
+ .vblank_def = 40,
+ },
+ .csi2_timings = {
+ /* Crop the full valid pixel array in the center. */
+ .analog_crop = {
+ .left = 336,
+ .top = 434,
+ .width = 1952,
+ .height = 1088,
+ },
+ /* Maintain larger processing margins. */
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 1920,
+ .height = 1080,
+ },
+ .htot = 2500,
+ .vblank_def = 40,
+ },
+ .reg_data = ov5640_setting_1080P_1920_1080,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
+ .max_fps = OV5640_30_FPS,
+ .def_fps = OV5640_30_FPS
+ }, {
+ /* 2592x1944 */
+ .id = OV5640_MODE_QSXGA_2592_1944,
+ .dn_mode = SCALING,
+ .pixel_rate = OV5640_PIXEL_RATE_168M,
+ .width = OV5640_PIXEL_ARRAY_WIDTH,
+ .height = OV5640_PIXEL_ARRAY_HEIGHT,
+ .dvp_timings = {
+ .analog_crop = {
+ .left = 0,
+ .top = 0,
+ .width = 2624,
+ .height = 1952,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 2592,
+ .height = 1944,
+ },
+ .htot = 2844,
+ .vblank_def = 24,
+ },
+ .csi2_timings = {
+ /* Give more processing margin to full resolution. */
+ .analog_crop = {
+ .left = 0,
+ .top = 0,
+ .width = OV5640_NATIVE_WIDTH,
+ .height = 1952,
+ },
+ .crop = {
+ .left = 16,
+ .top = 4,
+ .width = 2592,
+ .height = 1944,
+ },
+ .htot = 2844,
+ .vblank_def = 24,
+ },
+ .reg_data = ov5640_setting_QSXGA_2592_1944,
+ .reg_data_size = ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
+ .max_fps = OV5640_15_FPS,
+ .def_fps = OV5640_15_FPS
+ },
};
-static const struct ov5640_mode_info
-ov5640_mode_data[OV5640_NUM_MODES] = {
- {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
- 176, 1896, 144, 984,
- ov5640_setting_QCIF_176_144,
- ARRAY_SIZE(ov5640_setting_QCIF_176_144),
- OV5640_30_FPS},
- {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
- 320, 1896, 240, 984,
- ov5640_setting_QVGA_320_240,
- ARRAY_SIZE(ov5640_setting_QVGA_320_240),
- OV5640_30_FPS},
- {OV5640_MODE_VGA_640_480, SUBSAMPLING,
- 640, 1896, 480, 1080,
- ov5640_setting_VGA_640_480,
- ARRAY_SIZE(ov5640_setting_VGA_640_480),
- OV5640_60_FPS},
- {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
- 720, 1896, 480, 984,
- ov5640_setting_NTSC_720_480,
- ARRAY_SIZE(ov5640_setting_NTSC_720_480),
- OV5640_30_FPS},
- {OV5640_MODE_PAL_720_576, SUBSAMPLING,
- 720, 1896, 576, 984,
- ov5640_setting_PAL_720_576,
- ARRAY_SIZE(ov5640_setting_PAL_720_576),
- OV5640_30_FPS},
- {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
- 1024, 1896, 768, 1080,
- ov5640_setting_XGA_1024_768,
- ARRAY_SIZE(ov5640_setting_XGA_1024_768),
- OV5640_30_FPS},
- {OV5640_MODE_720P_1280_720, SUBSAMPLING,
- 1280, 1892, 720, 740,
- ov5640_setting_720P_1280_720,
- ARRAY_SIZE(ov5640_setting_720P_1280_720),
- OV5640_30_FPS},
- {OV5640_MODE_1080P_1920_1080, SCALING,
- 1920, 2500, 1080, 1120,
- ov5640_setting_1080P_1920_1080,
- ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
- OV5640_30_FPS},
- {OV5640_MODE_QSXGA_2592_1944, SCALING,
- 2592, 2844, 1944, 1968,
- ov5640_setting_QSXGA_2592_1944,
- ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
- OV5640_15_FPS},
-};
+static const struct ov5640_timings *
+ov5640_timings(const struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ if (ov5640_is_csi2(sensor))
+ return &mode->csi2_timings;
+
+ return &mode->dvp_timings;
+}
static int ov5640_init_slave_id(struct ov5640_dev *sensor)
{
@@ -1072,17 +1564,20 @@ static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->hact);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->width);
if (ret < 0)
return ret;
- return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
+ return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->height);
}
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
{
+ const struct ov5640_timings *timings;
+ const struct v4l2_rect *analog_crop;
+ const struct v4l2_rect *crop;
int ret;
if (sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8) {
@@ -1091,32 +1586,68 @@ static int ov5640_set_timings(struct ov5640_dev *sensor,
return ret;
}
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact);
+ timings = ov5640_timings(sensor, mode);
+ analog_crop = &timings->analog_crop;
+ crop = &timings->crop;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HS,
+ analog_crop->left);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VS,
+ analog_crop->top);
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HW,
+ analog_crop->left + analog_crop->width - 1);
if (ret < 0)
return ret;
- ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VH,
+ analog_crop->top + analog_crop->height - 1);
if (ret < 0)
return ret;
- return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot);
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HOFFS, crop->left);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VOFFS, crop->top);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->width);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->height);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, timings->htot);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS,
+ mode->height + timings->vblank_def);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
-static int ov5640_load_regs(struct ov5640_dev *sensor,
- const struct ov5640_mode_info *mode)
+static void ov5640_load_regs(struct ov5640_dev *sensor,
+ const struct reg_value *regs, unsigned int regnum)
{
- const struct reg_value *regs = mode->reg_data;
unsigned int i;
u32 delay_ms;
u16 reg_addr;
u8 mask, val;
int ret = 0;
- for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
+ for (i = 0; i < regnum; ++i, ++regs) {
delay_ms = regs->delay_ms;
reg_addr = regs->reg_addr;
val = regs->val;
@@ -1125,7 +1656,7 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
/* remain in power down mode for DVP */
if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
- sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
+ !ov5640_is_csi2(sensor))
continue;
if (mask)
@@ -1138,8 +1669,6 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
if (delay_ms)
usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
}
-
- return ov5640_set_timings(sensor, mode);
}
static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on)
@@ -1524,22 +2053,17 @@ static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
}
static const struct ov5640_mode_info *
-ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
- int width, int height, bool nearest)
+ov5640_find_mode(struct ov5640_dev *sensor, int width, int height, bool nearest)
{
const struct ov5640_mode_info *mode;
mode = v4l2_find_nearest_size(ov5640_mode_data,
ARRAY_SIZE(ov5640_mode_data),
- hact, vact,
- width, height);
+ width, height, width, height);
if (!mode ||
- (!nearest && (mode->hact != width || mode->vact != height)))
- return NULL;
-
- /* Check to see if the current mode exceeds the max frame rate */
- if (ov5640_framerates[fr] > ov5640_framerates[mode->max_fps])
+ (!nearest &&
+ (mode->width != width || mode->height != height)))
return NULL;
return mode;
@@ -1547,9 +2071,11 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings = &mode->dvp_timings;
u64 rate;
- rate = sensor->current_mode->vtot * sensor->current_mode->htot;
+ rate = timings->htot * (timings->crop.height + timings->vblank_def);
rate *= ov5640_framerates[sensor->current_fr];
return rate;
@@ -1602,7 +2128,8 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
return ret;
/* Write capture setting */
- ret = ov5640_load_regs(sensor, mode);
+ ov5640_load_regs(sensor, mode->reg_data, mode->reg_data_size);
+ ret = ov5640_set_timings(sensor, mode);
if (ret < 0)
return ret;
@@ -1726,7 +2253,8 @@ static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
return -EINVAL;
/* Write capture setting */
- return ov5640_load_regs(sensor, mode);
+ ov5640_load_regs(sensor, mode->reg_data, mode->reg_data_size);
+ return ov5640_set_timings(sensor, mode);
}
static int ov5640_set_mode(struct ov5640_dev *sensor)
@@ -1760,7 +2288,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
* the same rate as YUV, so we can just use 16 bpp all the time.
*/
rate = ov5640_calc_pixel_rate(sensor) * 16;
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ if (ov5640_is_csi2(sensor)) {
rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
ret = ov5640_set_mipi_pclk(sensor, rate);
} else {
@@ -1834,10 +2362,8 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor)
int ret;
/* first load the initial register values */
- ret = ov5640_load_regs(sensor, &ov5640_mode_init_data);
- if (ret < 0)
- return ret;
- sensor->last_mode = &ov5640_mode_init_data;
+ ov5640_load_regs(sensor, ov5640_init_setting,
+ ARRAY_SIZE(ov5640_init_setting));
ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f,
(ilog2(OV5640_SCLK2X_ROOT_DIV) << 2) |
@@ -1858,24 +2384,48 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
}
-static void ov5640_reset(struct ov5640_dev *sensor)
+/*
+ * From section 2.7 (power up sequence):
+ * t0 + t1 + t2 >= 5ms Delay from DOVDD stable to PWDN pull down
+ * t3 >= 1ms Delay from PWDN pull down to RESETB pull up
+ * t4 >= 20ms Delay from RESETB pull up to SCCB (i2c) stable
+ *
+ * Some modules don't expose RESETB/PWDN pins directly, instead providing a
+ * "PWUP" GPIO which is wired through appropriate delays and inverters to the
+ * pins.
+ *
+ * In such cases, this gpio should be mapped to pwdn_gpio in the driver, and we
+ * should still toggle the pwdn_gpio below with the appropriate delays, while
+ * the calls to reset_gpio will be ignored.
+ */
+static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
{
- if (!sensor->reset_gpio)
- return;
-
- gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ if (sensor->pwdn_gpio) {
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
- /* camera power cycle */
- ov5640_power(sensor, false);
- usleep_range(5000, 10000);
- ov5640_power(sensor, true);
- usleep_range(5000, 10000);
+ /* camera power cycle */
+ ov5640_power(sensor, false);
+ usleep_range(5000, 10000);
+ ov5640_power(sensor, true);
+ usleep_range(5000, 10000);
- gpiod_set_value_cansleep(sensor->reset_gpio, 1);
- usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ usleep_range(1000, 2000);
- gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ } else {
+ /* software reset */
+ ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
+ OV5640_REG_SYS_CTRL0_SW_RST);
+ }
usleep_range(20000, 25000);
+
+ /*
+ * software standby: allows register programming;
+ * exit at restore_mode() for CSI, s_stream(1) for DVP
+ */
+ ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
+ OV5640_REG_SYS_CTRL0_SW_PWDN);
}
static int ov5640_set_power_on(struct ov5640_dev *sensor)
@@ -1898,8 +2448,7 @@ static int ov5640_set_power_on(struct ov5640_dev *sensor)
goto xclk_off;
}
- ov5640_reset(sensor);
- ov5640_power(sensor, true);
+ ov5640_powerup_sequence(sensor);
ret = ov5640_init_slave_id(sensor);
if (ret)
@@ -2131,53 +2680,22 @@ power_off:
/* --------------- Subdev Operations --------------- */
-static int ov5640_s_power(struct v4l2_subdev *sd, int on)
-{
- struct ov5640_dev *sensor = to_ov5640_dev(sd);
- int ret = 0;
-
- mutex_lock(&sensor->lock);
-
- /*
- * If the power count is modified from 0 to != 0 or from != 0 to 0,
- * update the power state.
- */
- if (sensor->power_count == !on) {
- ret = ov5640_set_power(sensor, !!on);
- if (ret)
- goto out;
- }
-
- /* Update the power count. */
- sensor->power_count += on ? 1 : -1;
- WARN_ON(sensor->power_count < 0);
-out:
- mutex_unlock(&sensor->lock);
-
- if (on && !ret && sensor->power_count == 1) {
- /* restore controls */
- ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
- }
-
- return ret;
-}
-
static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
struct v4l2_fract *fi,
- u32 width, u32 height)
+ const struct ov5640_mode_info *mode_info)
{
- const struct ov5640_mode_info *mode;
+ const struct ov5640_mode_info *mode = mode_info;
enum ov5640_frame_rate rate = OV5640_15_FPS;
int minfps, maxfps, best_fps, fps;
int i;
minfps = ov5640_framerates[OV5640_15_FPS];
- maxfps = ov5640_framerates[OV5640_60_FPS];
+ maxfps = ov5640_framerates[mode->max_fps];
if (fi->numerator == 0) {
fi->denominator = maxfps;
fi->numerator = 1;
- rate = OV5640_60_FPS;
+ rate = mode->max_fps;
goto find_mode;
}
@@ -2198,12 +2716,12 @@ static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
fi->denominator = best_fps;
find_mode:
- mode = ov5640_find_mode(sensor, rate, width, height, false);
+ mode = ov5640_find_mode(sensor, mode->width, mode->height, false);
return mode ? rate : -EINVAL;
}
static int ov5640_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -2215,7 +2733,7 @@ static int ov5640_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg,
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
format->pad);
else
fmt = &sensor->fmt;
@@ -2234,25 +2752,34 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *mode;
- int i;
+ const struct ov5640_pixfmt *pixfmt;
+ unsigned int bpp;
- mode = ov5640_find_mode(sensor, fr, fmt->width, fmt->height, true);
+ mode = ov5640_find_mode(sensor, fmt->width, fmt->height, true);
if (!mode)
return -EINVAL;
- fmt->width = mode->hact;
- fmt->height = mode->vact;
+
+ pixfmt = ov5640_code_to_pixfmt(sensor, fmt->code);
+ bpp = pixfmt->bpp;
+
+ /*
+ * Adjust the mode according to bpp:
+ * - 8bpp modes work for resolutions >= 1280x720
+ * - 24bpp modes work for resolutions < 1280x720
+ */
+ if (bpp == 8 && mode->width < 1280)
+ mode = &ov5640_mode_data[OV5640_MODE_720P_1280_720];
+ else if (bpp == 24 && mode->width > 1024)
+ mode = &ov5640_mode_data[OV5640_MODE_XGA_1024_768];
+
+ fmt->width = mode->width;
+ fmt->height = mode->height;
if (new_mode)
*new_mode = mode;
- for (i = 0; i < ARRAY_SIZE(ov5640_formats); i++)
- if (ov5640_formats[i].code == fmt->code)
- break;
- if (i >= ARRAY_SIZE(ov5640_formats))
- i = 0;
-
- fmt->code = ov5640_formats[i].code;
- fmt->colorspace = ov5640_formats[i].colorspace;
+ fmt->code = pixfmt->code;
+ fmt->colorspace = pixfmt->colorspace;
fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
@@ -2260,14 +2787,114 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
return 0;
}
+static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+ const struct ov5640_timings *timings;
+ s32 exposure_val, exposure_max;
+ unsigned int hblank;
+ unsigned int i = 0;
+ u32 pixel_rate;
+ s64 link_freq;
+ u32 num_lanes;
+ u32 vblank;
+ u32 bpp;
+
+ /*
+ * Update the pixel rate control value.
+ *
+ * For DVP mode, maintain the pixel rate calculation using fixed FPS.
+ */
+ if (!ov5640_is_csi2(sensor)) {
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
+
+ return 0;
+ }
+
+ /*
+ * The MIPI CSI-2 link frequency should comply with the CSI-2
+ * specification and be lower than 1GHz.
+ *
+ * Start from the suggested pixel_rate for the current mode and
+ * progressively slow it down if it exceeds 1GHz.
+ */
+ num_lanes = sensor->ep.bus.mipi_csi2.num_data_lanes;
+ bpp = ov5640_code_to_bpp(sensor, fmt->code);
+ do {
+ pixel_rate = ov5640_pixel_rates[pixel_rate_id];
+ link_freq = pixel_rate * bpp / (2 * num_lanes);
+ } while (link_freq >= 1000000000U &&
+ ++pixel_rate_id < OV5640_NUM_PIXEL_RATES);
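/*
 * Illustrative walk-through of the loop above: the XGA mode suggests a
 * 96 MHz pixel rate; a 24 bpp format on one data lane would give
 * 96000000 * 24 / (2 * 1) = 1152 MHz >= 1 GHz, so the loop steps down
 * to the 48 MHz rate, giving 576 MHz, which is within the limit.
 */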
+
+ sensor->current_link_freq = link_freq;
+
+ /*
+ * Higher link rates require the clock tree to be programmed with
+ * 'mipi_div' = 1; this has the effect of halving the actual output
+ * pixel rate in the MIPI domain.
+ *
+ * Adjust the pixel rate and link frequency control values to report
+ * them correctly to userspace.
+ */
+ if (link_freq > OV5640_LINK_RATE_MAX) {
+ pixel_rate /= 2;
+ link_freq /= 2;
+ }
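/*
 * Worked example (illustrative): the full resolution mode suggests a
 * 168 MHz pixel rate; at 16 bpp over 2 lanes this yields
 * 168000000 * 16 / (2 * 2) = 672 MHz, above OV5640_LINK_RATE_MAX
 * (490 MHz), so both values are halved to 84 MHz and 336 MHz.
 */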
+
+ for (i = 0; i < ARRAY_SIZE(ov5640_csi2_link_freqs); ++i) {
+ if (ov5640_csi2_link_freqs[i] == link_freq)
+ break;
+ }
+ WARN_ON(i == ARRAY_SIZE(ov5640_csi2_link_freqs));
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+
+ timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ hblank, hblank, 1, hblank);
+
+ vblank = timings->vblank_def;
+
+ if (sensor->current_fr != mode->def_fps) {
+ /*
+ * Compute the vertical blanking according to the framerate
+ * configured with s_frame_interval.
+ */
+ int fie_num = sensor->frame_interval.numerator;
+ int fie_denom = sensor->frame_interval.denominator;
+
+ vblank = ((fie_num * pixel_rate / fie_denom) / timings->htot) -
+ mode->height;
+ }
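/*
 * Illustrative: for the VGA mode (htot = 1896, height = 480) at a
 * 48 MHz pixel rate with a 1/15 s frame interval, this computes
 * (48000000 / 15) / 1896 - 480 = 1687 - 480 = 1207 lines of vblank.
 */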
+
+ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+ OV5640_MAX_VTS - mode->height, 1, vblank);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+
+ exposure_max = timings->crop.height + vblank - 4;
+ exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+ sensor->ctrls.exposure->minimum,
+ exposure_max);
+
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum,
+ exposure_max, 1, exposure_val);
+
+ return 0;
+}
+
static int ov5640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *new_mode;
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
- struct v4l2_mbus_framefmt *fmt;
int ret;
if (format->pad != 0)
@@ -2285,27 +2912,68 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto out;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- else
- fmt = &sensor->fmt;
-
- *fmt = *mbus_fmt;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, 0) = *mbus_fmt;
+ goto out;
+ }
if (new_mode != sensor->current_mode) {
+ sensor->current_fr = new_mode->def_fps;
sensor->current_mode = new_mode;
sensor->pending_mode_change = true;
}
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
- ov5640_calc_pixel_rate(sensor));
+ /* update format even if code is unchanged, resolution might change */
+ sensor->fmt = *mbus_fmt;
+
+ ov5640_update_pixel_rate(sensor);
+
out:
mutex_unlock(&sensor->lock);
return ret;
}
+static int ov5640_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ mutex_lock(&sensor->lock);
+ timings = ov5640_timings(sensor, mode);
+ sel->r = timings->analog_crop;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+ }
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV5640_NATIVE_WIDTH;
+ sel->r.height = OV5640_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = OV5640_PIXEL_ARRAY_TOP;
+ sel->r.left = OV5640_PIXEL_ARRAY_LEFT;
+ sel->r.width = OV5640_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV5640_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int ov5640_set_framefmt(struct ov5640_dev *sensor,
struct v4l2_mbus_framefmt *format)
{
@@ -2314,17 +2982,20 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
u8 fmt, mux;
switch (format->code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
case MEDIA_BUS_FMT_UYVY8_2X8:
/* YUV422, UYVY */
fmt = 0x3f;
mux = OV5640_FMT_MUX_YUV422;
break;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_YUYV8_2X8:
/* YUV422, YUYV */
fmt = 0x30;
mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
/* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
fmt = 0x6F;
mux = OV5640_FMT_MUX_RGB;
@@ -2334,6 +3005,11 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
fmt = 0x61;
mux = OV5640_FMT_MUX_RGB;
break;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ /* BGR888: RGB */
+ fmt = 0x23;
+ mux = OV5640_FMT_MUX_RGB;
+ break;
case MEDIA_BUS_FMT_JPEG_1X8:
/* YUV422, YUYV */
fmt = 0x30;
@@ -2627,6 +3303,15 @@ static int ov5640_set_ctrl_vflip(struct ov5640_dev *sensor, int value)
(BIT(2) | BIT(1)) : 0);
}
+static int ov5640_set_ctrl_vblank(struct ov5640_dev *sensor, int value)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+
+ /* Update the VTOT timing register value. */
+ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS,
+ mode->height + value);
+}
+
static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
@@ -2657,16 +3342,32 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ struct device *dev = &sensor->i2c_client->dev;
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ const struct ov5640_timings *timings;
+ unsigned int exp_max;
int ret;
/* v4l2_ctrl_lock() locks our own mutex */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update the exposure range to the newly programmed vblank. */
+ timings = ov5640_timings(sensor, mode);
+ exp_max = mode->height + ctrl->val - 4;
+ __v4l2_ctrl_modify_range(sensor->ctrls.exposure,
+ sensor->ctrls.exposure->minimum,
+ exp_max, sensor->ctrls.exposure->step,
+ timings->vblank_def);
+ break;
+ }
+
/*
* If the device is not powered up by the host driver do
* not apply any controls to H/W at this time. Instead
* the controls will be restored right after power-up.
*/
- if (sensor->power_count == 0)
+ if (pm_runtime_suspended(dev))
return 0;
switch (ctrl->id) {
@@ -2700,6 +3401,9 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_VFLIP:
ret = ov5640_set_ctrl_vflip(sensor, ctrl->val);
break;
+ case V4L2_CID_VBLANK:
+ ret = ov5640_set_ctrl_vblank(sensor, ctrl->val);
+ break;
default:
ret = -EINVAL;
break;
@@ -2715,9 +3419,14 @@ static const struct v4l2_ctrl_ops ov5640_ctrl_ops = {
static int ov5640_init_controls(struct ov5640_dev *sensor)
{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops;
struct ov5640_ctrls *ctrls = &sensor->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov5640_timings *timings;
+ unsigned int max_vblank;
+ unsigned int hblank;
int ret;
v4l2_ctrl_handler_init(hdl, 32);
@@ -2727,8 +3436,25 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* Clock related controls */
ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
- 0, INT_MAX, 1,
- ov5640_calc_pixel_rate(sensor));
+ ov5640_pixel_rates[OV5640_NUM_PIXEL_RATES - 1],
+ ov5640_pixel_rates[0], 1,
+ ov5640_pixel_rates[mode->pixel_rate]);
+
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov5640_csi2_link_freqs) - 1,
+ OV5640_DEFAULT_LINK_FREQ,
+ ov5640_csi2_link_freqs);
+
+ timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, hblank,
+ hblank, 1, hblank);
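/*
 * hblank is fully determined by the mode timings, hence the single-value
 * range above; e.g. the VGA mode has htot = 1896 and width = 640, so the
 * read-only HBLANK control always reads 1896 - 640 = 1256.
 */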
+
+ max_vblank = OV5640_MAX_VTS - mode->height;
+ ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ OV5640_MIN_VBLANK, max_vblank,
+ 1, timings->vblank_def);
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
@@ -2748,7 +3474,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* Auto/manual gain */
ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
0, 1, 1, 1);
- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
0, 1023, 1, 0);
ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
@@ -2777,7 +3503,20 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ret = v4l2_fwnode_device_parse(&sensor->i2c_client->dev, &props);
+ if (ret)
+ goto free_ctrls;
+
+ if (props.rotation == 180)
+ sensor->upside_down = true;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+ if (ret)
+ goto free_ctrls;
+
ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ ctrls->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2794,19 +3533,32 @@ free_ctrls:
}
static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ u32 bpp = ov5640_code_to_bpp(sensor, fse->code);
+ unsigned int index = fse->index;
+
if (fse->pad != 0)
return -EINVAL;
- if (fse->index >= OV5640_NUM_MODES)
+ if (!bpp)
+ return -EINVAL;
+
+ /* Only low-resolution modes are supported for 24bpp formats. */
+ if (bpp == 24 && index >= OV5640_MODE_720P_1280_720)
+ return -EINVAL;
+
+ /* FIXME: Low resolution modes don't work in 8bpp formats. */
+ if (bpp == 8)
+ index += OV5640_MODE_720P_1280_720;
+
+ if (index >= OV5640_NUM_MODES)
return -EINVAL;
- fse->min_width =
- ov5640_mode_data[fse->index].hact;
+ fse->min_width = ov5640_mode_data[index].width;
fse->max_width = fse->min_width;
- fse->min_height =
- ov5640_mode_data[fse->index].vact;
+ fse->min_height = ov5640_mode_data[index].height;
fse->max_height = fse->min_height;
return 0;
@@ -2814,10 +3566,11 @@ static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
static int ov5640_enum_frame_interval(
struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode;
struct v4l2_fract tpf;
int ret;
@@ -2826,11 +3579,14 @@ static int ov5640_enum_frame_interval(
if (fie->index >= OV5640_NUM_FRAMERATES)
return -EINVAL;
+ mode = ov5640_find_mode(sensor, fie->width, fie->height, false);
+ if (!mode)
+ return -EINVAL;
+
tpf.numerator = 1;
tpf.denominator = ov5640_framerates[fie->index];
- ret = ov5640_try_frame_interval(sensor, &tpf,
- fie->width, fie->height);
+ ret = ov5640_try_frame_interval(sensor, &tpf, mode);
if (ret < 0)
return -EINVAL;
@@ -2869,21 +3625,24 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
mode = sensor->current_mode;
- frame_rate = ov5640_try_frame_interval(sensor, &fi->interval,
- mode->hact, mode->vact);
+ frame_rate = ov5640_try_frame_interval(sensor, &fi->interval, mode);
if (frame_rate < 0) {
/* Always return a valid frame interval value */
fi->interval = sensor->frame_interval;
goto out;
}
- mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
- mode->vact, true);
+ mode = ov5640_find_mode(sensor, mode->width, mode->height, true);
if (!mode) {
ret = -EINVAL;
goto out;
}
+ if (ov5640_framerates[frame_rate] > ov5640_framerates[mode->max_fps]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (mode != sensor->current_mode ||
frame_rate != sensor->current_fr) {
sensor->current_fr = frame_rate;
@@ -2891,8 +3650,7 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->current_mode = mode;
sensor->pending_mode_change = true;
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
- ov5640_calc_pixel_rate(sensor));
+ ov5640_update_pixel_rate(sensor);
}
out:
mutex_unlock(&sensor->lock);
@@ -2900,40 +3658,62 @@ out:
}
static int ov5640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->pad != 0)
- return -EINVAL;
- if (code->index >= ARRAY_SIZE(ov5640_formats))
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_pixfmt *formats;
+ unsigned int num_formats;
+
+ if (ov5640_is_csi2(sensor)) {
+ formats = ov5640_csi2_formats;
+ num_formats = ARRAY_SIZE(ov5640_csi2_formats) - 1;
+ } else {
+ formats = ov5640_dvp_formats;
+ num_formats = ARRAY_SIZE(ov5640_dvp_formats) - 1;
+ }
+
+ if (code->index >= num_formats)
return -EINVAL;
- code->code = ov5640_formats[code->index].code;
+ code->code = formats[code->index].code;
+
return 0;
}
static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ struct device *dev = &sensor->i2c_client->dev;
int ret = 0;
mutex_lock(&sensor->lock);
- if (sensor->streaming == !enable) {
- if (enable && sensor->pending_mode_change) {
+ if (sensor->streaming == enable) {
+ mutex_unlock(&sensor->lock);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto err;
+
+ if (sensor->pending_mode_change) {
ret = ov5640_set_mode(sensor);
if (ret)
- goto out;
+ goto put_pm;
}
- if (enable && sensor->pending_fmt_change) {
+ if (sensor->pending_fmt_change) {
ret = ov5640_set_framefmt(sensor, &sensor->fmt);
if (ret)
- goto out;
+ goto put_pm;
sensor->pending_fmt_change = false;
}
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+
+ if (ov5640_is_csi2(sensor))
ret = ov5640_set_stream_mipi(sensor, enable);
else
ret = ov5640_set_stream_dvp(sensor, enable);
@@ -2941,13 +3721,38 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
if (!ret)
sensor->streaming = enable;
}
-out:
+
+ sensor->streaming = enable;
+ mutex_unlock(&sensor->lock);
+ return 0;
+
+put_pm:
+ pm_runtime_put(dev);
+err:
mutex_unlock(&sensor->lock);
return ret;
}
+static int ov5640_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_try_format(sd, state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, state, 0);
+
+ *fmt = ov5640_is_csi2(sensor) ? ov5640_csi2_default_fmt :
+ ov5640_dvp_default_fmt;
+
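+	/* Default the try crop rectangle to the full active pixel array. */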
+ crop->left = OV5640_PIXEL_ARRAY_LEFT;
+ crop->top = OV5640_PIXEL_ARRAY_TOP;
+ crop->width = OV5640_PIXEL_ARRAY_WIDTH;
+ crop->height = OV5640_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+}
+
static const struct v4l2_subdev_core_ops ov5640_core_ops = {
- .s_power = ov5640_s_power,
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
@@ -2960,9 +3765,11 @@ static const struct v4l2_subdev_video_ops ov5640_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov5640_pad_ops = {
+ .init_cfg = ov5640_init_cfg,
.enum_mbus_code = ov5640_enum_mbus_code,
.get_fmt = ov5640_get_fmt,
.set_fmt = ov5640_set_fmt,
+ .get_selection = ov5640_get_selection,
.enum_frame_size = ov5640_enum_frame_size,
.enum_frame_interval = ov5640_enum_frame_interval,
};
@@ -3013,13 +3820,34 @@ power_off:
return ret;
}
+static int ov5640_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov5640_dev *sensor = to_ov5640_dev(subdev);
+
+ return ov5640_set_power(sensor, false);
+}
+
+static int ov5640_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ov5640_dev *sensor = to_ov5640_dev(subdev);
+ int ret = 0;
+
+ ret = ov5640_set_power(sensor, true);
+ if (ret)
+ return ret;
+
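+	/* Restore the cached V4L2 control values to the powered-up sensor. */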
+ return __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+}
+
static int ov5640_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct fwnode_handle *endpoint;
struct ov5640_dev *sensor;
- struct v4l2_mbus_framefmt *fmt;
- u32 rotation;
int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
@@ -3032,40 +3860,17 @@ static int ov5640_probe(struct i2c_client *client)
* default init sequence initialize sensor to
* YUV422 UYVY VGA@30fps
*/
- fmt = &sensor->fmt;
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
- fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
- fmt->width = 640;
- fmt->height = 480;
- fmt->field = V4L2_FIELD_NONE;
sensor->frame_interval.numerator = 1;
sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
sensor->current_fr = OV5640_30_FPS;
sensor->current_mode =
&ov5640_mode_data[OV5640_MODE_VGA_640_480];
sensor->last_mode = sensor->current_mode;
+ sensor->current_link_freq =
+ ov5640_csi2_link_freqs[OV5640_DEFAULT_LINK_FREQ];
sensor->ae_target = 52;
- /* optional indication of physical rotation of sensor */
- ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
- &rotation);
- if (!ret) {
- switch (rotation) {
- case 180:
- sensor->upside_down = true;
- fallthrough;
- case 0:
- break;
- default:
- dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
- rotation);
- }
- }
-
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
NULL);
if (!endpoint) {
@@ -3087,6 +3892,9 @@ static int ov5640_probe(struct i2c_client *client)
return -EINVAL;
}
+ sensor->fmt = ov5640_is_csi2(sensor) ? ov5640_csi2_default_fmt :
+ ov5640_dvp_default_fmt;
+
/* get system clock (xclk) */
sensor->xclk = devm_clk_get(dev, "xclk");
if (IS_ERR(sensor->xclk)) {
@@ -3138,13 +3946,17 @@ static int ov5640_probe(struct i2c_client *client)
if (ret)
goto entity_cleanup;
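+	/* Runtime PM: keep the sensor powered down until the first stream-on. */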
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
ret = v4l2_async_register_subdev_sensor_common(&sensor->sd);
if (ret)
- goto free_ctrls;
+ goto pm_disable;
return 0;
-free_ctrls:
+pm_disable:
+ pm_runtime_disable(dev);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
entity_cleanup:
media_entity_cleanup(&sensor->sd.entity);
@@ -3154,17 +3966,23 @@ entity_cleanup:
static int ov5640_remove(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5640_dev *sensor = to_ov5640_dev(sd);
v4l2_async_unregister_subdev(&sensor->sd);
media_entity_cleanup(&sensor->sd.entity);
+ pm_runtime_disable(dev);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->lock);
return 0;
}
+static const struct dev_pm_ops ov5640_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov5640_suspend, ov5640_resume, NULL)
+};
+
static const struct i2c_device_id ov5640_id[] = {
{"ov5640", 0},
{},
@@ -3181,6 +3999,7 @@ static struct i2c_driver ov5640_i2c_driver = {
.driver = {
.name = "ov5640",
.of_match_table = ov5640_dt_ids,
+ .pm = &ov5640_pm_ops,
},
.id_table = ov5640_id,
.probe_new = ov5640_probe,
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index a6c17d15d754..368fa21e675e 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -837,7 +837,7 @@ static const struct v4l2_ctrl_ops ov5645_ctrl_ops = {
};
static int ov5645_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -849,7 +849,7 @@ static int ov5645_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5645_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
@@ -868,13 +868,13 @@ static int ov5645_enum_frame_size(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__ov5645_get_pad_format(struct ov5645 *ov5645,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov5645->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ov5645->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->fmt;
default:
@@ -883,23 +883,25 @@ __ov5645_get_pad_format(struct ov5645 *ov5645,
}
static int ov5645_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5645 *ov5645 = to_ov5645(sd);
- format->format = *__ov5645_get_pad_format(ov5645, cfg, format->pad,
+ format->format = *__ov5645_get_pad_format(ov5645, sd_state,
+ format->pad,
format->which);
return 0;
}
static struct v4l2_rect *
-__ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg,
+__ov5645_get_pad_crop(struct ov5645 *ov5645,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5645->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&ov5645->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->crop;
default:
@@ -908,7 +910,7 @@ __ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg,
}
static int ov5645_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -917,8 +919,8 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
const struct ov5645_mode_info *new_mode;
int ret;
- __crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad,
- format->which);
+ __crop = __ov5645_get_pad_crop(ov5645, sd_state, format->pad,
+ format->which);
new_mode = v4l2_find_nearest_size(ov5645_mode_info_data,
ARRAY_SIZE(ov5645_mode_info_data),
@@ -942,8 +944,8 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
ov5645->current_mode = new_mode;
}
- __format = __ov5645_get_pad_format(ov5645, cfg, format->pad,
- format->which);
+ __format = __ov5645_get_pad_format(ov5645, sd_state, format->pad,
+ format->which);
__format->width = __crop->width;
__format->height = __crop->height;
__format->code = MEDIA_BUS_FMT_UYVY8_2X8;
@@ -956,21 +958,21 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
}
static int ov5645_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { 0 };
- fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.format.width = 1920;
fmt.format.height = 1080;
- ov5645_set_format(subdev, cfg, &fmt);
+ ov5645_set_format(subdev, sd_state, &fmt);
return 0;
}
static int ov5645_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -978,7 +980,7 @@ static int ov5645_get_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- sel->r = *__ov5645_get_pad_crop(ov5645, cfg, sel->pad,
+ sel->r = *__ov5645_get_pad_crop(ov5645, sd_state, sel->pad,
sel->which);
return 0;
}
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index e7d2e5b4ad4b..febba0316664 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A V4L2 driver for OmniVision OV5647 cameras.
*
@@ -8,119 +9,316 @@
* Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
*
* Copyright (C) 2016, Synopsys, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed .as is. WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-mediabus.h>
-#define SENSOR_NAME "ov5647"
+/*
+ * From the datasheet, "20ms after PWDN goes low or 20ms after RESETB goes
+ * high if reset is inserted after PWDN goes high, host can access sensor's
+ * SCCB to initialize sensor."
+ */
+#define PWDN_ACTIVE_DELAY_MS 20
#define MIPI_CTRL00_CLOCK_LANE_GATE BIT(5)
+#define MIPI_CTRL00_LINE_SYNC_ENABLE BIT(4)
#define MIPI_CTRL00_BUS_IDLE BIT(2)
#define MIPI_CTRL00_CLOCK_LANE_DISABLE BIT(0)
#define OV5647_SW_STANDBY 0x0100
#define OV5647_SW_RESET 0x0103
-#define OV5647_REG_CHIPID_H 0x300A
-#define OV5647_REG_CHIPID_L 0x300B
-#define OV5640_REG_PAD_OUT 0x300D
+#define OV5647_REG_CHIPID_H 0x300a
+#define OV5647_REG_CHIPID_L 0x300b
+#define OV5640_REG_PAD_OUT 0x300d
+#define OV5647_REG_EXP_HI 0x3500
+#define OV5647_REG_EXP_MID 0x3501
+#define OV5647_REG_EXP_LO 0x3502
+#define OV5647_REG_AEC_AGC 0x3503
+#define OV5647_REG_GAIN_HI 0x350a
+#define OV5647_REG_GAIN_LO 0x350b
+#define OV5647_REG_VTS_HI 0x380e
+#define OV5647_REG_VTS_LO 0x380f
#define OV5647_REG_FRAME_OFF_NUMBER 0x4202
#define OV5647_REG_MIPI_CTRL00 0x4800
#define OV5647_REG_MIPI_CTRL14 0x4814
+#define OV5647_REG_AWB 0x5001
#define REG_TERM 0xfffe
#define VAL_TERM 0xfe
#define REG_DLY 0xffff
-#define OV5647_ROW_START 0x01
-#define OV5647_ROW_START_MIN 0
-#define OV5647_ROW_START_MAX 2004
-#define OV5647_ROW_START_DEF 54
+/* OV5647 native and active pixel array size */
+#define OV5647_NATIVE_WIDTH 2624U
+#define OV5647_NATIVE_HEIGHT 1956U
-#define OV5647_COLUMN_START 0x02
-#define OV5647_COLUMN_START_MIN 0
-#define OV5647_COLUMN_START_MAX 2750
-#define OV5647_COLUMN_START_DEF 16
+#define OV5647_PIXEL_ARRAY_LEFT 16U
+#define OV5647_PIXEL_ARRAY_TOP 16U
+#define OV5647_PIXEL_ARRAY_WIDTH 2592U
+#define OV5647_PIXEL_ARRAY_HEIGHT 1944U
-#define OV5647_WINDOW_HEIGHT 0x03
-#define OV5647_WINDOW_HEIGHT_MIN 2
-#define OV5647_WINDOW_HEIGHT_MAX 2006
-#define OV5647_WINDOW_HEIGHT_DEF 1944
+#define OV5647_VBLANK_MIN 4
+#define OV5647_VTS_MAX 32767
-#define OV5647_WINDOW_WIDTH 0x04
-#define OV5647_WINDOW_WIDTH_MIN 2
-#define OV5647_WINDOW_WIDTH_MAX 2752
-#define OV5647_WINDOW_WIDTH_DEF 2592
+#define OV5647_EXPOSURE_MIN 4
+#define OV5647_EXPOSURE_STEP 1
+#define OV5647_EXPOSURE_DEFAULT 1000
+#define OV5647_EXPOSURE_MAX 65535
struct regval_list {
u16 addr;
u8 data;
};
+struct ov5647_mode {
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ u64 pixel_rate;
+ int hts;
+ int vts;
+ const struct regval_list *reg_list;
+ unsigned int num_regs;
+};
+
struct ov5647 {
struct v4l2_subdev sd;
struct media_pad pad;
struct mutex lock;
- struct v4l2_mbus_framefmt format;
- unsigned int width;
- unsigned int height;
- int power_count;
struct clk *xclk;
+ struct gpio_desc *pwdn;
+ bool clock_ncont;
+ struct v4l2_ctrl_handler ctrls;
+ const struct ov5647_mode *mode;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *exposure;
+ bool streaming;
};
-static inline struct ov5647 *to_state(struct v4l2_subdev *sd)
+static inline struct ov5647 *to_sensor(struct v4l2_subdev *sd)
{
return container_of(sd, struct ov5647, sd);
}
-static struct regval_list sensor_oe_disable_regs[] = {
+static const struct regval_list sensor_oe_disable_regs[] = {
{0x3000, 0x00},
{0x3001, 0x00},
{0x3002, 0x00},
};
-static struct regval_list sensor_oe_enable_regs[] = {
+static const struct regval_list sensor_oe_enable_regs[] = {
{0x3000, 0x0f},
{0x3001, 0xff},
{0x3002, 0xe4},
};
-static struct regval_list ov5647_640x480[] = {
+static struct regval_list ov5647_2592x1944_10bpp[] = {
{0x0100, 0x00},
{0x0103, 0x01},
- {0x3034, 0x08},
+ {0x3034, 0x1a},
{0x3035, 0x21},
- {0x3036, 0x46},
+ {0x3036, 0x69},
{0x303c, 0x11},
{0x3106, 0xf5},
- {0x3821, 0x07},
- {0x3820, 0x41},
+ {0x3821, 0x06},
+ {0x3820, 0x00},
{0x3827, 0xec},
- {0x370c, 0x0f},
+ {0x370c, 0x03},
+ {0x3612, 0x5b},
+ {0x3618, 0x04},
+ {0x5000, 0x06},
+ {0x5002, 0x41},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3016, 0x08},
+ {0x3017, 0xe0},
+ {0x3018, 0x44},
+ {0x301c, 0xf8},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x0b},
+ {0x380d, 0x1c},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3708, 0x64},
+ {0x3709, 0x12},
+ {0x3808, 0x0a},
+ {0x3809, 0x20},
+ {0x380a, 0x07},
+ {0x380b, 0x98},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3811, 0x10},
+ {0x3813, 0x06},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x28},
+ {0x3a0a, 0x00},
+ {0x3a0b, 0xf6},
+ {0x3a0d, 0x08},
+ {0x3a0e, 0x06},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x19},
+ {0x4800, 0x24},
+ {0x3503, 0x03},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_1080p30_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3034, 0x1a},
+ {0x3035, 0x21},
+ {0x3036, 0x62},
+ {0x303c, 0x11},
+ {0x3106, 0xf5},
+ {0x3821, 0x06},
+ {0x3820, 0x00},
+ {0x3827, 0xec},
+ {0x370c, 0x03},
+ {0x3612, 0x5b},
+ {0x3618, 0x04},
+ {0x5000, 0x06},
+ {0x5002, 0x41},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3016, 0x08},
+ {0x3017, 0xe0},
+ {0x3018, 0x44},
+ {0x301c, 0xf8},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x09},
+ {0x380d, 0x70},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3708, 0x64},
+ {0x3709, 0x12},
+ {0x3808, 0x07},
+ {0x3809, 0x80},
+ {0x380a, 0x04},
+ {0x380b, 0x38},
+ {0x3800, 0x01},
+ {0x3801, 0x5c},
+ {0x3802, 0x01},
+ {0x3803, 0xb2},
+ {0x3804, 0x08},
+ {0x3805, 0xe3},
+ {0x3806, 0x05},
+ {0x3807, 0xf1},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x4b},
+ {0x3a0a, 0x01},
+ {0x3a0b, 0x13},
+ {0x3a0d, 0x04},
+ {0x3a0e, 0x03},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x19},
+ {0x4800, 0x34},
+ {0x3503, 0x03},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_2x2binned_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3034, 0x1a},
+ {0x3035, 0x21},
+ {0x3036, 0x62},
+ {0x303c, 0x11},
+ {0x3106, 0xf5},
+ {0x3827, 0xec},
+ {0x370c, 0x03},
{0x3612, 0x59},
{0x3618, 0x00},
{0x5000, 0x06},
- {0x5001, 0x01},
{0x5002, 0x41},
{0x5003, 0x08},
{0x5a00, 0x08},
@@ -136,32 +334,115 @@ static struct regval_list ov5647_640x480[] = {
{0x3a19, 0xf8},
{0x3c01, 0x80},
{0x3b07, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0xcc},
{0x380c, 0x07},
{0x380d, 0x68},
- {0x380e, 0x03},
- {0x380f, 0xd8},
+ {0x3811, 0x0c},
+ {0x3813, 0x06},
{0x3814, 0x31},
{0x3815, 0x31},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x28},
+ {0x3a0a, 0x00},
+ {0x3a0b, 0xf6},
+ {0x3a0d, 0x08},
+ {0x3a0e, 0x06},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x04},
+ {0x4000, 0x09},
+ {0x4837, 0x16},
+ {0x4800, 0x24},
+ {0x3503, 0x03},
+ {0x3820, 0x41},
+ {0x3821, 0x07},
+ {0x350a, 0x00},
+ {0x350b, 0x10},
+ {0x3500, 0x00},
+ {0x3501, 0x1a},
+ {0x3502, 0xf0},
+ {0x3212, 0xa0},
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov5647_640x480_10bpp[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3035, 0x11},
+ {0x3036, 0x46},
+ {0x303c, 0x11},
+ {0x3821, 0x07},
+ {0x3820, 0x41},
+ {0x370c, 0x03},
+ {0x3612, 0x59},
+ {0x3618, 0x00},
+ {0x5000, 0x06},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0xff},
+ {0x3001, 0xff},
+ {0x3002, 0xff},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x07},
+ {0x380d, 0x3c},
+ {0x3814, 0x35},
+ {0x3815, 0x35},
{0x3708, 0x64},
{0x3709, 0x52},
{0x3808, 0x02},
{0x3809, 0x80},
{0x380a, 0x01},
- {0x380b, 0xE0},
- {0x3801, 0x00},
+ {0x380b, 0xe0},
+ {0x3800, 0x00},
+ {0x3801, 0x10},
{0x3802, 0x00},
{0x3803, 0x00},
{0x3804, 0x0a},
- {0x3805, 0x3f},
+ {0x3805, 0x2f},
{0x3806, 0x07},
- {0x3807, 0xa1},
- {0x3811, 0x08},
- {0x3813, 0x02},
+ {0x3807, 0x9f},
{0x3630, 0x2e},
{0x3632, 0xe2},
{0x3633, 0x23},
{0x3634, 0x44},
- {0x3636, 0x06},
{0x3620, 0x64},
{0x3621, 0xe0},
{0x3600, 0x37},
@@ -176,11 +457,11 @@ static struct regval_list ov5647_640x480[] = {
{0x3f06, 0x10},
{0x3f01, 0x0a},
{0x3a08, 0x01},
- {0x3a09, 0x27},
+ {0x3a09, 0x2e},
{0x3a0a, 0x00},
- {0x3a0b, 0xf6},
- {0x3a0d, 0x04},
- {0x3a0e, 0x03},
+ {0x3a0b, 0xfb},
+ {0x3a0d, 0x02},
+ {0x3a0e, 0x01},
{0x3a0f, 0x58},
{0x3a10, 0x50},
{0x3a1b, 0x58},
@@ -190,31 +471,152 @@ static struct regval_list ov5647_640x480[] = {
{0x4001, 0x02},
{0x4004, 0x02},
{0x4000, 0x09},
- {0x4837, 0x24},
- {0x4050, 0x6e},
- {0x4051, 0x8f},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3017, 0xe0},
+ {0x301c, 0xfc},
+ {0x3636, 0x06},
+ {0x3016, 0x08},
+ {0x3827, 0xec},
+ {0x3018, 0x44},
+ {0x3035, 0x21},
+ {0x3106, 0xf5},
+ {0x3034, 0x1a},
+ {0x301c, 0xf8},
+ {0x4800, 0x34},
+ {0x3503, 0x03},
{0x0100, 0x01},
};
-static int ov5647_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+static const struct ov5647_mode ov5647_modes[] = {
+ /* 2592x1944 full resolution full FOV 10-bit mode. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 2592,
+ .height = 1944
+ },
+ .crop = {
+ .left = OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2592,
+ .height = 1944
+ },
+ .pixel_rate = 87500000,
+ .hts = 2844,
+ .vts = 0x7b0,
+ .reg_list = ov5647_2592x1944_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_2592x1944_10bpp)
+ },
+ /* 1080p30 10-bit mode. Full resolution centre-cropped down to 1080p. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 1920,
+ .height = 1080
+ },
+ .crop = {
+ .left = 348 + OV5647_PIXEL_ARRAY_LEFT,
+ .top = 434 + OV5647_PIXEL_ARRAY_TOP,
+ .width = 1928,
+ .height = 1080,
+ },
+ .pixel_rate = 81666700,
+ .hts = 2416,
+ .vts = 0x450,
+ .reg_list = ov5647_1080p30_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_1080p30_10bpp)
+ },
+ /* 2x2 binned full FOV 10-bit mode. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 1296,
+ .height = 972
+ },
+ .crop = {
+ .left = OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2592,
+ .height = 1944,
+ },
+ .pixel_rate = 81666700,
+ .hts = 1896,
+ .vts = 0x59b,
+ .reg_list = ov5647_2x2binned_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_2x2binned_10bpp)
+ },
+ /* 10-bit VGA full FOV 60fps. 2x2 binned and subsampled down to VGA. */
+ {
+ .format = {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 640,
+ .height = 480
+ },
+ .crop = {
+ .left = 16 + OV5647_PIXEL_ARRAY_LEFT,
+ .top = OV5647_PIXEL_ARRAY_TOP,
+ .width = 2560,
+ .height = 1920,
+ },
+ .pixel_rate = 55000000,
+ .hts = 1852,
+ .vts = 0x1f8,
+ .reg_list = ov5647_640x480_10bpp,
+ .num_regs = ARRAY_SIZE(ov5647_640x480_10bpp)
+ },
+};
+
+/* Default sensor mode is 2x2 binned 640x480 SBGGR10_1X10. */
+#define OV5647_DEFAULT_MODE (&ov5647_modes[3])
+#define OV5647_DEFAULT_FORMAT (ov5647_modes[3].format)
+
+static int ov5647_write16(struct v4l2_subdev *sd, u16 reg, u16 val)
{
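+	/* Register address and 16-bit value are both sent MSB first. */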
+ unsigned char data[4] = { reg >> 8, reg & 0xff, val >> 8, val & 0xff};
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
+
+ ret = i2c_master_send(client, data, 4);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5647_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
unsigned char data[3] = { reg >> 8, reg & 0xff, val};
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
ret = i2c_master_send(client, data, 3);
- if (ret < 0)
+ if (ret < 0) {
dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
__func__, reg);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ov5647_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
{
- int ret;
unsigned char data_w[2] = { reg >> 8, reg & 0xff };
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
ret = i2c_master_send(client, data_w, 2);
if (ret < 0) {
@@ -224,15 +626,17 @@ static int ov5647_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
}
ret = i2c_master_recv(client, val, 1);
- if (ret < 0)
+ if (ret < 0) {
dev_dbg(&client->dev, "%s: i2c read error, reg: %x\n",
__func__, reg);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ov5647_write_array(struct v4l2_subdev *sd,
- struct regval_list *regs, int array_size)
+ const struct regval_list *regs, int array_size)
{
int i, ret;
@@ -255,161 +659,174 @@ static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
return ret;
channel_id &= ~(3 << 6);
- return ov5647_write(sd, OV5647_REG_MIPI_CTRL14, channel_id | (channel << 6));
+
+ return ov5647_write(sd, OV5647_REG_MIPI_CTRL14,
+ channel_id | (channel << 6));
}
-static int ov5647_stream_on(struct v4l2_subdev *sd)
+static int ov5647_set_mode(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ u8 resetval, rdval;
int ret;
- ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_BUS_IDLE);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
- ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
- if (ret < 0)
+ ret = ov5647_write_array(sd, sensor->mode->reg_list,
+ sensor->mode->num_regs);
+ if (ret < 0) {
+ dev_err(&client->dev, "write sensor default regs error\n");
return ret;
+ }
- return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
-}
-
-static int ov5647_stream_off(struct v4l2_subdev *sd)
-{
- int ret;
-
- ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_CLOCK_LANE_GATE
- | MIPI_CTRL00_BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_DISABLE);
+ ret = ov5647_set_virtual_channel(sd, 0);
if (ret < 0)
return ret;
- ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
if (ret < 0)
return ret;
- return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
+ if (!(resetval & 0x01)) {
+		dev_err(&client->dev, "Device was in SW standby\n");
+ ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
-static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
+static int ov5647_stream_on(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ u8 val = MIPI_CTRL00_BUS_IDLE;
int ret;
- u8 rdval;
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
- if (ret < 0)
+ ret = ov5647_set_mode(sd);
+ if (ret) {
+ dev_err(&client->dev, "Failed to program sensor mode: %d\n", ret);
return ret;
+ }
- if (standby)
- rdval &= ~0x01;
- else
- rdval |= 0x01;
-
- return ov5647_write(sd, OV5647_SW_STANDBY, rdval);
-}
+ /* Apply customized values from user when stream starts. */
+ ret = __v4l2_ctrl_handler_setup(sd->ctrl_handler);
+ if (ret)
+ return ret;
-static int __sensor_init(struct v4l2_subdev *sd)
-{
- int ret;
- u8 resetval, rdval;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
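+	/* Non-continuous clock: gate the clock lane and enable line sync. */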
+ if (sensor->clock_ncont)
+ val |= MIPI_CTRL00_CLOCK_LANE_GATE |
+ MIPI_CTRL00_LINE_SYNC_ENABLE;
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, val);
if (ret < 0)
return ret;
- ret = ov5647_write_array(sd, ov5647_640x480,
- ARRAY_SIZE(ov5647_640x480));
- if (ret < 0) {
- dev_err(&client->dev, "write sensor default regs error\n");
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ if (ret < 0)
return ret;
- }
- ret = ov5647_set_virtual_channel(sd, 0);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
+}
+
+static int ov5647_stream_off(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00,
+ MIPI_CTRL00_CLOCK_LANE_GATE | MIPI_CTRL00_BUS_IDLE |
+ MIPI_CTRL00_CLOCK_LANE_DISABLE);
if (ret < 0)
return ret;
- ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
if (ret < 0)
return ret;
- if (!(resetval & 0x01)) {
- dev_err(&client->dev, "Device was in SW standby");
- ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
- if (ret < 0)
- return ret;
- }
-
- /*
- * stream off to make the clock lane into LP-11 state.
- */
- return ov5647_stream_off(sd);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
}
-static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
+static int ov5647_power_on(struct device *dev)
{
- int ret = 0;
- struct ov5647 *ov5647 = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = dev_get_drvdata(dev);
+ int ret;
- mutex_lock(&ov5647->lock);
+ dev_dbg(dev, "OV5647 power on\n");
- if (on && !ov5647->power_count) {
- dev_dbg(&client->dev, "OV5647 power on\n");
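+	/* Deassert PWDN and wait for the SCCB interface to become ready. */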
+ if (sensor->pwdn) {
+ gpiod_set_value_cansleep(sensor->pwdn, 0);
+ msleep(PWDN_ACTIVE_DELAY_MS);
+ }
- ret = clk_prepare_enable(ov5647->xclk);
- if (ret < 0) {
- dev_err(&client->dev, "clk prepare enable failed\n");
- goto out;
- }
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret < 0) {
+ dev_err(dev, "clk prepare enable failed\n");
+ goto error_pwdn;
+ }
- ret = ov5647_write_array(sd, sensor_oe_enable_regs,
- ARRAY_SIZE(sensor_oe_enable_regs));
- if (ret < 0) {
- clk_disable_unprepare(ov5647->xclk);
- dev_err(&client->dev,
- "write sensor_oe_enable_regs error\n");
- goto out;
- }
+ ret = ov5647_write_array(&sensor->sd, sensor_oe_enable_regs,
+ ARRAY_SIZE(sensor_oe_enable_regs));
+ if (ret < 0) {
+ dev_err(dev, "write sensor_oe_enable_regs error\n");
+ goto error_clk_disable;
+ }
- ret = __sensor_init(sd);
- if (ret < 0) {
- clk_disable_unprepare(ov5647->xclk);
- dev_err(&client->dev,
- "Camera not available, check Power\n");
- goto out;
- }
- } else if (!on && ov5647->power_count == 1) {
- dev_dbg(&client->dev, "OV5647 power off\n");
+ /* Stream off to coax lanes into LP-11 state. */
+ ret = ov5647_stream_off(&sensor->sd);
+ if (ret < 0) {
+ dev_err(dev, "camera not available, check power\n");
+ goto error_clk_disable;
+ }
- ret = ov5647_write_array(sd, sensor_oe_disable_regs,
- ARRAY_SIZE(sensor_oe_disable_regs));
+ return 0;
- if (ret < 0)
- dev_dbg(&client->dev, "disable oe failed\n");
+error_clk_disable:
+ clk_disable_unprepare(sensor->xclk);
+error_pwdn:
+ gpiod_set_value_cansleep(sensor->pwdn, 1);
- ret = set_sw_standby(sd, true);
+ return ret;
+}
- if (ret < 0)
- dev_dbg(&client->dev, "soft stby failed\n");
+static int ov5647_power_off(struct device *dev)
+{
+ struct ov5647 *sensor = dev_get_drvdata(dev);
+ u8 rdval;
+ int ret;
- clk_disable_unprepare(ov5647->xclk);
- }
+ dev_dbg(dev, "OV5647 power off\n");
- /* Update the power count. */
- ov5647->power_count += on ? 1 : -1;
- WARN_ON(ov5647->power_count < 0);
+ ret = ov5647_write_array(&sensor->sd, sensor_oe_disable_regs,
+ ARRAY_SIZE(sensor_oe_disable_regs));
+ if (ret < 0)
+ dev_dbg(dev, "disable oe failed\n");
-out:
- mutex_unlock(&ov5647->lock);
+ /* Enter software standby */
+ ret = ov5647_read(&sensor->sd, OV5647_SW_STANDBY, &rdval);
+ if (ret < 0)
+ dev_dbg(dev, "software standby failed\n");
- return ret;
+ rdval &= ~0x01;
+ ret = ov5647_write(&sensor->sd, OV5647_SW_STANDBY, rdval);
+ if (ret < 0)
+ dev_dbg(dev, "software standby failed\n");
+
+ clk_disable_unprepare(sensor->xclk);
+ gpiod_set_value_cansleep(sensor->pwdn, 1);
+
+ return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov5647_sensor_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
+ struct v4l2_dbg_register *reg)
{
- u8 val;
int ret;
+ u8 val;
ret = ov5647_read(sd, reg->reg & 0xff, &val);
if (ret < 0)
@@ -422,29 +839,79 @@ static int ov5647_sensor_get_register(struct v4l2_subdev *sd,
}
static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
+ const struct v4l2_dbg_register *reg)
{
return ov5647_write(sd, reg->reg & 0xff, reg->val & 0xff);
}
#endif
-/*
- * Subdev core operations registration
- */
+/* Subdev core operations registration */
static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
- .s_power = ov5647_sensor_power,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov5647_sensor_get_register,
.s_register = ov5647_sensor_set_register,
#endif
};
+static const struct v4l2_rect *
+__ov5647_get_pad_crop(struct ov5647 *ov5647,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&ov5647->sd, sd_state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5647->mode->crop;
+ }
+
+ return NULL;
+}
+
static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
{
- if (enable)
- return ov5647_stream_on(sd);
- else
- return ov5647_stream_off(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5647 *sensor = to_sensor(sd);
+ int ret;
+
+ mutex_lock(&sensor->lock);
+ if (sensor->streaming == enable) {
+ mutex_unlock(&sensor->lock);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ goto error_unlock;
+
+ ret = ov5647_stream_on(sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "stream start failed: %d\n", ret);
+ goto error_pm;
+ }
+ } else {
+ ret = ov5647_stream_off(sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "stream stop failed: %d\n", ret);
+ goto error_pm;
+ }
+ pm_runtime_put(&client->dev);
+ }
+
+ sensor->streaming = enable;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+
+error_pm:
+ pm_runtime_put(&client->dev);
+error_unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
}
static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
@@ -452,19 +919,150 @@ static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
};
static int ov5647_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov5647_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct v4l2_mbus_framefmt *fmt;
+
+ if (fse->code != MEDIA_BUS_FMT_SBGGR10_1X10 ||
+ fse->index >= ARRAY_SIZE(ov5647_modes))
+ return -EINVAL;
+
+ fmt = &ov5647_modes[fse->index].format;
+ fse->min_width = fmt->width;
+ fse->max_width = fmt->width;
+ fse->min_height = fmt->height;
+ fse->max_height = fmt->height;
+
+ return 0;
+}
+
+static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ const struct v4l2_mbus_framefmt *sensor_format;
+ struct ov5647 *sensor = to_sensor(sd);
+
+ mutex_lock(&sensor->lock);
+ switch (format->which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ sensor_format = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ break;
+ default:
+ sensor_format = &sensor->mode->format;
+ break;
+ }
+
+ *fmt = *sensor_format;
+ mutex_unlock(&sensor->lock);
return 0;
}
+static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5647 *sensor = to_sensor(sd);
+ const struct ov5647_mode *mode;
+
+ mode = v4l2_find_nearest_size(ov5647_modes, ARRAY_SIZE(ov5647_modes),
+ format.width, format.height,
+ fmt->width, fmt->height);
+
+	/* Update the sensor mode and apply it at streamon time. */
+ mutex_lock(&sensor->lock);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = mode->format;
+ } else {
+ int exposure_max, exposure_def;
+ int hblank, vblank;
+
+ sensor->mode = mode;
+ __v4l2_ctrl_modify_range(sensor->pixel_rate, mode->pixel_rate,
+ mode->pixel_rate, 1, mode->pixel_rate);
+
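+		/* Blanking is the total line/frame length minus the active size. */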
+ hblank = mode->hts - mode->format.width;
+ __v4l2_ctrl_modify_range(sensor->hblank, hblank, hblank, 1,
+ hblank);
+
+ vblank = mode->vts - mode->format.height;
+ __v4l2_ctrl_modify_range(sensor->vblank, OV5647_VBLANK_MIN,
+ OV5647_VTS_MAX - mode->format.height,
+ 1, vblank);
+ __v4l2_ctrl_s_ctrl(sensor->vblank, vblank);
+
+ exposure_max = mode->vts - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ exposure_def);
+ }
+ *fmt = mode->format;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
+
+static int ov5647_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP: {
+ struct ov5647 *sensor = to_sensor(sd);
+
+ mutex_lock(&sensor->lock);
+ sel->r = *__ov5647_get_pad_crop(sensor, sd_state, sel->pad,
+ sel->which);
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+ }
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV5647_NATIVE_WIDTH;
+ sel->r.height = OV5647_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = OV5647_PIXEL_ARRAY_TOP;
+ sel->r.left = OV5647_PIXEL_ARRAY_LEFT;
+ sel->r.width = OV5647_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV5647_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static const struct v4l2_subdev_pad_ops ov5647_subdev_pad_ops = {
- .enum_mbus_code = ov5647_enum_mbus_code,
+ .enum_mbus_code = ov5647_enum_mbus_code,
+ .enum_frame_size = ov5647_enum_frame_size,
+ .set_fmt = ov5647_set_pad_fmt,
+ .get_fmt = ov5647_get_pad_fmt,
+ .get_selection = ov5647_get_selection,
};
static const struct v4l2_subdev_ops ov5647_subdev_ops = {
@@ -475,9 +1073,9 @@ static const struct v4l2_subdev_ops ov5647_subdev_ops = {
static int ov5647_detect(struct v4l2_subdev *sd)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 read;
int ret;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
ret = ov5647_write(sd, OV5647_SW_RESET, 0x01);
if (ret < 0)
@@ -507,21 +1105,16 @@ static int ov5647_detect(struct v4l2_subdev *sd)
static int ov5647_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
struct v4l2_rect *crop =
- v4l2_subdev_get_try_crop(sd, fh->pad, 0);
-
- crop->left = OV5647_COLUMN_START_DEF;
- crop->top = OV5647_ROW_START_DEF;
- crop->width = OV5647_WINDOW_WIDTH_DEF;
- crop->height = OV5647_WINDOW_HEIGHT_DEF;
+ v4l2_subdev_get_try_crop(sd, fh->state, 0);
- format->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ crop->left = OV5647_PIXEL_ARRAY_LEFT;
+ crop->top = OV5647_PIXEL_ARRAY_TOP;
+ crop->width = OV5647_PIXEL_ARRAY_WIDTH;
+ crop->height = OV5647_PIXEL_ARRAY_HEIGHT;
- format->width = OV5647_WINDOW_WIDTH_DEF;
- format->height = OV5647_WINDOW_HEIGHT_DEF;
- format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+ *format = OV5647_DEFAULT_FORMAT;
return 0;
}
@@ -530,11 +1123,220 @@ static const struct v4l2_subdev_internal_ops ov5647_subdev_internal_ops = {
.open = ov5647_open,
};
-static int ov5647_parse_dt(struct device_node *np)
+static int ov5647_s_auto_white_balance(struct v4l2_subdev *sd, u32 val)
{
- struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
- struct device_node *ep;
+ return ov5647_write(sd, OV5647_REG_AWB, val ? 1 : 0);
+}
+
+static int ov5647_s_autogain(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+ u8 reg;
+
+	/* Non-zero turns on AGC by clearing bit 1. */
+ ret = ov5647_read(sd, OV5647_REG_AEC_AGC, &reg);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_AEC_AGC, val ? reg & ~BIT(1)
+ : reg | BIT(1));
+}
+
+static int ov5647_s_exposure_auto(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+ u8 reg;
+
+ /*
+ * Everything except V4L2_EXPOSURE_MANUAL turns on AEC by
+ * clearing bit 0.
+ */
+ ret = ov5647_read(sd, OV5647_REG_AEC_AGC, &reg);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_AEC_AGC,
+ val == V4L2_EXPOSURE_MANUAL ? reg | BIT(0)
+ : reg & ~BIT(0));
+}
+
+static int ov5647_s_analogue_gain(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+
+ /* 10 bits of gain, 2 in the high register. */
+ ret = ov5647_write(sd, OV5647_REG_GAIN_HI, (val >> 8) & 3);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_GAIN_LO, val & 0xff);
+}
+static int ov5647_s_exposure(struct v4l2_subdev *sd, u32 val)
+{
+ int ret;
+
+ /*
+ * Sensor has 20 bits, but the bottom 4 bits are fractions of a line
+ * which we leave as zero (and don't receive in "val").
+ */
+ ret = ov5647_write(sd, OV5647_REG_EXP_HI, (val >> 12) & 0xf);
+ if (ret)
+ return ret;
+
+ ret = ov5647_write(sd, OV5647_REG_EXP_MID, (val >> 4) & 0xff);
+ if (ret)
+ return ret;
+
+ return ov5647_write(sd, OV5647_REG_EXP_LO, (val & 0xf) << 4);
+}
+
+static int ov5647_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5647 *sensor = container_of(ctrl->handler,
+ struct ov5647, ctrls);
+ struct v4l2_subdev *sd = &sensor->sd;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = sensor->mode->format.height + ctrl->val - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ exposure_def);
+ }
+
+ /*
+ * If the device is not powered up do not apply any controls
+ * to H/W at this time. Instead the controls will be restored
+ * at s_stream(1) time.
+ */
+ if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov5647_s_auto_white_balance(sd, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ ret = ov5647_s_autogain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov5647_s_exposure_auto(sd, ctrl->val);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov5647_s_analogue_gain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov5647_s_exposure(sd, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
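+		/* VTS is the active frame height plus the vertical blanking. */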
+ ret = ov5647_write16(sd, OV5647_REG_VTS_HI,
+ sensor->mode->format.height + ctrl->val);
+ break;
+
+ /* Read-only, but we adjust it based on mode. */
+ case V4L2_CID_PIXEL_RATE:
+ case V4L2_CID_HBLANK:
+ break;
+
+ default:
+ dev_info(&client->dev,
+ "Control (id:0x%x, val:0x%x) not supported\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov5647_ctrl_ops = {
+ .s_ctrl = ov5647_s_ctrl,
+};
+
+static int ov5647_init_controls(struct ov5647 *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->sd);
+ int hblank, exposure_max, exposure_def;
+
+ v4l2_ctrl_handler_init(&sensor->ctrls, 8);
+
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std_menu(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL,
+ 0, V4L2_EXPOSURE_MANUAL);
+
+ exposure_max = sensor->mode->vts - 4;
+ exposure_def = min(exposure_max, OV5647_EXPOSURE_DEFAULT);
+ sensor->exposure = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV5647_EXPOSURE_MIN,
+ exposure_max, OV5647_EXPOSURE_STEP,
+ exposure_def);
+
+ /* min: 16 = 1.0x; max (10 bits); default: 32 = 2.0x. */
+ v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, 16, 1023, 1, 32);
+
+	/* By default, PIXEL_RATE is read only, but it does change per mode. */
+ sensor->pixel_rate = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ sensor->mode->pixel_rate,
+ sensor->mode->pixel_rate, 1,
+ sensor->mode->pixel_rate);
+
+ /* By default, HBLANK is read only, but it does change per mode. */
+ hblank = sensor->mode->hts - sensor->mode->format.width;
+ sensor->hblank = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank, 1,
+ hblank);
+
+ sensor->vblank = v4l2_ctrl_new_std(&sensor->ctrls, &ov5647_ctrl_ops,
+ V4L2_CID_VBLANK, OV5647_VBLANK_MIN,
+ OV5647_VTS_MAX -
+ sensor->mode->format.height, 1,
+ sensor->mode->vts -
+ sensor->mode->format.height);
+
+ if (sensor->ctrls.error)
+ goto handler_free;
+
+ sensor->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->sd.ctrl_handler = &sensor->ctrls;
+
+ return 0;
+
+handler_free:
+ dev_err(&client->dev, "%s Controls initialization failed (%d)\n",
+ __func__, sensor->ctrls.error);
+ v4l2_ctrl_handler_free(&sensor->ctrls);
+
+ return sensor->ctrls.error;
+}
+
+static int ov5647_parse_dt(struct ov5647 *sensor, struct device_node *np)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device_node *ep;
int ret;
ep = of_graph_get_next_endpoint(np, NULL);
@@ -542,33 +1344,39 @@ static int ov5647_parse_dt(struct device_node *np)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
+ if (ret)
+ goto out;
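+	/* Record whether the CSI-2 link uses a non-continuous clock. */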
+ sensor->clock_ncont = bus_cfg.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+
+out:
of_node_put(ep);
+
return ret;
}
static int ov5647_probe(struct i2c_client *client)
{
+ struct device_node *np = client->dev.of_node;
struct device *dev = &client->dev;
struct ov5647 *sensor;
- int ret;
struct v4l2_subdev *sd;
- struct device_node *np = client->dev.of_node;
u32 xclk_freq;
+ int ret;
sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return -ENOMEM;
if (IS_ENABLED(CONFIG_OF) && np) {
- ret = ov5647_parse_dt(np);
+ ret = ov5647_parse_dt(sensor, np);
if (ret) {
dev_err(dev, "DT parsing error: %d\n", ret);
return ret;
}
}
- /* get system clock (xclk) */
sensor->xclk = devm_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk)) {
dev_err(dev, "could not get xclk");
@@ -581,52 +1389,87 @@ static int ov5647_probe(struct i2c_client *client)
return -EINVAL;
}
+ /* Request the power down GPIO asserted. */
+ sensor->pwdn = devm_gpiod_get_optional(dev, "pwdn", GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->pwdn)) {
+ dev_err(dev, "Failed to get 'pwdn' gpio\n");
+ return -EINVAL;
+ }
+
mutex_init(&sensor->lock);
+ sensor->mode = OV5647_DEFAULT_MODE;
+
+ ret = ov5647_init_controls(sensor);
+ if (ret)
+ goto mutex_destroy;
+
sd = &sensor->sd;
v4l2_i2c_subdev_init(sd, client, &ov5647_subdev_ops);
- sensor->sd.internal_ops = &ov5647_subdev_internal_ops;
- sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &ov5647_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &sensor->pad);
if (ret < 0)
- goto mutex_remove;
+ goto ctrl_handler_free;
+
+ ret = ov5647_power_on(dev);
+ if (ret)
+ goto entity_cleanup;
ret = ov5647_detect(sd);
if (ret < 0)
- goto error;
+ goto power_off;
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
- goto error;
+ goto power_off;
+
+ /* Enable runtime PM and turn off the device */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
dev_dbg(dev, "OmniVision OV5647 camera driver probed\n");
+
return 0;
-error:
+
+power_off:
+ ov5647_power_off(dev);
+entity_cleanup:
media_entity_cleanup(&sd->entity);
-mutex_remove:
+ctrl_handler_free:
+ v4l2_ctrl_handler_free(&sensor->ctrls);
+mutex_destroy:
mutex_destroy(&sensor->lock);
+
return ret;
}
static int ov5647_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov5647 *ov5647 = to_state(sd);
+ struct ov5647 *sensor = to_sensor(sd);
- v4l2_async_unregister_subdev(&ov5647->sd);
- media_entity_cleanup(&ov5647->sd.entity);
+ v4l2_async_unregister_subdev(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls);
v4l2_device_unregister_subdev(sd);
- mutex_destroy(&ov5647->lock);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&sensor->lock);
return 0;
}
+static const struct dev_pm_ops ov5647_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov5647_power_off, ov5647_power_on, NULL)
+};
+
static const struct i2c_device_id ov5647_id[] = {
{ "ov5647", 0 },
- { }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov5647_id);
@@ -641,7 +1484,8 @@ MODULE_DEVICE_TABLE(of, ov5647_of_match);
static struct i2c_driver ov5647_driver = {
.driver = {
.of_match_table = of_match_ptr(ov5647_of_match),
- .name = SENSOR_NAME,
+ .name = "ov5647",
+ .pm = &ov5647_pm_ops,
},
.probe_new = ov5647_probe,
.remove = ov5647_remove,
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 04d3f1490201..7e0755e1105c 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1937,7 +1937,7 @@ static int ov5670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5670 *ov5670 = to_ov5670(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&ov5670->mutex);
@@ -2153,7 +2153,7 @@ error:
}
static int ov5670_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order GRBG is supported */
@@ -2166,7 +2166,7 @@ static int ov5670_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5670_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -2193,11 +2193,12 @@ static void ov5670_update_pad_format(const struct ov5670_mode *mode,
}
static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd,
+ sd_state,
fmt->pad);
else
ov5670_update_pad_format(ov5670->cur_mode, fmt);
@@ -2206,21 +2207,21 @@ static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
}
static int ov5670_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5670 *ov5670 = to_ov5670(sd);
int ret;
mutex_lock(&ov5670->mutex);
- ret = ov5670_do_get_pad_format(ov5670, cfg, fmt);
+ ret = ov5670_do_get_pad_format(ov5670, sd_state, fmt);
mutex_unlock(&ov5670->mutex);
return ret;
}
static int ov5670_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5670 *ov5670 = to_ov5670(sd);
@@ -2238,7 +2239,7 @@ static int ov5670_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov5670_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov5670->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5670->link_freq, mode->link_freq_index);
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 9540ce8918f0..cf8bec8e9f55 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -926,7 +926,7 @@ static int __maybe_unused ov5675_resume(struct device *dev)
}
static int ov5675_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5675 *ov5675 = to_ov5675(sd);
@@ -941,7 +941,7 @@ static int ov5675_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov5675->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5675->link_freq, mode->link_freq_index);
@@ -967,14 +967,15 @@ static int ov5675_set_format(struct v4l2_subdev *sd,
}
static int ov5675_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5675 *ov5675 = to_ov5675(sd);
mutex_lock(&ov5675->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd,
+ sd_state,
fmt->pad);
else
ov5675_update_pad_format(ov5675->cur_mode, &fmt->format);
@@ -985,7 +986,7 @@ static int ov5675_get_format(struct v4l2_subdev *sd,
}
static int ov5675_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -997,7 +998,7 @@ static int ov5675_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5675_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1020,7 +1021,7 @@ static int ov5675_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov5675->mutex);
return 0;
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index cc678d9d2e0d..eb4b717ea4c8 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -806,7 +806,7 @@ ov5695_find_best_fit(struct v4l2_subdev_format *fmt)
}
static int ov5695_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -822,7 +822,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
#endif
} else {
ov5695->cur_mode = mode;
@@ -841,7 +841,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
}
static int ov5695_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -850,7 +850,8 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov5695->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
#else
mutex_unlock(&ov5695->mutex);
return -EINVAL;
@@ -867,7 +868,7 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
}
static int ov5695_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index != 0)
@@ -878,7 +879,7 @@ static int ov5695_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5695_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1056,7 +1057,7 @@ static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5695 *ov5695 = to_ov5695(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
const struct ov5695_mode *def_mode = &supported_modes[0];
mutex_lock(&ov5695->mutex);
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index d73f9f540932..a273f2db21dc 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -467,7 +467,7 @@ static int ov6650_s_power(struct v4l2_subdev *sd, int on)
}

static int ov6650_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -492,7 +492,7 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
}

static int ov6650_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -535,7 +535,7 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
}

static int ov6650_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -550,9 +550,9 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
/* update media bus format code and frame size */
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf->width = cfg->try_fmt.width;
- mf->height = cfg->try_fmt.height;
- mf->code = cfg->try_fmt.code;
+ mf->width = sd_state->pads->try_fmt.width;
+ mf->height = sd_state->pads->try_fmt.height;
+ mf->code = sd_state->pads->try_fmt.code;
} else {
mf->width = priv->rect.width >> priv->half_scale;
@@ -668,7 +668,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
}

static int ov6650_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -701,15 +701,15 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
/* store media bus format code and frame size in pad config */
- cfg->try_fmt.width = mf->width;
- cfg->try_fmt.height = mf->height;
- cfg->try_fmt.code = mf->code;
+ sd_state->pads->try_fmt.width = mf->width;
+ sd_state->pads->try_fmt.height = mf->height;
+ sd_state->pads->try_fmt.code = mf->code;
/* return default mbus frame format updated with pad config */
*mf = ov6650_def_fmt;
- mf->width = cfg->try_fmt.width;
- mf->height = cfg->try_fmt.height;
- mf->code = cfg->try_fmt.code;
+ mf->width = sd_state->pads->try_fmt.width;
+ mf->height = sd_state->pads->try_fmt.height;
+ mf->code = sd_state->pads->try_fmt.code;
} else {
/* apply new media bus format code and frame size */
@@ -728,7 +728,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
}

static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov6650_codes))
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 0c10203f822b..ebb299f207e5 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -898,7 +898,7 @@ static const struct v4l2_ctrl_ops ov7251_ctrl_ops = {
};

static int ov7251_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index > 0)
@@ -910,7 +910,7 @@ static int ov7251_enum_mbus_code(struct v4l2_subdev *sd,
}

static int ov7251_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->code != MEDIA_BUS_FMT_Y10_1X10)
@@ -928,7 +928,7 @@ static int ov7251_enum_frame_size(struct v4l2_subdev *subdev,
}

static int ov7251_enum_frame_ival(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
unsigned int index = fie->index;
@@ -950,13 +950,13 @@ static int ov7251_enum_frame_ival(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__ov7251_get_pad_format(struct ov7251 *ov7251,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov7251->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ov7251->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->fmt;
default:
@@ -965,13 +965,14 @@ __ov7251_get_pad_format(struct ov7251 *ov7251,
}

static int ov7251_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7251 *ov7251 = to_ov7251(sd);
mutex_lock(&ov7251->lock);
- format->format = *__ov7251_get_pad_format(ov7251, cfg, format->pad,
+ format->format = *__ov7251_get_pad_format(ov7251, sd_state,
+ format->pad,
format->which);
mutex_unlock(&ov7251->lock);
@@ -979,12 +980,13 @@ static int ov7251_get_format(struct v4l2_subdev *sd,
}

static struct v4l2_rect *
-__ov7251_get_pad_crop(struct ov7251 *ov7251, struct v4l2_subdev_pad_config *cfg,
+__ov7251_get_pad_crop(struct ov7251 *ov7251,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov7251->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&ov7251->sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->crop;
default:
@@ -1027,7 +1029,7 @@ ov7251_find_mode_by_ival(struct ov7251 *ov7251, struct v4l2_fract *timeperframe)
}

static int ov7251_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1038,7 +1040,8 @@ static int ov7251_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov7251->lock);
- __crop = __ov7251_get_pad_crop(ov7251, cfg, format->pad, format->which);
+ __crop = __ov7251_get_pad_crop(ov7251, sd_state, format->pad,
+ format->which);
new_mode = v4l2_find_nearest_size(ov7251_mode_info_data,
ARRAY_SIZE(ov7251_mode_info_data),
@@ -1077,7 +1080,7 @@ static int ov7251_set_format(struct v4l2_subdev *sd,
ov7251->current_mode = new_mode;
}
- __format = __ov7251_get_pad_format(ov7251, cfg, format->pad,
+ __format = __ov7251_get_pad_format(ov7251, sd_state, format->pad,
format->which);
__format->width = __crop->width;
__format->height = __crop->height;
@@ -1098,24 +1101,24 @@ exit:
}

static int ov7251_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
- .which = cfg ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.width = 640,
.height = 480
}
};
- ov7251_set_format(subdev, cfg, &fmt);
+ ov7251_set_format(subdev, sd_state, &fmt);
return 0;
}

static int ov7251_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1124,7 +1127,7 @@ static int ov7251_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&ov7251->lock);
- sel->r = *__ov7251_get_pad_crop(ov7251, cfg, sel->pad,
+ sel->r = *__ov7251_get_pad_crop(ov7251, sd_state, sel->pad,
sel->which);
mutex_unlock(&ov7251->lock);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 154776d0069e..c15a0a4e6fb0 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -946,7 +946,7 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
static int ov7670_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= N_OV7670_FMTS)
@@ -1091,7 +1091,7 @@ static int ov7670_apply_fmt(struct v4l2_subdev *sd)
* Set a format.
*/
static int ov7670_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
@@ -1108,7 +1108,8 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
if (ret)
return ret;
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
*mbus_fmt = format->format;
#endif
return 0;
@@ -1130,7 +1131,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
}

static int ov7670_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7670_info *info = to_state(sd);
@@ -1140,7 +1141,7 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
return 0;
#else
@@ -1188,7 +1189,7 @@ static int ov7670_s_frame_interval(struct v4l2_subdev *sd,
static int ov7670_frame_rates[] = { 30, 15, 10, 5, 1 };

static int ov7670_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct ov7670_info *info = to_state(sd);
@@ -1227,7 +1228,7 @@ static int ov7670_enum_frame_interval(struct v4l2_subdev *sd,
* Frame size enumeration
*/
static int ov7670_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ov7670_info *info = to_state(sd);
@@ -1708,7 +1709,7 @@ static void ov7670_get_default_format(struct v4l2_subdev *sd,
static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
ov7670_get_default_format(sd, format);
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 2cc6a678069a..7982ff4aad2a 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1136,7 +1136,7 @@ ov772x_set_fmt_error:
}

static int ov772x_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct ov772x_priv *priv = to_ov772x(sd);
@@ -1158,7 +1158,7 @@ static int ov772x_get_selection(struct v4l2_subdev *sd,
}

static int ov772x_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -1177,7 +1177,7 @@ static int ov772x_get_fmt(struct v4l2_subdev *sd,
}

static int ov772x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov772x_priv *priv = to_ov772x(sd);
@@ -1201,7 +1201,7 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -1299,7 +1299,7 @@ static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
};

static int ov772x_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->pad || fie->index >= ARRAY_SIZE(ov772x_frame_intervals))
@@ -1317,7 +1317,7 @@ static int ov772x_enum_frame_interval(struct v4l2_subdev *sd,
}

static int ov772x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov772x_cfmts))
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 5832461c032d..53bd6294dd04 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -709,7 +709,7 @@ static const struct ov7740_pixfmt ov7740_formats[] = {
#define N_OV7740_FMTS ARRAY_SIZE(ov7740_formats)

static int ov7740_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= N_OV7740_FMTS)
@@ -721,7 +721,7 @@ static int ov7740_enum_mbus_code(struct v4l2_subdev *sd,
}

static int ov7740_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->pad)
@@ -740,7 +740,7 @@ static int ov7740_enum_frame_interval(struct v4l2_subdev *sd,
}

static int ov7740_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad)
@@ -803,7 +803,7 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
}

static int ov7740_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -825,7 +825,8 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto error;
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ format->pad);
*mbus_fmt = format->format;
#endif
mutex_unlock(&ov7740->mutex);
@@ -848,7 +849,7 @@ error:
}

static int ov7740_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -860,7 +861,7 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov7740->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
format->format = *mbus_fmt;
ret = 0;
#else
@@ -905,7 +906,7 @@ static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
mutex_lock(&ov7740->mutex);
ov7740_get_default_format(sd, format);
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 2f4ceaa80593..b28953d0a84d 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1457,7 +1457,7 @@ static int __maybe_unused ov8856_resume(struct device *dev)
}

static int ov8856_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov8856 *ov8856 = to_ov8856(sd);
@@ -1472,7 +1472,7 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov8856->mutex);
ov8856_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
ov8856->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index);
@@ -1498,14 +1498,15 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
}

static int ov8856_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd,
+ sd_state,
fmt->pad);
else
ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
@@ -1516,7 +1517,7 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
}

static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Only one bayer order GRBG is supported */
@@ -1529,7 +1530,7 @@ static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
}

static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
@@ -1552,7 +1553,7 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov8856->mutex);
ov8856_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov8856->mutex);
return 0;
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index e2a25240fc85..38bacd778488 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -519,7 +519,7 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
}

static int ov9640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -547,13 +547,13 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return ov9640_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}

static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(ov9640_codes))
@@ -565,7 +565,7 @@ static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
}

static int ov9640_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 4fe68aa55789..b0b9660e3024 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1070,7 +1070,7 @@ static void ov965x_get_default_format(struct v4l2_mbus_framefmt *mf)
}

static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(ov965x_formats))
@@ -1081,7 +1081,7 @@ static int ov965x_enum_mbus_code(struct v4l2_subdev *sd,
}

static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(ov965x_formats);
@@ -1167,14 +1167,14 @@ static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
}

static int ov965x_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ov965x *ov965x = to_ov965x(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
return 0;
}
@@ -1212,7 +1212,7 @@ static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
}

static int ov965x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
unsigned int index = ARRAY_SIZE(ov965x_formats);
@@ -1234,8 +1234,9 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov965x->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
*mf = fmt->format;
}
} else {
@@ -1364,7 +1365,7 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ v4l2_subdev_get_try_format(sd, fh->state, 0);
ov965x_get_default_format(mf);
return 0;
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 16bcb764b0e0..32cd8f9754bb 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -403,7 +403,7 @@ static int rdacm20_s_stream(struct v4l2_subdev *sd, int enable)
}

static int rdacm20_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
@@ -415,7 +415,7 @@ static int rdacm20_enum_mbus_code(struct v4l2_subdev *sd,
}

static int rdacm20_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index 4cc51e001874..2e4018c26912 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -488,7 +488,7 @@ static int reg_write_multiple(struct i2c_client *client,
}

static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts))
@@ -541,7 +541,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h);

static int rj54n1_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -578,7 +578,7 @@ static int rj54n1_set_selection(struct v4l2_subdev *sd,
}

static int rj54n1_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -603,7 +603,7 @@ static int rj54n1_get_selection(struct v4l2_subdev *sd,
}

static int rj54n1_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -973,7 +973,7 @@ static int rj54n1_reg_init(struct i2c_client *client)
}

static int rj54n1_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -1009,7 +1009,7 @@ static int rj54n1_set_fmt(struct v4l2_subdev *sd,
&mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 71804a70bc6d..e2b88c5e4f98 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -817,7 +817,7 @@ static const struct s5c73m3_frame_size *s5c73m3_find_frame_size(
}

static void s5c73m3_oif_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
@@ -844,8 +844,8 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
*fs = state->oif_pix_size[RES_ISP];
else
*fs = s5c73m3_find_frame_size(
- v4l2_subdev_get_try_format(sd, cfg,
- OIF_ISP_PAD),
+ v4l2_subdev_get_try_format(sd, sd_state,
+ OIF_ISP_PAD),
RES_ISP);
break;
}
@@ -854,7 +854,7 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
}

static void s5c73m3_try_format(struct s5c73m3 *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
@@ -946,7 +946,7 @@ static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
}

static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -984,7 +984,7 @@ static int s5c73m3_oif_get_pad_code(int pad, int index)
}

static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = sensor_sd_to_s5c73m3(sd);
@@ -992,7 +992,8 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
return 0;
}
@@ -1018,7 +1019,7 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
}

static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -1026,7 +1027,8 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
return 0;
}
@@ -1056,7 +1058,7 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
}

static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1066,10 +1068,10 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_try_format(state, cfg, fmt, &frame_size);
+ s5c73m3_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
} else {
switch (fmt->pad) {
@@ -1095,7 +1097,7 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
}

static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct s5c73m3_frame_size *frame_size = NULL;
@@ -1105,13 +1107,14 @@ static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&state->lock);
- s5c73m3_oif_try_format(state, cfg, fmt, &frame_size);
+ s5c73m3_oif_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
if (fmt->pad == OIF_ISP_PAD) {
- mf = v4l2_subdev_get_try_format(sd, cfg, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ OIF_SOURCE_PAD);
mf->width = fmt->format.width;
mf->height = fmt->format.height;
}
@@ -1183,7 +1186,7 @@ static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
}

static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const int codes[] = {
@@ -1199,7 +1202,7 @@ static int s5c73m3_enum_mbus_code(struct v4l2_subdev *sd,
}

static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
int ret;
@@ -1214,7 +1217,7 @@ static int s5c73m3_oif_enum_mbus_code(struct v4l2_subdev *sd,
}

static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int idx;
@@ -1241,7 +1244,7 @@ static int s5c73m3_enum_frame_size(struct v4l2_subdev *sd,
}

static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
@@ -1259,7 +1262,7 @@ static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
if (fse->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg,
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
OIF_ISP_PAD);
w = mf->width;
@@ -1315,11 +1318,11 @@ static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, S5C73M3_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
@@ -1330,15 +1333,15 @@ static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_ISP_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_JPEG_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->pad, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_SOURCE_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
return 0;
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 4e97309a67f4..af9a305242cd 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -525,7 +525,7 @@ static int s5k4ecgx_try_frame_size(struct v4l2_mbus_framefmt *mf,
}

static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k4ecgx_formats))
@@ -535,15 +535,16 @@ static int s5k4ecgx_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}

-static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k4ecgx_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
}
return 0;
@@ -575,7 +576,8 @@ static const struct s5k4ecgx_pixfmt *s5k4ecgx_try_fmt(struct v4l2_subdev *sd,
return &s5k4ecgx_formats[i];
}

-static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
@@ -590,8 +592,8 @@ static int s5k4ecgx_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_confi
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (sd_state) {
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = fmt->format;
}
return 0;
@@ -686,7 +688,9 @@ static int s5k4ecgx_registered(struct v4l2_subdev *sd)
*/
static int s5k4ecgx_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
mf->width = s5k4ecgx_prev_sizes[0].size.width;
mf->height = s5k4ecgx_prev_sizes[0].size.height;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index ec65a8e084c6..6ab9d717256b 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1180,7 +1180,7 @@ static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (fie->index > S5K5BAF_MAX_FR_TIME - S5K5BAF_MIN_FR_TIME ||
@@ -1199,7 +1199,7 @@ static int s5k5baf_enum_frame_interval(struct v4l2_subdev *sd,
}

static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad == PAD_CIS) {
@@ -1217,7 +1217,7 @@ static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
}

static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i;
@@ -1274,15 +1274,16 @@ static int s5k5baf_try_isp_format(struct v4l2_mbus_framefmt *mf)
return pixfmt;
}

-static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k5baf_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct s5k5baf *state = to_s5k5baf(sd);
const struct s5k5baf_pixfmt *pixfmt;
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1304,8 +1305,9 @@ static int s5k5baf_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}

-static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int s5k5baf_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
struct s5k5baf *state = to_s5k5baf(sd);
@@ -1315,7 +1317,7 @@ static int s5k5baf_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
mf->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = *mf;
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = *mf;
return 0;
}
@@ -1367,7 +1369,7 @@ static int s5k5baf_is_bound_target(u32 target)
}

static int s5k5baf_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
enum selection_rect rtype;
@@ -1387,9 +1389,11 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
if (rtype == R_COMPOSE)
- sel->r = *v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
+ sel->pad);
else
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
+ sel->pad);
return 0;
}
@@ -1458,7 +1462,7 @@ static bool s5k5baf_cmp_rect(const struct v4l2_rect *r1,
}

static int s5k5baf_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
static enum selection_rect rtype;
@@ -1479,9 +1483,12 @@ static int s5k5baf_set_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
rects = (struct v4l2_rect * []) {
&s5k5baf_cis_rect,
- v4l2_subdev_get_try_crop(sd, cfg, PAD_CIS),
- v4l2_subdev_get_try_compose(sd, cfg, PAD_CIS),
- v4l2_subdev_get_try_crop(sd, cfg, PAD_OUT)
+ v4l2_subdev_get_try_crop(sd, sd_state,
+ PAD_CIS),
+ v4l2_subdev_get_try_compose(sd, sd_state,
+ PAD_CIS),
+ v4l2_subdev_get_try_crop(sd, sd_state,
+ PAD_OUT)
};
s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
return 0;
@@ -1699,22 +1706,22 @@ static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_CIS);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_CIS);
s5k5baf_try_cis_format(mf);
if (s5k5baf_is_cis_subdev(sd))
return 0;
- mf = v4l2_subdev_get_try_format(sd, fh->pad, PAD_OUT);
+ mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_OUT);
mf->colorspace = s5k5baf_formats[0].colorspace;
mf->code = s5k5baf_formats[0].code;
mf->width = s5k5baf_cis_rect.width;
mf->height = s5k5baf_cis_rect.height;
mf->field = V4L2_FIELD_NONE;
- *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_compose(sd, fh->pad, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_crop(sd, fh->pad, PAD_OUT) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_compose(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_get_try_crop(sd, fh->state, PAD_OUT) = s5k5baf_cis_rect;
return 0;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index f26c168ef942..b97dd6149e90 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -99,7 +99,7 @@ static const struct v4l2_mbus_framefmt *find_sensor_format(
}

static int s5k6a3_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6a3_formats))
@@ -123,17 +123,18 @@ static void s5k6a3_try_format(struct v4l2_mbus_framefmt *mf)
}

static struct v4l2_mbus_framefmt *__s5k6a3_get_format(
- struct s5k6a3 *sensor, struct v4l2_subdev_pad_config *cfg,
+ struct s5k6a3 *sensor, struct v4l2_subdev_state *sd_state,
u32 pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&sensor->subdev, cfg, pad) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&sensor->subdev,
+ sd_state, pad) : NULL;
return &sensor->format;
}

static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
@@ -141,7 +142,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
s5k6a3_try_format(&fmt->format);
- mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, sd_state, fmt->pad, fmt->which);
if (mf) {
mutex_lock(&sensor->lock);
*mf = fmt->format;
@@ -151,13 +152,13 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
}

static int s5k6a3_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6a3 *sensor = sd_to_s5k6a3(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
+ mf = __s5k6a3_get_format(sensor, sd_state, fmt->pad, fmt->which);
mutex_lock(&sensor->lock);
fmt->format = *mf;
@@ -173,7 +174,9 @@ static const struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
static int s5k6a3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
*format = s5k6a3_formats[0];
format->width = S5K6A3_DEFAULT_WIDTH;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 6516e205e9a3..caf37d7cf0c9 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -997,7 +997,7 @@ static int s5k6aa_s_frame_interval(struct v4l2_subdev *sd,
* V4L2 subdev pad level and video operations
*/
static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1024,7 +1024,7 @@ static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
}

static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5k6aa_formats))
@@ -1035,7 +1035,7 @@ static int s5k6aa_enum_mbus_code(struct v4l2_subdev *sd,
}

static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int i = ARRAY_SIZE(s5k6aa_formats);
@@ -1057,14 +1057,15 @@ static int s5k6aa_enum_frame_size(struct v4l2_subdev *sd,
}

static struct v4l2_rect *
-__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa, struct v4l2_subdev_pad_config *cfg,
+__s5k6aa_get_crop_rect(struct s5k6aa *s5k6aa,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
return &s5k6aa->ccd_rect;
WARN_ON(which != V4L2_SUBDEV_FORMAT_TRY);
- return v4l2_subdev_get_try_crop(&s5k6aa->sd, cfg, 0);
+ return v4l2_subdev_get_try_crop(&s5k6aa->sd, sd_state, 0);
}
static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
@@ -1088,7 +1089,8 @@ static void s5k6aa_try_format(struct s5k6aa *s5k6aa,
mf->field = V4L2_FIELD_NONE;
}

-static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k6aa_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1097,7 +1099,7 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
memset(fmt->reserved, 0, sizeof(fmt->reserved));
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
fmt->format = *mf;
return 0;
}
@@ -1109,7 +1111,8 @@ static int s5k6aa_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}

-static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5k6aa_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1122,8 +1125,8 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
s5k6aa_try_format(s5k6aa, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
- crop = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ crop = v4l2_subdev_get_try_crop(sd, sd_state, 0);
} else {
if (s5k6aa->streaming) {
ret = -EBUSY;
@@ -1163,7 +1166,7 @@ static int s5k6aa_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
}

static int s5k6aa_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1175,7 +1178,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
memset(sel->reserved, 0, sizeof(sel->reserved));
mutex_lock(&s5k6aa->lock);
- rect = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
+ rect = __s5k6aa_get_crop_rect(s5k6aa, sd_state, sel->which);
sel->r = *rect;
mutex_unlock(&s5k6aa->lock);
@@ -1186,7 +1189,7 @@ static int s5k6aa_get_selection(struct v4l2_subdev *sd,
}

static int s5k6aa_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct s5k6aa *s5k6aa = to_s5k6aa(sd);
@@ -1198,13 +1201,13 @@ static int s5k6aa_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&s5k6aa->lock);
- crop_r = __s5k6aa_get_crop_rect(s5k6aa, cfg, sel->which);
+ crop_r = __s5k6aa_get_crop_rect(s5k6aa, sd_state, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
mf = &s5k6aa->preset->mbus_fmt;
s5k6aa->apply_crop = 1;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
}
v4l_bound_align_image(&sel->r.width, mf->width,
S5K6AA_WIN_WIDTH_MAX, 1,
@@ -1425,8 +1428,10 @@ static int s5k6aa_initialize_ctrls(struct s5k6aa *s5k6aa)
*/
static int s5k6aa_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
+ struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
format->colorspace = s5k6aa_formats[0].colorspace;
format->code = s5k6aa_formats[0].code;
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 6171ced809bb..a7f043cad149 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -543,7 +543,7 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
}

static int saa6752hs_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f = &format->format;
@@ -563,7 +563,7 @@ static int saa6752hs_get_fmt(struct v4l2_subdev *sd,
}

static int saa6752hs_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f = &format->format;
@@ -595,7 +595,7 @@ static int saa6752hs_set_fmt(struct v4l2_subdev *sd,
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *f;
+ sd_state->pads->try_fmt = *f;
return 0;
}
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 88dc6baac639..a958bbc2c33d 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1167,7 +1167,7 @@ static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
}

static int saa711x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index ba103a6a1875..adf905360171 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -980,7 +980,7 @@ static int saa717x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regi
#endif

static int saa717x_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 6fc0680a93d0..f2da9f1b3821 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1573,7 +1573,7 @@ static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
}

static int smiapp_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct i2c_client *client = v4l2_get_subdevdata(subdev);
@@ -1627,13 +1627,13 @@ static u32 __smiapp_get_mbus_code(struct v4l2_subdev *subdev,
}

static int __smiapp_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(subdev, cfg,
+ fmt->format = *v4l2_subdev_get_try_format(subdev, sd_state,
fmt->pad);
} else {
struct v4l2_rect *r;
@@ -1653,21 +1653,21 @@ static int __smiapp_get_format(struct v4l2_subdev *subdev,
}

static int smiapp_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __smiapp_get_format(subdev, cfg, fmt);
+ rval = __smiapp_get_format(subdev, sd_state, fmt);
mutex_unlock(&sensor->mutex);
return rval;
}

static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect **crops,
struct v4l2_rect **comps, int which)
{
@@ -1683,12 +1683,14 @@ static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
} else {
if (crops) {
for (i = 0; i < subdev->entity.num_pads; i++) {
- crops[i] = v4l2_subdev_get_try_crop(subdev, cfg, i);
+ crops[i] = v4l2_subdev_get_try_crop(subdev,
+ sd_state,
+ i);
BUG_ON(!crops[i]);
}
}
if (comps) {
- *comps = v4l2_subdev_get_try_compose(subdev, cfg,
+ *comps = v4l2_subdev_get_try_compose(subdev, sd_state,
SMIAPP_PAD_SINK);
BUG_ON(!*comps);
}
@@ -1697,14 +1699,14 @@ static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
/* Changes require propagation only on sink pad. */
static void smiapp_propagate(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg, int which,
+ struct v4l2_subdev_state *sd_state, int which,
int target)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
struct v4l2_rect *comp, *crops[SMIAPP_PADS];
- smiapp_get_crop_compose(subdev, cfg, crops, &comp, which);
+ smiapp_get_crop_compose(subdev, sd_state, crops, &comp, which);
switch (target) {
case V4L2_SEL_TGT_CROP:
@@ -1745,7 +1747,7 @@ static const struct smiapp_csi_data_format
}

static int smiapp_set_format_source(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1756,7 +1758,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
unsigned int i;
int rval;
- rval = __smiapp_get_format(subdev, cfg, fmt);
+ rval = __smiapp_get_format(subdev, sd_state, fmt);
if (rval)
return rval;
@@ -1798,7 +1800,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
}

static int smiapp_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1810,7 +1812,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == ssd->source_pad) {
int rval;
- rval = smiapp_set_format_source(subdev, cfg, fmt);
+ rval = smiapp_set_format_source(subdev, sd_state, fmt);
mutex_unlock(&sensor->mutex);
@@ -1832,7 +1834,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
SMIA_LIM(sensor, MIN_Y_OUTPUT_SIZE),
SMIA_LIM(sensor, MAX_Y_OUTPUT_SIZE));
- smiapp_get_crop_compose(subdev, cfg, crops, NULL, fmt->which);
+ smiapp_get_crop_compose(subdev, sd_state, crops, NULL, fmt->which);
crops[ssd->sink_pad]->left = 0;
crops[ssd->sink_pad]->top = 0;
@@ -1840,7 +1842,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
crops[ssd->sink_pad]->height = fmt->format.height;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
ssd->sink_fmt = *crops[ssd->sink_pad];
- smiapp_propagate(subdev, cfg, fmt->which,
+ smiapp_propagate(subdev, sd_state, fmt->which,
V4L2_SEL_TGT_CROP);
mutex_unlock(&sensor->mutex);
@@ -1893,7 +1895,7 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
}

static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -1941,7 +1943,7 @@ static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
* result.
*/
static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel,
struct v4l2_rect **crops,
struct v4l2_rect *comp)
@@ -2057,25 +2059,25 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
}

/* We're only called on source pads. This function sets scaling. */
static int smiapp_set_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
struct v4l2_rect *comp, *crops[SMIAPP_PADS];
- smiapp_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
+ smiapp_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
sel->r.top = 0;
sel->r.left = 0;
if (ssd == sensor->binner)
- smiapp_set_compose_binner(subdev, cfg, sel, crops, comp);
+ smiapp_set_compose_binner(subdev, sd_state, sel, crops, comp);
else
- smiapp_set_compose_scaler(subdev, cfg, sel, crops, comp);
+ smiapp_set_compose_scaler(subdev, sd_state, sel, crops, comp);
*comp = sel->r;
- smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
+ smiapp_propagate(subdev, sd_state, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return smiapp_pll_blanking_update(sensor);
@@ -2127,7 +2129,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
}

static int smiapp_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2135,7 +2137,7 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
struct v4l2_rect *src_size, *crops[SMIAPP_PADS];
struct v4l2_rect _r;
- smiapp_get_crop_compose(subdev, cfg, crops, NULL, sel->which);
+ smiapp_get_crop_compose(subdev, sd_state, crops, NULL, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
if (sel->pad == ssd->sink_pad)
@@ -2146,14 +2148,18 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
if (sel->pad == ssd->sink_pad) {
_r.left = 0;
_r.top = 0;
- _r.width = v4l2_subdev_get_try_format(subdev, cfg, sel->pad)
+ _r.width = v4l2_subdev_get_try_format(subdev,
+ sd_state,
+ sel->pad)
->width;
- _r.height = v4l2_subdev_get_try_format(subdev, cfg, sel->pad)
+ _r.height = v4l2_subdev_get_try_format(subdev,
+ sd_state,
+ sel->pad)
->height;
src_size = &_r;
} else {
src_size = v4l2_subdev_get_try_compose(
- subdev, cfg, ssd->sink_pad);
+ subdev, sd_state, ssd->sink_pad);
}
}
@@ -2171,7 +2177,7 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
*crops[sel->pad] = sel->r;
if (ssd != sensor->pixel_array && sel->pad == SMIAPP_PAD_SINK)
- smiapp_propagate(subdev, cfg, sel->which,
+ smiapp_propagate(subdev, sd_state, sel->which,
V4L2_SEL_TGT_CROP);
return 0;
@@ -2187,7 +2193,7 @@ static void smiapp_get_native_size(struct smiapp_subdev *ssd,
}

static int __smiapp_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2200,13 +2206,14 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
if (ret)
return ret;
- smiapp_get_crop_compose(subdev, cfg, crops, &comp, sel->which);
+ smiapp_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
sink_fmt = ssd->sink_fmt;
} else {
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(subdev, cfg, ssd->sink_pad);
+ v4l2_subdev_get_try_format(subdev, sd_state,
+ ssd->sink_pad);
sink_fmt.left = 0;
sink_fmt.top = 0;
@@ -2237,20 +2244,20 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
}

static int smiapp_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
int rval;
mutex_lock(&sensor->mutex);
- rval = __smiapp_get_selection(subdev, cfg, sel);
+ rval = __smiapp_get_selection(subdev, sd_state, sel);
mutex_unlock(&sensor->mutex);
return rval;
}

static int smiapp_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -2276,10 +2283,10 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- ret = smiapp_set_crop(subdev, cfg, sel);
+ ret = smiapp_set_crop(subdev, sd_state, sel);
break;
case V4L2_SEL_TGT_COMPOSE:
- ret = smiapp_set_compose(subdev, cfg, sel);
+ ret = smiapp_set_compose(subdev, sd_state, sel);
break;
default:
ret = -EINVAL;
@@ -2635,9 +2642,9 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
for (i = 0; i < ssd->npads; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, i);
+ v4l2_subdev_get_try_format(sd, fh->state, i);
struct v4l2_rect *try_crop =
- v4l2_subdev_get_try_crop(sd, fh->pad, i);
+ v4l2_subdev_get_try_crop(sd, fh->state, i);
struct v4l2_rect *try_comp;
smiapp_get_native_size(ssd, try_crop);
@@ -2650,7 +2657,7 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
if (ssd != sensor->pixel_array)
continue;
- try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i);
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
*try_comp = *try_crop;
}
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 46924024faa8..19c0252df2f1 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -468,7 +468,7 @@ static int sr030pc30_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int sr030pc30_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code || code->pad ||
@@ -480,7 +480,7 @@ static int sr030pc30_enum_mbus_code(struct v4l2_subdev *sd,
}
static int sr030pc30_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf;
@@ -525,7 +525,7 @@ static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd,
/* Return nearest media bus frame format. */
static int sr030pc30_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct sr030pc30_info *info = sd ? to_sr030pc30(sd) : NULL;
@@ -541,7 +541,7 @@ static int sr030pc30_set_fmt(struct v4l2_subdev *sd,
fmt = try_fmt(sd, mf);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
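
Every conversion in this series follows the same mechanical pattern: pad operations take a struct v4l2_subdev_state instead of a struct v4l2_subdev_pad_config, and TRY data is reached either through the v4l2_subdev_get_try_*() helpers (which now take the state) or, for single-pad subdevs, directly via sd_state->pads. A minimal before/after sketch with an invented foo_set_fmt(), not taken from any driver above:

    /*
     * Old shape:
     *   static int foo_set_fmt(struct v4l2_subdev *sd,
     *                          struct v4l2_subdev_pad_config *cfg,
     *                          struct v4l2_subdev_format *format);
     * New shape:
     */
    static int foo_set_fmt(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *sd_state,
                           struct v4l2_subdev_format *format)
    {
            if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
                    /* was: v4l2_subdev_get_try_format(sd, cfg, format->pad) */
                    *v4l2_subdev_get_try_format(sd, sd_state, format->pad) =
                            format->format;
                    return 0;
            }

            /* ... program the ACTIVE format into the hardware ... */
            return 0;
    }
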
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 003ba22334cd..69c7624171a8 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -644,7 +644,7 @@ out:
}
static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -671,7 +671,7 @@ static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mipid02_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
@@ -688,7 +688,8 @@ static int mipid02_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&bridge->sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(&bridge->sd, sd_state,
+ format->pad);
else
fmt = &bridge->fmt;
@@ -705,7 +706,7 @@ static int mipid02_get_fmt(struct v4l2_subdev *sd,
}
static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -719,11 +720,11 @@ static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
if (format->which != V4L2_SUBDEV_FORMAT_TRY)
return;
- *v4l2_subdev_get_try_format(sd, cfg, format->pad) = format->format;
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format;
}
static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -732,7 +733,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
format->format.code = get_fmt_code(format->format.code);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
else
fmt = &bridge->fmt;
@@ -740,7 +741,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
}
static int mipid02_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -763,9 +764,9 @@ static int mipid02_set_fmt(struct v4l2_subdev *sd,
}
if (format->pad == MIPID02_SOURCE)
- mipid02_set_fmt_source(sd, cfg, format);
+ mipid02_set_fmt_source(sd, sd_state, format);
else
- mipid02_set_fmt_sink(sd, cfg, format);
+ mipid02_set_fmt_sink(sd, sd_state, format);
error:
mutex_unlock(&bridge->lock);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index f21da11caf22..3205cd8298dd 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1649,7 +1649,7 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
/* --------------- PAD OPS --------------- */
static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->index) {
@@ -1666,7 +1666,7 @@ static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tc358743_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tc358743_state *state = to_state(sd);
@@ -1702,13 +1702,13 @@ static int tc358743_get_fmt(struct v4l2_subdev *sd,
}
static int tc358743_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tc358743_state *state = to_state(sd);
u32 code = format->format.code; /* is overwritten by get_fmt */
- int ret = tc358743_get_fmt(sd, cfg, format);
+ int ret = tc358743_get_fmt(sd, sd_state, format);
format->format.code = code;
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 8476330964fc..c62554fc35e7 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1719,19 +1719,19 @@ static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
*/
static int tda1997x_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct tda1997x_state *state = to_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
mf->code = state->mbus_codes[0];
return 0;
}
static int tda1997x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct tda1997x_state *state = to_state(sd);
@@ -1763,7 +1763,7 @@ static void tda1997x_fill_format(struct tda1997x_state *state,
}
static int tda1997x_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tda1997x_state *state = to_state(sd);
@@ -1776,7 +1776,7 @@ static int tda1997x_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
format->format.code = fmt->code;
} else
format->format.code = state->mbus_code;
@@ -1785,7 +1785,7 @@ static int tda1997x_get_format(struct v4l2_subdev *sd,
}
static int tda1997x_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tda1997x_state *state = to_state(sd);
@@ -1810,7 +1810,7 @@ static int tda1997x_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
*fmt = format->format;
} else {
int ret = tda1997x_setup_format(state, format->format.code);
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index a7fbe5b400c2..5f159588aaeb 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -859,7 +859,7 @@ static const struct v4l2_ctrl_ops tvp514x_ctrl_ops = {
* Enumerates supported mbus codes
*/
static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 pad = code->pad;
@@ -886,7 +886,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
* Retrieves pad format which is active or tried based on requirement
*/
static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -918,7 +918,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
* Set pad format for the output pad
*/
static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3b3221fd3fe8..9b7f262e934a 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1027,7 +1027,7 @@ static void tvp5150_set_default(v4l2_std_id std, struct v4l2_rect *crop)
static struct v4l2_rect *
tvp5150_get_pad_crop(struct tvp5150 *decoder,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
switch (which) {
@@ -1035,7 +1035,7 @@ tvp5150_get_pad_crop(struct tvp5150 *decoder,
return &decoder->rect;
case V4L2_SUBDEV_FORMAT_TRY:
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- return v4l2_subdev_get_try_crop(&decoder->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&decoder->sd, sd_state, pad);
#else
return ERR_PTR(-EINVAL);
#endif
@@ -1045,7 +1045,7 @@ tvp5150_get_pad_crop(struct tvp5150 *decoder,
}
static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *f;
@@ -1104,7 +1104,7 @@ static void tvp5150_set_hw_selection(struct v4l2_subdev *sd,
}
static int tvp5150_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1138,7 +1138,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
sel->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad, sel->which);
+ crop = tvp5150_get_pad_crop(decoder, sd_state, sel->pad, sel->which);
if (IS_ERR(crop))
return PTR_ERR(crop);
@@ -1156,7 +1156,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
}
static int tvp5150_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd);
@@ -1180,7 +1180,7 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd,
sel->r.height = TVP5150_V_MAX_OTHERS;
return 0;
case V4L2_SEL_TGT_CROP:
- crop = tvp5150_get_pad_crop(decoder, cfg, sel->pad,
+ crop = tvp5150_get_pad_crop(decoder, sd_state, sel->pad,
sel->which);
if (IS_ERR(crop))
return PTR_ERR(crop);
@@ -1208,7 +1208,7 @@ static int tvp5150_get_mbus_config(struct v4l2_subdev *sd,
V4L2 subdev pad ops
****************************************************************************/
static int tvp5150_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std;
@@ -1229,7 +1229,7 @@ static int tvp5150_init_cfg(struct v4l2_subdev *sd,
}
static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
@@ -1240,7 +1240,7 @@ static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct tvp5150 *decoder = to_tvp5150(sd);
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index ada4ec5ef782..2de18833b07b 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -797,7 +797,8 @@ static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
* Enumerate supported digital video formats for pad.
*/
static int
-tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/* Check requested format index is within range */
@@ -818,7 +819,8 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cf
* get video format for pad.
*/
static int
-tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tvp7002 *tvp7002 = to_tvp7002(sd);
@@ -841,10 +843,11 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cf
* set video format for pad.
*/
static int
-tvp7002_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+tvp7002_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return tvp7002_get_pad_format(sd, cfg, fmt);
+ return tvp7002_get_pad_format(sd, sd_state, fmt);
}
/* V4L2 core operation handlers */
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index a25a350b0ddc..09f5b3986928 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -720,7 +720,7 @@ tw9910_set_fmt_error:
}
static int tw9910_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -746,7 +746,7 @@ static int tw9910_get_selection(struct v4l2_subdev *sd,
}
static int tw9910_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -797,7 +797,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
}
static int tw9910_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *mf = &format->format;
@@ -829,7 +829,7 @@ static int tw9910_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return tw9910_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ sd_state->pads->try_fmt = *mf;
return 0;
}
@@ -886,7 +886,7 @@ static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
};
static int tw9910_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index)
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index c292c92e37b9..29003dec6f2d 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -546,7 +546,7 @@ static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int vs6624_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(vs6624_formats))
@@ -557,7 +557,7 @@ static int vs6624_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vs6624_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -587,7 +587,7 @@ static int vs6624_set_fmt(struct v4l2_subdev *sd,
fmt->colorspace = vs6624_formats[index].colorspace;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
@@ -637,7 +637,7 @@ static int vs6624_set_fmt(struct v4l2_subdev *sd,
}
static int vs6624_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vs6624 *sensor = to_vs6624(sd);
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index 9e56d2ad6b94..704ef1360eba 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -581,7 +581,7 @@ static void __media_device_unregister_entity(struct media_entity *entity)
struct media_device *mdev = entity->graph_obj.mdev;
struct media_link *link, *tmp;
struct media_interface *intf;
- unsigned int i;
+ struct media_pad *iter;
ida_free(&mdev->entity_internal_idx, entity->internal_idx);
@@ -597,8 +597,8 @@ static void __media_device_unregister_entity(struct media_entity *entity)
__media_entity_remove_links(entity);
/* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
+ media_entity_for_each_pad(entity, iter)
+ media_gobj_destroy(&iter->graph_obj);
/* Remove the entity */
media_gobj_destroy(&entity->graph_obj);
@@ -617,7 +617,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
struct media_entity *entity)
{
struct media_entity_notify *notify, *next;
- unsigned int i;
+ struct media_pad *iter;
int ret;
if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN ||
@@ -646,9 +646,8 @@ int __must_check media_device_register_entity(struct media_device *mdev,
media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj);
/* Initialize objects at the pads */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_create(mdev, MEDIA_GRAPH_PAD,
- &entity->pads[i].graph_obj);
+ media_entity_for_each_pad(entity, iter)
+ media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj);
/* invoke entity_notify callbacks */
list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
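
The media_entity_for_each_pad() iterator used above is introduced in a header that is not part of this diff; a plausible definition, consistent with how it is used here (iteration by struct media_pad pointer over the entity's pad array), would be:

    /* Assumed definition -- the macro itself is not shown in this diff. */
    #define media_entity_for_each_pad(entity, iter)                        \
            for ((iter) = (entity)->pads;                                  \
                 (iter) < &(entity)->pads[(entity)->num_pads];             \
                 (iter)++)
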
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 12b45e669bcc..acf224ae783b 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -198,7 +198,8 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pads)
{
struct media_device *mdev = entity->graph_obj.mdev;
- unsigned int i;
+ struct media_pad *iter;
+ unsigned int i = 0;
if (num_pads >= MEDIA_ENTITY_MAX_PADS)
return -E2BIG;
@@ -209,12 +210,12 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
if (mdev)
mutex_lock(&mdev->graph_mutex);
- for (i = 0; i < num_pads; i++) {
- pads[i].entity = entity;
- pads[i].index = i;
+ media_entity_for_each_pad(entity, iter) {
+ iter->entity = entity;
+ iter->index = i++;
if (mdev)
media_gobj_create(mdev, MEDIA_GRAPH_PAD,
- &entity->pads[i].graph_obj);
+ &iter->graph_obj);
}
if (mdev)
@@ -228,40 +229,63 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init);
* Graph traversal
*/
-static struct media_entity *
-media_entity_other(struct media_entity *entity, struct media_link *link)
+bool media_entity_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
{
- if (link->source->entity == entity)
- return link->sink->entity;
- else
- return link->source->entity;
+ if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+ return false;
+
+ if (pad0 == pad1)
+ return true;
+
+ if (!entity->ops || !entity->ops->has_route)
+ return true;
+
+ if (entity->pads[pad1].index < entity->pads[pad0].index)
+ swap(pad0, pad1);
+
+ return entity->ops->has_route(entity, pad0, pad1);
+}
+EXPORT_SYMBOL_GPL(media_entity_has_route);
+
+struct media_pad *__media_entity_next_routed_pad(struct media_pad *root,
+ struct media_pad *iter)
+{
+ struct media_entity *entity = root->entity;
+
+ for (; iter < &entity->pads[entity->num_pads]; iter++) {
+ if (media_entity_has_route(entity, root->index, iter->index))
+ return iter;
+ }
+
+ return NULL;
}
+EXPORT_SYMBOL_GPL(__media_entity_next_routed_pad);
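
media_entity_has_route() lets entities with internal routing (muxes, multiplexed CSI-2 receivers) report pad-to-pad connectivity through a new has_route entity operation; when the op is absent, every pad pair is treated as routed, and the core canonicalizes the pad order before calling it. The media_entity_for_each_routed_pad() iterator used later in this patch presumably walks pads via __media_entity_next_routed_pad(). A hedged sketch of a driver-side implementation for an invented two-sink mux:

    /* Sketch only: a 2-sink / 1-source mux routing just the active sink. */
    static bool foo_mux_has_route(struct media_entity *entity,
                                  unsigned int pad0, unsigned int pad1)
    {
            struct foo_mux *mux = to_foo_mux(entity);  /* hypothetical */

            /* The core guarantees pad0 < pad1 when this is called. */
            if (pad1 != FOO_MUX_PAD_SOURCE)            /* sink <-> sink */
                    return false;

            return pad0 == mux->active_sink;
    }

    static const struct media_entity_operations foo_mux_entity_ops = {
            .has_route = foo_mux_has_route,
    };
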
/* push an entity to traversal stack */
-static void stack_push(struct media_graph *graph,
- struct media_entity *entity)
+static void stack_push(struct media_graph *graph, struct media_pad *pad)
{
if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) {
WARN_ON(1);
return;
}
graph->top++;
- graph->stack[graph->top].link = entity->links.next;
- graph->stack[graph->top].entity = entity;
+ graph->stack[graph->top].link = pad->entity->links.next;
+ graph->stack[graph->top].pad = pad;
}
-static struct media_entity *stack_pop(struct media_graph *graph)
+static struct media_pad *stack_pop(struct media_graph *graph)
{
- struct media_entity *entity;
+ struct media_pad *pad;
- entity = graph->stack[graph->top].entity;
+ pad = graph->stack[graph->top].pad;
graph->top--;
- return entity;
+ return pad;
}
#define link_top(en) ((en)->stack[(en)->top].link)
-#define stack_top(en) ((en)->stack[(en)->top].entity)
+#define stack_top(en) ((en)->stack[(en)->top].pad)
/**
* media_graph_walk_init - Allocate resources for graph walk
@@ -291,60 +315,81 @@ void media_graph_walk_cleanup(struct media_graph *graph)
}
EXPORT_SYMBOL_GPL(media_graph_walk_cleanup);
-void media_graph_walk_start(struct media_graph *graph,
- struct media_entity *entity)
+void media_graph_walk_start(struct media_graph *graph, struct media_pad *pad)
{
media_entity_enum_zero(&graph->ent_enum);
- media_entity_enum_set(&graph->ent_enum, entity);
+ media_entity_enum_set(&graph->ent_enum, pad->entity);
graph->top = 0;
- graph->stack[graph->top].entity = NULL;
- stack_push(graph, entity);
- dev_dbg(entity->graph_obj.mdev->dev,
- "begin graph walk at '%s'\n", entity->name);
+ graph->stack[graph->top].pad = NULL;
+ stack_push(graph, pad);
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "begin graph walk at '%s':%u\n", pad->entity->name, pad->index);
}
EXPORT_SYMBOL_GPL(media_graph_walk_start);
static void media_graph_walk_iter(struct media_graph *graph)
{
- struct media_entity *entity = stack_top(graph);
+ struct media_pad *pad = stack_top(graph);
struct media_link *link;
- struct media_entity *next;
+ struct media_pad *remote;
+ struct media_pad *local;
link = list_entry(link_top(graph), typeof(*link), list);
/* The link is not enabled so we do not follow. */
if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
link_top(graph) = link_top(graph)->next;
- dev_dbg(entity->graph_obj.mdev->dev,
+ dev_dbg(pad->graph_obj.mdev->dev,
"walk: skipping disabled link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
return;
}
- /* Get the entity in the other end of the link . */
- next = media_entity_other(entity, link);
+ /*
+ * Get the local pad, the remote pad and the entity at the other
+ * end of the link.
+ */
+ if (link->source->entity == pad->entity) {
+ remote = link->sink;
+ local = link->source;
+ } else {
+ remote = link->source;
+ local = link->sink;
+ }
+
+ /*
+ * Are the local pad and the pad we came from connected
+ * internally in the entity ?
+ */
+ if (!media_entity_has_route(pad->entity, pad->index, local->index)) {
+ link_top(graph) = link_top(graph)->next;
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "walk: skipping \"%s\":%u -> %u (no route)\n",
+ pad->entity->name, pad->index, local->index);
+ return;
+ }
/* Has the entity already been visited? */
- if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {
+ if (media_entity_enum_test_and_set(&graph->ent_enum, remote->entity)) {
link_top(graph) = link_top(graph)->next;
- dev_dbg(entity->graph_obj.mdev->dev,
+ dev_dbg(pad->graph_obj.mdev->dev,
"walk: skipping entity '%s' (already seen)\n",
- next->name);
+ remote->entity->name);
return;
}
/* Push the new entity to stack and start over. */
link_top(graph) = link_top(graph)->next;
- stack_push(graph, next);
- dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n",
- next->name);
+ stack_push(graph, remote);
+ dev_dbg(remote->graph_obj.mdev->dev, "walk: pushing '%s':%u on stack\n",
+ remote->entity->name, remote->index);
}
-struct media_entity *media_graph_walk_next(struct media_graph *graph)
+struct media_pad *media_graph_walk_next(struct media_graph *graph)
{
- struct media_entity *entity;
+ struct media_pad *pad;
if (stack_top(graph) == NULL)
return NULL;
@@ -354,14 +399,14 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph)
* top of the stack until no more entities on the level can be
* found.
*/
- while (link_top(graph) != &stack_top(graph)->links)
+ while (link_top(graph) != &stack_top(graph)->entity->links)
media_graph_walk_iter(graph);
- entity = stack_pop(graph);
- dev_dbg(entity->graph_obj.mdev->dev,
- "walk: returning entity '%s'\n", entity->name);
+ pad = stack_pop(graph);
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "walk: returning pad '%s':%u\n", pad->entity->name, pad->index);
- return entity;
+ return pad;
}
EXPORT_SYMBOL_GPL(media_graph_walk_next);
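
With the walk now pad-based, media_graph_walk_start() takes the starting pad and media_graph_walk_next() returns pads; only pads reachable over enabled links and internal routes are visited. A usage sketch (the caller is assumed to hold the graph_mutex, as the pipeline code below does):

    /* Sketch: enumerate every pad reachable from a starting pad. */
    static void foo_dump_reachable_pads(struct media_pad *start)
    {
            struct media_graph graph;
            struct media_pad *pad;

            if (media_graph_walk_init(&graph, start->graph_obj.mdev))
                    return;

            media_graph_walk_start(&graph, start);
            while ((pad = media_graph_walk_next(&graph)))
                    pr_info("reached '%s':%u\n",
                            pad->entity->name, pad->index);

            media_graph_walk_cleanup(&graph);
    }
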
@@ -404,12 +449,12 @@ EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
* Pipeline management
*/
-__must_check int __media_pipeline_start(struct media_entity *entity,
+__must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe)
{
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_device *mdev = pad->graph_obj.mdev;
struct media_graph *graph = &pipe->graph;
- struct media_entity *entity_err = entity;
+ struct media_pad *pad_err = pad;
struct media_link *link;
int ret;
@@ -419,26 +464,32 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
goto error_graph_walk_start;
}
- media_graph_walk_start(&pipe->graph, entity);
+ media_graph_walk_start(&pipe->graph, pad);
+
+ while ((pad = media_graph_walk_next(graph))) {
+ struct media_entity *entity = pad->entity;
+ bool skip_validation = pad->pipe != NULL;
+ struct media_pad *iter;
- while ((entity = media_graph_walk_next(graph))) {
DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS);
DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS);
- entity->stream_count++;
-
- if (entity->pipe && entity->pipe != pipe) {
- pr_err("Pipe active for %s. Can't start for %s\n",
- entity->name,
- entity_err->name);
- ret = -EBUSY;
- goto error;
+ media_entity_for_each_routed_pad(pad, iter) {
+ if (iter->pipe && iter->pipe != pipe) {
+ pr_err("Pipe active for %s. Can't start for %s\n",
+ entity->name, iter->entity->name);
+ ret = -EBUSY;
+ } else {
+ iter->pipe = pipe;
+ }
+ iter->stream_count++;
}
- entity->pipe = pipe;
+ if (ret)
+ goto error;
- /* Already streaming --- no need to check. */
- if (entity->stream_count > 1)
+ /* Already part of the pipeline, skip validation. */
+ if (skip_validation)
continue;
if (!entity->ops || !entity->ops->link_validate)
@@ -448,26 +499,32 @@ __must_check int __media_pipeline_start(struct media_entity *entity,
bitmap_fill(has_no_links, entity->num_pads);
list_for_each_entry(link, &entity->links, list) {
- struct media_pad *pad = link->sink->entity == entity
- ? link->sink : link->source;
+ struct media_pad *other_pad =
+ link->sink->entity == entity ?
+ link->sink : link->source;
+
+ /* Ignore pads to which there is no route. */
+ if (!media_entity_has_route(entity, pad->index,
+ other_pad->index))
+ continue;
/* Mark that a pad is connected by a link. */
- bitmap_clear(has_no_links, pad->index, 1);
+ bitmap_clear(has_no_links, other_pad->index, 1);
/*
* Pads that either do not need to connect or
* are connected through an enabled link are
* fine.
*/
- if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
+ if (!(other_pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
link->flags & MEDIA_LNK_FL_ENABLED)
- bitmap_set(active, pad->index, 1);
+ bitmap_set(active, other_pad->index, 1);
/*
* Link validation will only take place for
* sink ends of the link that are enabled.
*/
- if (link->sink != pad ||
+ if (link->sink != other_pad ||
!(link->flags & MEDIA_LNK_FL_ENABLED))
continue;
@@ -503,21 +560,25 @@ error:
* Link validation on graph failed. We revert what we did and
* return the error.
*/
- media_graph_walk_start(graph, entity_err);
-
- while ((entity_err = media_graph_walk_next(graph))) {
- /* Sanity check for negative stream_count */
- if (!WARN_ON_ONCE(entity_err->stream_count <= 0)) {
- entity_err->stream_count--;
- if (entity_err->stream_count == 0)
- entity_err->pipe = NULL;
+ media_graph_walk_start(graph, pad_err);
+
+ while ((pad_err = media_graph_walk_next(graph))) {
+ struct media_pad *iter;
+
+ media_entity_for_each_routed_pad(pad_err, iter) {
+ /* Sanity check for negative stream_count */
+ if (!WARN_ON_ONCE(iter->stream_count <= 0)) {
+ --iter->stream_count;
+ if (iter->stream_count == 0)
+ iter->pipe = NULL;
+ }
}
/*
* We haven't increased stream_count further than this
* so we quit here.
*/
- if (entity_err == entity)
+ if (pad_err->entity == pad->entity)
break;
}
@@ -529,23 +590,23 @@ error_graph_walk_start:
}
EXPORT_SYMBOL_GPL(__media_pipeline_start);
-__must_check int media_pipeline_start(struct media_entity *entity,
+__must_check int media_pipeline_start(struct media_pad *pad,
struct media_pipeline *pipe)
{
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_device *mdev = pad->graph_obj.mdev;
int ret;
mutex_lock(&mdev->graph_mutex);
- ret = __media_pipeline_start(entity, pipe);
+ ret = __media_pipeline_start(pad, pipe);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_pipeline_start);
-void __media_pipeline_stop(struct media_entity *entity)
+void __media_pipeline_stop(struct media_pad *pad)
{
- struct media_graph *graph = &entity->pipe->graph;
- struct media_pipeline *pipe = entity->pipe;
+ struct media_pipeline *pipe = pad->pipe;
+ struct media_graph *graph = &pipe->graph;
/*
* If the following check fails, the driver has performed an
@@ -554,14 +615,18 @@ void __media_pipeline_stop(struct media_entity *entity)
if (WARN_ON(!pipe))
return;
- media_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, pad);
+
+ while ((pad = media_graph_walk_next(graph))) {
+ struct media_pad *iter;
- while ((entity = media_graph_walk_next(graph))) {
- /* Sanity check for negative stream_count */
- if (!WARN_ON_ONCE(entity->stream_count <= 0)) {
- entity->stream_count--;
- if (entity->stream_count == 0)
- entity->pipe = NULL;
+ media_entity_for_each_routed_pad(pad, iter) {
+ /* Sanity check for negative stream_count */
+ if (!WARN_ON_ONCE(iter->stream_count <= 0)) {
+ iter->stream_count--;
+ if (iter->stream_count == 0)
+ iter->pipe = NULL;
+ }
}
}
@@ -571,12 +636,12 @@ void __media_pipeline_stop(struct media_entity *entity)
}
EXPORT_SYMBOL_GPL(__media_pipeline_stop);
-void media_pipeline_stop(struct media_entity *entity)
+void media_pipeline_stop(struct media_pad *pad)
{
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_device *mdev = pad->graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
- __media_pipeline_stop(entity);
+ __media_pipeline_stop(pad);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_pipeline_stop);
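
media_pipeline_start() and media_pipeline_stop() now take the pad the stream enters or leaves through rather than the whole entity, so only routed pads accumulate stream_count and pipe pointers. For callers the conversion is typically mechanical, e.g. a capture video node (all names below are invented):

    static int foo_start_streaming(struct foo_video *vid)   /* hypothetical */
    {
            int ret;

            /* was: media_pipeline_start(&vid->vdev.entity, &vid->pipe); */
            ret = media_pipeline_start(&vid->pad, &vid->pipe);
            if (ret)
                    return ret;

            /* ... start DMA and the subdev chain ... */
            return 0;
    }

    static void foo_stop_streaming(struct foo_video *vid)
    {
            /* ... stop DMA ... */
            media_pipeline_stop(&vid->pad);
    }
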
@@ -831,7 +896,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
{
const u32 mask = MEDIA_LNK_FL_ENABLED;
struct media_device *mdev;
- struct media_entity *source, *sink;
+ struct media_pad *source, *sink;
int ret = -EBUSY;
if (link == NULL)
@@ -847,8 +912,8 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
if (link->flags == flags)
return 0;
- source = link->source->entity;
- sink = link->sink->entity;
+ source = link->source;
+ sink = link->sink;
if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
(source->stream_count || sink->stream_count))
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index b33eb08631b1..f32d82ec897d 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -930,7 +930,7 @@ static int cx18_av_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int cx18_av_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 2fe4a0bd0284..92b9c191e266 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -33,6 +33,7 @@ struct ipu3_cio2_fmt {
u32 mbus_code;
u32 fourcc;
u8 mipicode;
+ u8 bpp;
};
/*
@@ -46,18 +47,22 @@ static const struct ipu3_cio2_fmt formats[] = {
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
.mipicode = 0x2b,
+ .bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
.mipicode = 0x2b,
+ .bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
.mipicode = 0x2b,
+ .bpp = 10,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
.mipicode = 0x2b,
+ .bpp = 10,
},
};
@@ -288,35 +293,20 @@ static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
- struct cio2_csi2_timing *timing)
+ struct cio2_csi2_timing *timing,
+ unsigned int bpp, unsigned int lanes)
{
struct device *dev = &cio2->pci_dev->dev;
- struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
- struct v4l2_ctrl *link_freq;
s64 freq;
- int r;
if (!q->sensor)
return -ENODEV;
- link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
- if (!link_freq) {
- dev_err(dev, "failed to find LINK_FREQ\n");
- return -EPIPE;
- }
-
- qm.index = v4l2_ctrl_g_ctrl(link_freq);
- r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
- if (r) {
- dev_err(dev, "failed to get menu item\n");
- return r;
- }
-
- if (!qm.value) {
- dev_err(dev, "error invalid link_freq\n");
- return -EINVAL;
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
+ if (freq < 0) {
+ dev_err(dev, "error %lld, invalid link_freq\n", freq);
+ return freq;
}
- freq = qm.value;
timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
@@ -364,7 +354,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
lanes = q->csi2.lanes;
- r = cio2_csi2_calc_timing(cio2, q, &timing);
+ r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
if (r)
return r;
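
The replaced block hand-rolled the V4L2_CID_LINK_FREQ menu lookup; v4l2_get_link_freq() factors that out and, when the sensor exposes only V4L2_CID_PIXEL_RATE, derives the per-lane frequency from the bits-per-pixel and lane count passed in, which is why cio2_csi2_calc_timing() gains bpp/lanes parameters. A hedged sketch of the fallback it performs:

    /*
     * Fallback when only V4L2_CID_PIXEL_RATE exists (matching the
     * helper's documented behaviour):
     *
     *     link_freq = pixel_rate * bpp / divisor
     *
     * where the divisor passed by the caller encodes the lane count
     * (for D-PHY typically 2 * lanes, two bits per clock per lane).
     */
    s64 freq = v4l2_get_link_freq(sensor->ctrl_handler, bpp, lanes);

    if (freq < 0)           /* negative errno, e.g. -ENOENT */
            return freq;
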
@@ -1211,11 +1201,11 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
};
/* Initialize try_fmt */
- format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
*format = fmt_default;
/* same as sink */
- format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
*format = fmt_default;
return 0;
@@ -1229,7 +1219,7 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
* return -EINVAL or zero on success
*/
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
@@ -1237,7 +1227,8 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&q->subdev_lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
else
fmt->format = q->subdev_fmt;
@@ -1254,7 +1245,7 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
@@ -1267,10 +1258,10 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
* source always propagates from sink
*/
if (fmt->pad == CIO2_PAD_SOURCE)
- return cio2_subdev_get_fmt(sd, cfg, fmt);
+ return cio2_subdev_get_fmt(sd, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
else
mbus = &q->subdev_fmt;
@@ -1296,7 +1287,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(formats))
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 76a37fbd8458..aafbb34765b0 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -138,12 +138,15 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
- saa_call_all(dev, pad, set_fmt, &pad_cfg, &format);
+ saa_call_all(dev, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(&f->fmt.pix, &format.format);
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
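
The on-stack pairing of a v4l2_subdev_pad_config with a v4l2_subdev_state seen here recurs in the atmel-isc and atmel-isi hunks below: bridge drivers that only need a throwaway TRY context build a one-element state on the stack. Note this works only because the remote subdev is addressed through pad 0; .pads points at a single element, not an array sized to the subdev. The pattern, distilled:

    /* Sketch: a throwaway single-pad TRY state for format negotiation. */
    struct v4l2_subdev_pad_config pad_cfg = {};
    struct v4l2_subdev_state pad_state = {
            .pads = &pad_cfg,       /* one element: pad 0 only */
    };
    struct v4l2_subdev_format format = {
            .which = V4L2_SUBDEV_FORMAT_TRY,
    };

    v4l2_subdev_call(sd, pad, set_fmt, &pad_state, &format);
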
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e152bbb4fa6..05bdc4cb07f7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -168,6 +168,45 @@ config VIDEO_TI_CAL
In the TI Technical Reference Manual this module is referred to
as the Camera Interface Subsystem (CAMSS).
+if VIDEO_TI_CAL
+
+config VIDEO_TI_CAL_MC
+ bool "Media Controller centric mode by default"
+ default n
+ help
+ Enables Media Controller centric mode by default.
+
+ If set, the CAL driver will start in Media Controller mode by
+ default. Note that this behavior can be overridden via the
+ 'mc_api' module parameter.
+
+endif # VIDEO_TI_CAL
+
+config VIDEO_TI_J721E_CSI2RX
+ tristate "TI J721E CSI2RX wrapper layer driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_SUPPORT && MEDIA_CONTROLLER
+ depends on PHY_CADENCE_DPHY && VIDEO_CADENCE_CSI2RX
+ depends on ARCH_K3 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Support for the TI CSI2RX wrapper layer. This enables only the
+ wrapper driver; the Cadence CSI2RX bridge driver needs to be
+ enabled separately.
+
+config VIDEO_TI_VIP
+ tristate "TI Video Input Port"
+ default n
+ depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_TI_VPDMA
+ select VIDEO_TI_SC
+ select VIDEO_TI_CSC
+ help
+ Driver support for the VIP (Video Input Port) module found on
+ certain TI SoCs.
+
endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
@@ -181,6 +220,8 @@ menuconfig V4L_MEM2MEM_DRIVERS
if V4L_MEM2MEM_DRIVERS
+source "drivers/media/platform/chips-media/Kconfig"
+
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_MXC || COMPILE_TEST)
@@ -495,6 +536,30 @@ config VIDEO_TI_VPE_DEBUG
help
Enable debug messages on VPE driver.
+config VIDEO_IMG_VXD_DEC
+ tristate "IMG VXD DEC (Video Decoder) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_K3
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is an IMG VXD DEC V4L2 driver that adds support for the
+ Imagination D5520 (Video Decoder) hardware.
+ The module name when built is vxd-dec.
+
+config VIDEO_IMG_VXE_ENC
+ tristate "IMG VXE ENC (Video Encoder) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_K3
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is an IMG VXE ENC V4L2 driver that adds support for the
+ Imagination VXE384 (Video Encoder) hardware.
+ The module name when built is vxe-enc.
+
config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 62b6cdc8c730..4336c17ba58a 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
-obj-y += ti-vpe/
+obj-y += ti/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
@@ -80,3 +80,7 @@ obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
obj-y += sunxi/
+
+obj-$(CONFIG_VIDEO_IMG_VXD_DEC) += vxe-vxd/
+obj-$(CONFIG_VIDEO_IMG_VXE_ENC) += vxe-vxd/
+obj-$(CONFIG_VIDEO_WAVE_VPU) += chips-media/
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index fe3ec8d0eaee..6d915d914c62 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -1224,7 +1224,7 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
}
static void isc_try_fse(struct isc_device *isc,
- struct v4l2_subdev_pad_config *pad_cfg)
+ struct v4l2_subdev_state *sd_state)
{
int ret;
struct v4l2_subdev_frame_size_enum fse = {};
@@ -1240,17 +1240,17 @@ static void isc_try_fse(struct isc_device *isc,
fse.which = V4L2_SUBDEV_FORMAT_TRY;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
- pad_cfg, &fse);
+ sd_state, &fse);
/*
* Attempt to obtain format size from subdev. If not available,
* just use the maximum ISC can receive.
*/
if (ret) {
- pad_cfg->try_crop.width = ISC_MAX_SUPPORT_WIDTH;
- pad_cfg->try_crop.height = ISC_MAX_SUPPORT_HEIGHT;
+ sd_state->pads->try_crop.width = ISC_MAX_SUPPORT_WIDTH;
+ sd_state->pads->try_crop.height = ISC_MAX_SUPPORT_HEIGHT;
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
}
@@ -1261,6 +1261,9 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1358,11 +1361,11 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
goto isc_try_fmt_err;
/* Obtain frame sizes if possible to have crop requirements ready */
- isc_try_fse(isc, &pad_cfg);
+ isc_try_fse(isc, &pad_state);
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
goto isc_try_fmt_subdev_err;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index d74aa73f26be..bdbaf47123df 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -556,7 +556,7 @@ static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
}
static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
- struct v4l2_subdev_pad_config *pad_cfg)
+ struct v4l2_subdev_state *sd_state)
{
int ret;
struct v4l2_subdev_frame_size_enum fse = {
@@ -565,17 +565,17 @@ static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
};
ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
- pad_cfg, &fse);
+ sd_state, &fse);
/*
* Attempt to obtain format size from subdev. If not available,
* just use the maximum ISI can receive.
*/
if (ret) {
- pad_cfg->try_crop.width = MAX_SUPPORT_WIDTH;
- pad_cfg->try_crop.height = MAX_SUPPORT_HEIGHT;
+ sd_state->pads->try_crop.width = MAX_SUPPORT_WIDTH;
+ sd_state->pads->try_crop.height = MAX_SUPPORT_HEIGHT;
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
}
@@ -585,6 +585,9 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
const struct isi_format *isi_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -602,10 +605,10 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
- isi_try_fse(isi, isi_fmt, &pad_cfg);
+ isi_try_fse(isi, isi_fmt, &pad_state);
ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index be9ec59774d6..cf44c00c7421 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -30,14 +31,25 @@
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) ((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8)
+#define CSI2RX_DPHY_LANE_CTRL_REG 0x40
+#define CSI2RX_DPHY_CL_RST BIT(16)
+#define CSI2RX_DPHY_DL_RST(i) BIT((i) + 12)
+#define CSI2RX_DPHY_CL_EN BIT(4)
+#define CSI2RX_DPHY_DL_EN(i) BIT(i)
+
#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100)
#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000)
+#define CSI2RX_STREAM_CTRL_SOFT_RST BIT(4)
+#define CSI2RX_STREAM_CTRL_STOP BIT(1)
#define CSI2RX_STREAM_CTRL_START BIT(0)
+#define CSI2RX_STREAM_STATUS_REG(n) (CSI2RX_STREAM_BASE(n) + 0x004)
+#define CSI2RX_STREAM_STATUS_RDY BIT(31)
+
#define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008)
-#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
+#define CSI2RX_STREAM_DATA_CFG_VC_ALL 0
#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8)
@@ -54,6 +66,11 @@ enum csi2rx_pads {
CSI2RX_PAD_MAX,
};
+struct csi2rx_fmt {
+ u32 code;
+ u8 bpp;
+};
+
struct csi2rx_priv {
struct device *dev;
unsigned int count;
@@ -75,6 +92,7 @@ struct csi2rx_priv {
u8 max_lanes;
u8 max_streams;
bool has_internal_dphy;
+ struct v4l2_mbus_framefmt fmt;
struct v4l2_subdev subdev;
struct v4l2_async_notifier notifier;
@@ -86,6 +104,172 @@ struct csi2rx_priv {
int source_pad;
};
+static const struct csi2rx_fmt formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .bpp = 8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .bpp = 8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .bpp = 8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .bpp = 8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .bpp = 10,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .bpp = 10,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .bpp = 10,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 10,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .bpp = 12,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .bpp = 12,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .bpp = 12,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .bpp = 12,
+ },
+};
+
+static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].code == code)
+ return &formats[i];
+
+ return NULL;
+}
+
+static u8 csi2rx_get_bpp(u32 code)
+{
+ const struct csi2rx_fmt *fmt = csi2rx_get_fmt_by_code(code);
+
+ return (fmt) ? fmt->bpp : 0;
+}
+
+static int csi2rx_get_frame_desc_from_source(struct csi2rx_priv *csi2rx,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct media_pad *remote_pad;
+
+ remote_pad = media_entity_remote_pad(&csi2rx->pads[CSI2RX_PAD_SINK]);
+ if (!remote_pad) {
+ dev_err(csi2rx->dev, "No remote pad found for sink\n");
+ return -ENODEV;
+ }
+
+ return v4l2_subdev_call(csi2rx->source_subdev, pad, get_frame_desc,
+ remote_pad->index, fd);
+}
+
+static s64 csi2rx_get_link_freq(struct csi2rx_priv *csi2rx)
+{
+ struct v4l2_mbus_frame_desc fd;
+ bool has_fd = true;
+ int ret;
+ u8 bpp;
+
+ /* First check if the source is sending a multiplexed stream. */
+ ret = csi2rx_get_frame_desc_from_source(csi2rx, &fd);
+ if (ret == -ENOIOCTLCMD)
+ /*
+ * Assume not multiplexed if source can't send frame descriptor.
+ */
+ has_fd = false;
+ else if (ret)
+ return ret;
+
+ if (has_fd && fd.num_entries > 1) {
+ /*
+ * With multistream input we don't have bpp, and cannot use
+ * V4L2_CID_PIXEL_RATE. Passing 0 as bpp causes
+ * v4l2_get_link_freq() to return an error if it falls back to
+ * V4L2_CID_PIXEL_RATE.
+ */
+ bpp = 0;
+ } else if (has_fd && fd.num_entries == 1) {
+ bpp = csi2rx_get_bpp(fd.entry[0].pixelcode);
+ if (!bpp)
+ return -EINVAL;
+ } else {
+ struct v4l2_subdev_format sd_fmt;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ sd_fmt.stream = 0;
+
+ ret = v4l2_subdev_call(csi2rx->source_subdev, pad, get_fmt,
+ NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ bpp = csi2rx_get_bpp(sd_fmt.format.code);
+ if (!bpp)
+ return -EINVAL;
+ }
+
+ return v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler, bpp,
+ 2 * csi2rx->num_lanes);
+}
+
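
The 2 * num_lanes divisor in csi2rx_get_link_freq() reflects D-PHY DDR signalling: each lane moves two bits per link-clock cycle, so the required per-lane clock is half the raw per-lane bit rate. A worked example with assumed numbers:

    /*
     * Illustrative numbers only:
     *   pixel_rate = 148500000  (1080p60)
     *   bpp        = 16         (MEDIA_BUS_FMT_UYVY8_1X16)
     *   num_lanes  = 4
     *
     * link_freq = 148500000 * 16 / (2 * 4) = 297 MHz per lane,
     * i.e. 594 Mbit/s of DDR data on each lane.
     */
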
static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
@@ -94,12 +278,65 @@ struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
+ int i;
+
writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
csi2rx->base + CSI2RX_SOFT_RESET_REG);
udelay(10);
writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
+
+ /* Reset individual streams. */
+ for (i = 0; i < csi2rx->max_streams; i++) {
+ writel(CSI2RX_STREAM_CTRL_SOFT_RST,
+ csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ usleep_range(10, 20);
+ writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ }
+}
+
+static int csi2rx_configure_external_dphy(struct csi2rx_priv *csi2rx)
+{
+ union phy_configure_opts opts = { };
+ struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+ s64 link_freq;
+ int ret;
+
+ link_freq = csi2rx_get_link_freq(csi2rx);
+ if (link_freq < 0)
+ return link_freq;
+
+ /* link_freq already takes bpp and num_lanes into account. */
+ ret = phy_mipi_dphy_get_default_config(link_freq, 1, 1, cfg);
+ if (ret)
+ return ret;
+
+ cfg->lanes = csi2rx->num_lanes;
+
+ ret = phy_pm_runtime_get_sync(csi2rx->dphy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
+
+ ret = phy_set_mode_ext(csi2rx->dphy, PHY_MODE_MIPI_DPHY,
+ PHY_MIPI_DPHY_SUBMODE_RX);
+ if (ret)
+ goto out;
+
+ ret = phy_power_on(csi2rx->dphy);
+ if (ret)
+ goto out;
+
+ ret = phy_configure(csi2rx->dphy, &opts);
+ if (ret) {
+ /* Can't do anything if it fails. Ignore the return value. */
+ phy_power_off(csi2rx->dphy);
+ goto out;
+ }
+
+out:
+ phy_pm_runtime_put(csi2rx->dphy);
+ return ret;
}
static int csi2rx_start(struct csi2rx_priv *csi2rx)
@@ -136,9 +373,23 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
- ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
- if (ret)
- goto err_disable_pclk;
+ /* Enable DPHY clk and data lanes. */
+ if (csi2rx->dphy) {
+ reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
+ for (i = 0; i < csi2rx->num_lanes; i++) {
+ reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
+ reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
+ }
+
+ writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+
+ ret = csi2rx_configure_external_dphy(csi2rx);
+ if (ret) {
+ dev_err(csi2rx->dev,
+ "Failed to configure external DPHY: %d\n", ret);
+ goto err_disable_pclk;
+ }
+ }
/*
* Create a static mapping between the CSI virtual channels
@@ -158,8 +409,8 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
- writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
- CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
+ /* Let all virtual channels through. */
+ writel(CSI2RX_STREAM_DATA_CFG_VC_ALL,
csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));
writel(CSI2RX_STREAM_CTRL_START,
@@ -170,14 +421,24 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
if (ret)
goto err_disable_pixclk;
+ ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+ if (ret)
+ goto err_disable_sysclk;
+
clk_disable_unprepare(csi2rx->p_clk);
return 0;
+err_disable_sysclk:
+ clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
for (; i > 0; i--)
clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
+ if (csi2rx->dphy) {
+ phy_power_off(csi2rx->dphy);
+ phy_pm_runtime_put(csi2rx->dphy);
+ }
err_disable_pclk:
clk_disable_unprepare(csi2rx->p_clk);
@@ -187,12 +448,23 @@ err_disable_pclk:
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
unsigned int i;
+ u32 val;
+ int ret;
clk_prepare_enable(csi2rx->p_clk);
clk_disable_unprepare(csi2rx->sys_clk);
for (i = 0; i < csi2rx->max_streams; i++) {
- writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ writel(CSI2RX_STREAM_CTRL_STOP,
+ csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+
+ ret = readl_relaxed_poll_timeout(csi2rx->base +
+ CSI2RX_STREAM_STATUS_REG(i),
+ val,
+ (val & CSI2RX_STREAM_STATUS_RDY),
+ 10, 10000);
+ if (ret)
+ dev_warn(csi2rx->dev, "Failed to stop stream%d\n", i);
clk_disable_unprepare(csi2rx->pixel_clk[i]);
}
@@ -201,6 +473,15 @@ static void csi2rx_stop(struct csi2rx_priv *csi2rx)
if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");
+
+ if (csi2rx->dphy) {
+ writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+
+ if (phy_power_off(csi2rx->dphy))
+ dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
+
+ phy_pm_runtime_put(csi2rx->dphy);
+ }
}
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
@@ -208,6 +489,12 @@ static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
int ret = 0;
+ if (enable) {
+ ret = pm_runtime_resume_and_get(csi2rx->dev);
+ if (ret < 0)
+ return ret;
+ }
+
mutex_lock(&csi2rx->lock);
if (enable) {
@@ -217,8 +504,10 @@ static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
*/
if (!csi2rx->count) {
ret = csi2rx_start(csi2rx);
- if (ret)
+ if (ret) {
+ pm_runtime_put(csi2rx->dev);
goto out;
+ }
}
csi2rx->count++;
@@ -230,6 +519,8 @@ static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
*/
if (!csi2rx->count)
csi2rx_stop(csi2rx);
+
+ pm_runtime_put(csi2rx->dev);
}
out:
@@ -237,12 +528,118 @@ out:
return ret;
}
+static int csi2rx_get_frame_desc(struct v4l2_subdev *subdev, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+
+ return csi2rx_get_frame_desc_from_source(csi2rx, fd);
+}
+
+static struct v4l2_mbus_framefmt *
+csi2rx_get_pad_format(struct csi2rx_priv *csi2rx,
+ struct v4l2_subdev_state *state,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&csi2rx->subdev, state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &csi2rx->fmt;
+ default:
+ return NULL;
+ }
+}
+
+static int csi2rx_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ framefmt = csi2rx_get_pad_format(csi2rx, state, format->pad,
+ format->which);
+ if (!framefmt)
+ return -EINVAL;
+
+ mutex_lock(&csi2rx->lock);
+ format->format = *framefmt;
+ mutex_unlock(&csi2rx->lock);
+
+ return 0;
+}
+
+static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+ struct v4l2_mbus_framefmt *sinkfmt, *srcfmt;
+
+ /* No transcoding, source and sink formats must match. */
+ if (format->pad != CSI2RX_PAD_SINK)
+ return csi2rx_get_fmt(subdev, state, format);
+
+ if (!csi2rx_get_fmt_by_code(format->format.code))
+ format->format.code = formats[0].code;
+
+ format->format.field = V4L2_FIELD_NONE;
+
+ sinkfmt = csi2rx_get_pad_format(csi2rx, state, format->pad,
+ format->which);
+ if (!sinkfmt)
+ return -EINVAL;
+
+ srcfmt = csi2rx_get_pad_format(csi2rx, state, CSI2RX_PAD_SOURCE_STREAM0,
+ format->which);
+ if (!srcfmt)
+ return -EINVAL;
+
+ mutex_lock(&csi2rx->lock);
+ *sinkfmt = format->format;
+ *srcfmt = format->format;
+ mutex_unlock(&csi2rx->lock);
+
+ return 0;
+}
+
+static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format format = {
+ .which = state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2RX_PAD_SINK,
+ .format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ },
+ };
+
+ return csi2rx_set_fmt(subdev, state, &format);
+}
+
static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
.s_stream = csi2rx_s_stream,
};
+static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
+ .get_fmt = csi2rx_get_fmt,
+ .set_fmt = csi2rx_set_fmt,
+ .init_cfg = csi2rx_init_cfg,
+ .get_frame_desc = csi2rx_get_frame_desc,
+};
+
static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
.video = &csi2rx_video_ops,
+ .pad = &csi2rx_pad_ops,
};
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
@@ -307,15 +704,6 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
return PTR_ERR(csi2rx->dphy);
}
- /*
- * FIXME: Once we'll have external D-PHY support, the check
- * will need to be removed.
- */
- if (csi2rx->dphy) {
- dev_err(&pdev->dev, "External D-PHY not supported yet\n");
- return -EINVAL;
- }
-
clk_prepare_enable(csi2rx->p_clk);
dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
clk_disable_unprepare(csi2rx->p_clk);
@@ -340,7 +728,7 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
* FIXME: Once we'll have internal D-PHY support, the check
* will need to be removed.
*/
- if (csi2rx->has_internal_dphy) {
+ if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
return -EINVAL;
}
@@ -417,6 +805,29 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return ret;
}
+static int csi2rx_suspend(struct device *dev)
+{
+ struct csi2rx_priv *csi2rx = dev_get_drvdata(dev);
+
+ mutex_lock(&csi2rx->lock);
+ if (csi2rx->count)
+ csi2rx_stop(csi2rx);
+ mutex_unlock(&csi2rx->lock);
+
+ return 0;
+}
+
+static int csi2rx_resume(struct device *dev)
+{
+ struct csi2rx_priv *csi2rx = dev_get_drvdata(dev);
+
+ mutex_lock(&csi2rx->lock);
+ if (csi2rx->count)
+ csi2rx_start(csi2rx);
+ mutex_unlock(&csi2rx->lock);
+
+ return 0;
+}
+
static int csi2rx_probe(struct platform_device *pdev)
{
struct csi2rx_priv *csi2rx;
@@ -450,24 +861,34 @@ static int csi2rx_probe(struct platform_device *pdev)
csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+ csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
csi2rx->pads);
if (ret)
goto err_cleanup;
+ ret = csi2rx_init_cfg(&csi2rx->subdev, NULL);
+ if (ret)
+ goto err_cleanup;
+
+ pm_runtime_enable(csi2rx->dev);
ret = v4l2_async_register_subdev(&csi2rx->subdev);
if (ret < 0)
- goto err_cleanup;
+ goto pm_disable;
dev_info(&pdev->dev,
"Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
+ csi2rx->dphy ? "external" :
csi2rx->has_internal_dphy ? "internal" : "no");
return 0;
+pm_disable:
+ pm_runtime_disable(csi2rx->dev);
err_cleanup:
+ v4l2_async_notifier_unregister(&csi2rx->notifier);
v4l2_async_notifier_cleanup(&csi2rx->notifier);
err_free_priv:
kfree(csi2rx);
@@ -478,12 +899,19 @@ static int csi2rx_remove(struct platform_device *pdev)
{
struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+ v4l2_async_notifier_unregister(&csi2rx->notifier);
+ v4l2_async_notifier_cleanup(&csi2rx->notifier);
v4l2_async_unregister_subdev(&csi2rx->subdev);
+ pm_runtime_disable(csi2rx->dev);
kfree(csi2rx);
return 0;
}
+static const struct dev_pm_ops csi2rx_pm_ops = {
+ SET_RUNTIME_PM_OPS(csi2rx_suspend, csi2rx_resume, NULL)
+};
+
static const struct of_device_id csi2rx_of_table[] = {
{ .compatible = "cdns,csi2rx" },
{ },
@@ -497,6 +925,7 @@ static struct platform_driver csi2rx_driver = {
.driver = {
.name = "cdns-csi2rx",
.of_match_table = csi2rx_of_table,
+ .pm = &csi2rx_pm_ops,
},
};
module_platform_driver(csi2rx_driver);
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index e4d08acfbb49..537076fe14e5 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -156,7 +156,7 @@ static const struct csi2tx_fmt *csi2tx_get_fmt_from_mbus(u32 mbus)
}
static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index >= ARRAY_SIZE(csi2tx_formats))
@@ -169,20 +169,20 @@ static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev,
static struct v4l2_mbus_framefmt *
__csi2tx_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(subdev, cfg,
+ return v4l2_subdev_get_try_format(subdev, sd_state,
fmt->pad);
return &csi2tx->pad_fmts[fmt->pad];
}
static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct v4l2_mbus_framefmt *format;
@@ -191,7 +191,7 @@ static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
if (fmt->pad == CSI2TX_PAD_SOURCE)
return -EINVAL;
- format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ format = __csi2tx_get_pad_format(subdev, sd_state, fmt);
if (!format)
return -EINVAL;
@@ -201,7 +201,7 @@ static int csi2tx_get_pad_format(struct v4l2_subdev *subdev,
}
static int csi2tx_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
const struct v4l2_mbus_framefmt *src_format = &fmt->format;
@@ -214,7 +214,7 @@ static int csi2tx_set_pad_format(struct v4l2_subdev *subdev,
if (!csi2tx_get_fmt_from_mbus(fmt->format.code))
src_format = &fmt_default;
- dst_format = __csi2tx_get_pad_format(subdev, cfg, fmt);
+ dst_format = __csi2tx_get_pad_format(subdev, sd_state, fmt);
if (!dst_format)
return -EINVAL;
diff --git a/drivers/media/platform/chips-media/Kconfig b/drivers/media/platform/chips-media/Kconfig
new file mode 100644
index 000000000000..30ccd991cac3
--- /dev/null
+++ b/drivers/media/platform/chips-media/Kconfig
@@ -0,0 +1,2 @@
+ comment "Chips&Media media platform drivers"
+source "drivers/media/platform/chips-media/wave5/Kconfig"
diff --git a/drivers/media/platform/chips-media/Makefile b/drivers/media/platform/chips-media/Makefile
new file mode 100644
index 000000000000..9941f8dd0966
--- /dev/null
+++ b/drivers/media/platform/chips-media/Makefile
@@ -0,0 +1,2 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+obj-y += wave5/
diff --git a/drivers/media/platform/chips-media/wave5/Kconfig b/drivers/media/platform/chips-media/wave5/Kconfig
new file mode 100644
index 000000000000..a3b949356cd5
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_WAVE_VPU
+ tristate "Chips&Media Wave Codec Driver"
+ depends on VIDEO_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ help
+ Chips&Media stateful encoder and decoder driver.
+ The driver supports HEVC and H264 formats.
+ To compile this driver as a module, choose M here: the
+ module will be called wave5.
diff --git a/drivers/media/platform/chips-media/wave5/Makefile b/drivers/media/platform/chips-media/wave5/Makefile
new file mode 100644
index 000000000000..3d738a03bd8e
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_WAVE_VPU) += wave5.o
+wave5-objs += wave5-hw.o \
+ wave5-vpuapi.o \
+ wave5-vdi.o \
+ wave5-vpu-dec.o \
+ wave5-vpu.o \
+ wave5-vpu-enc.o \
+ wave5-helper.o
diff --git a/drivers/media/platform/chips-media/wave5/TODO b/drivers/media/platform/chips-media/wave5/TODO
new file mode 100644
index 000000000000..2164fd071a56
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/TODO
@@ -0,0 +1,19 @@
+* Handle interrupts better
+
+Currently the interrupt handling uses an unusual design, employing a kfifo to
+transfer the IRQ status from the hard IRQ handler to the IRQ thread. This was
+done as a workaround for dropped interrupts seen with IRQF_ONESHOT based
+handling (see the sketch after this file).
+
+This needs further investigation and a proper fix, with the aid of C&M.
+
+* Power management handling - add (runtime_)suspend/resume callbacks in which
+  the clock is enabled
+
+* Revise the logic of wave5_vpu_(dec/enc)_register_framebuffer
+
+* Check whether the normal kernel endianness/__swab32 routines suffice,
+  instead of the ones implemented in the driver
+
+* Adjust the STREAMON routine of the stateful decoder to adhere to the API,
+  which allows STREAMON before source buffers have been queued.
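As context for the first TODO item above, here is a minimal sketch of a kfifo-based handoff between the hard IRQ handler and the IRQ thread. The read_and_clear_hw_status() and handle_status() helpers are hypothetical stand-ins; the driver's actual handler differs in its details.

	#include <linux/interrupt.h>
	#include <linux/kfifo.h>

	/* One u32 status word per pending interrupt; 16 slots. */
	static DEFINE_KFIFO(irq_status_fifo, u32, 16);

	static irqreturn_t vpu_irq_top(int irq, void *dev_id)
	{
		/* Hypothetical helper: read and ack the status in hard IRQ context. */
		u32 status = read_and_clear_hw_status(dev_id);

		if (!kfifo_put(&irq_status_fifo, status))
			pr_warn_ratelimited("vpu: irq status dropped\n");

		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t vpu_irq_thread(int irq, void *dev_id)
	{
		u32 status;

		/* Drain every status word queued since the thread last ran. */
		while (kfifo_get(&irq_status_fifo, &status))
			handle_status(dev_id, status);	/* hypothetical helper */

		return IRQ_HANDLED;
	}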
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.c b/drivers/media/platform/chips-media/wave5/wave5-helper.c
new file mode 100644
index 000000000000..d6cf2bbbc58d
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - decoder interface
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+void wave5_cleanup_instance(struct vpu_instance *inst)
+{
+ int i;
+
+ for (i = 0; i < inst->dst_buf_count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[i]);
+
+ wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
+ v4l2_ctrl_handler_free(&inst->v4l2_ctrl_hdl);
+ if (inst->v4l2_m2m_dev)
+ v4l2_m2m_release(inst->v4l2_m2m_dev);
+ if (inst->v4l2_fh.vdev) {
+ v4l2_fh_del(&inst->v4l2_fh);
+ v4l2_fh_exit(&inst->v4l2_fh);
+ }
+ list_del_init(&inst->list);
+ kfifo_free(&inst->irq_status);
+ ida_free(&inst->dev->inst_ida, inst->id);
+ kfree(inst);
+}
+
+int wave5_vpu_release_device(struct file *filp,
+ int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
+ char *name)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(filp->private_data);
+ struct vpu_device *dev = inst->dev;
+ int ret = 0;
+
+ v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
+ if (inst->state != VPU_INST_STATE_NONE) {
+ u32 fail_res;
+ int retry_count = 10;
+
+ do {
+ fail_res = 0;
+ ret = close_func(inst, &fail_res);
+ if (ret && ret != -EIO)
+ break;
+ if (fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING)
+ break;
+ if (!wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT))
+ break;
+ } while (--retry_count);
+
+ if (fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING) {
+ dev_err(inst->dev->dev, "%s close failed, device is still running\n",
+ name);
+ return -EBUSY;
+ }
+ if (ret && ret != -EIO) {
+ dev_err(inst->dev->dev, "%s close, fail: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ wave5_cleanup_instance(inst);
+ if (dev->irq < 0) {
+ ret = mutex_lock_interruptible(&dev->dev_lock);
+ if (ret)
+ return ret;
+
+ if (list_empty(&dev->instances)) {
+ dev_dbg(dev->dev, "Disabling the hrtimer\n");
+ hrtimer_cancel(&dev->hrtimer);
+ }
+
+ mutex_unlock(&dev->dev_lock);
+ }
+
+ return ret;
+}
+
+int wave5_vpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq,
+ const struct vb2_ops *ops)
+{
+ struct vpu_instance *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->ops = ops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->buf_struct_size = sizeof(struct vpu_buffer);
+ src_vq->drv_priv = inst;
+ src_vq->lock = &inst->dev->dev_lock;
+ src_vq->dev = inst->dev->v4l2_dev.dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->ops = ops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->buf_struct_size = sizeof(struct vpu_buffer);
+ dst_vq->drv_priv = inst;
+ dst_vq->lock = &inst->dev->dev_lock;
+ dst_vq->dev = inst->dev->v4l2_dev.dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ bool is_decoder = inst->type == VPU_INST_TYPE_DEC;
+
+ dev_dbg(inst->dev->dev, "%s: [%s] type: %u id: %u | flags: %u\n", __func__,
+ is_decoder ? "decoder" : "encoder", sub->type, sub->id, sub->flags);
+
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (is_decoder)
+ return v4l2_src_change_event_subscribe(fh, sub);
+ return -EINVAL;
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->src_fmt.width;
+ f->fmt.pix_mp.height = inst->src_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->src_fmt.field;
+ f->fmt.pix_mp.flags = inst->src_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->src_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->src_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.hsv_enc = inst->hsv_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+const struct vpu_format *wave5_find_vpu_fmt(unsigned int v4l2_pix_fmt,
+ const struct vpu_format fmt_list[MAX_FMTS])
+{
+ unsigned int index;
+
+ for (index = 0; index < MAX_FMTS; index++) {
+ if (fmt_list[index].v4l2_pix_fmt == v4l2_pix_fmt)
+ return &fmt_list[index];
+ }
+
+ return NULL;
+}
+
+const struct vpu_format *wave5_find_vpu_fmt_by_idx(unsigned int idx,
+ const struct vpu_format fmt_list[MAX_FMTS])
+{
+ if (idx >= MAX_FMTS)
+ return NULL;
+
+ if (!fmt_list[idx].v4l2_pix_fmt)
+ return NULL;
+
+ return &fmt_list[idx];
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.h b/drivers/media/platform/chips-media/wave5/wave5-helper.h
new file mode 100644
index 000000000000..d586d624275e
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - basic types
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE_HELPER_H__
+#define __WAVE_HELPER_H__
+
+#include "wave5-vpu.h"
+
+#define FMT_TYPES 2
+#define MAX_FMTS 6
+
+void wave5_cleanup_instance(struct vpu_instance *inst);
+int wave5_vpu_release_device(struct file *filp,
+ int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
+ char *name);
+int wave5_vpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq,
+ const struct vb2_ops *ops);
+int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
+int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f);
+const struct vpu_format *wave5_find_vpu_fmt(unsigned int v4l2_pix_fmt,
+ const struct vpu_format fmt_list[MAX_FMTS]);
+const struct vpu_format *wave5_find_vpu_fmt_by_idx(unsigned int idx,
+ const struct vpu_format fmt_list[MAX_FMTS]);
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
new file mode 100644
index 000000000000..63d66dae34db
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -0,0 +1,3372 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - wave5 backend logic
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include <linux/iopoll.h>
+#include "wave5-vpu.h"
+#include "wave5.h"
+#include "wave5-regdefine.h"
+
+#define FIO_TIMEOUT 10000000
+#define FIO_CTRL_READY BIT(31)
+#define FIO_CTRL_WRITE BIT(16)
+#define VPU_BUSY_CHECK_TIMEOUT 10000000
+#define QUEUE_REPORT_MASK 0xffff
+
+static void wave5_print_reg_err(struct vpu_device *vpu_dev, u32 reg_fail_reason)
+{
+ void *caller = __builtin_return_address(0);
+ struct device *dev = vpu_dev->dev;
+ u32 reg_val;
+
+ switch (reg_fail_reason) {
+ case WAVE5_SYSERR_QUEUEING_FAIL:
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_QUEUE_FAIL_REASON);
+ dev_dbg(dev, "%ps: queueing failure: 0x%x\n", caller, reg_val);
+ break;
+ case WAVE5_SYSERR_RESULT_NOT_READY:
+ dev_err(dev, "%ps: result not ready: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_ACCESS_VIOLATION_HW:
+ dev_err(dev, "%ps: access violation: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_WATCHDOG_TIMEOUT:
+ dev_err(dev, "%ps: watchdog timeout: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_BUS_ERROR:
+ dev_err(dev, "%ps: bus error: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_DOUBLE_FAULT:
+ dev_err(dev, "%ps: double fault: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_VPU_STILL_RUNNING:
+ dev_err(dev, "%ps: still running: 0x%x\n", caller, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_VLC_BUF_FULL:
+ dev_err(dev, "%ps: vlc buf full: 0x%x\n", caller, reg_fail_reason);
+ break;
+ default:
+ dev_err(dev, "%ps: unknown failure: 0x%x\n", caller, reg_fail_reason);
+ break;
+ }
+}
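A note on the %ps fix above: passing a code address to "%s" makes printk dereference it as a string, which prints garbage at best. The kernel's vsprintf provides %ps/%pS specifically for code pointers, resolving them to a symbol name without/with offset:

	void *caller = __builtin_return_address(0);

	pr_info("called from %ps\n", caller);	/* e.g. "wave5_vpu_init" */
	pr_info("called from %pS\n", caller);	/* e.g. "wave5_vpu_init+0x40/0x1a0" */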
+
+static int wave5_wait_fio_readl(struct vpu_device *vpu_dev, u32 addr, u32 val)
+{
+ u32 ctrl;
+ int ret;
+
+ ctrl = addr & 0xffff;
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
+ ret = read_poll_timeout(wave5_vdi_readl, ctrl, ctrl & FIO_CTRL_READY,
+ 0, FIO_TIMEOUT, false, vpu_dev, W5_VPU_FIO_CTRL_ADDR);
+ if (ret)
+ return ret;
+ if (wave5_vdi_readl(vpu_dev, W5_VPU_FIO_DATA) != val)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+static void wave5_fio_writel(struct vpu_device *vpu_dev, unsigned int addr, unsigned int data)
+{
+ int ret;
+ unsigned int ctrl;
+
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_DATA, data);
+ ctrl = FIO_CTRL_WRITE | (addr & 0xffff);
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
+ ret = read_poll_timeout(wave5_vdi_readl, ctrl, ctrl & FIO_CTRL_READY, 0, FIO_TIMEOUT,
+ false, vpu_dev, W5_VPU_FIO_CTRL_ADDR);
+ if (ret) {
+ dev_dbg_ratelimited(vpu_dev->dev, "FIO write timeout: addr=0x%x data=0x%x\n",
+ addr, data);
+ }
+}
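The two helpers above implement the FIO handshake: load W5_VPU_FIO_DATA (for a write), write the target address into W5_VPU_FIO_CTRL_ADDR with FIO_CTRL_WRITE set, then poll for FIO_CTRL_READY. A read would run the same handshake without the write flag; a sketch under that assumption (the driver's actual read helper, if any, may differ):

	static u32 wave5_fio_readl_sketch(struct vpu_device *vpu_dev, u32 addr)
	{
		u32 ctrl = addr & 0xffff;	/* no FIO_CTRL_WRITE: request a read */
		u32 val;

		wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
		/* Wait for the VPU to latch the result into the data register. */
		if (read_poll_timeout(wave5_vdi_readl, val, val & FIO_CTRL_READY,
				      0, FIO_TIMEOUT, false, vpu_dev,
				      W5_VPU_FIO_CTRL_ADDR))
			return 0;	/* timeout; caller gets a benign zero */

		return wave5_vdi_readl(vpu_dev, W5_VPU_FIO_DATA);
	}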
+
+static int wave5_wait_bus_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ u32 gdi_status_check_value = 0x3f;
+
+ if (vpu_dev->product_code == WAVE521C_CODE ||
+ vpu_dev->product_code == WAVE521_CODE ||
+ vpu_dev->product_code == WAVE521E1_CODE)
+ gdi_status_check_value = 0x00ff1f3f;
+
+ return wave5_wait_fio_readl(vpu_dev, addr, gdi_status_check_value);
+}
+
+static int wave5_wait_vpu_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ u32 data;
+
+ return read_poll_timeout(wave5_vdi_readl, data, data == 0,
+ 0, VPU_BUSY_CHECK_TIMEOUT, false, vpu_dev, addr);
+}
+
+static int wave5_wait_vcpu_bus_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ return wave5_wait_fio_readl(vpu_dev, addr, 0);
+}
+
+static dma_addr_t wave5_read_reg_for_mem_addr(struct vpu_instance *inst,
+ unsigned int reg_addr)
+{
+ dma_addr_t addr;
+ dma_addr_t high_addr = inst->dev->ext_addr;
+ u32 val;
+
+ val = vpu_read_reg(inst->dev, reg_addr);
+ addr = ((high_addr << 32) | val);
+
+ return addr;
+}
+
+bool wave5_vpu_is_init(struct vpu_device *vpu_dev)
+{
+ return vpu_read_reg(vpu_dev, W5_VCPU_CUR_PC) != 0;
+}
+
+unsigned int wave5_vpu_get_product_id(struct vpu_device *vpu_dev)
+{
+ unsigned int product_id = PRODUCT_ID_NONE;
+ u32 val;
+
+ val = vpu_read_reg(vpu_dev, W5_PRODUCT_NUMBER);
+
+ switch (val) {
+ case WAVE521_CODE:
+ case WAVE521C_CODE:
+ case WAVE521C_DUAL_CODE:
+ case WAVE521E1_CODE:
+ product_id = PRODUCT_ID_521;
+ break;
+ case WAVE511_CODE:
+ product_id = PRODUCT_ID_511;
+ break;
+ case WAVE517_CODE:
+ case WAVE537_CODE:
+ product_id = PRODUCT_ID_517;
+ break;
+ default:
+ dev_err(vpu_dev->dev, "Invalid product id (%x)\n", val);
+ break;
+ }
+ return product_id;
+}
+
+void wave5_bit_issue_command(struct vpu_instance *inst, u32 cmd)
+{
+ u32 instance_index = inst->id;
+ u32 codec_mode = inst->std;
+
+ vpu_write_reg(inst->dev, W5_CMD_INSTANCE_INFO, (codec_mode << 16) |
+ (instance_index & 0xffff));
+ vpu_write_reg(inst->dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(inst->dev, W5_COMMAND, cmd);
+
+ dev_dbg(inst->dev->dev, "%s: cmd=0x%x\n", __func__, cmd);
+
+ vpu_write_reg(inst->dev, W5_VPU_HOST_INT_REQ, 1);
+}
+
+static int wave5_send_query(struct vpu_instance *inst, enum QUERY_OPT query_opt)
+{
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_QUERY_OPTION, query_opt);
+ vpu_write_reg(inst->dev, W5_VPU_BUSY_STATUS, 1);
+ wave5_bit_issue_command(inst, W5_QUERY);
+
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_QUERY', timed out opt=0x%x\n", query_opt);
+ return ret;
+ }
+
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS))
+ return -EIO;
+
+ return 0;
+}
+
+static int setup_wave5_properties(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ struct vpu_attr *p_attr = &vpu_dev->attr;
+ u32 reg_val;
+ u8 *str;
+ int ret;
+ u32 hw_config_def0, hw_config_def1, hw_config_feature, hw_config_rev;
+
+ vpu_write_reg(vpu_dev, W5_QUERY_OPTION, GET_VPU_INFO);
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_QUERY);
+ vpu_write_reg(vpu_dev, W5_VPU_HOST_INT_REQ, 1);
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ if (!vpu_read_reg(vpu_dev, W5_RET_SUCCESS))
+ return -EIO;
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_PRODUCT_NAME);
+ str = (u8 *)&reg_val;
+ p_attr->product_name[0] = str[3];
+ p_attr->product_name[1] = str[2];
+ p_attr->product_name[2] = str[1];
+ p_attr->product_name[3] = str[0];
+ p_attr->product_name[4] = 0;
+
+ p_attr->product_id = wave5_vpu_get_product_id(vpu_dev);
+ p_attr->product_version = vpu_read_reg(vpu_dev, W5_RET_PRODUCT_VERSION);
+ p_attr->fw_version = vpu_read_reg(vpu_dev, W5_RET_FW_VERSION);
+ p_attr->customer_id = vpu_read_reg(vpu_dev, W5_RET_CUSTOMER_ID);
+ hw_config_def0 = vpu_read_reg(vpu_dev, W5_RET_STD_DEF0);
+ hw_config_def1 = vpu_read_reg(vpu_dev, W5_RET_STD_DEF1);
+ hw_config_feature = vpu_read_reg(vpu_dev, W5_RET_CONF_FEATURE);
+ hw_config_rev = vpu_read_reg(vpu_dev, W5_RET_CONF_REVISION);
+
+ p_attr->support_hevc10bit_enc = (hw_config_feature >> 3) & 1;
+ if (hw_config_rev > 167455) /* revision dated 2019-03-21 */
+ p_attr->support_avc10bit_enc = (hw_config_feature >> 11) & 1;
+ else
+ p_attr->support_avc10bit_enc = p_attr->support_hevc10bit_enc;
+
+ p_attr->support_decoders = 0;
+ p_attr->support_encoders = 0;
+ if (p_attr->product_id == PRODUCT_ID_521) {
+ p_attr->support_dual_core = ((hw_config_def1 >> 26) & 0x01);
+ if (p_attr->support_dual_core || hw_config_rev < 206116) {
+ p_attr->support_decoders = BIT(STD_AVC);
+ p_attr->support_decoders |= BIT(STD_HEVC);
+ p_attr->support_encoders = BIT(STD_AVC);
+ p_attr->support_encoders |= BIT(STD_HEVC);
+ } else {
+ p_attr->support_decoders |= (((hw_config_def1 >> 3) & 0x01) << STD_AVC);
+ p_attr->support_decoders |= (((hw_config_def1 >> 2) & 0x01) << STD_HEVC);
+ p_attr->support_encoders = (((hw_config_def1 >> 1) & 0x01) << STD_AVC);
+ p_attr->support_encoders |= ((hw_config_def1 & 0x01) << STD_HEVC);
+ }
+ } else if (p_attr->product_id == PRODUCT_ID_511) {
+ p_attr->support_decoders = BIT(STD_HEVC);
+ p_attr->support_decoders |= BIT(STD_AVC);
+ } else if (p_attr->product_id == PRODUCT_ID_517) {
+ p_attr->support_decoders = (((hw_config_def1 >> 4) & 0x01) << STD_AV1);
+ p_attr->support_decoders |= (((hw_config_def1 >> 3) & 0x01) << STD_AVS2);
+ p_attr->support_decoders |= (((hw_config_def1 >> 2) & 0x01) << STD_AVC);
+ p_attr->support_decoders |= (((hw_config_def1 >> 1) & 0x01) << STD_VP9);
+ p_attr->support_decoders |= ((hw_config_def1 & 0x01) << STD_HEVC);
+ }
+
+ p_attr->support_backbone = (hw_config_def0 >> 16) & 0x01;
+ p_attr->support_vcpu_backbone = (hw_config_def0 >> 28) & 0x01;
+ p_attr->support_vcore_backbone = (hw_config_def0 >> 22) & 0x01;
+ p_attr->support_dual_core = (hw_config_def1 >> 26) & 0x01;
+ p_attr->support_endian_mask = BIT(VDI_LITTLE_ENDIAN) |
+ BIT(VDI_BIG_ENDIAN) |
+ BIT(VDI_32BIT_LITTLE_ENDIAN) |
+ BIT(VDI_32BIT_BIG_ENDIAN) |
+ (0xffffUL << 16);
+ p_attr->support_bitstream_mode = BIT(BS_MODE_INTERRUPT) |
+ BIT(BS_MODE_PIC_END);
+
+ return 0;
+}
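The support_decoders and support_encoders fields assembled above are per-standard bitmaps keyed by the STD_* enum, so callers test them with BIT(); a usage sketch:

	/* Does the attached VPU support HEVC decode? */
	if (p_attr->support_decoders & BIT(STD_HEVC))
		dev_dbg(dev, "HEVC decode supported\n");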
+
+int wave5_vpu_get_version(struct vpu_device *vpu_dev, u32 *revision)
+{
+ u32 reg_val;
+ int ret;
+
+ vpu_write_reg(vpu_dev, W5_QUERY_OPTION, GET_VPU_INFO);
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_QUERY);
+ vpu_write_reg(vpu_dev, W5_VPU_HOST_INT_REQ, 1);
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "%s: timeout\n", __func__);
+ return ret;
+ }
+
+ if (!vpu_read_reg(vpu_dev, W5_RET_SUCCESS)) {
+ dev_err(vpu_dev->dev, "%s: failed\n", __func__);
+ return -EIO;
+ }
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_FW_VERSION);
+ if (revision)
+ *revision = reg_val;
+
+ return 0;
+}
+
+static void remap_page(struct vpu_device *vpu_dev, dma_addr_t code_base, u32 index)
+{
+ u32 remap_size = (W5_REMAP_MAX_SIZE >> 12) & 0x1ff;
+ u32 reg_val = 0x80000000 | (WAVE5_UPPER_PROC_AXI_ID << 20) | (index << 12) | BIT(11)
+ | remap_size;
+
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CTRL, reg_val);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_VADDR, index * W5_REMAP_MAX_SIZE);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_PADDR, code_base + index * W5_REMAP_MAX_SIZE);
+}
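For readers decoding the register write above, the field layout of W5_VPU_REMAP_CTRL is inferred here from the shifts alone, not from vendor documentation:

	/*
	 * Inferred W5_VPU_REMAP_CTRL layout: bit 31 enable, AXI ID at bit 20,
	 * region index at bit 12, bit 11 always set (purpose unclear), and
	 * the low bits holding the window size in 4 KiB pages, i.e.
	 * W5_REMAP_MAX_SIZE >> 12. Each index maps a W5_REMAP_MAX_SIZE slice
	 * of the code buffer at virtual address index * W5_REMAP_MAX_SIZE.
	 */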
+
+int wave5_vpu_init(struct device *dev, u8 *fw, size_t size)
+{
+ struct vpu_buf *common_vb;
+ struct dma_vpu_buf *sram_vb;
+ dma_addr_t code_base, temp_base;
+ u32 code_size, temp_size;
+ u32 i, reg_val;
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2)
+ return -EINVAL;
+
+ temp_base = common_vb->daddr + WAVE5_TEMPBUF_OFFSET;
+ temp_size = WAVE5_TEMPBUF_SIZE;
+
+ ret = wave5_vdi_write_memory(vpu_dev, common_vb, 0, fw, size, VDI_128BIT_LITTLE_ENDIAN);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev, "VPU init, Writing firmware to common buffer, fail: %d\n",
+ ret);
+ return ret;
+ }
+
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ /* clear registers */
+
+ for (i = W5_CMD_REG_BASE; i < W5_CMD_REG_END; i += 4)
+ vpu_write_reg(vpu_dev, i, 0x00);
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+ vpu_write_reg(vpu_dev, W5_ADDR_TEMP_BASE, temp_base);
+ vpu_write_reg(vpu_dev, W5_TEMP_SIZE, temp_size);
+
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+
+ reg_val = (vpu_dev->ext_addr & 0xFFFF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, reg_val);
+ reg_val = ((WAVE5_PROC_AXI_AXPROT & 0x7) << 4) |
+ (WAVE5_PROC_AXI_AXCACHE & 0xF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, reg_val);
+ reg_val = ((WAVE5_SEC_AXI_AXPROT & 0x7) << 20) |
+ ((WAVE5_SEC_AXI_AXCACHE & 0xF) << 16) |
+ (WAVE5_SEC_AXI_EXT_ADDR & 0xFFFF);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, reg_val);
+
+ /* interrupt */
+ // encoder
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ // decoder
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if ((reg_val >> 16) & 1) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ sram_vb = &vpu_dev->sram_buf;
+
+ vpu_write_reg(vpu_dev, W5_ADDR_SEC_AXI, sram_vb->daddr);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_SIZE, sram_vb->size);
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_INIT_VPU);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU init(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ u32 reason_code = vpu_read_reg(vpu_dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(vpu_dev, reason_code);
+ return -EIO;
+ }
+
+ return setup_wave5_properties(dev);
+}
+
+int wave5_vpu_build_up_dec_param(struct vpu_instance *inst,
+ struct dec_open_param *param)
+{
+ int ret;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ u32 bs_endian;
+ struct dma_vpu_buf *sram_vb;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ p_dec_info->cycle_per_tick = 256;
+ switch (inst->std) {
+ case W_HEVC_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_HEVC;
+ break;
+ case W_VP9_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_VP9;
+ break;
+ case W_AVS2_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_AVS2;
+ break;
+ case W_AVC_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_AVC;
+ break;
+ case W_AV1_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_AV1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vpu_dev->product == PRODUCT_ID_517)
+ p_dec_info->vb_work.size = WAVE517_WORKBUF_SIZE;
+ else if (vpu_dev->product == PRODUCT_ID_521)
+ p_dec_info->vb_work.size = WAVE521DEC_WORKBUF_SIZE;
+ else if (vpu_dev->product == PRODUCT_ID_511)
+ p_dec_info->vb_work.size = WAVE521DEC_WORKBUF_SIZE;
+
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &p_dec_info->vb_work);
+ if (ret)
+ return ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_VCORE_INFO, 1);
+
+ sram_vb = &vpu_dev->sram_buf;
+ p_dec_info->sec_axi_info.buf_base = sram_vb->daddr;
+ p_dec_info->sec_axi_info.buf_size = sram_vb->size;
+
+ wave5_vdi_clear_memory(inst->dev, &p_dec_info->vb_work);
+
+ vpu_write_reg(inst->dev, W5_ADDR_WORK_BASE, p_dec_info->vb_work.daddr);
+ vpu_write_reg(inst->dev, W5_WORK_SIZE, p_dec_info->vb_work.size);
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_BS_START_ADDR, p_dec_info->stream_buf_start_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_BS_SIZE, p_dec_info->stream_buf_size);
+
+ /* NOTE: when endian mode is 0, SDMA reads MSB first */
+ bs_endian = wave5_vdi_convert_endian(inst->dev, param->stream_endian);
+ bs_endian = (~bs_endian & VDI_128BIT_ENDIAN_MASK);
+ vpu_write_reg(inst->dev, W5_CMD_BS_PARAM, bs_endian);
+ vpu_write_reg(inst->dev, W5_CMD_EXT_ADDR, (param->pri_axprot << 20) |
+ (param->pri_axcache << 16) | inst->dev->ext_addr);
+ vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1, (COMMAND_QUEUE_DEPTH - 1));
+ vpu_write_reg(inst->dev, W5_CMD_ERR_CONCEAL, (param->error_conceal_unit << 2) |
+ (param->error_conceal_mode));
+
+ wave5_bit_issue_command(inst, W5_CREATE_INSTANCE);
+ // check QUEUE_DONE
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_CREATE_INSTANCE' timed out\n");
+ goto free_vb_work;
+ }
+
+ // Check if we were able to add the parameters into the VCPU QUEUE
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ ret = -EIO;
+ goto free_vb_work;
+ }
+
+ p_dec_info->product_code = vpu_read_reg(inst->dev, W5_PRODUCT_NUMBER);
+
+ return 0;
+free_vb_work:
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
+ return ret;
+}
+
+int wave5_vpu_dec_init_seq(struct vpu_instance *inst)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ u32 cmd_option = INIT_SEQ_NORMAL;
+ u32 reg_val, bs_option;
+ int ret;
+
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ if (p_dec_info->thumbnail_mode)
+ cmd_option = INIT_SEQ_W_THUMBNAIL;
+
+ /* set attributes of bitstream buffer controller */
+ switch (p_dec_info->open_param.bitstream_mode) {
+ case BS_MODE_INTERRUPT:
+ bs_option = BSOPTION_ENABLE_EXPLICIT_END;
+ break;
+ case BS_MODE_PIC_END:
+ bs_option = BSOPTION_ENABLE_EXPLICIT_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vpu_write_reg(inst->dev, W5_BS_RD_PTR, p_dec_info->stream_rd_ptr);
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+
+ if (p_dec_info->stream_endflag)
+ bs_option = 3;
+ if (inst->std == W_AV1_DEC)
+ bs_option |= ((p_dec_info->open_param.av1_format) << 2);
+ vpu_write_reg(inst->dev, W5_BS_OPTION, BIT(31) | bs_option);
+
+ vpu_write_reg(inst->dev, W5_COMMAND_OPTION, cmd_option);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_USER_MASK, p_dec_info->user_data_enable);
+
+ wave5_bit_issue_command(inst, W5_INIT_SEQ);
+
+ // check QUEUE_DONE
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_INIT_SEQ', timed out\n");
+ return ret;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ // Check if we were able to add a command into VCPU QUEUE
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void wave5_get_dec_seq_result(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ u32 reg_val, sub_layer_info;
+ u32 profile_compatibility_flag;
+ u32 output_bit_depth_minus8;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ p_dec_info->stream_rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+
+ p_dec_info->frame_display_flag = vpu_read_reg(inst->dev, W5_RET_DEC_DISP_IDC);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_SIZE);
+ info->pic_width = ((reg_val >> 16) & 0xffff);
+ info->pic_height = (reg_val & 0xffff);
+ info->min_frame_buffer_count = vpu_read_reg(inst->dev, W5_RET_DEC_NUM_REQUIRED_FB);
+ info->frame_buf_delay = vpu_read_reg(inst->dev, W5_RET_DEC_NUM_REORDER_DELAY);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_CROP_LEFT_RIGHT);
+ info->pic_crop_rect.left = (reg_val >> 16) & 0xffff;
+ info->pic_crop_rect.right = reg_val & 0xffff;
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_CROP_TOP_BOTTOM);
+ info->pic_crop_rect.top = (reg_val >> 16) & 0xffff;
+ info->pic_crop_rect.bottom = reg_val & 0xffff;
+
+ info->f_rate_numerator = vpu_read_reg(inst->dev, W5_RET_DEC_FRAME_RATE_NR);
+ info->f_rate_denominator = vpu_read_reg(inst->dev, W5_RET_DEC_FRAME_RATE_DR);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_COLOR_SAMPLE_INFO);
+ info->luma_bitdepth = reg_val & 0xf;
+ info->chroma_bitdepth = (reg_val >> 4) & 0xf;
+ info->chroma_format_idc = (reg_val >> 8) & 0xf;
+ info->aspect_rate_info = (reg_val >> 16) & 0xff;
+ info->is_ext_sar = (info->aspect_rate_info == 255);
+ /* [0:15] - vertical size, [16:31] - horizontal size */
+ if (info->is_ext_sar)
+ info->aspect_rate_info = vpu_read_reg(inst->dev, W5_RET_DEC_ASPECT_RATIO);
+ info->bit_rate = vpu_read_reg(inst->dev, W5_RET_DEC_BIT_RATE);
+
+ sub_layer_info = vpu_read_reg(inst->dev, W5_RET_DEC_SUB_LAYER_INFO);
+ info->max_temporal_layers = (sub_layer_info >> 8) & 0x7;
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_SEQ_PARAM);
+ info->level = reg_val & 0xff;
+ profile_compatibility_flag = (reg_val >> 12) & 0xff;
+ info->profile = (reg_val >> 24) & 0x1f;
+ info->tier = (reg_val >> 29) & 0x01;
+ output_bit_depth_minus8 = (reg_val >> 30) & 0x03;
+
+ if (inst->std == W_HEVC_DEC) {
+ /* guessing profile */
+ if (!info->profile) {
+ if ((profile_compatibility_flag & 0x06) == 0x06)
+ info->profile = HEVC_PROFILE_MAIN; /* main profile */
+ else if ((profile_compatibility_flag & 0x04) == 0x04)
+ info->profile = HEVC_PROFILE_MAIN10; /* main10 profile */
+ else if ((profile_compatibility_flag & 0x08) == 0x08)
+ /* main still picture profile */
+ info->profile = HEVC_PROFILE_STILLPICTURE;
+ else
+ info->profile = HEVC_PROFILE_MAIN; /* for old version HM */
+ }
+
+ } else if (inst->std == W_AVS2_DEC) {
+ if (info->luma_bitdepth == 10 && output_bit_depth_minus8 == 2)
+ info->output_bit_depth = 10;
+ else
+ info->output_bit_depth = 8;
+
+ } else if (inst->std == W_AVC_DEC) {
+ info->profile = (reg_val >> 24) & 0x7f;
+ }
+
+ info->vlc_buf_size = vpu_read_reg(inst->dev, W5_RET_VLC_BUF_SIZE);
+ info->param_buf_size = vpu_read_reg(inst->dev, W5_RET_PARAM_BUF_SIZE);
+ p_dec_info->vlc_buf_size = info->vlc_buf_size;
+ p_dec_info->param_buf_size = info->param_buf_size;
+}
+
+int wave5_vpu_dec_get_seq_info(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ int ret;
+ u32 reg_val;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_ADDR_REPORT_BASE, p_dec_info->user_data_buf_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_SIZE, p_dec_info->user_data_buf_size);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_PARAM,
+ VPU_USER_DATA_ENDIAN & VDI_128BIT_ENDIAN_MASK);
+
+ // send QUERY cmd
+ ret = wave5_send_query(inst, GET_RESULT);
+ if (ret) {
+ if (ret == -EIO) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+ return ret;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: init seq complete\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ /* this is not a fatal error, set ret to -EIO but don't return immediately */
+ if (vpu_read_reg(inst->dev, W5_RET_DEC_DECODING_SUCCESS) != 1) {
+ info->seq_init_err_reason = vpu_read_reg(inst->dev, W5_RET_DEC_ERR_INFO);
+ ret = -EIO;
+ } else {
+ info->warn_info = vpu_read_reg(inst->dev, W5_RET_DEC_WARN_INFO);
+ }
+
+ // get sequence info
+ info->user_data_size = 0;
+ info->user_data_buf_full = false;
+ info->user_data_header = vpu_read_reg(inst->dev, W5_RET_DEC_USERDATA_IDC);
+ if (info->user_data_header) {
+ if (info->user_data_header & BIT(USERDATA_FLAG_BUFF_FULL))
+ info->user_data_buf_full = true;
+ info->user_data_size = p_dec_info->user_data_buf_size;
+ }
+
+ wave5_get_dec_seq_result(inst, info);
+
+ return ret;
+}
+
+static u32 calculate_table_size(u32 bit_depth, u32 frame_width, u32 frame_height, u32 ot_bg_width)
+{
+ u32 bgs_width = ((bit_depth > 8) ? 256 : 512);
+ u32 comp_frame_width = ALIGN(ALIGN(frame_width, 16) + 16, 16);
+ u32 ot_frame_width = ALIGN(comp_frame_width, ot_bg_width);
+
+ // sizeof_offset_table()
+ u32 ot_bg_height = 32;
+ u32 bgs_height = BIT(14) / bgs_width / ((bit_depth > 8) ? 2 : 1);
+ u32 comp_frame_height = ALIGN(ALIGN(frame_height, 4) + 4, bgs_height);
+ u32 ot_frame_height = ALIGN(comp_frame_height, ot_bg_height);
+
+ return (ot_frame_width / 16) * (ot_frame_height / 4) * 2;
+}
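To make the arithmetic above concrete, a worked example (values computed by hand) for an 8-bit 1920x1080 frame with ot_bg_width = 1024, the width used for WAVE521C_DUAL_CODE:

	/*
	 * bit_depth = 8, frame 1920x1080, ot_bg_width = 1024:
	 *   bgs_width         = 512
	 *   comp_frame_width  = ALIGN(ALIGN(1920, 16) + 16, 16) = 1936
	 *   ot_frame_width    = ALIGN(1936, 1024)               = 2048
	 *   bgs_height        = BIT(14) / 512 / 1               = 32
	 *   comp_frame_height = ALIGN(ALIGN(1080, 4) + 4, 32)   = 1088
	 *   ot_frame_height   = ALIGN(1088, 32)                 = 1088
	 *   returned size     = (2048 / 16) * (1088 / 4) * 2    = 69632
	 */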
+
+int wave5_vpu_dec_register_framebuffer(struct vpu_instance *inst, struct frame_buffer *fb_arr,
+ enum tiled_map_type map_type, unsigned int count)
+{
+ int ret;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct dec_initial_info *init_info = &p_dec_info->initial_info;
+ size_t remain, idx, j, i, cnt_8_chunk;
+ u32 start_no, end_no;
+ u32 reg_val, cbcr_interleave, nv21, pic_size;
+ u32 endian, yuv_format;
+ u32 addr_y, addr_cb, addr_cr;
+ u32 table_width = init_info->pic_width;
+ u32 table_height = init_info->pic_height;
+ u32 mv_col_size, frame_width, frame_height, fbc_y_tbl_size, fbc_c_tbl_size;
+ struct vpu_buf vb_buf;
+ u32 color_format = 0;
+ u32 pixel_order = 1;
+ u32 bwb_flag = (map_type == LINEAR_FRAME_MAP) ? 1 : 0;
+
+ cbcr_interleave = inst->cbcr_interleave;
+ nv21 = inst->nv21;
+ mv_col_size = 0;
+ fbc_y_tbl_size = 0;
+ fbc_c_tbl_size = 0;
+
+ if (map_type >= COMPRESSED_FRAME_MAP) {
+ cbcr_interleave = 0;
+ nv21 = 0;
+
+ switch (inst->std) {
+ case W_HEVC_DEC:
+ mv_col_size = WAVE5_DEC_HEVC_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ break;
+ case W_VP9_DEC:
+ mv_col_size = WAVE5_DEC_VP9_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ table_width = ALIGN(table_width, 64);
+ table_height = ALIGN(table_height, 64);
+ break;
+ case W_AVS2_DEC:
+ mv_col_size = WAVE5_DEC_AVS2_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ break;
+ case W_AVC_DEC:
+ mv_col_size = WAVE5_DEC_AVC_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ break;
+ case W_AV1_DEC:
+ mv_col_size = WAVE5_DEC_AV1_BUF_SZ_1(init_info->pic_width,
+ init_info->pic_height) +
+ WAVE5_DEC_AV1_BUF_SZ_2(init_info->pic_width, init_info->pic_width,
+ init_info->pic_height);
+ table_width = ALIGN(table_width, 16);
+ table_height = ALIGN(table_height, 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mv_col_size = ALIGN(mv_col_size, 16);
+ vb_buf.daddr = 0;
+ if (inst->std == W_HEVC_DEC || inst->std == W_AVS2_DEC ||
+ inst->std == W_VP9_DEC || inst->std == W_AVC_DEC ||
+ inst->std == W_AV1_DEC) {
+ vb_buf.size = ALIGN(mv_col_size, BUFFER_MARGIN) + BUFFER_MARGIN;
+
+ for (i = 0 ; i < count ; i++) {
+ if (p_dec_info->vb_mv[i].size == 0) {
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &vb_buf);
+ if (ret)
+ goto free_mv_buffers;
+ p_dec_info->vb_mv[i] = vb_buf;
+ }
+ }
+ }
+
+ frame_width = ALIGN(init_info->pic_width, 16);
+ frame_height = ALIGN(init_info->pic_height, 16);
+ if (p_dec_info->product_code == WAVE521C_DUAL_CODE) {
+ // Use an offset table BG width of 1024 for all decoders
+ fbc_y_tbl_size = calculate_table_size(init_info->luma_bitdepth,
+ frame_width, frame_height, 1024);
+ } else {
+ fbc_y_tbl_size = ALIGN(WAVE5_FBC_LUMA_TABLE_SIZE(table_width,
+ table_height), 16);
+ }
+
+ vb_buf.daddr = 0;
+ vb_buf.size = ALIGN(fbc_y_tbl_size, BUFFER_MARGIN) + BUFFER_MARGIN;
+ for (i = 0 ; i < count ; i++) {
+ if (p_dec_info->vb_fbc_y_tbl[i].size == 0) {
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &vb_buf);
+ if (ret)
+ goto free_fbc_y_tbl_buffers;
+ p_dec_info->vb_fbc_y_tbl[i] = vb_buf;
+ }
+ }
+
+ if (p_dec_info->product_code == WAVE521C_DUAL_CODE) {
+ // Use an offset table BG width of 1024 for all decoders
+ fbc_c_tbl_size = calculate_table_size(init_info->chroma_bitdepth,
+ frame_width / 2, frame_height, 1024);
+ } else {
+ fbc_c_tbl_size = ALIGN(WAVE5_FBC_CHROMA_TABLE_SIZE(table_width,
+ table_height), 16);
+ }
+
+ vb_buf.daddr = 0;
+ vb_buf.size = ALIGN(fbc_c_tbl_size, BUFFER_MARGIN) + BUFFER_MARGIN;
+ for (i = 0 ; i < count ; i++) {
+ if (p_dec_info->vb_fbc_c_tbl[i].size == 0) {
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &vb_buf);
+ if (ret)
+ goto free_fbc_c_tbl_buffers;
+ p_dec_info->vb_fbc_c_tbl[i] = vb_buf;
+ }
+ }
+ pic_size = (init_info->pic_width << 16) | (init_info->pic_height);
+
+ // allocate task_buffer
+ vb_buf.size = (p_dec_info->vlc_buf_size * VLC_BUF_NUM) +
+ (p_dec_info->param_buf_size * COMMAND_QUEUE_DEPTH);
+ vb_buf.daddr = 0;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &vb_buf);
+ if (ret)
+ goto free_fbc_c_tbl_buffers;
+
+ p_dec_info->vb_task = vb_buf;
+
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_ADDR_TASK_BUF,
+ p_dec_info->vb_task.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_TASK_BUF_SIZE, vb_buf.size);
+ } else {
+ pic_size = (init_info->pic_width << 16) | (init_info->pic_height);
+ }
+ endian = wave5_vdi_convert_endian(inst->dev, fb_arr[0].endian);
+ vpu_write_reg(inst->dev, W5_PIC_SIZE, pic_size);
+
+ yuv_format = 0;
+ color_format = 0;
+
+ reg_val =
+ (bwb_flag << 28) |
+ (pixel_order << 23) | /* PIXEL ORDER in 128bit. first pixel in low address */
+ (yuv_format << 20) |
+ (color_format << 19) |
+ (nv21 << 17) |
+ (cbcr_interleave << 16) |
+ (fb_arr[0].stride);
+ vpu_write_reg(inst->dev, W5_COMMON_PIC_INFO, reg_val);
+
+ remain = count;
+ cnt_8_chunk = ALIGN(count, 8) / 8;
+ idx = 0;
+ for (j = 0; j < cnt_8_chunk; j++) {
+ reg_val = (endian << 16) | (j == cnt_8_chunk - 1) << 4 | ((j == 0) << 3);
+ reg_val |= (p_dec_info->open_param.enable_non_ref_fbc_write << 26);
+ vpu_write_reg(inst->dev, W5_SFB_OPTION, reg_val);
+ start_no = j * 8;
+ end_no = start_no + ((remain >= 8) ? 8 : remain) - 1;
+
+ vpu_write_reg(inst->dev, W5_SET_FB_NUM, (start_no << 8) | end_no);
+
+ for (i = 0; i < 8 && i < remain; i++) {
+ if (map_type == LINEAR_FRAME_MAP && p_dec_info->open_param.cbcr_order ==
+ CBCR_ORDER_REVERSED) {
+ addr_y = fb_arr[i + start_no].buf_y;
+ addr_cb = fb_arr[i + start_no].buf_cr;
+ addr_cr = fb_arr[i + start_no].buf_cb;
+ } else {
+ addr_y = fb_arr[i + start_no].buf_y;
+ addr_cb = fb_arr[i + start_no].buf_cb;
+ addr_cr = fb_arr[i + start_no].buf_cr;
+ }
+ vpu_write_reg(inst->dev, W5_ADDR_LUMA_BASE0 + (i << 4), addr_y);
+ vpu_write_reg(inst->dev, W5_ADDR_CB_BASE0 + (i << 4), addr_cb);
+ if (map_type >= COMPRESSED_FRAME_MAP) {
+ /* luma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_Y_OFFSET0 + (i << 4),
+ p_dec_info->vb_fbc_y_tbl[idx].daddr);
+ /* chroma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4),
+ p_dec_info->vb_fbc_c_tbl[idx].daddr);
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2),
+ p_dec_info->vb_mv[idx].daddr);
+ } else {
+ vpu_write_reg(inst->dev, W5_ADDR_CR_BASE0 + (i << 4), addr_cr);
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4), 0);
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2), 0);
+ }
+ idx++;
+ }
+ remain -= i;
+
+ wave5_bit_issue_command(inst, W5_SET_FB);
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ goto free_buffers;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ ret = -EIO;
+ goto free_buffers;
+ }
+
+ return 0;
+
+free_buffers:
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
+free_fbc_c_tbl_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[i]);
+free_fbc_y_tbl_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[i]);
+free_mv_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[i]);
+ return ret;
+}
+
+int wave5_vpu_decode(struct vpu_instance *inst, struct dec_param *option, u32 *fail_res)
+{
+ u32 mode_option = DEC_PIC_NORMAL, bs_option, reg_val;
+ u32 force_latency = 0;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct dec_open_param *p_open_param = &p_dec_info->open_param;
+ int ret;
+
+ if (p_dec_info->thumbnail_mode) {
+ mode_option = DEC_PIC_W_THUMBNAIL;
+ } else if (option->skipframe_mode) {
+ switch (option->skipframe_mode) {
+ case WAVE_SKIPMODE_NON_IRAP:
+ mode_option = SKIP_NON_IRAP;
+ force_latency = 1;
+ break;
+ case WAVE_SKIPMODE_NON_REF:
+ mode_option = SKIP_NON_REF_PIC;
+ break;
+ default:
+ // skip mode off
+ break;
+ }
+ }
+
+ // With reordering disabled, force the minimum output latency.
+ if (!p_dec_info->reorder_enable)
+ force_latency = 1;
+
+ /* set attributes of bitstream buffer controller */
+ bs_option = 0;
+ switch (p_open_param->bitstream_mode) {
+ case BS_MODE_INTERRUPT:
+ bs_option = BSOPTION_ENABLE_EXPLICIT_END;
+ break;
+ case BS_MODE_PIC_END:
+ bs_option = BSOPTION_ENABLE_EXPLICIT_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vpu_write_reg(inst->dev, W5_BS_RD_PTR, p_dec_info->stream_rd_ptr);
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+ bs_option = (p_dec_info->stream_endflag << 1) | BS_EXPLICIT_END_MODE_ON;
+ if (p_open_param->bitstream_mode == BS_MODE_PIC_END)
+ bs_option |= BIT(31);
+ if (inst->std == W_AV1_DEC)
+ bs_option |= ((p_open_param->av1_format) << 2);
+ vpu_write_reg(inst->dev, W5_BS_OPTION, bs_option);
+
+ /* secondary AXI */
+ reg_val = p_dec_info->sec_axi_info.wave.use_bit_enable |
+ (p_dec_info->sec_axi_info.wave.use_ip_enable << 9) |
+ (p_dec_info->sec_axi_info.wave.use_lf_row_enable << 15);
+ vpu_write_reg(inst->dev, W5_USE_SEC_AXI, reg_val);
+
+ /* set attributes of user buffer */
+ vpu_write_reg(inst->dev, W5_CMD_DEC_USER_MASK, p_dec_info->user_data_enable);
+
+ vpu_write_reg(inst->dev, W5_COMMAND_OPTION,
+ ((option->disable_film_grain << 6) | (option->cra_as_bla_flag << 5) |
+ mode_option));
+ vpu_write_reg(inst->dev, W5_CMD_DEC_TEMPORAL_ID_PLUS1,
+ (p_dec_info->target_spatial_id << 9) |
+ (p_dec_info->temp_id_select_mode << 8) | p_dec_info->target_temp_id);
+ vpu_write_reg(inst->dev, W5_CMD_SEQ_CHANGE_ENABLE_FLAG, p_dec_info->seq_change_mask);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_FORCE_FB_LATENCY_PLUS1, force_latency);
+
+ wave5_bit_issue_command(inst, W5_DEC_PIC);
+ // check QUEUE_DONE
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_DEC_PIC', timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+ // Check if we were able to add a command into the VCPU QUEUE
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ *fail_res = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, *fail_res);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_dec_get_result(struct vpu_instance *inst, struct dec_output_info *result)
+{
+ int ret;
+ u32 index, nal_unit_type, reg_val, sub_layer_info;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_ADDR_REPORT_BASE, p_dec_info->user_data_buf_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_SIZE, p_dec_info->user_data_buf_size);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_PARAM,
+ VPU_USER_DATA_ENDIAN & VDI_128BIT_ENDIAN_MASK);
+
+ // send QUERY cmd
+ ret = wave5_send_query(inst, GET_RESULT);
+ if (ret) {
+ if (ret == -EIO) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+
+ return ret;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: dec pic complete\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_TYPE);
+
+ nal_unit_type = (reg_val >> 4) & 0x3f;
+
+ if (inst->std == W_VP9_DEC) {
+ if (reg_val & 0x01)
+ result->pic_type = PIC_TYPE_I;
+ else if (reg_val & 0x02)
+ result->pic_type = PIC_TYPE_P;
+ else if (reg_val & 0x04)
+ result->pic_type = PIC_TYPE_REPEAT;
+ else
+ result->pic_type = PIC_TYPE_MAX;
+ } else if (inst->std == W_HEVC_DEC) {
+ if (reg_val & 0x04)
+ result->pic_type = PIC_TYPE_B;
+ else if (reg_val & 0x02)
+ result->pic_type = PIC_TYPE_P;
+ else if (reg_val & 0x01)
+ result->pic_type = PIC_TYPE_I;
+ else
+ result->pic_type = PIC_TYPE_MAX;
+ if ((nal_unit_type == 19 || nal_unit_type == 20) && result->pic_type == PIC_TYPE_I)
+ /* IDR_W_RADL, IDR_N_LP */
+ result->pic_type = PIC_TYPE_IDR;
+ } else if (inst->std == W_AVC_DEC) {
+ if (reg_val & 0x04)
+ result->pic_type = PIC_TYPE_B;
+ else if (reg_val & 0x02)
+ result->pic_type = PIC_TYPE_P;
+ else if (reg_val & 0x01)
+ result->pic_type = PIC_TYPE_I;
+ else
+ result->pic_type = PIC_TYPE_MAX;
+ if (nal_unit_type == 5 && result->pic_type == PIC_TYPE_I)
+ result->pic_type = PIC_TYPE_IDR;
+ } else if (inst->std == W_AV1_DEC) {
+ switch (reg_val & 0x07) {
+ case 0:
+ result->pic_type = PIC_TYPE_KEY;
+ break;
+ case 1:
+ result->pic_type = PIC_TYPE_INTER;
+ break;
+ case 2:
+ result->pic_type = PIC_TYPE_AV1_INTRA;
+ break;
+ case 3:
+ result->pic_type = PIC_TYPE_AV1_SWITCH;
+ break;
+ default:
+ result->pic_type = PIC_TYPE_MAX;
+ break;
+ }
+ } else { // AVS2
+ switch (reg_val & 0x07) {
+ case 0:
+ result->pic_type = PIC_TYPE_I;
+ break;
+ case 1:
+ result->pic_type = PIC_TYPE_P;
+ break;
+ case 2:
+ result->pic_type = PIC_TYPE_B;
+ break;
+ case 3:
+ result->pic_type = PIC_TYPE_AVS2_F;
+ break;
+ case 4:
+ result->pic_type = PIC_TYPE_AVS2_S;
+ break;
+ case 5:
+ result->pic_type = PIC_TYPE_AVS2_G;
+ break;
+ case 6:
+ result->pic_type = PIC_TYPE_AVS2_GB;
+ break;
+ default:
+ result->pic_type = PIC_TYPE_MAX;
+ break;
+ }
+ }
+ index = vpu_read_reg(inst->dev, W5_RET_DEC_DISPLAY_INDEX);
+ result->index_frame_display = index;
+ index = vpu_read_reg(inst->dev, W5_RET_DEC_DECODED_INDEX);
+ result->index_frame_decoded = index;
+ result->index_frame_decoded_for_tiled = index;
+
+ sub_layer_info = vpu_read_reg(inst->dev, W5_RET_DEC_SUB_LAYER_INFO);
+ result->temporal_id = sub_layer_info & 0x7;
+
+ if (inst->std == W_HEVC_DEC) {
+ result->decoded_poc = -1;
+ if (result->index_frame_decoded >= 0 ||
+ result->index_frame_decoded == DECODED_IDX_FLAG_SKIP)
+ result->decoded_poc = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_POC);
+ } else if (inst->std == W_AVS2_DEC) {
+ result->avs2_info.decoded_poi = -1;
+ result->avs2_info.display_poi = -1;
+ if (result->index_frame_decoded >= 0)
+ result->avs2_info.decoded_poi =
+ vpu_read_reg(inst->dev, W5_RET_DEC_PIC_POC);
+ } else if (inst->std == W_AVC_DEC) {
+ result->decoded_poc = -1;
+ if (result->index_frame_decoded >= 0 ||
+ result->index_frame_decoded == DECODED_IDX_FLAG_SKIP)
+ result->decoded_poc = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_POC);
+ } else if (inst->std == W_AV1_DEC) {
+ result->decoded_poc = -1;
+ if (result->index_frame_decoded >= 0 ||
+ result->index_frame_decoded == DECODED_IDX_FLAG_SKIP)
+ result->decoded_poc = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_POC);
+ }
+
+ result->sequence_changed = vpu_read_reg(inst->dev, W5_RET_DEC_NOTIFICATION);
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_SIZE);
+ result->dec_pic_width = reg_val >> 16;
+ result->dec_pic_height = reg_val & 0xffff;
+
+ if (result->sequence_changed) {
+ memcpy((void *)&p_dec_info->new_seq_info, (void *)&p_dec_info->initial_info,
+ sizeof(struct dec_initial_info));
+ wave5_get_dec_seq_result(inst, &p_dec_info->new_seq_info);
+ }
+
+ result->dec_host_cmd_tick = vpu_read_reg(inst->dev, W5_RET_DEC_HOST_CMD_TICK);
+ result->dec_decode_end_tick = vpu_read_reg(inst->dev, W5_RET_DEC_DECODING_ENC_TICK);
+
+ if (!p_dec_info->first_cycle_check) {
+ result->frame_cycle =
+ (result->dec_decode_end_tick - result->dec_host_cmd_tick) *
+ p_dec_info->cycle_per_tick;
+ vpu_dev->last_performance_cycles = result->dec_decode_end_tick;
+ p_dec_info->first_cycle_check = true;
+ } else if (result->index_frame_decoded_for_tiled != -1) {
+ result->frame_cycle =
+ (result->dec_decode_end_tick - vpu_dev->last_performance_cycles) *
+ p_dec_info->cycle_per_tick;
+ vpu_dev->last_performance_cycles = result->dec_decode_end_tick;
+ if (vpu_dev->last_performance_cycles < result->dec_host_cmd_tick)
+ result->frame_cycle =
+ (result->dec_decode_end_tick - result->dec_host_cmd_tick) *
+ p_dec_info->cycle_per_tick;
+ }
+
+ // No remaining commands in the queue, reset the frame cycle check.
+ if (p_dec_info->instance_queue_count == 0 && p_dec_info->report_queue_count == 0)
+ p_dec_info->first_cycle_check = false;
+
+ return 0;
+}
+
+int wave5_vpu_re_init(struct device *dev, u8 *fw, size_t size)
+{
+ struct vpu_buf *common_vb;
+ dma_addr_t code_base, temp_base;
+	dma_addr_t old_code_base;
+	u32 code_size, temp_size;
+ u32 reg_val;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2)
+ return -EINVAL;
+ temp_base = common_vb->daddr + WAVE5_TEMPBUF_OFFSET;
+ temp_size = WAVE5_TEMPBUF_SIZE;
+
+ old_code_base = vpu_read_reg(vpu_dev, W5_VPU_REMAP_PADDR);
+
+ if (old_code_base != code_base + W5_REMAP_INDEX1 * W5_REMAP_MAX_SIZE) {
+ int ret;
+ struct dma_vpu_buf *sram_vb;
+
+ ret = wave5_vdi_write_memory(vpu_dev, common_vb, 0, fw, size,
+ VDI_128BIT_LITTLE_ENDIAN);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev,
+ "VPU init, Writing firmware to common buffer, fail: %d\n", ret);
+ return ret;
+ }
+
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev, "VPU init, Resetting the VPU, fail: %d\n", ret);
+ return ret;
+ }
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+ vpu_write_reg(vpu_dev, W5_ADDR_TEMP_BASE, temp_base);
+ vpu_write_reg(vpu_dev, W5_TEMP_SIZE, temp_size);
+
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+
+ reg_val = (vpu_dev->ext_addr & 0xFFFF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, reg_val);
+ reg_val = ((WAVE5_PROC_AXI_AXPROT & 0x7) << 4) |
+ (WAVE5_PROC_AXI_AXCACHE & 0xF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, reg_val);
+ reg_val = ((WAVE5_SEC_AXI_AXPROT & 0x7) << 20) |
+ ((WAVE5_SEC_AXI_AXCACHE & 0xF) << 16) |
+ (WAVE5_SEC_AXI_EXT_ADDR & 0xFFFF);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, reg_val);
+
+ /* interrupt */
+ // encoder
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ // decoder
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if ((reg_val >> 16) & 1) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ sram_vb = &vpu_dev->sram_buf;
+
+ vpu_write_reg(vpu_dev, W5_ADDR_SEC_AXI, sram_vb->daddr);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_SIZE, sram_vb->size);
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_INIT_VPU);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU reinit(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ u32 reason_code = vpu_read_reg(vpu_dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(vpu_dev, reason_code);
+ return -EIO;
+ }
+ }
+
+ return setup_wave5_properties(dev);
+}
+
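+/*
+ * Save (i_sleep_wake == true) or restore (false) the VPU state around a power
+ * transition: sleep issues W5_SLEEP_VPU; wake reprograms the code base, remap
+ * windows, AXI attributes and interrupt mask before issuing W5_WAKEUP_VPU and
+ * restarting the core.
+ */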
+static int wave5_vpu_sleep_wake(struct device *dev, bool i_sleep_wake, const u16 *code,
+ size_t size)
+{
+ u32 reg_val;
+ struct vpu_buf *common_vb;
+ dma_addr_t code_base;
+ u32 code_size;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (i_sleep_wake) {
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ /*
+		 * Declare ownership of the host interface access:
+		 * 1 = VPU
+		 * 0 = Host processor
+ */
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_SLEEP_VPU);
+ /* Send an interrupt named HOST to the VPU */
+ vpu_write_reg(vpu_dev, W5_VPU_HOST_INT_REQ, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ if (!vpu_read_reg(vpu_dev, W5_RET_SUCCESS)) {
+ u32 reason = vpu_read_reg(vpu_dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(vpu_dev, reason);
+ return -EIO;
+ }
+ } else { /* restore */
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2) {
+			dev_err(dev, "Code buffer too small for the firmware\n");
+ return -EINVAL;
+ }
+
+ /* Power on without DEBUG mode */
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+
+ reg_val = (vpu_dev->ext_addr & 0xFFFF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, reg_val);
+ reg_val = ((WAVE5_PROC_AXI_AXPROT & 0x7) << 4) |
+ (WAVE5_PROC_AXI_AXCACHE & 0xF);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, reg_val);
+ reg_val = ((WAVE5_SEC_AXI_AXPROT & 0x7) << 20) |
+ ((WAVE5_SEC_AXI_AXCACHE & 0xF) << 16) |
+ (WAVE5_SEC_AXI_EXT_ADDR & 0xFFFF);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, reg_val);
+
+ /* interrupt */
+ // encoder
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ // decoder
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if ((reg_val >> 16) & 1) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_WAKEUP_VPU);
+ /* Start VPU after settings */
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU wakeup(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ u32 reason_code = vpu_read_reg(vpu_dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(vpu_dev, reason_code);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int wave5_vpu_reset(struct device *dev, enum sw_reset_mode reset_mode)
+{
+ u32 val = 0;
+ int ret = 0;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ struct vpu_attr *p_attr = &vpu_dev->attr;
+
+	// the VPU does not send a response; force the BUSY flag to 0
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 0);
+
+ if (reset_mode == SW_RESET_SAFETY) {
+ ret = wave5_vpu_sleep_wake(dev, true, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
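+	/*
+	 * Probe the backbone capabilities reported by the firmware:
+	 * CONFIG0 bit 16 = backbone, bit 22 = per-VCORE backbone,
+	 * bit 28 = VCPU backbone; CONFIG1 bit 26 = dual core.
+	 */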
+ val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if ((val >> 16) & 0x1)
+ p_attr->support_backbone = true;
+ if ((val >> 22) & 0x1)
+ p_attr->support_vcore_backbone = true;
+ if ((val >> 28) & 0x1)
+ p_attr->support_vcpu_backbone = true;
+
+ val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG1);
+ if ((val >> 26) & 0x1)
+ p_attr->support_dual_core = true;
+
+	// wait for in-flight bus transactions to complete
+ if (p_attr->support_backbone) {
+ dev_dbg(dev, "%s: backbone supported\n", __func__);
+
+ if (p_attr->support_dual_core) {
+ // check CORE0
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x7);
+
+ ret = wave5_wait_bus_busy(vpu_dev, W5_BACKBONE_BUS_STATUS_VCORE0);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ return ret;
+ }
+
+ // check CORE1
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE1, 0x7);
+
+ ret = wave5_wait_bus_busy(vpu_dev, W5_BACKBONE_BUS_STATUS_VCORE1);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE1, 0x00);
+ return ret;
+ }
+
+ } else if (p_attr->support_vcore_backbone) {
+ if (p_attr->support_vcpu_backbone) {
+ // step1 : disable request
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0xFF);
+
+ // step2 : waiting for completion of bus transaction
+ ret = wave5_wait_vcpu_bus_busy(vpu_dev,
+ W5_BACKBONE_BUS_STATUS_VCPU);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0x00);
+ return ret;
+ }
+ }
+ // step1 : disable request
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x7);
+
+ // step2 : waiting for completion of bus transaction
+ if (wave5_wait_bus_busy(vpu_dev, W5_BACKBONE_BUS_STATUS_VCORE0)) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ return -EBUSY;
+ }
+ } else {
+ // step1 : disable request
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x7);
+
+ // step2 : waiting for completion of bus transaction
+ if (wave5_wait_bus_busy(vpu_dev, W5_COMBINED_BACKBONE_BUS_STATUS)) {
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x00);
+ return -EBUSY;
+ }
+ }
+ } else {
+ dev_dbg(dev, "%s: backbone NOT supported\n", __func__);
+ // step1 : disable request
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x100);
+
+ // step2 : waiting for completion of bus transaction
+ ret = wave5_wait_bus_busy(vpu_dev, W5_GDI_BUS_STATUS);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x00);
+ return ret;
+ }
+ }
+
+ switch (reset_mode) {
+ case SW_RESET_ON_BOOT:
+ case SW_RESET_FORCE:
+ case SW_RESET_SAFETY:
+ val = W5_RST_BLOCK_ALL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
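+	/* assert the block reset and wait for W5_VPU_RESET_STATUS to clear */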
+ if (val) {
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, val);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_RESET_STATUS);
+ if (ret) {
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, 0);
+ return ret;
+ }
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, 0);
+ }
+	// step3 : the bus control registers must be cleared after SW_RESET completes
+ if (p_attr->support_backbone) {
+ if (p_attr->support_dual_core) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE1, 0x00);
+ } else if (p_attr->support_vcore_backbone) {
+ if (p_attr->support_vcpu_backbone)
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0x00);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ } else {
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x00);
+ }
+ } else {
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x00);
+ }
+ if (reset_mode == SW_RESET_SAFETY || reset_mode == SW_RESET_FORCE)
+ ret = wave5_vpu_sleep_wake(dev, false, NULL, 0);
+
+ return ret;
+}
+
+int wave5_vpu_dec_finish_seq(struct vpu_instance *inst, u32 *fail_res)
+{
+ int ret;
+
+ wave5_bit_issue_command(inst, W5_DESTROY_INSTANCE);
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return -ETIMEDOUT;
+
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ *fail_res = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, *fail_res);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_dec_set_bitstream_flag(struct vpu_instance *inst, bool eos)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ enum bit_stream_mode bs_mode = (enum bit_stream_mode)p_dec_info->open_param.bitstream_mode;
+
+ p_dec_info->stream_endflag = eos ? 1 : 0;
+
+ if (bs_mode == BS_MODE_INTERRUPT) {
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_BS_OPTION, (p_dec_info->stream_endflag << 1) |
+ p_dec_info->stream_endflag);
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+
+ wave5_bit_issue_command(inst, W5_UPDATE_BS);
+		ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int wave5_dec_clr_disp_flag(struct vpu_instance *inst, unsigned int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_CLR_DISP_IDC, BIT(index));
+ vpu_write_reg(inst->dev, W5_CMD_DEC_SET_DISP_IDC, 0);
+ ret = wave5_send_query(inst, UPDATE_DISP_FLAG);
+
+ if (ret) {
+ if (ret == -EIO) {
+ u32 reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+ return ret;
+ }
+
+ p_dec_info->frame_display_flag = vpu_read_reg(inst->dev, W5_RET_DEC_DISP_IDC);
+
+ return 0;
+}
+
+int wave5_dec_set_disp_flag(struct vpu_instance *inst, unsigned int index)
+{
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_CLR_DISP_IDC, 0);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_SET_DISP_IDC, BIT(index));
+ ret = wave5_send_query(inst, UPDATE_DISP_FLAG);
+
+ if (ret) {
+ if (ret == -EIO) {
+ u32 reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_clear_interrupt(struct vpu_instance *inst, u32 flags)
+{
+ u32 interrupt_reason;
+
+ interrupt_reason = vpu_read_reg(inst->dev, W5_VPU_VINT_REASON_USR);
+ interrupt_reason &= ~flags;
+ vpu_write_reg(inst->dev, W5_VPU_VINT_REASON_USR, interrupt_reason);
+
+ return 0;
+}
+
+dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst)
+{
+ int ret;
+
+ ret = wave5_send_query(inst, GET_BS_RD_PTR);
+
+ if (ret)
+ return inst->codec_info->dec_info.stream_rd_ptr;
+
+ return wave5_read_reg_for_mem_addr(inst, W5_RET_QUERY_DEC_BS_RD_PTR);
+}
+
+int wave5_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr)
+{
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_RET_QUERY_DEC_SET_BS_RD_PTR, addr);
+
+ ret = wave5_send_query(inst, SET_BS_RD_PTR);
+
+ return ret;
+}
+
+/************************************************************************/
+/* ENCODER functions */
+/************************************************************************/
+
+int wave5_vpu_build_up_enc_param(struct device *dev, struct vpu_instance *inst,
+ struct enc_open_param *open_param)
+{
+ int ret;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ u32 reg_val;
+ struct dma_vpu_buf *sram_vb;
+ u32 bs_endian;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ dma_addr_t buffer_addr;
+ size_t buffer_size;
+
+ p_enc_info->cycle_per_tick = 256;
+ sram_vb = &vpu_dev->sram_buf;
+ p_enc_info->sec_axi_info.buf_base = sram_vb->daddr;
+ p_enc_info->sec_axi_info.buf_size = sram_vb->size;
+
+ if (vpu_dev->product == PRODUCT_ID_521)
+ p_enc_info->vb_work.size = WAVE521ENC_WORKBUF_SIZE;
+
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &p_enc_info->vb_work);
+ if (ret) {
+ memset(&p_enc_info->vb_work, 0, sizeof(p_enc_info->vb_work));
+ return ret;
+ }
+
+ wave5_vdi_clear_memory(vpu_dev, &p_enc_info->vb_work);
+
+ vpu_write_reg(inst->dev, W5_ADDR_WORK_BASE, p_enc_info->vb_work.daddr);
+ vpu_write_reg(inst->dev, W5_WORK_SIZE, p_enc_info->vb_work.size);
+
+ reg_val = wave5_vdi_convert_endian(vpu_dev, open_param->stream_endian);
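+	/* the BS_PARAM endian field takes the complement of the VDI endian code */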
+ bs_endian = (~reg_val & VDI_128BIT_ENDIAN_MASK);
+
+ reg_val = (open_param->line_buf_int_en << 6) | bs_endian;
+ vpu_write_reg(inst->dev, W5_CMD_BS_PARAM, reg_val);
+ vpu_write_reg(inst->dev, W5_CMD_EXT_ADDR, (open_param->pri_axprot << 20) |
+ (open_param->pri_axcache << 16) | inst->dev->ext_addr);
+ vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1, (COMMAND_QUEUE_DEPTH - 1));
+
+ reg_val = 0;
+ if (vpu_dev->product == PRODUCT_ID_521)
+ reg_val |= (open_param->sub_frame_sync_enable |
+ open_param->sub_frame_sync_mode << 1);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SRC_OPTIONS, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_VCORE_INFO, 1);
+
+ wave5_bit_issue_command(inst, W5_CREATE_INSTANCE);
+ // check QUEUE_DONE
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_CREATE_INSTANCE' timed out\n");
+ goto free_vb_work;
+ }
+
+ // Check if we were able to add the parameters into the VCPU QUEUE
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ ret = -EIO;
+ goto free_vb_work;
+ }
+
+ buffer_addr = open_param->bitstream_buffer;
+ buffer_size = open_param->bitstream_buffer_size;
+ p_enc_info->sub_frame_sync_config.sub_frame_sync_mode = open_param->sub_frame_sync_mode;
+ p_enc_info->sub_frame_sync_config.sub_frame_sync_on = open_param->sub_frame_sync_enable;
+ p_enc_info->stream_rd_ptr = buffer_addr;
+ p_enc_info->stream_wr_ptr = buffer_addr;
+ p_enc_info->line_buf_int_en = open_param->line_buf_int_en;
+ p_enc_info->stream_buf_start_addr = buffer_addr;
+ p_enc_info->stream_buf_size = buffer_size;
+ p_enc_info->stream_buf_end_addr = buffer_addr + buffer_size;
+ p_enc_info->stride = 0;
+ p_enc_info->initial_info_obtained = false;
+ p_enc_info->product_code = vpu_read_reg(inst->dev, W5_PRODUCT_NUMBER);
+
+ return 0;
+free_vb_work:
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work);
+ return ret;
+}
+
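+/*
+ * Fold the alignment padding into the conformance window, then remap the four
+ * window offsets according to the rotation/mirror (prp) mode so the signalled
+ * cropping matches the pre-processed source orientation.
+ */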
+static void wave5_set_enc_crop_info(u32 codec, struct enc_wave_param *param, int rot_mode,
+ int src_width, int src_height)
+{
+ int aligned_width = (codec == W_HEVC_ENC) ? ALIGN(src_width, 32) : ALIGN(src_width, 16);
+ int aligned_height = (codec == W_HEVC_ENC) ? ALIGN(src_height, 32) : ALIGN(src_height, 16);
+ int pad_right, pad_bot;
+ int crop_right, crop_left, crop_top, crop_bot;
+ int prp_mode = rot_mode >> 1; // remove prp_enable bit
+
+ if (codec == W_HEVC_ENC &&
+ (!rot_mode || prp_mode == 14)) // prp_mode 14 : hor_mir && ver_mir && rot_180
+ return;
+
+ pad_right = aligned_width - src_width;
+ pad_bot = aligned_height - src_height;
+
+ if (param->conf_win_right > 0)
+ crop_right = param->conf_win_right + pad_right;
+ else
+ crop_right = pad_right;
+
+ if (param->conf_win_bot > 0)
+ crop_bot = param->conf_win_bot + pad_bot;
+ else
+ crop_bot = pad_bot;
+
+ crop_top = param->conf_win_top;
+ crop_left = param->conf_win_left;
+
+ param->conf_win_top = crop_top;
+ param->conf_win_left = crop_left;
+ param->conf_win_bot = crop_bot;
+ param->conf_win_right = crop_right;
+
+ if (prp_mode == 1 || prp_mode == 15) {
+ param->conf_win_top = crop_right;
+ param->conf_win_left = crop_top;
+ param->conf_win_bot = crop_left;
+ param->conf_win_right = crop_bot;
+ } else if (prp_mode == 2 || prp_mode == 12) {
+ param->conf_win_top = crop_bot;
+ param->conf_win_left = crop_right;
+ param->conf_win_bot = crop_top;
+ param->conf_win_right = crop_left;
+ } else if (prp_mode == 3 || prp_mode == 13) {
+ param->conf_win_top = crop_left;
+ param->conf_win_left = crop_bot;
+ param->conf_win_bot = crop_right;
+ param->conf_win_right = crop_top;
+ } else if (prp_mode == 4 || prp_mode == 10) {
+ param->conf_win_top = crop_bot;
+ param->conf_win_bot = crop_top;
+ } else if (prp_mode == 8 || prp_mode == 6) {
+ param->conf_win_left = crop_right;
+ param->conf_win_right = crop_left;
+ } else if (prp_mode == 5 || prp_mode == 11) {
+ param->conf_win_top = crop_left;
+ param->conf_win_left = crop_top;
+ param->conf_win_bot = crop_right;
+ param->conf_win_right = crop_bot;
+ } else if (prp_mode == 7 || prp_mode == 9) {
+ param->conf_win_top = crop_right;
+ param->conf_win_left = crop_bot;
+ param->conf_win_bot = crop_left;
+ param->conf_win_right = crop_top;
+ }
+}
+
+int wave5_vpu_enc_init_seq(struct vpu_instance *inst)
+{
+ u32 reg_val = 0, rot_mir_mode, fixed_cu_size_mode = 0x7;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct enc_open_param *p_open_param = &p_enc_info->open_param;
+ struct enc_wave_param *p_param = &p_open_param->wave_param;
+ int ret;
+
+ if (inst->dev->product != PRODUCT_ID_521)
+ return -EINVAL;
+
+ /*==============================================*/
+ /* OPT_CUSTOM_GOP */
+ /*==============================================*/
+ /*
+ * SET_PARAM + CUSTOM_GOP
+	 * The custom GOP registers are programmed only when
+	 * gop_preset_idx == PRESET_IDX_CUSTOM_GOP.
+ */
+ if (p_param->gop_preset_idx == PRESET_IDX_CUSTOM_GOP) {
+ int i = 0, j = 0;
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_CUSTOM_GOP_PARAM,
+ p_param->gop_param.custom_gop_size);
+ for (i = 0; i < p_param->gop_param.custom_gop_size; i++) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_0 + (i * 4),
+ p_param->gop_param.pic_param[i].pic_type |
+ (p_param->gop_param.pic_param[i].poc_offset << 2) |
+ (p_param->gop_param.pic_param[i].pic_qp << 6) |
+ (p_param->gop_param.pic_param[i].use_multi_ref_p << 13) |
+ ((p_param->gop_param.pic_param[i].ref_poc_l0 & 0x1F) << 14) |
+ ((p_param->gop_param.pic_param[i].ref_poc_l1 & 0x1F) << 19) |
+ (p_param->gop_param.pic_param[i].temporal_id << 24));
+ }
+
+ for (j = i; j < MAX_GOP_NUM; j++)
+ vpu_write_reg(inst->dev,
+ W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_0 + (j * 4), 0);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SET_PARAM_OPTION, OPT_CUSTOM_GOP);
+ wave5_bit_issue_command(inst, W5_ENC_SET_PARAM);
+
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_ENC_SET_PARAM', timed out op=0x%x\n",
+ OPT_CUSTOM_GOP);
+ return ret;
+ }
+ }
+
+ /*======================================================================*/
+ /* OPT_COMMON: */
+	/* the last SET_PARAM command must be issued with OPT_COMMON */
+ /*======================================================================*/
+ rot_mir_mode = 0;
+ if (p_enc_info->rotation_enable) {
+ switch (p_enc_info->rotation_angle) {
+ case 0:
+ rot_mir_mode |= NONE_ROTATE;
+ break;
+ case 90:
+ rot_mir_mode |= ROT_CLOCKWISE_90;
+ break;
+ case 180:
+ rot_mir_mode |= ROT_CLOCKWISE_180;
+ break;
+ case 270:
+ rot_mir_mode |= ROT_CLOCKWISE_270;
+ break;
+ }
+ }
+
+ if (p_enc_info->mirror_enable) {
+ switch (p_enc_info->mirror_direction) {
+ case MIRDIR_NONE:
+ rot_mir_mode |= NONE_ROTATE;
+ break;
+ case MIRDIR_VER:
+ rot_mir_mode |= MIR_VER_FLIP;
+ break;
+ case MIRDIR_HOR:
+ rot_mir_mode |= MIR_HOR_FLIP;
+ break;
+ case MIRDIR_HOR_VER:
+ rot_mir_mode |= MIR_HOR_VER_FLIP;
+ break;
+ }
+ }
+
+ wave5_set_enc_crop_info(inst->std, p_param, rot_mir_mode, p_open_param->pic_width,
+ p_open_param->pic_height);
+
+ /* SET_PARAM + COMMON */
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SET_PARAM_OPTION, OPT_COMMON);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SRC_SIZE, p_open_param->pic_height << 16
+ | p_open_param->pic_width);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MAP_ENDIAN, VDI_LITTLE_ENDIAN);
+
+ if (inst->std == W_AVC_ENC) {
+ reg_val = p_param->profile | (p_param->level << 3) |
+ (p_param->internal_bit_depth << 14) | (p_param->use_long_term << 21);
+ if (p_param->scaling_list_enable == 2) {
+ reg_val |= BIT(22) | BIT(23); // [23]=USE_DEFAULT_SCALING_LIST
+ } else { // 0 or 1
+ reg_val |= (p_param->scaling_list_enable << 22);
+ }
+ } else { // HEVC enc
+ reg_val = p_param->profile |
+ (p_param->level << 3) |
+ (p_param->tier << 12) |
+ (p_param->internal_bit_depth << 14) |
+ (p_param->use_long_term << 21) |
+ (p_param->tmvp_enable << 23) |
+ (p_param->sao_enable << 24) |
+ (p_param->skip_intra_trans << 25) |
+ (p_param->strong_intra_smooth_enable << 27) |
+ (p_param->en_still_picture << 30);
+ if (p_param->scaling_list_enable == 2)
+ reg_val |= BIT(22) | BIT(31); // [31]=USE_DEFAULT_SCALING_LIST
+ else
+ reg_val |= (p_param->scaling_list_enable << 22);
+ }
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SPS_PARAM, reg_val);
+
+ reg_val = (p_param->lossless_enable) |
+ (p_param->const_intra_pred_flag << 1) |
+ (p_param->lf_cross_slice_boundary_enable << 2) |
+ (p_param->weight_pred_enable << 3) |
+ (p_param->wpp_enable << 4) |
+ (p_param->disable_deblk << 5) |
+ ((p_param->beta_offset_div2 & 0xF) << 6) |
+ ((p_param->tc_offset_div2 & 0xF) << 10) |
+ ((p_param->chroma_cb_qp_offset & 0x1F) << 14) |
+ ((p_param->chroma_cr_qp_offset & 0x1F) << 19) |
+ (p_param->transform8x8_enable << 29) |
+ (p_param->entropy_coding_mode << 30);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_PPS_PARAM, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_GOP_PARAM, p_param->gop_preset_idx);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_PARAM, p_param->intra_qp |
+ ((p_param->intra_period & 0x7ff) << 6) |
+ ((p_param->avc_idr_period & 0x7ff) << 17) |
+ ((p_param->forced_idr_header_enable & 3) << 28));
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_PARAM,
+ p_param->decoding_refresh_type | (p_param->intra_qp << 3) |
+ (p_param->forced_idr_header_enable << 9) |
+ (p_param->intra_period << 16));
+
+ reg_val = (p_param->use_recommend_enc_param) |
+ (p_param->rdo_skip << 2) |
+ (p_param->lambda_scaling_enable << 3) |
+ (p_param->coef_clear_disable << 4) |
+ (fixed_cu_size_mode << 5) |
+ (p_param->intra_nx_n_enable << 8) |
+ (p_param->max_num_merge << 18) |
+ (p_param->custom_md_enable << 20) |
+ (p_param->custom_lambda_enable << 21) |
+ (p_param->monochrome_enable << 22);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RDO_PARAM, reg_val);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_REFRESH,
+ p_param->intra_mb_refresh_arg << 16 | p_param->intra_mb_refresh_mode);
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_REFRESH,
+ p_param->intra_refresh_arg << 16 | p_param->intra_refresh_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_FRAME_RATE, p_open_param->frame_rate_info);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_TARGET_RATE, p_open_param->bit_rate);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_PARAM,
+ p_open_param->rc_enable | (p_param->mb_level_rc_enable << 1) |
+ (p_param->hvs_qp_enable << 2) | (p_param->hvs_qp_scale << 4) |
+ (p_param->bit_alloc_mode << 8) | (p_param->roi_enable << 13) |
+ ((p_param->initial_rc_qp & 0x3F) << 14) |
+ (p_open_param->vbv_buffer_size << 20));
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_PARAM,
+ p_open_param->rc_enable | (p_param->cu_level_rc_enable << 1) |
+ (p_param->hvs_qp_enable << 2) | (p_param->hvs_qp_scale << 4) |
+ (p_param->bit_alloc_mode << 8) | (p_param->roi_enable << 13) |
+ ((p_param->initial_rc_qp & 0x3F) << 14) |
+ (p_open_param->vbv_buffer_size << 20));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_WEIGHT_PARAM,
+ p_param->rc_weight_buf << 8 | p_param->rc_weight_param);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_MIN_MAX_QP, p_param->min_qp_i |
+ (p_param->max_qp_i << 6) | (p_param->hvs_max_delta_qp << 12));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_INTER_MIN_MAX_QP, p_param->min_qp_p |
+ (p_param->max_qp_p << 6) | (p_param->min_qp_b << 12) |
+ (p_param->max_qp_b << 18));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_0_3,
+ (u32)p_param->fixed_bit_ratio[0] |
+ ((u32)p_param->fixed_bit_ratio[1] << 8) |
+ ((u32)p_param->fixed_bit_ratio[2] << 16) |
+ ((u32)p_param->fixed_bit_ratio[3] << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_4_7,
+ (u32)p_param->fixed_bit_ratio[4] |
+ ((u32)p_param->fixed_bit_ratio[5] << 8) |
+ ((u32)p_param->fixed_bit_ratio[6] << 16) |
+ ((u32)p_param->fixed_bit_ratio[7] << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_ROT_PARAM, rot_mir_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_BG_PARAM, (p_param->bg_detect_enable) |
+ (p_param->bg_thr_diff << 1) | (p_param->bg_thr_mean_diff << 10) |
+ (p_param->bg_lambda_qp << 18) | ((p_param->bg_delta_qp & 0x1F) << 24) |
+ ((inst->std == W_AVC_ENC) ? p_param->s2fme_disable << 29 : 0));
+
+ if (inst->std == W_HEVC_ENC || inst->std == W_AVC_ENC) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_LAMBDA_ADDR,
+ p_param->custom_lambda_addr);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CONF_WIN_TOP_BOT,
+ p_param->conf_win_bot << 16 | p_param->conf_win_top);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CONF_WIN_LEFT_RIGHT,
+ p_param->conf_win_right << 16 | p_param->conf_win_left);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INDEPENDENT_SLICE,
+ p_param->avc_slice_arg << 16 | p_param->avc_slice_mode);
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INDEPENDENT_SLICE,
+ p_param->independ_slice_mode_arg << 16 |
+ p_param->independ_slice_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_USER_SCALING_LIST_ADDR,
+ p_param->user_scaling_list_addr);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NUM_UNITS_IN_TICK,
+ p_param->num_units_in_tick);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_TIME_SCALE, p_param->time_scale);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NUM_TICKS_POC_DIFF_ONE,
+ p_param->num_ticks_poc_diff_one);
+ }
+
+ if (inst->std == W_HEVC_ENC) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU04,
+ (p_param->pu04_delta_rate & 0xFF) |
+ ((p_param->pu04_intra_planar_delta_rate & 0xFF) << 8) |
+ ((p_param->pu04_intra_dc_delta_rate & 0xFF) << 16) |
+ ((p_param->pu04_intra_angle_delta_rate & 0xFF) << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU08,
+ (p_param->pu08_delta_rate & 0xFF) |
+ ((p_param->pu08_intra_planar_delta_rate & 0xFF) << 8) |
+ ((p_param->pu08_intra_dc_delta_rate & 0xFF) << 16) |
+ ((p_param->pu08_intra_angle_delta_rate & 0xFF) << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU16,
+ (p_param->pu16_delta_rate & 0xFF) |
+ ((p_param->pu16_intra_planar_delta_rate & 0xFF) << 8) |
+ ((p_param->pu16_intra_dc_delta_rate & 0xFF) << 16) |
+ ((p_param->pu16_intra_angle_delta_rate & 0xFF) << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU32,
+ (p_param->pu32_delta_rate & 0xFF) |
+ ((p_param->pu32_intra_planar_delta_rate & 0xFF) << 8) |
+ ((p_param->pu32_intra_dc_delta_rate & 0xFF) << 16) |
+ ((p_param->pu32_intra_angle_delta_rate & 0xFF) << 24));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU08,
+ (p_param->cu08_intra_delta_rate & 0xFF) |
+ ((p_param->cu08_inter_delta_rate & 0xFF) << 8) |
+ ((p_param->cu08_merge_delta_rate & 0xFF) << 16));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU16,
+ (p_param->cu16_intra_delta_rate & 0xFF) |
+ ((p_param->cu16_inter_delta_rate & 0xFF) << 8) |
+ ((p_param->cu16_merge_delta_rate & 0xFF) << 16));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU32,
+ (p_param->cu32_intra_delta_rate & 0xFF) |
+ ((p_param->cu32_inter_delta_rate & 0xFF) << 8) |
+ ((p_param->cu32_merge_delta_rate & 0xFF) << 16));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_DEPENDENT_SLICE,
+ p_param->depend_slice_mode_arg << 16 | p_param->depend_slice_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NR_PARAM, p_param->nr_y_enable |
+ (p_param->nr_cb_enable << 1) | (p_param->nr_cr_enable << 2) |
+ (p_param->nr_noise_est_enable << 3) |
+ (p_param->nr_noise_sigma_y << 4) |
+ (p_param->nr_noise_sigma_cb << 12) |
+ (p_param->nr_noise_sigma_cr << 20));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NR_WEIGHT,
+ p_param->nr_intra_weight_y |
+ (p_param->nr_intra_weight_cb << 5) |
+ (p_param->nr_intra_weight_cr << 10) |
+ (p_param->nr_inter_weight_y << 15) |
+ (p_param->nr_inter_weight_cb << 20) |
+ (p_param->nr_inter_weight_cr << 25));
+ }
+ if (p_enc_info->open_param.encode_vui_rbsp || p_enc_info->open_param.enc_hrd_rbsp_in_vps) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_VUI_HRD_PARAM,
+ (p_enc_info->open_param.hrd_rbsp_data_size << 18) |
+ (p_enc_info->open_param.vui_rbsp_data_size << 4) |
+ (p_enc_info->open_param.enc_hrd_rbsp_in_vps << 2) |
+ (p_enc_info->open_param.encode_vui_rbsp));
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_VUI_RBSP_ADDR,
+ p_enc_info->open_param.vui_rbsp_data_addr);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_HRD_RBSP_ADDR,
+ p_enc_info->open_param.hrd_rbsp_data_addr);
+ } else {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_VUI_HRD_PARAM, 0);
+ }
+
+ wave5_bit_issue_command(inst, W5_ENC_SET_PARAM);
+
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_ENC_SET_PARAM', timed out\n");
+ return ret;
+ }
+
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_enc_get_seq_info(struct vpu_instance *inst, struct enc_initial_info *info)
+{
+ int ret;
+ u32 reg_val;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ if (inst->dev->product != PRODUCT_ID_521)
+ return -EINVAL;
+
+ // send QUERY cmd
+ ret = wave5_send_query(inst, GET_RESULT);
+ if (ret) {
+ if (ret == -EIO) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+ return ret;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: init seq\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ if (vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_SUCCESS) != 1) {
+ info->seq_init_err_reason = vpu_read_reg(inst->dev, W5_RET_ENC_ERR_INFO);
+ ret = -EIO;
+ } else {
+ info->warn_info = vpu_read_reg(inst->dev, W5_RET_ENC_WARN_INFO);
+ }
+
+ info->min_frame_buffer_count = vpu_read_reg(inst->dev, W5_RET_ENC_NUM_REQUIRED_FB);
+ info->min_src_frame_count = vpu_read_reg(inst->dev, W5_RET_ENC_MIN_SRC_BUF_NUM);
+ info->max_latency_pictures = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_MAX_LATENCY_PICS);
+ info->vlc_buf_size = vpu_read_reg(inst->dev, W5_RET_VLC_BUF_SIZE);
+ info->param_buf_size = vpu_read_reg(inst->dev, W5_RET_PARAM_BUF_SIZE);
+ p_enc_info->vlc_buf_size = info->vlc_buf_size;
+ p_enc_info->param_buf_size = info->param_buf_size;
+
+ return ret;
+}
+
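+/*
+ * FBC stride helpers: align the width to 16 pixels, scale by a per-bit-depth
+ * factor (4 for 8-bit, 5 for 10-bit) and round the result up to 32 bytes.
+ */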
+static u32 calculate_luma_stride(u32 width, u32 bit_depth)
+{
+ return ALIGN(ALIGN(width, 16) * ((bit_depth > 8) ? 5 : 4), 32);
+}
+
+static u32 calculate_chroma_stride(u32 width, u32 bit_depth)
+{
+ return ALIGN(ALIGN(width / 2, 16) * ((bit_depth > 8) ? 5 : 4), 32);
+}
+
+int wave5_vpu_enc_register_framebuffer(struct device *dev, struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 stride;
+ u32 start_no, end_no;
+ size_t remain, idx, j, i, cnt_8_chunk;
+ u32 reg_val = 0, pic_size = 0, mv_col_size, fbc_y_tbl_size, fbc_c_tbl_size;
+ u32 sub_sampled_size = 0;
+ u32 endian, luma_stride, chroma_stride, frame_width, frame_height;
+ u32 buf_height = 0, buf_width = 0;
+ u32 bit_depth;
+ bool avc_encoding = (inst->std == W_AVC_ENC);
+ struct vpu_buf vb_mv = {0};
+ struct vpu_buf vb_fbc_y_tbl = {0};
+ struct vpu_buf vb_fbc_c_tbl = {0};
+ struct vpu_buf vb_sub_sam_buf = {0};
+ struct vpu_buf vb_task = {0};
+ struct enc_open_param *p_open_param;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ p_open_param = &p_enc_info->open_param;
+ mv_col_size = 0;
+ fbc_y_tbl_size = 0;
+ fbc_c_tbl_size = 0;
+ stride = p_enc_info->stride;
+ bit_depth = p_open_param->wave_param.internal_bit_depth;
+
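+	/*
+	 * AVC buffers are 16-pixel aligned; HEVC buffers are 8-pixel aligned,
+	 * widened to 32 when rotation or mirroring is active. A 90/270 degree
+	 * rotation swaps width and height.
+	 */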
+ if (avc_encoding) {
+ buf_width = ALIGN(p_open_param->pic_width, 16);
+ buf_height = ALIGN(p_open_param->pic_height, 16);
+
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ buf_width = ALIGN(p_open_param->pic_width, 16);
+ buf_height = ALIGN(p_open_param->pic_height, 16);
+ }
+
+ if (p_enc_info->rotation_angle == 90 || p_enc_info->rotation_angle == 270) {
+ buf_width = ALIGN(p_open_param->pic_height, 16);
+ buf_height = ALIGN(p_open_param->pic_width, 16);
+ }
+ } else {
+ buf_width = ALIGN(p_open_param->pic_width, 8);
+ buf_height = ALIGN(p_open_param->pic_height, 8);
+
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ buf_width = ALIGN(p_open_param->pic_width, 32);
+ buf_height = ALIGN(p_open_param->pic_height, 32);
+ }
+
+ if (p_enc_info->rotation_angle == 90 || p_enc_info->rotation_angle == 270) {
+ buf_width = ALIGN(p_open_param->pic_height, 32);
+ buf_height = ALIGN(p_open_param->pic_width, 32);
+ }
+ }
+
+ pic_size = (buf_width << 16) | buf_height;
+
+ if (avc_encoding) {
+ mv_col_size = WAVE5_ENC_AVC_BUF_SIZE(buf_width, buf_height);
+ vb_mv.daddr = 0;
+ vb_mv.size = ALIGN(mv_col_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ } else {
+ mv_col_size = WAVE5_ENC_HEVC_BUF_SIZE(buf_width, buf_height);
+ mv_col_size = ALIGN(mv_col_size, 16);
+ vb_mv.daddr = 0;
+ vb_mv.size = ALIGN(mv_col_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ }
+
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_mv);
+ if (ret)
+ return ret;
+
+ p_enc_info->vb_mv = vb_mv;
+
+ frame_width = ALIGN(buf_width, 16);
+ frame_height = ALIGN(buf_height, 16);
+ if (p_enc_info->product_code == WAVE521C_DUAL_CODE) {
+ // Use 1024 for H264(AVC) and 512 for H265(HEVC)
+ fbc_y_tbl_size = calculate_table_size(bit_depth, frame_width, frame_height,
+ (avc_encoding ? 1024 : 512));
+ } else {
+ fbc_y_tbl_size = WAVE5_FBC_LUMA_TABLE_SIZE(buf_width, buf_height);
+ fbc_y_tbl_size = ALIGN(fbc_y_tbl_size, 16);
+ }
+
+ vb_fbc_y_tbl.daddr = 0;
+ vb_fbc_y_tbl.size = ALIGN(fbc_y_tbl_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_fbc_y_tbl);
+ if (ret)
+ goto free_vb_fbc_y_tbl;
+
+ p_enc_info->vb_fbc_y_tbl = vb_fbc_y_tbl;
+
+ if (p_enc_info->product_code == WAVE521C_DUAL_CODE) {
+ // Use 1024 for H264(AVC) and 512 for HEVC
+ fbc_c_tbl_size = calculate_table_size(bit_depth, frame_width, frame_height,
+ (avc_encoding ? 1024 : 512));
+ } else {
+ fbc_c_tbl_size = WAVE5_FBC_CHROMA_TABLE_SIZE(buf_width, buf_height);
+ fbc_c_tbl_size = ALIGN(fbc_c_tbl_size, 16);
+ }
+
+ vb_fbc_c_tbl.daddr = 0;
+ vb_fbc_c_tbl.size = ALIGN(fbc_c_tbl_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_fbc_c_tbl);
+ if (ret)
+ goto free_vb_fbc_c_tbl;
+
+ p_enc_info->vb_fbc_c_tbl = vb_fbc_c_tbl;
+
+ if (avc_encoding)
+ sub_sampled_size = WAVE5_SUBSAMPLED_ONE_SIZE_AVC(buf_width, buf_height);
+ else
+ sub_sampled_size = WAVE5_SUBSAMPLED_ONE_SIZE(buf_width, buf_height);
+ vb_sub_sam_buf.size = ALIGN(sub_sampled_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ vb_sub_sam_buf.daddr = 0;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_sub_sam_buf);
+ if (ret)
+ goto free_vb_sam_buf;
+
+ p_enc_info->vb_sub_sam_buf = vb_sub_sam_buf;
+
+ vb_task.size = (p_enc_info->vlc_buf_size * VLC_BUF_NUM) +
+ (p_enc_info->param_buf_size * COMMAND_QUEUE_DEPTH);
+ vb_task.daddr = 0;
+ if (p_enc_info->vb_task.size == 0) {
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_task);
+ if (ret)
+ goto free_vb_task;
+
+ p_enc_info->vb_task = vb_task;
+
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_ADDR_TASK_BUF,
+ p_enc_info->vb_task.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_TASK_BUF_SIZE, vb_task.size);
+ }
+
+ // set sub-sampled buffer base addr
+ vpu_write_reg(inst->dev, W5_ADDR_SUB_SAMPLED_FB_BASE, vb_sub_sam_buf.daddr);
+ // set sub-sampled buffer size for one frame
+ vpu_write_reg(inst->dev, W5_SUB_SAMPLED_ONE_FB_SIZE, sub_sampled_size);
+
+ endian = wave5_vdi_convert_endian(vpu_dev, fb_arr[0].endian);
+
+ vpu_write_reg(inst->dev, W5_PIC_SIZE, pic_size);
+
+ // set stride of luma/chroma for compressed buffer
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ luma_stride = calculate_luma_stride(buf_width, bit_depth);
+ chroma_stride = calculate_chroma_stride(buf_width / 2, bit_depth);
+ } else {
+ luma_stride = calculate_luma_stride(p_open_param->pic_width, bit_depth);
+ chroma_stride = calculate_chroma_stride(p_open_param->pic_width / 2, bit_depth);
+ }
+
+ vpu_write_reg(inst->dev, W5_FBC_STRIDE, luma_stride << 16 | chroma_stride);
+ vpu_write_reg(inst->dev, W5_COMMON_PIC_INFO, stride);
+
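+	/*
+	 * Frame buffers are registered in chunks of up to eight per W5_SET_FB
+	 * command; W5_SFB_OPTION marks the first and last chunk of the set.
+	 */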
+ remain = count;
+ cnt_8_chunk = ALIGN(count, 8) / 8;
+ idx = 0;
+ for (j = 0; j < cnt_8_chunk; j++) {
+ reg_val = (endian << 16) | (j == cnt_8_chunk - 1) << 4 | ((j == 0) << 3);
+ reg_val |= (p_open_param->enable_non_ref_fbc_write << 26);
+ vpu_write_reg(inst->dev, W5_SFB_OPTION, reg_val);
+ start_no = j * 8;
+ end_no = start_no + ((remain >= 8) ? 8 : remain) - 1;
+
+ vpu_write_reg(inst->dev, W5_SET_FB_NUM, (start_no << 8) | end_no);
+
+ for (i = 0; i < 8 && i < remain; i++) {
+ vpu_write_reg(inst->dev, W5_ADDR_LUMA_BASE0 + (i << 4), fb_arr[i +
+ start_no].buf_y);
+ vpu_write_reg(inst->dev, W5_ADDR_CB_BASE0 + (i << 4),
+ fb_arr[i + start_no].buf_cb);
+ /* luma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_Y_OFFSET0 + (i << 4),
+ vb_fbc_y_tbl.daddr + idx * fbc_y_tbl_size);
+ /* chroma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4),
+ vb_fbc_c_tbl.daddr + idx * fbc_c_tbl_size);
+
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2),
+ vb_mv.daddr + idx * mv_col_size);
+ idx++;
+ }
+ remain -= i;
+
+ wave5_bit_issue_command(inst, W5_SET_FB);
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ goto free_vb_mem;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ ret = -EIO;
+ goto free_vb_mem;
+ }
+
+ return ret;
+
+free_vb_mem:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_task);
+free_vb_task:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_sub_sam_buf);
+free_vb_sam_buf:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_fbc_c_tbl);
+free_vb_fbc_c_tbl:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_fbc_y_tbl);
+free_vb_fbc_y_tbl:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_mv);
+ return ret;
+}
+
+int wave5_vpu_encode(struct vpu_instance *inst, struct enc_param *option, u32 *fail_res)
+{
+ u32 src_frame_format;
+ u32 reg_val = 0, bs_endian;
+ u32 src_stride_c = 0;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct frame_buffer *p_src_frame = option->source_frame;
+ struct enc_open_param *p_open_param = &p_enc_info->open_param;
+ bool justified = WTL_RIGHT_JUSTIFIED;
+ u32 format_no = WTL_PIXEL_8BIT;
+ int ret;
+
+ if (inst->dev->product != PRODUCT_ID_521)
+ return -EINVAL;
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_BS_START_ADDR, option->pic_stream_buffer_addr);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_BS_SIZE, option->pic_stream_buffer_size);
+ p_enc_info->stream_buf_start_addr = option->pic_stream_buffer_addr;
+ p_enc_info->stream_buf_size = option->pic_stream_buffer_size;
+ p_enc_info->stream_buf_end_addr =
+ option->pic_stream_buffer_addr + option->pic_stream_buffer_size;
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_AXI_SEL, DEFAULT_SRC_AXI);
+ /* secondary AXI */
+ reg_val = (p_enc_info->sec_axi_info.wave.use_enc_rdo_enable << 11) |
+ (p_enc_info->sec_axi_info.wave.use_enc_lf_enable << 15);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_USE_SEC_AXI, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_REPORT_PARAM, 0);
+
+ /*
+	 * CODEOPT_ENC_VCL implicitly encodes the headers together with the VCL
+	 * bitstream. (Use ENC_PUT_VIDEO_HEADER with give_command to encode only
+	 * a header.)
+ */
+ if (option->code_option.implicit_header_encode)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CODE_OPTION,
+ CODEOPT_ENC_HEADER_IMPLICIT | CODEOPT_ENC_VCL |
+ (option->code_option.encode_aud << 5) |
+ (option->code_option.encode_eos << 6) |
+ (option->code_option.encode_eob << 7));
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CODE_OPTION,
+ option->code_option.implicit_header_encode |
+ (option->code_option.encode_vcl << 1) |
+ (option->code_option.encode_vps << 2) |
+ (option->code_option.encode_sps << 3) |
+ (option->code_option.encode_pps << 4) |
+ (option->code_option.encode_aud << 5) |
+ (option->code_option.encode_eos << 6) |
+ (option->code_option.encode_eob << 7));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PIC_PARAM, option->skip_picture |
+ (option->force_pic_qp_enable << 1) | (option->force_pic_qp_i << 2) |
+ (option->force_pic_qp_p << 8) | (option->force_pic_qp_b << 14) |
+ (option->force_pic_type_enable << 20) | (option->force_pic_type << 21) |
+ (option->force_all_ctu_coef_drop_enable << 24));
+
+ if (option->src_end_flag)
+ // no more source images.
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_PIC_IDX, 0xFFFFFFFF);
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_PIC_IDX, option->src_idx);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_Y, p_src_frame->buf_y);
+ if (p_open_param->cbcr_order == CBCR_ORDER_NORMAL) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_U, p_src_frame->buf_cb);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_V, p_src_frame->buf_cr);
+ } else {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_U, p_src_frame->buf_cr);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_V, p_src_frame->buf_cb);
+ }
+
+ switch (p_open_param->src_format) {
+ case FORMAT_420:
+ case FORMAT_422:
+ case FORMAT_YUYV:
+ case FORMAT_YVYU:
+ case FORMAT_UYVY:
+ case FORMAT_VYUY:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_8BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format == FORMAT_422) ? src_stride_c * 2 :
+ src_stride_c;
+ break;
+ case FORMAT_420_P10_16BIT_MSB:
+ case FORMAT_422_P10_16BIT_MSB:
+ case FORMAT_YUYV_P10_16BIT_MSB:
+ case FORMAT_YVYU_P10_16BIT_MSB:
+ case FORMAT_UYVY_P10_16BIT_MSB:
+ case FORMAT_VYUY_P10_16BIT_MSB:
+ justified = WTL_RIGHT_JUSTIFIED;
+ format_no = WTL_PIXEL_16BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_16BIT_MSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_16BIT_LSB:
+ case FORMAT_422_P10_16BIT_LSB:
+ case FORMAT_YUYV_P10_16BIT_LSB:
+ case FORMAT_YVYU_P10_16BIT_LSB:
+ case FORMAT_UYVY_P10_16BIT_LSB:
+ case FORMAT_VYUY_P10_16BIT_LSB:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_16BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_16BIT_LSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_32BIT_MSB:
+ case FORMAT_422_P10_32BIT_MSB:
+ case FORMAT_YUYV_P10_32BIT_MSB:
+ case FORMAT_YVYU_P10_32BIT_MSB:
+ case FORMAT_UYVY_P10_32BIT_MSB:
+ case FORMAT_VYUY_P10_32BIT_MSB:
+ justified = WTL_RIGHT_JUSTIFIED;
+ format_no = WTL_PIXEL_32BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ ALIGN(p_src_frame->stride / 2, 16) * BIT(inst->cbcr_interleave);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_32BIT_MSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_32BIT_LSB:
+ case FORMAT_422_P10_32BIT_LSB:
+ case FORMAT_YUYV_P10_32BIT_LSB:
+ case FORMAT_YVYU_P10_32BIT_LSB:
+ case FORMAT_UYVY_P10_32BIT_LSB:
+ case FORMAT_VYUY_P10_32BIT_LSB:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_32BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ ALIGN(p_src_frame->stride / 2, 16) * BIT(inst->cbcr_interleave);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_32BIT_LSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
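+	/*
+	 * Semi-planar sources encode cbcr_interleave/nv21 in the two low bits;
+	 * packed YUV orderings override this with the fixed codes 4..7.
+	 */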
+ src_frame_format = (inst->cbcr_interleave << 1) | (inst->nv21);
+ switch (p_open_param->packed_format) {
+ case PACKED_YUYV:
+ src_frame_format = 4;
+ break;
+ case PACKED_YVYU:
+ src_frame_format = 5;
+ break;
+ case PACKED_UYVY:
+ src_frame_format = 6;
+ break;
+ case PACKED_VYUY:
+ src_frame_format = 7;
+ break;
+ default:
+ break;
+ }
+
+ reg_val = wave5_vdi_convert_endian(inst->dev, p_open_param->source_endian);
+ bs_endian = (~reg_val & VDI_128BIT_ENDIAN_MASK);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_STRIDE,
+ (p_src_frame->stride << 16) | src_stride_c);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_FORMAT, src_frame_format |
+ (format_no << 3) | (justified << 5) | (bs_endian << 6));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_ADDR,
+ option->custom_map_opt.addr_custom_map);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_PARAM,
+ option->custom_map_opt.custom_roi_map_enable |
+ (option->custom_map_opt.roi_avg_qp << 1) |
+ (option->custom_map_opt.custom_lambda_map_enable << 8) |
+ (option->custom_map_opt.custom_mode_map_enable << 9) |
+ (option->custom_map_opt.custom_coef_drop_enable << 10));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_LONGTERM_PIC,
+ option->use_cur_src_as_longterm_pic | (option->use_longterm_ref << 1));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_Y, option->wp_pix_sigma_y);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_C,
+ (option->wp_pix_sigma_cr << 16) | option->wp_pix_sigma_cb);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_MEAN_Y, option->wp_pix_mean_y);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_MEAN_C,
+ (option->wp_pix_mean_cr << 16) | (option->wp_pix_mean_cb));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PREFIX_SEI_INFO, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PREFIX_SEI_NAL_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SUFFIX_SEI_INFO, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SUFFIX_SEI_NAL_ADDR, 0);
+
+ wave5_bit_issue_command(inst, W5_ENC_PIC);
+
+ // check QUEUE_DONE
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "command: 'W5_ENC_PIC', timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ // Check if we were able to add a command into the VCPU QUEUE
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ *fail_res = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, *fail_res);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_enc_get_result(struct vpu_instance *inst, struct enc_output_info *result)
+{
+ int ret;
+ u32 encoding_success;
+ u32 reg_val;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (vpu_dev->product != PRODUCT_ID_521)
+ return -EINVAL;
+
+ ret = wave5_send_query(inst, GET_RESULT);
+ if (ret) {
+ if (ret == -EIO) {
+ reg_val = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, reg_val);
+ }
+ return ret;
+ }
+ dev_dbg(inst->dev->dev, "%s: enc pic complete\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ encoding_success = vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_SUCCESS);
+ if (!encoding_success) {
+ result->error_reason = vpu_read_reg(inst->dev, W5_RET_ENC_ERR_INFO);
+ return -EIO;
+ }
+
+ result->warn_info = vpu_read_reg(inst->dev, W5_RET_ENC_WARN_INFO);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_TYPE);
+ result->pic_type = reg_val & 0xFFFF;
+
+ result->enc_vcl_nut = vpu_read_reg(inst->dev, W5_RET_ENC_VCL_NUT);
+ /*
+ * To get the reconstructed frame use the following index on
+ * inst->frame_buf
+ */
+ result->recon_frame_index = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_IDX);
+ result->enc_pic_byte = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_BYTE);
+ result->enc_src_idx = vpu_read_reg(inst->dev, W5_RET_ENC_USED_SRC_IDX);
+ p_enc_info->stream_wr_ptr = wave5_read_reg_for_mem_addr(inst, W5_RET_ENC_WR_PTR);
+ p_enc_info->stream_rd_ptr = wave5_read_reg_for_mem_addr(inst, W5_RET_ENC_RD_PTR);
+
+ result->bitstream_buffer = wave5_read_reg_for_mem_addr(inst, W5_RET_ENC_RD_PTR);
+ result->rd_ptr = p_enc_info->stream_rd_ptr;
+ result->wr_ptr = p_enc_info->stream_wr_ptr;
+
+	// result for header-only (no VCL) encoding
+ if (result->recon_frame_index == RECON_IDX_FLAG_HEADER_ONLY)
+ result->bitstream_size = result->enc_pic_byte;
+ else if (result->recon_frame_index < 0)
+ result->bitstream_size = 0;
+ else
+ result->bitstream_size = result->enc_pic_byte;
+
+ result->enc_host_cmd_tick = vpu_read_reg(inst->dev, W5_RET_ENC_HOST_CMD_TICK);
+ result->enc_encode_end_tick = vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_END_TICK);
+
+ if (!p_enc_info->first_cycle_check) {
+ result->frame_cycle = (result->enc_encode_end_tick - result->enc_host_cmd_tick) *
+ p_enc_info->cycle_per_tick;
+ p_enc_info->first_cycle_check = true;
+ } else {
+ result->frame_cycle =
+ (result->enc_encode_end_tick - vpu_dev->last_performance_cycles) *
+ p_enc_info->cycle_per_tick;
+ if (vpu_dev->last_performance_cycles < result->enc_host_cmd_tick)
+ result->frame_cycle = (result->enc_encode_end_tick -
+ result->enc_host_cmd_tick) * p_enc_info->cycle_per_tick;
+ }
+ vpu_dev->last_performance_cycles = result->enc_encode_end_tick;
+
+ return 0;
+}
+
+int wave5_vpu_enc_finish_seq(struct vpu_instance *inst, u32 *fail_res)
+{
+ int ret;
+
+ if (inst->dev->product != PRODUCT_ID_521)
+ return -EINVAL;
+
+ wave5_bit_issue_command(inst, W5_DESTROY_INSTANCE);
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return -ETIMEDOUT;
+
+ if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS)) {
+ *fail_res = vpu_read_reg(inst->dev, W5_RET_FAIL_REASON);
+ wave5_print_reg_err(inst->dev, *fail_res);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int wave5_vpu_enc_check_common_param_valid(struct vpu_instance *inst,
+ struct enc_open_param *open_param)
+{
+ int i = 0;
+ bool low_delay = true;
+ struct enc_wave_param *param = &open_param->wave_param;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct device *dev = vpu_dev->dev;
+ u32 num_ctu_row = (open_param->pic_height + 64 - 1) / 64;
+ u32 num_ctu_col = (open_param->pic_width + 64 - 1) / 64;
+ u32 ctu_sz = num_ctu_col * num_ctu_row;
+
+ // check low-delay gop structure
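+	// (a custom GOP counts as low delay when its POC offsets increase monotonically)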
+ if (param->gop_preset_idx == PRESET_IDX_CUSTOM_GOP) { /* common gop */
+ if (param->gop_param.custom_gop_size > 1) {
+ s32 min_val = param->gop_param.pic_param[0].poc_offset;
+
+ for (i = 1; i < param->gop_param.custom_gop_size; i++) {
+ if (min_val > param->gop_param.pic_param[i].poc_offset) {
+ low_delay = false;
+ break;
+ }
+ min_val = param->gop_param.pic_param[i].poc_offset;
+ }
+ }
+ }
+
+ if (inst->std == W_HEVC_ENC && low_delay &&
+ param->decoding_refresh_type == DEC_REFRESH_TYPE_CRA) {
+ dev_warn(dev,
+ "dec_refresh_type(CRA) shouldn't be used together with low delay GOP\n");
+ dev_warn(dev, "Suggested configuration parameter: decoding refresh type (IDR)\n");
+		param->decoding_refresh_type = 2; /* IDR */
+ }
+
+ if (param->gop_preset_idx == PRESET_IDX_CUSTOM_GOP) {
+ for (i = 0; i < param->gop_param.custom_gop_size; i++) {
+ if (param->gop_param.pic_param[i].temporal_id >= MAX_NUM_TEMPORAL_LAYER) {
+ dev_err(dev, "temporal_id: %d exceeds MAX_NUM_TEMPORAL_LAYER (%u)\n",
+ param->gop_param.pic_param[i].temporal_id,
+ MAX_NUM_TEMPORAL_LAYER);
+ return -EINVAL;
+ }
+
+ if (param->gop_param.pic_param[i].temporal_id < 0) {
+				dev_err(dev, "temporal_id: %d must be greater than or equal to 0\n",
+ param->gop_param.pic_param[i].temporal_id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (param->wpp_enable && param->independ_slice_mode) {
+ unsigned int num_ctb_in_width = ALIGN(open_param->pic_width, 64) >> 6;
+
+ if (param->independ_slice_mode_arg % num_ctb_in_width) {
+ dev_err(dev, "independ_slice_mode_arg %u must be a multiple of %u\n",
+ param->independ_slice_mode_arg, num_ctb_in_width);
+ return -EINVAL;
+ }
+ }
+
+ // multi-slice & wpp
+ if (param->wpp_enable && param->depend_slice_mode) {
+ dev_err(dev, "wpp_enable && depend_slice_mode cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
+ if (!param->independ_slice_mode && param->depend_slice_mode) {
+ dev_err(dev, "depend_slice_mode requires independ_slice_mode\n");
+ return -EINVAL;
+ } else if (param->independ_slice_mode &&
+ param->depend_slice_mode == DEPEND_SLICE_MODE_RECOMMENDED &&
+ param->independ_slice_mode_arg < param->depend_slice_mode_arg) {
+		dev_err(dev, "independ_slice_mode_arg: %u must not be smaller than %u\n",
+ param->independ_slice_mode_arg, param->depend_slice_mode_arg);
+ return -EINVAL;
+ }
+
+ if (param->independ_slice_mode && param->independ_slice_mode_arg > 65535) {
+		dev_err(dev, "independ_slice_mode_arg: %u must not exceed 65535\n",
+ param->independ_slice_mode_arg);
+ return -EINVAL;
+ }
+
+ if (param->depend_slice_mode && param->depend_slice_mode_arg > 65535) {
+		dev_err(dev, "depend_slice_mode_arg: %u must not exceed 65535\n",
+ param->depend_slice_mode_arg);
+ return -EINVAL;
+ }
+
+ if (param->conf_win_top % 2) {
+ dev_err(dev, "conf_win_top: %u, must be a multiple of 2\n", param->conf_win_top);
+ return -EINVAL;
+ }
+
+ if (param->conf_win_bot % 2) {
+ dev_err(dev, "conf_win_bot: %u, must be a multiple of 2\n", param->conf_win_bot);
+ return -EINVAL;
+ }
+
+ if (param->conf_win_left % 2) {
+ dev_err(dev, "conf_win_left: %u, must be a multiple of 2\n", param->conf_win_left);
+ return -EINVAL;
+ }
+
+ if (param->conf_win_right % 2) {
+		dev_err(dev, "conf_win_right: %u, must be a multiple of 2\n",
+ param->conf_win_right);
+ return -EINVAL;
+ }
+
+ if (param->lossless_enable && (param->nr_y_enable || param->nr_cb_enable ||
+ param->nr_cr_enable)) {
+ /* Noise reduction => en_nr_y, en_nr_cb, en_nr_cr */
+ dev_err(dev, "option noise_reduction cannot be used with lossless_coding\n");
+ return -EINVAL;
+ }
+
+ if (param->lossless_enable && param->bg_detect_enable) {
+ dev_err(dev, "option bg_detect cannot be used with lossless_coding\n");
+ return -EINVAL;
+ }
+
+ if (param->lossless_enable && open_param->rc_enable) {
+ dev_err(dev, "option rate_control cannot be used with lossless_coding\n");
+ return -EINVAL;
+ }
+
+ if (param->lossless_enable && param->roi_enable) {
+ dev_err(dev, "option roi cannot be used with lossless_coding\n");
+ return -EINVAL;
+ }
+
+ if (param->lossless_enable && !param->skip_intra_trans) {
+ dev_err(dev, "option intra_trans_skip must be enabled with lossless_coding\n");
+ return -EINVAL;
+ }
+
+ // intra refresh
+ if (param->intra_refresh_mode && param->intra_refresh_arg == 0) {
+ dev_err(dev, "Invalid refresh argument, mode: %u, refresh: %u must be > 0\n",
+ param->intra_refresh_mode, param->intra_refresh_arg);
+ return -EINVAL;
+ }
+ switch (param->intra_refresh_mode) {
+ case REFRESH_MODE_CTU_ROWS:
+ if (param->intra_refresh_arg > num_ctu_row)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTU_COLUMNS:
+ if (param->intra_refresh_arg > num_ctu_col)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTU_STEP_SIZE:
+ if (param->intra_refresh_arg > ctu_sz)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTUS:
+ if (param->intra_refresh_arg > ctu_sz)
+ goto invalid_refresh_argument;
+ if (param->lossless_enable) {
+ dev_err(dev, "mode: %u cannot be used with lossless_enable\n",
+ param->intra_refresh_mode);
+ return -EINVAL;
+ }
+ if (param->roi_enable) {
+ dev_err(dev, "mode: %u cannot be used with roi_enable\n",
+ param->intra_refresh_mode);
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+
+invalid_refresh_argument:
+ dev_err(dev, "Invalid refresh argument, mode: %u, refresh: %u > W(%u)xH(%u)\n",
+ param->intra_refresh_mode, param->intra_refresh_arg,
+ num_ctu_row, num_ctu_col);
+ return -EINVAL;
+}
+
+static int wave5_vpu_enc_check_param_valid(struct vpu_device *vpu_dev,
+ struct enc_open_param *open_param)
+{
+ struct enc_wave_param *param = &open_param->wave_param;
+
+ if (open_param->rc_enable) {
+ if (param->min_qp_i > param->max_qp_i || param->min_qp_p > param->max_qp_p ||
+ param->min_qp_b > param->max_qp_b) {
+ dev_err(vpu_dev->dev, "Configuration failed because min_qp is greater than max_qp\n");
+ dev_err(vpu_dev->dev, "Suggested configuration parameters: min_qp = max_qp\n");
+ return -EINVAL;
+ }
+
+ if (open_param->bit_rate <= (int)open_param->frame_rate_info) {
+ dev_err(vpu_dev->dev,
+ "enc_bit_rate: %u must be greater than the frame_rate: %u\n",
+ open_param->bit_rate, (int)open_param->frame_rate_info);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_check_custom_gop(struct vpu_device *vpu_dev,
+ struct enc_open_param *open_param)
+{
+ struct custom_gop_param *gop_param;
+ struct custom_gop_pic_param *gop_pic_param;
+ struct custom_gop_pic_param new_gop[MAX_GOP_NUM * 2 + 1];
+
+ unsigned int i, ei, gi;
+ u32 gop_size;
+ s32 curr_poc, ref_poc;
+ s32 enc_tid[MAX_GOP_NUM * 2 + 1];
+
+ gop_param = &open_param->wave_param.gop_param;
+ gop_size = gop_param->custom_gop_size;
+
+ new_gop[0].poc_offset = 0;
+ new_gop[0].temporal_id = 0;
+ new_gop[0].pic_type = PIC_TYPE_I;
+ new_gop[0].use_multi_ref_p = 0;
+ enc_tid[0] = 0;
+
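+ /*
+ * Unroll the custom GOP over two periods so that references crossing
+ * the GOP boundary are validated as well (checked below for entries
+ * with ei > gop_size).
+ */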
+ for (i = 0; i < gop_size * 2; i++) {
+ ei = i % gop_size;
+ gi = i / gop_size;
+ gop_pic_param = &gop_param->pic_param[ei];
+
+ curr_poc = gi * gop_size + gop_pic_param->poc_offset;
+ new_gop[i + 1].poc_offset = curr_poc;
+ new_gop[i + 1].temporal_id = gop_pic_param->temporal_id;
+ new_gop[i + 1].pic_type = gop_pic_param->pic_type;
+ new_gop[i + 1].ref_poc_l0 = gop_pic_param->ref_poc_l0 + gi * gop_size;
+ new_gop[i + 1].ref_poc_l1 = gop_pic_param->ref_poc_l1 + gi * gop_size;
+ new_gop[i + 1].use_multi_ref_p = gop_pic_param->use_multi_ref_p;
+ enc_tid[i + 1] = -1;
+ }
+
+ for (i = 0; i < gop_size; i++) {
+ gop_pic_param = &gop_param->pic_param[i];
+
+ if (gop_pic_param->poc_offset <= 0) {
+ dev_err(vpu_dev->dev, "POC of the %u-th picture must be greater than 0\n", i + 1);
+ return -EINVAL;
+ }
+ if (gop_pic_param->poc_offset > gop_size) {
+ dev_err(vpu_dev->dev, "POC of the %u-th picture must not exceed gop_size\n", i + 1);
+ return -EINVAL;
+ }
+ if (gop_pic_param->temporal_id < 0) {
+ dev_err(vpu_dev->dev, "temporal_id of the %u-th picture must not be negative\n", i + 1);
+ return -EINVAL;
+ }
+ }
+
+ for (ei = 1; ei < gop_size * 2 + 1; ei++) {
+ struct custom_gop_pic_param *cur_pic = &new_gop[ei];
+
+ if (ei <= gop_size) {
+ enc_tid[cur_pic->poc_offset] = cur_pic->temporal_id;
+ continue;
+ }
+
+ if (new_gop[ei].pic_type != PIC_TYPE_I) {
+ ref_poc = cur_pic->ref_poc_l0;
+
+ /* reference picture is not encoded yet */
+ if (enc_tid[ref_poc] < 0) {
+ dev_err(vpu_dev->dev, "1st ref pic can't be ref of pic (POC: %u)\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ if (enc_tid[ref_poc] > cur_pic->temporal_id) {
+ dev_err(vpu_dev->dev, "wrong temporal_id of pic (POC: %u)\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ if (ref_poc >= cur_pic->poc_offset) {
+ dev_err(vpu_dev->dev, "POC of 1st ref pic of %u-th pic is wrong\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ }
+ if (new_gop[ei].pic_type != PIC_TYPE_P) {
+ ref_poc = cur_pic->ref_poc_l1;
+
+ /* reference picture is not encoded yet */
+ if (enc_tid[ref_poc] < 0) {
+ dev_err(vpu_dev->dev, "2nd ref pic can't be ref of pic (POC: %u)\n"
+ , cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ if (enc_tid[ref_poc] > cur_pic->temporal_id) {
+ dev_err(vpu_dev->dev, "temporal_id of %u-th picture is wrong\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ if (new_gop[ei].pic_type == PIC_TYPE_P && new_gop[ei].use_multi_ref_p > 0) {
+ if (ref_poc >= cur_pic->poc_offset) {
+ dev_err(vpu_dev->dev, "bad POC of 2nd ref pic of %uth pic\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ } else if (ref_poc == cur_pic->poc_offset) {
+ /* HOST_PIC_TYPE_B */
+ dev_err(vpu_dev->dev, "POC of 2nd ref pic of %uth pic is wrong\n",
+ cur_pic->poc_offset - gop_size);
+ return -EINVAL;
+ }
+ }
+ curr_poc = cur_pic->poc_offset;
+ enc_tid[curr_poc] = cur_pic->temporal_id;
+ }
+ return 0;
+}
+
+int wave5_vpu_enc_check_open_param(struct vpu_instance *inst, struct enc_open_param *open_param)
+{
+ u32 pic_width;
+ u32 pic_height;
+ s32 product_id = inst->dev->product;
+ struct vpu_attr *p_attr = &inst->dev->attr;
+ struct enc_wave_param *param;
+
+ if (!open_param)
+ return -EINVAL;
+
+ param = &open_param->wave_param;
+ pic_width = open_param->pic_width;
+ pic_height = open_param->pic_height;
+
+ if (inst->id >= MAX_NUM_INSTANCE) {
+ dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
+ inst->id, MAX_NUM_INSTANCE);
+ return -EOPNOTSUPP;
+ }
+
+ if (inst->std != W_HEVC_ENC &&
+ !(inst->std == W_AVC_ENC && product_id == PRODUCT_ID_521)) {
+ dev_err(inst->dev->dev, "Unsupported encoder-codec & product combination\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (param->internal_bit_depth == 10) {
+ if (inst->std == W_HEVC_ENC && !p_attr->support_hevc10bit_enc) {
+ dev_err(inst->dev->dev,
+ "Flag support_hevc10bit_enc must be set to encode 10bit HEVC\n");
+ return -EOPNOTSUPP;
+ } else if (inst->std == W_AVC_ENC && !p_attr->support_avc10bit_enc) {
+ dev_err(inst->dev->dev,
+ "Flag support_avc10bit_enc must be set to encode 10bit AVC\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (open_param->ring_buffer_enable) {
+ if (open_param->bitstream_buffer % 8) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer must be aligned to a multiple of 8\n");
+ return -EINVAL;
+ }
+ if (open_param->bitstream_buffer_size % 1024 ||
+ open_param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer size must be aligned to a multiple of 1024 and have a minimum size of %u\n",
+ MIN_BITSTREAM_BUFFER_SIZE);
+ return -EINVAL;
+ }
+ if (product_id == PRODUCT_ID_521) {
+ if (open_param->bitstream_buffer % 16) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer must be aligned to a multiple of 16\n");
+ return -EINVAL;
+ }
+ if (open_param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE_WAVE521) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer too small: %u (minimum: %u)\n",
+ open_param->bitstream_buffer_size,
+ MIN_BITSTREAM_BUFFER_SIZE_WAVE521);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (!open_param->frame_rate_info) {
+ dev_err(inst->dev->dev, "frame_rate_info must not be zero\n");
+ return -EINVAL;
+ }
+ if (open_param->bit_rate > MAX_BIT_RATE) {
+ dev_err(inst->dev->dev, "Invalid encoding bit-rate: %u (valid: 0-%u)\n",
+ open_param->bit_rate, MAX_BIT_RATE);
+ return -EINVAL;
+ }
+
+ if (pic_width < W5_MIN_ENC_PIC_WIDTH || pic_width > W5_MAX_ENC_PIC_WIDTH ||
+ pic_height < W5_MIN_ENC_PIC_HEIGHT || pic_height > W5_MAX_ENC_PIC_HEIGHT) {
+ dev_err(inst->dev->dev, "Invalid encoding dimension: %ux%u\n",
+ pic_width, pic_height);
+ return -EINVAL;
+ }
+
+ if (param->profile) {
+ if (inst->std == W_HEVC_ENC) {
+ if (!(param->profile == HEVC_PROFILE_MAIN &&
+ param->internal_bit_depth <= 8) &&
+ !(param->profile == HEVC_PROFILE_MAIN10 &&
+ param->internal_bit_depth >= 10) &&
+ param->profile != HEVC_PROFILE_STILLPICTURE) {
+ dev_err(inst->dev->dev,
+ "Invalid HEVC encoding profile: %u (bit-depth: %u)\n",
+ param->profile, param->internal_bit_depth);
+ return -EINVAL;
+ }
+ } else if (inst->std == W_AVC_ENC) {
+ if (param->internal_bit_depth > 8 &&
+ param->profile != H264_PROFILE_HIGH10) {
+ dev_err(inst->dev->dev,
+ "Invalid AVC encoding profile: %u (bit-depth: %u)\n",
+ param->profile, param->internal_bit_depth);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (param->decoding_refresh_type > DEC_REFRESH_TYPE_IDR) {
+ dev_err(inst->dev->dev, "Invalid decoding refresh type: %u (valid: 0-2)\n",
+ param->decoding_refresh_type);
+ return -EINVAL;
+ }
+
+ if (param->gop_preset_idx == PRESET_IDX_CUSTOM_GOP) {
+ if (param->gop_param.custom_gop_size < 1 ||
+ param->gop_param.custom_gop_size > MAX_GOP_NUM) {
+ dev_err(inst->dev->dev,
+ "Invalid custom group of pictures size: %u (valid: 1-%u)\n",
+ param->gop_param.custom_gop_size, MAX_GOP_NUM);
+ return -EINVAL;
+ }
+ }
+
+ if (inst->std == W_AVC_ENC && param->custom_lambda_enable) {
+ dev_err(inst->dev->dev,
+ "Cannot combine AVC encoding with the custom lambda option\n");
+ return -EINVAL;
+ }
+ if (param->intra_refresh_mode > REFRESH_MODE_CTUS) {
+ dev_err(inst->dev->dev, "Invalid intra refresh mode: %d (valid: 0-4)\n",
+ param->intra_refresh_mode);
+ return -EINVAL;
+ }
+
+ if (inst->std == W_HEVC_ENC && param->independ_slice_mode &&
+ param->depend_slice_mode > DEPEND_SLICE_MODE_BOOST) {
+ dev_err(inst->dev->dev,
+ "Can't combine slice modes: independent and fast dependent for HEVC\n");
+ return -EINVAL;
+ }
+
+ if (param->scaling_list_enable > 2) {
+ dev_err(inst->dev->dev, "Invalid scaling_list_enable: %u (valid: 0-2)\n",
+ param->scaling_list_enable);
+ return -EINVAL;
+ }
+
+ if (!param->disable_deblk) {
+ if (param->beta_offset_div2 < -6 || param->beta_offset_div2 > 6) {
+ dev_err(inst->dev->dev, "Invalid beta offset: %d (valid: -6-6)\n",
+ param->beta_offset_div2);
+ return -EINVAL;
+ }
+
+ if (param->tc_offset_div2 < -6 || param->tc_offset_div2 > 6) {
+ dev_err(inst->dev->dev, "Invalid tc offset: %d (valid: -6-6)\n",
+ param->tc_offset_div2);
+ return -EINVAL;
+ }
+ }
+
+ if (param->intra_qp > MAX_INTRA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid intra quantization parameter: %u (valid: 0-%u)\n",
+ param->intra_qp, MAX_INTRA_QP);
+ return -EINVAL;
+ }
+
+ if (open_param->rc_enable) {
+ if (param->min_qp_i > MAX_INTRA_QP || param->max_qp_i > MAX_INTRA_QP ||
+ param->min_qp_p > MAX_INTRA_QP || param->max_qp_p > MAX_INTRA_QP ||
+ param->min_qp_b > MAX_INTRA_QP || param->max_qp_b > MAX_INTRA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid quantization parameter min/max values: I: %u-%u, P: %u-%u, B: %u-%u (valid for each: 0-%u)\n",
+ param->min_qp_i, param->max_qp_i, param->min_qp_p, param->max_qp_p,
+ param->min_qp_b, param->max_qp_b, MAX_INTRA_QP);
+ return -EINVAL;
+ }
+
+ if (param->hvs_qp_enable && param->hvs_max_delta_qp > MAX_HVS_MAX_DELTA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid HVS max delta quantization parameter: %u (valid: 0-%u)\n",
+ param->hvs_max_delta_qp, MAX_HVS_MAX_DELTA_QP);
+ return -EINVAL;
+ }
+
+ if (param->bit_alloc_mode > BIT_ALLOC_MODE_FIXED_RATIO) {
+ dev_err(inst->dev->dev, "Invalid bit alloc mode: %u (valid: 0-2)\n",
+ param->bit_alloc_mode);
+ return -EINVAL;
+ }
+
+ if (open_param->vbv_buffer_size < MIN_VBV_BUFFER_SIZE ||
+ open_param->vbv_buffer_size > MAX_VBV_BUFFER_SIZE) {
+ dev_err(inst->dev->dev, "VBV buffer size: %u (valid: %u-%u)\n",
+ open_param->vbv_buffer_size, MIN_VBV_BUFFER_SIZE,
+ MAX_VBV_BUFFER_SIZE);
+ return -EINVAL;
+ }
+ }
+
+ if (wave5_vpu_enc_check_common_param_valid(inst, open_param))
+ return -EINVAL;
+
+ if (wave5_vpu_enc_check_param_valid(inst->dev, open_param))
+ return -EINVAL;
+
+ if (param->gop_preset_idx == PRESET_IDX_CUSTOM_GOP) {
+ if (wave5_vpu_enc_check_custom_gop(inst->dev, open_param))
+ return -EINVAL;
+ }
+
+ if (param->chroma_cb_qp_offset < -12 || param->chroma_cb_qp_offset > 12) {
+ dev_err(inst->dev->dev,
+ "Invalid chroma Cb quantization parameter offset: %d (valid: -12-12)\n",
+ param->chroma_cb_qp_offset);
+ return -EINVAL;
+ }
+
+ if (param->chroma_cr_qp_offset < -12 || param->chroma_cr_qp_offset > 12) {
+ dev_err(inst->dev->dev,
+ "Invalid chroma Cr quantization parameter offset: %d (valid: -12-12)\n",
+ param->chroma_cr_qp_offset);
+ return -EINVAL;
+ }
+
+ if (param->intra_refresh_mode == REFRESH_MODE_CTU_STEP_SIZE && !param->intra_refresh_arg) {
+ dev_err(inst->dev->dev,
+ "Intra refresh mode CTU step-size requires an argument\n");
+ return -EINVAL;
+ }
+
+ if (inst->std == W_HEVC_ENC) {
+ if (param->nr_noise_sigma_y > MAX_NOISE_SIGMA ||
+ param->nr_noise_sigma_cb > MAX_NOISE_SIGMA ||
+ param->nr_noise_sigma_cr > MAX_NOISE_SIGMA) {
+ dev_err(inst->dev->dev,
+ "Invalid noise sigma Y(%u) Cb(%u) Cr(%u) (valid: %u)\n",
+ param->nr_noise_sigma_y, param->nr_noise_sigma_cb,
+ param->nr_noise_sigma_cr, MAX_NOISE_SIGMA);
+ return -EINVAL;
+ }
+
+ if (param->nr_intra_weight_y > MAX_INTRA_WEIGHT ||
+ param->nr_intra_weight_cb > MAX_INTRA_WEIGHT ||
+ param->nr_intra_weight_cr > MAX_INTRA_WEIGHT) {
+ dev_err(inst->dev->dev,
+ "Invalid intra weight Y(%u) Cb(%u) Cr(%u) (valid: %u)\n",
+ param->nr_intra_weight_y, param->nr_intra_weight_cb,
+ param->nr_intra_weight_cr, MAX_INTRA_WEIGHT);
+ return -EINVAL;
+ }
+
+ if (param->nr_inter_weight_y > MAX_INTER_WEIGHT ||
+ param->nr_inter_weight_cb > MAX_INTER_WEIGHT ||
+ param->nr_inter_weight_cr > MAX_INTER_WEIGHT) {
+ dev_err(inst->dev->dev,
+ "Invalid inter weight Y(%u) Cb(%u) Cr(%u) (valid: %u)\n",
+ param->nr_inter_weight_y, param->nr_inter_weight_cb,
+ param->nr_inter_weight_cr, MAX_INTER_WEIGHT);
+ return -EINVAL;
+ }
+
+ if ((param->nr_y_enable || param->nr_cb_enable || param->nr_cr_enable) &&
+ param->lossless_enable) {
+ dev_err(inst->dev->dev,
+ "Can't enable lossless mode with either nr_y, nr_cb or nr_cr\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-regdefine.h b/drivers/media/platform/chips-media/wave5/wave5-regdefine.h
new file mode 100644
index 000000000000..79b2f17dd7f0
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-regdefine.h
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - wave5 register definitions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE5_REGISTER_DEFINE_H__
+#define __WAVE5_REGISTER_DEFINE_H__
+
+enum W5_VPU_COMMAND {
+ W5_INIT_VPU = 0x0001,
+ W5_WAKEUP_VPU = 0x0002,
+ W5_SLEEP_VPU = 0x0004,
+ W5_CREATE_INSTANCE = 0x0008, /* queuing command */
+ W5_FLUSH_INSTANCE = 0x0010,
+ W5_DESTROY_INSTANCE = 0x0020, /* queuing command */
+ W5_INIT_SEQ = 0x0040, /* queuing command */
+ W5_SET_FB = 0x0080,
+ W5_DEC_PIC = 0x0100, /* queuing command */
+ W5_ENC_PIC = 0x0100, /* queuing command */
+ W5_ENC_SET_PARAM = 0x0200, /* queuing command */
+ W5_QUERY = 0x4000,
+ W5_UPDATE_BS = 0x8000,
+ W5_MAX_VPU_COMD = 0x10000,
+};
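+/*
+ * Note: W5_DEC_PIC and W5_ENC_PIC share opcode 0x0100; an instance is
+ * either a decoder or an encoder, so the command is presumably resolved
+ * unambiguously per instance by the firmware.
+ */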
+
+enum QUERY_OPT {
+ GET_VPU_INFO = 0,
+ SET_WRITE_PROT = 1,
+ GET_RESULT = 2,
+ UPDATE_DISP_FLAG = 3,
+ GET_BW_REPORT = 4,
+ GET_BS_RD_PTR = 5, // for decoder
+ GET_BS_WR_PTR = 6, // for encoder
+ GET_SRC_BUF_FLAG = 7, // for encoder
+ SET_BS_RD_PTR = 8, // for decoder
+ GET_DEBUG_INFO = 0x61,
+};
+
+/*
+ * Flag signalling that the user data buffer is full.
+ * A value of 1 specifies that the decoded frame carries more user data
+ * than fits into the VPU internal buffer. The VPU only dumps the
+ * internal buffer size of user data to the USER_DATA_BUF_BASE buffer
+ * and cannot report the remaining user data once the internal buffer
+ * is full.
+ */
+#define USERDATA_FLAG_BUFF_FULL 1
+
+#define W5_REG_BASE 0x00000000
+#define W5_CMD_REG_BASE 0x00000100
+#define W5_CMD_REG_END 0x00000200
+
+/*
+ * common
+ */
+/* power on configuration
+ * PO_DEBUG_MODE [0] 1 - power on with debug mode
+ * USE_PO_CONF [3] 1 - use power-on-configuration
+ */
+#define W5_PO_CONF (W5_REG_BASE + 0x0000)
+#define W5_VCPU_CUR_PC (W5_REG_BASE + 0x0004)
+#define W5_VCPU_CUR_LR (W5_REG_BASE + 0x0008)
+#define W5_VPU_PDBG_STEP_MASK_V (W5_REG_BASE + 0x000C)
+#define W5_VPU_PDBG_CTRL (W5_REG_BASE + 0x0010) // v_cpu debugger ctrl register
+#define W5_VPU_PDBG_IDX_REG (W5_REG_BASE + 0x0014) // v_cpu debugger index register
+#define W5_VPU_PDBG_WDATA_REG (W5_REG_BASE + 0x0018) // v_cpu debugger write data register
+#define W5_VPU_PDBG_RDATA_REG (W5_REG_BASE + 0x001C) // v_cpu debugger read data register
+
+#define W5_VPU_FIO_CTRL_ADDR (W5_REG_BASE + 0x0020)
+#define W5_VPU_FIO_DATA (W5_REG_BASE + 0x0024)
+#define W5_VPU_VINT_REASON_USR (W5_REG_BASE + 0x0030)
+#define W5_VPU_VINT_REASON_CLR (W5_REG_BASE + 0x0034)
+#define W5_VPU_HOST_INT_REQ (W5_REG_BASE + 0x0038)
+#define W5_VPU_VINT_CLEAR (W5_REG_BASE + 0x003C)
+#define W5_VPU_HINT_CLEAR (W5_REG_BASE + 0x0040)
+#define W5_VPU_VPU_INT_STS (W5_REG_BASE + 0x0044)
+#define W5_VPU_VINT_ENABLE (W5_REG_BASE + 0x0048)
+#define W5_VPU_VINT_REASON (W5_REG_BASE + 0x004C)
+#define W5_VPU_RESET_REQ (W5_REG_BASE + 0x0050)
+#define W5_RST_BLOCK_CCLK(_core) BIT((_core))
+#define W5_RST_BLOCK_CCLK_ALL (0xff)
+#define W5_RST_BLOCK_BCLK(_core) (0x100 << (_core))
+#define W5_RST_BLOCK_BCLK_ALL (0xff00)
+#define W5_RST_BLOCK_ACLK(_core) (0x10000 << (_core))
+#define W5_RST_BLOCK_ACLK_ALL (0xff0000)
+#define W5_RST_BLOCK_VCPU_ALL (0x3f000000)
+#define W5_RST_BLOCK_ALL (0x3fffffff)
+#define W5_VPU_RESET_STATUS (W5_REG_BASE + 0x0054)
+
+#define W5_VCPU_RESTART (W5_REG_BASE + 0x0058)
+#define W5_VPU_CLK_MASK (W5_REG_BASE + 0x005C)
+
+/* REMAP_CTRL
+ * PAGE SIZE: [8:0] 0x001 - 4K
+ * 0x002 - 8K
+ * 0x004 - 16K
+ * ...
+ * 0x100 - 1M
+ * REGION ATTR1 [10] 0 - normal
+ * 1 - make bus error for the region
+ * REGION ATTR2 [11] 0 - normal
+ * 1 - bypass region
+ * REMAP INDEX [15:12] - 0 ~ 3
+ * ENDIAN [19:16] - see endian_mode in vdi.h
+ * AXI-ID [23:20] - upper AXI-ID
+ * BUS_ERROR [29] 0 - bypass
+ * 1 - make BUS_ERROR for unmapped region
+ * BYPASS_ALL [30] 1 - bypass all
+ * ENABLE [31] 1 - update control register[30:16]
+ */
+#define W5_VPU_REMAP_CTRL (W5_REG_BASE + 0x0060)
+#define W5_VPU_REMAP_VADDR (W5_REG_BASE + 0x0064)
+#define W5_VPU_REMAP_PADDR (W5_REG_BASE + 0x0068)
+#define W5_VPU_REMAP_CORE_START (W5_REG_BASE + 0x006C)
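+/*
+ * Illustration only (not a value used by this patch): remapping a 4K page
+ * at index 0 with default attributes would combine the fields above as
+ *   val = BIT(31) | (0 << 12) | 0x001;  // ENABLE | REMAP INDEX 0 | 4K page
+ * before writing val to W5_VPU_REMAP_CTRL.
+ */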
+#define W5_VPU_BUSY_STATUS (W5_REG_BASE + 0x0070)
+#define W5_VPU_HALT_STATUS (W5_REG_BASE + 0x0074)
+#define W5_VPU_VCPU_STATUS (W5_REG_BASE + 0x0078)
+#define W5_VPU_RET_PRODUCT_VERSION (W5_REG_BASE + 0x0094)
+/*
+ * assign vpu_config0 = {conf_map_converter_reg, // [31]
+ * conf_map_converter_sig, // [30]
+ * 8'd0, // [29:22]
+ * conf_std_switch_en, // [21]
+ * conf_bg_detect, // [20]
+ * conf_3dnr_en, // [19]
+ * conf_one_axi_en, // [18]
+ * conf_sec_axi_en, // [17]
+ * conf_bus_info, // [16]
+ * conf_afbc_en, // [15]
+ * conf_afbc_version_id, // [14:12]
+ * conf_fbc_en, // [11]
+ * conf_fbc_version_id, // [10:08]
+ * conf_scaler_en, // [07]
+ * conf_scaler_version_id, // [06:04]
+ * conf_bwb_en, // [03]
+ * 3'd0}; // [02:00]
+ */
+#define W5_VPU_RET_VPU_CONFIG0 (W5_REG_BASE + 0x0098)
+/*
+ * assign vpu_config1 = {4'd0, // [31:28]
+ * conf_perf_timer_en, // [27]
+ * conf_multi_core_en, // [26]
+ * conf_gcu_en, // [25]
+ * conf_cu_report, // [24]
+ * 4'd0, // [23:20]
+ * conf_vcore_id_3, // [19]
+ * conf_vcore_id_2, // [18]
+ * conf_vcore_id_1, // [17]
+ * conf_vcore_id_0, // [16]
+ * conf_bwb_opt, // [15]
+ * 7'd0, // [14:08]
+ * conf_cod_std_en_reserved_7, // [7]
+ * conf_cod_std_en_reserved_6, // [6]
+ * conf_cod_std_en_reserved_5, // [5]
+ * conf_cod_std_en_reserved_4, // [4]
+ * conf_cod_std_en_reserved_3, // [3]
+ * conf_cod_std_en_reserved_2, // [2]
+ * conf_cod_std_en_vp9, // [1]
+ * conf_cod_std_en_hevc}; // [0]
+ * }
+ */
+#define W5_VPU_RET_VPU_CONFIG1 (W5_REG_BASE + 0x009C)
+
+#define W5_VPU_DBG_REG0 (W5_REG_BASE + 0x00f0)
+#define W5_VPU_DBG_REG1 (W5_REG_BASE + 0x00f4)
+#define W5_VPU_DBG_REG2 (W5_REG_BASE + 0x00f8)
+#define W5_VPU_DBG_REG3 (W5_REG_BASE + 0x00fc)
+
+/************************************************************************/
+/* PRODUCT INFORMATION */
+/************************************************************************/
+#define W5_PRODUCT_NAME (W5_REG_BASE + 0x1040)
+#define W5_PRODUCT_NUMBER (W5_REG_BASE + 0x1044)
+
+/************************************************************************/
+/* DECODER/ENCODER COMMON */
+/************************************************************************/
+#define W5_COMMAND (W5_REG_BASE + 0x0100)
+#define W5_COMMAND_OPTION (W5_REG_BASE + 0x0104)
+#define W5_QUERY_OPTION (W5_REG_BASE + 0x0104)
+#define W5_RET_SUCCESS (W5_REG_BASE + 0x0108)
+#define W5_RET_FAIL_REASON (W5_REG_BASE + 0x010C)
+#define W5_RET_QUEUE_FAIL_REASON (W5_REG_BASE + 0x0110)
+#define W5_CMD_INSTANCE_INFO (W5_REG_BASE + 0x0110)
+
+#define W5_RET_QUEUE_STATUS (W5_REG_BASE + 0x01E0)
+#define W5_RET_BS_EMPTY_INST (W5_REG_BASE + 0x01E4)
+#define W5_RET_QUEUE_CMD_DONE_INST (W5_REG_BASE + 0x01E8)
+#define W5_RET_STAGE0_INSTANCE_INFO (W5_REG_BASE + 0x01EC)
+#define W5_RET_STAGE1_INSTANCE_INFO (W5_REG_BASE + 0x01F0)
+#define W5_RET_STAGE2_INSTANCE_INFO (W5_REG_BASE + 0x01F4)
+
+#define W5_RET_SEQ_DONE_INSTANCE_INFO (W5_REG_BASE + 0x01FC)
+
+#define W5_BS_OPTION (W5_REG_BASE + 0x0120)
+
+// return info when QUERY (GET_RESULT) for en/decoder
+#define W5_RET_VLC_BUF_SIZE (W5_REG_BASE + 0x01B0)
+// return info when QUERY (GET_RESULT) for en/decoder
+#define W5_RET_PARAM_BUF_SIZE (W5_REG_BASE + 0x01B4)
+
+// set when SET_FB for en/decoder
+#define W5_CMD_SET_FB_ADDR_TASK_BUF (W5_REG_BASE + 0x01D4)
+#define W5_CMD_SET_FB_TASK_BUF_SIZE (W5_REG_BASE + 0x01D8)
+/************************************************************************/
+/* INIT_VPU - COMMON */
+/************************************************************************/
+/* note: W5_ADDR_CODE_BASE should be aligned to 4KB */
+#define W5_ADDR_CODE_BASE (W5_REG_BASE + 0x0110)
+#define W5_CODE_SIZE (W5_REG_BASE + 0x0114)
+#define W5_CODE_PARAM (W5_REG_BASE + 0x0118)
+#define W5_ADDR_TEMP_BASE (W5_REG_BASE + 0x011C)
+#define W5_TEMP_SIZE (W5_REG_BASE + 0x0120)
+#define W5_ADDR_SEC_AXI (W5_REG_BASE + 0x0124)
+#define W5_SEC_AXI_SIZE (W5_REG_BASE + 0x0128)
+#define W5_HW_OPTION (W5_REG_BASE + 0x012C)
+#define W5_SEC_AXI_PARAM (W5_REG_BASE + 0x0180)
+
+/************************************************************************/
+/* CREATE_INSTANCE - COMMON */
+/************************************************************************/
+#define W5_ADDR_WORK_BASE (W5_REG_BASE + 0x0114)
+#define W5_WORK_SIZE (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_BS_START_ADDR (W5_REG_BASE + 0x011C)
+#define W5_CMD_DEC_BS_SIZE (W5_REG_BASE + 0x0120)
+#define W5_CMD_BS_PARAM (W5_REG_BASE + 0x0124)
+#define W5_CMD_EXT_ADDR (W5_REG_BASE + 0x0138)
+#define W5_CMD_NUM_CQ_DEPTH_M1 (W5_REG_BASE + 0x013C)
+#define W5_CMD_ERR_CONCEAL (W5_REG_BASE + 0x0140)
+
+/************************************************************************/
+/* DECODER - INIT_SEQ */
+/************************************************************************/
+#define W5_BS_RD_PTR (W5_REG_BASE + 0x0118)
+#define W5_BS_WR_PTR (W5_REG_BASE + 0x011C)
+/************************************************************************/
+/* SET_FRAME_BUF */
+/************************************************************************/
+/* SET_FB_OPTION 0x00 REGISTER FRAMEBUFFERS
+ * 0x01 UPDATE FRAMEBUFFER, just one framebuffer(linear, fbc and mvcol)
+ */
+#define W5_SFB_OPTION (W5_REG_BASE + 0x0104)
+#define W5_COMMON_PIC_INFO (W5_REG_BASE + 0x0118)
+#define W5_PIC_SIZE (W5_REG_BASE + 0x011C)
+#define W5_SET_FB_NUM (W5_REG_BASE + 0x0120)
+#define W5_EXTRA_PIC_INFO (W5_REG_BASE + 0x0124)
+
+#define W5_ADDR_LUMA_BASE0 (W5_REG_BASE + 0x0134)
+#define W5_ADDR_CB_BASE0 (W5_REG_BASE + 0x0138)
+#define W5_ADDR_CR_BASE0 (W5_REG_BASE + 0x013C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET0 (W5_REG_BASE + 0x013C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET0 (W5_REG_BASE + 0x0140)
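+/*
+ * Note that W5_ADDR_CR_BASEn and W5_ADDR_FBC_Y_OFFSETn share the same
+ * register offsets: linear chroma base addresses and FBC offset tables
+ * are presumably never programmed for the same framebuffer index.
+ */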
+#define W5_ADDR_LUMA_BASE1 (W5_REG_BASE + 0x0144)
+#define W5_ADDR_CB_ADDR1 (W5_REG_BASE + 0x0148)
+#define W5_ADDR_CR_ADDR1 (W5_REG_BASE + 0x014C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET1 (W5_REG_BASE + 0x014C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET1 (W5_REG_BASE + 0x0150)
+#define W5_ADDR_LUMA_BASE2 (W5_REG_BASE + 0x0154)
+#define W5_ADDR_CB_ADDR2 (W5_REG_BASE + 0x0158)
+#define W5_ADDR_CR_ADDR2 (W5_REG_BASE + 0x015C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET2 (W5_REG_BASE + 0x015C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET2 (W5_REG_BASE + 0x0160)
+#define W5_ADDR_LUMA_BASE3 (W5_REG_BASE + 0x0164)
+#define W5_ADDR_CB_ADDR3 (W5_REG_BASE + 0x0168)
+#define W5_ADDR_CR_ADDR3 (W5_REG_BASE + 0x016C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET3 (W5_REG_BASE + 0x016C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET3 (W5_REG_BASE + 0x0170)
+#define W5_ADDR_LUMA_BASE4 (W5_REG_BASE + 0x0174)
+#define W5_ADDR_CB_ADDR4 (W5_REG_BASE + 0x0178)
+#define W5_ADDR_CR_ADDR4 (W5_REG_BASE + 0x017C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET4 (W5_REG_BASE + 0x017C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET4 (W5_REG_BASE + 0x0180)
+#define W5_ADDR_LUMA_BASE5 (W5_REG_BASE + 0x0184)
+#define W5_ADDR_CB_ADDR5 (W5_REG_BASE + 0x0188)
+#define W5_ADDR_CR_ADDR5 (W5_REG_BASE + 0x018C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET5 (W5_REG_BASE + 0x018C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET5 (W5_REG_BASE + 0x0190)
+#define W5_ADDR_LUMA_BASE6 (W5_REG_BASE + 0x0194)
+#define W5_ADDR_CB_ADDR6 (W5_REG_BASE + 0x0198)
+#define W5_ADDR_CR_ADDR6 (W5_REG_BASE + 0x019C)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET6 (W5_REG_BASE + 0x019C)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET6 (W5_REG_BASE + 0x01A0)
+#define W5_ADDR_LUMA_BASE7 (W5_REG_BASE + 0x01A4)
+#define W5_ADDR_CB_ADDR7 (W5_REG_BASE + 0x01A8)
+#define W5_ADDR_CR_ADDR7 (W5_REG_BASE + 0x01AC)
+// compression offset table for luma
+#define W5_ADDR_FBC_Y_OFFSET7 (W5_REG_BASE + 0x01AC)
+// compression offset table for chroma
+#define W5_ADDR_FBC_C_OFFSET7 (W5_REG_BASE + 0x01B0)
+#define W5_ADDR_MV_COL0 (W5_REG_BASE + 0x01B4)
+#define W5_ADDR_MV_COL1 (W5_REG_BASE + 0x01B8)
+#define W5_ADDR_MV_COL2 (W5_REG_BASE + 0x01BC)
+#define W5_ADDR_MV_COL3 (W5_REG_BASE + 0x01C0)
+#define W5_ADDR_MV_COL4 (W5_REG_BASE + 0x01C4)
+#define W5_ADDR_MV_COL5 (W5_REG_BASE + 0x01C8)
+#define W5_ADDR_MV_COL6 (W5_REG_BASE + 0x01CC)
+#define W5_ADDR_MV_COL7 (W5_REG_BASE + 0x01D0)
+
+/* UPDATE_FB */
+/* CMD_SET_FB_STRIDE [15:0] - FBC framebuffer stride
+ * [31:16] - linear framebuffer stride
+ */
+#define W5_CMD_SET_FB_STRIDE (W5_REG_BASE + 0x0118)
+#define W5_CMD_SET_FB_INDEX (W5_REG_BASE + 0x0120)
+#define W5_ADDR_LUMA_BASE (W5_REG_BASE + 0x0134)
+#define W5_ADDR_CB_BASE (W5_REG_BASE + 0x0138)
+#define W5_ADDR_CR_BASE (W5_REG_BASE + 0x013C)
+#define W5_ADDR_MV_COL (W5_REG_BASE + 0x0140)
+#define W5_ADDR_FBC_Y_BASE (W5_REG_BASE + 0x0144)
+#define W5_ADDR_FBC_C_BASE (W5_REG_BASE + 0x0148)
+#define W5_ADDR_FBC_Y_OFFSET (W5_REG_BASE + 0x014C)
+#define W5_ADDR_FBC_C_OFFSET (W5_REG_BASE + 0x0150)
+
+/************************************************************************/
+/* DECODER - DEC_PIC */
+/************************************************************************/
+#define W5_CMD_DEC_VCORE_INFO (W5_REG_BASE + 0x0194)
+/* sequence change enable mask register
+ * CMD_SEQ_CHANGE_ENABLE_FLAG [5] profile_idc
+ * [16] pic_width/height_in_luma_sample
+ * [19] sps_max_dec_pic_buffering, max_num_reorder, max_latency_increase
+ */
+#define W5_CMD_SEQ_CHANGE_ENABLE_FLAG (W5_REG_BASE + 0x0128)
+#define W5_CMD_DEC_USER_MASK (W5_REG_BASE + 0x012C)
+#define W5_CMD_DEC_TEMPORAL_ID_PLUS1 (W5_REG_BASE + 0x0130)
+#define W5_CMD_DEC_FORCE_FB_LATENCY_PLUS1 (W5_REG_BASE + 0x0134)
+#define W5_USE_SEC_AXI (W5_REG_BASE + 0x0150)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_VPU_INFO */
+/************************************************************************/
+#define W5_RET_FW_VERSION (W5_REG_BASE + 0x0118)
+#define W5_RET_PRODUCT_NAME (W5_REG_BASE + 0x011C)
+#define W5_RET_PRODUCT_VERSION (W5_REG_BASE + 0x0120)
+#define W5_RET_STD_DEF0 (W5_REG_BASE + 0x0124)
+#define W5_RET_STD_DEF1 (W5_REG_BASE + 0x0128)
+#define W5_RET_CONF_FEATURE (W5_REG_BASE + 0x012C)
+#define W5_RET_CONF_DATE (W5_REG_BASE + 0x0130)
+#define W5_RET_CONF_REVISION (W5_REG_BASE + 0x0134)
+#define W5_RET_CONF_TYPE (W5_REG_BASE + 0x0138)
+#define W5_RET_PRODUCT_ID (W5_REG_BASE + 0x013C)
+#define W5_RET_CUSTOMER_ID (W5_REG_BASE + 0x0140)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_RESULT */
+/************************************************************************/
+#define W5_CMD_DEC_ADDR_REPORT_BASE (W5_REG_BASE + 0x0114)
+#define W5_CMD_DEC_REPORT_SIZE (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_REPORT_PARAM (W5_REG_BASE + 0x011C)
+
+#define W5_RET_DEC_BS_RD_PTR (W5_REG_BASE + 0x011C)
+#define W5_RET_DEC_SEQ_PARAM (W5_REG_BASE + 0x0120)
+#define W5_RET_DEC_COLOR_SAMPLE_INFO (W5_REG_BASE + 0x0124)
+#define W5_RET_DEC_ASPECT_RATIO (W5_REG_BASE + 0x0128)
+#define W5_RET_DEC_BIT_RATE (W5_REG_BASE + 0x012C)
+#define W5_RET_DEC_FRAME_RATE_NR (W5_REG_BASE + 0x0130)
+#define W5_RET_DEC_FRAME_RATE_DR (W5_REG_BASE + 0x0134)
+#define W5_RET_DEC_NUM_REQUIRED_FB (W5_REG_BASE + 0x0138)
+#define W5_RET_DEC_NUM_REORDER_DELAY (W5_REG_BASE + 0x013C)
+#define W5_RET_DEC_SUB_LAYER_INFO (W5_REG_BASE + 0x0140)
+#define W5_RET_DEC_NOTIFICATION (W5_REG_BASE + 0x0144)
+/*
+ * USER_DATA_FLAGS for HEVC/H264 only.
+ * Bits:
+ * [1] - User data buffer full boolean
+ * [2] - VUI parameter flag
+ * [4] - Pic_timing SEI flag
+ * [5] - 1st user_data_registered_itu_t_t35 prefix SEI flag
+ * [6] - user_data_unregistered prefix SEI flag
+ * [7] - 1st user_data_registered_itu_t_t35 suffix SEI flag
+ * [8] - user_data_unregistered suffix SEI flag
+ * [10]- mastering_display_color_volume prefix SEI flag
+ * [11]- chroma_resampling_display_color_volume prefix SEI flag
+ * [12]- knee_function_info SEI flag
+ * [13]- tone_mapping_info prefix SEI flag
+ * [14]- film_grain_characteristics_info prefix SEI flag
+ * [15]- content_light_level_info prefix SEI flag
+ * [16]- color_remapping_info prefix SEI flag
+ * [28]- 2nd user_data_registered_itu_t_t35 prefix SEI flag
+ * [29]- 3rd user_data_registered_itu_t_t35 prefix SEI flag
+ * [30]- 2nd user_data_registered_itu_t_t35 suffix SEI flag
+ * [31]- 3rd user_data_registered_itu_t_t35 suffix SEI flag
+ */
+#define W5_RET_DEC_USERDATA_IDC (W5_REG_BASE + 0x0148)
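+/*
+ * For illustration, a host could test the flags above with e.g.
+ *   if (vpu_read_reg(dev, W5_RET_DEC_USERDATA_IDC) & BIT(4))
+ *           ... // a pic_timing SEI was parsed for this frame
+ */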
+#define W5_RET_DEC_PIC_SIZE (W5_REG_BASE + 0x014C)
+#define W5_RET_DEC_CROP_TOP_BOTTOM (W5_REG_BASE + 0x0150)
+#define W5_RET_DEC_CROP_LEFT_RIGHT (W5_REG_BASE + 0x0154)
+/*
+ * #define W5_RET_DEC_AU_START_POS (W5_REG_BASE + 0x0158)
+ * => Access unit (AU) Bitstream start position
+ * #define W5_RET_DEC_AU_END_POS (W5_REG_BASE + 0x015C)
+ * => Access unit (AU) Bitstream end position
+ */
+
+/*
+ * Decoded picture type:
+ * reg_val & 0x7 => picture type
+ * (reg_val >> 4) & 0x3f => VCL NAL unit type
+ * (reg_val >> 31) & 0x1 => output_flag
+ * 16 << ((reg_val >> 10) & 0x3) => ctu_size
+ */
+#define W5_RET_DEC_PIC_TYPE (W5_REG_BASE + 0x0160)
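+/*
+ * Example (illustration only): reg_val = 0x80000401 decodes to picture
+ * type 1, ctu_size = 16 << 1 = 32 and output_flag = 1.
+ */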
+#define W5_RET_DEC_PIC_POC (W5_REG_BASE + 0x0164)
+/*
+ * #define W5_RET_DEC_RECOVERY_POINT (W5_REG_BASE + 0x0168)
+ * => HEVC recovery point
+ * reg_val & 0xff => number of signed recovery picture order counts
+ * (reg_val >> 16) & 0x1 => exact match flag
+ * (reg_val >> 17) & 0x1 => broken link flag
+ * (reg_val >> 18) & 0x1 => exist flag
+ */
+#define W5_RET_DEC_DEBUG_INDEX (W5_REG_BASE + 0x016C)
+#define W5_RET_DEC_DECODED_INDEX (W5_REG_BASE + 0x0170)
+#define W5_RET_DEC_DISPLAY_INDEX (W5_REG_BASE + 0x0174)
+/*
+ * #define W5_RET_DEC_REALLOC_INDEX (W5_REG_BASE + 0x0178)
+ * => display picture index in decoded picture buffer
+ * reg_val & 0xf => display picture index for FBC buffer (by reordering)
+ */
+#define W5_RET_DEC_DISP_IDC (W5_REG_BASE + 0x017C)
+/*
+ * #define W5_RET_DEC_ERR_CTB_NUM (W5_REG_BASE + 0x0180)
+ * => Number of error CTUs
+ * reg_val >> 16 => erroneous CTUs in bitstream
+ * reg_val & 0xffff => total CTUs in bitstream
+ *
+ * #define W5_RET_DEC_PIC_PARAM (W5_REG_BASE + 0x01A0)
+ * => Bitstream sequence/picture parameter information (AV1 only)
+ * reg_val & 0x1 => intrabc tool enable
+ * (reg_val >> 1) & 0x1 => screen content tools enable
+ */
+#define W5_RET_DEC_HOST_CMD_TICK (W5_REG_BASE + 0x01B8)
+/*
+ * #define W5_RET_DEC_SEEK_START_TICK (W5_REG_BASE + 0x01BC)
+ * #define W5_RET_DEC_SEEK_END_TICK (W5_REG_BASE + 0x01C0)
+ * => Start and end ticks for seeking slices of the picture
+ * #define W5_RET_DEC_PARSING_START_TICK (W5_REG_BASE + 0x01C4)
+ * #define W5_RET_DEC_PARSING_END_TICK (W5_REG_BASE + 0x01C8)
+ * => Start and end ticks for parsing slices of the picture
+ * #define W5_RET_DEC_DECODING_START_TICK (W5_REG_BASE + 0x01CC)
+ * => Start tick for decoding slices of the picture
+ */
+#define W5_RET_DEC_DECODING_ENC_TICK (W5_REG_BASE + 0x01D0)
+#define W5_RET_DEC_WARN_INFO (W5_REG_BASE + 0x01D4)
+#define W5_RET_DEC_ERR_INFO (W5_REG_BASE + 0x01D8)
+#define W5_RET_DEC_DECODING_SUCCESS (W5_REG_BASE + 0x01DC)
+
+/************************************************************************/
+/* DECODER - FLUSH_INSTANCE */
+/************************************************************************/
+#define W5_CMD_FLUSH_INST_OPT (W5_REG_BASE + 0x104)
+
+/************************************************************************/
+/* DECODER - QUERY : UPDATE_DISP_FLAG */
+/************************************************************************/
+#define W5_CMD_DEC_SET_DISP_IDC (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_CLR_DISP_IDC (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* DECODER - QUERY : SET_BS_RD_PTR */
+/************************************************************************/
+#define W5_RET_QUERY_DEC_SET_BS_RD_PTR (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_BS_RD_PTR */
+/************************************************************************/
+#define W5_RET_QUERY_DEC_BS_RD_PTR (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* QUERY : GET_DEBUG_INFO */
+/************************************************************************/
+#define W5_RET_QUERY_DEBUG_PRI_REASON (W5_REG_BASE + 0x114)
+
+/************************************************************************/
+/* GDI register for debugging */
+/************************************************************************/
+#define W5_GDI_BASE 0x8800
+#define W5_GDI_BUS_CTRL (W5_GDI_BASE + 0x0F0)
+#define W5_GDI_BUS_STATUS (W5_GDI_BASE + 0x0F4)
+
+#define W5_BACKBONE_BASE_VCPU 0xFE00
+#define W5_BACKBONE_BUS_CTRL_VCPU (W5_BACKBONE_BASE_VCPU + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCPU (W5_BACKBONE_BASE_VCPU + 0x014)
+#define W5_BACKBONE_PROG_AXI_ID (W5_BACKBONE_BASE_VCPU + 0x00C)
+
+#define W5_BACKBONE_PROC_EXT_ADDR (W5_BACKBONE_BASE_VCPU + 0x0C0)
+#define W5_BACKBONE_AXI_PARAM (W5_BACKBONE_BASE_VCPU + 0x0E0)
+
+#define W5_BACKBONE_BASE_VCORE0 0x8E00
+#define W5_BACKBONE_BUS_CTRL_VCORE0 (W5_BACKBONE_BASE_VCORE0 + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCORE0 (W5_BACKBONE_BASE_VCORE0 + 0x014)
+
+#define W5_BACKBONE_BASE_VCORE1 0x9E00 // for dual-core product
+#define W5_BACKBONE_BUS_CTRL_VCORE1 (W5_BACKBONE_BASE_VCORE1 + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCORE1 (W5_BACKBONE_BASE_VCORE1 + 0x014)
+
+#define W5_COMBINED_BACKBONE_BASE 0xFE00
+#define W5_COMBINED_BACKBONE_BUS_CTRL (W5_COMBINED_BACKBONE_BASE + 0x010)
+#define W5_COMBINED_BACKBONE_BUS_STATUS (W5_COMBINED_BACKBONE_BASE + 0x014)
+
+/************************************************************************/
+/* */
+/* for ENCODER */
+/* */
+/************************************************************************/
+#define W5_RET_STAGE3_INSTANCE_INFO (W5_REG_BASE + 0x1F8)
+/************************************************************************/
+/* ENCODER - CREATE_INSTANCE */
+/************************************************************************/
+// 0x114 ~ 0x124 : defined above (CREATE_INSTANCE COMMON)
+#define W5_CMD_ENC_VCORE_INFO (W5_REG_BASE + 0x0194)
+#define W5_CMD_ENC_SRC_OPTIONS (W5_REG_BASE + 0x0128)
+
+/************************************************************************/
+/* ENCODER - SET_FB */
+/************************************************************************/
+#define W5_FBC_STRIDE (W5_REG_BASE + 0x128)
+#define W5_ADDR_SUB_SAMPLED_FB_BASE (W5_REG_BASE + 0x12C)
+#define W5_SUB_SAMPLED_ONE_FB_SIZE (W5_REG_BASE + 0x130)
+
+/************************************************************************/
+/* ENCODER - ENC_SET_PARAM (COMMON & CHANGE_PARAM) */
+/************************************************************************/
+#define W5_CMD_ENC_SEQ_SET_PARAM_OPTION (W5_REG_BASE + 0x104)
+#define W5_CMD_ENC_SEQ_SET_PARAM_ENABLE (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_SEQ_SRC_SIZE (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_SEQ_CUSTOM_MAP_ENDIAN (W5_REG_BASE + 0x120)
+#define W5_CMD_ENC_SEQ_SPS_PARAM (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_SEQ_PPS_PARAM (W5_REG_BASE + 0x128)
+#define W5_CMD_ENC_SEQ_GOP_PARAM (W5_REG_BASE + 0x12C)
+#define W5_CMD_ENC_SEQ_INTRA_PARAM (W5_REG_BASE + 0x130)
+#define W5_CMD_ENC_SEQ_CONF_WIN_TOP_BOT (W5_REG_BASE + 0x134)
+#define W5_CMD_ENC_SEQ_CONF_WIN_LEFT_RIGHT (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_SEQ_RDO_PARAM (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_SEQ_INDEPENDENT_SLICE (W5_REG_BASE + 0x140)
+#define W5_CMD_ENC_SEQ_DEPENDENT_SLICE (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_SEQ_INTRA_REFRESH (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_SEQ_INPUT_SRC_PARAM (W5_REG_BASE + 0x14C)
+
+#define W5_CMD_ENC_SEQ_RC_FRAME_RATE (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_SEQ_RC_TARGET_RATE (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_SEQ_RC_PARAM (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_SEQ_RC_MIN_MAX_QP (W5_REG_BASE + 0x15C)
+#define W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_0_3 (W5_REG_BASE + 0x160)
+#define W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_4_7 (W5_REG_BASE + 0x164)
+#define W5_CMD_ENC_SEQ_RC_INTER_MIN_MAX_QP (W5_REG_BASE + 0x168)
+#define W5_CMD_ENC_SEQ_RC_WEIGHT_PARAM (W5_REG_BASE + 0x16C)
+
+#define W5_CMD_ENC_SEQ_ROT_PARAM (W5_REG_BASE + 0x170)
+#define W5_CMD_ENC_SEQ_NUM_UNITS_IN_TICK (W5_REG_BASE + 0x174)
+#define W5_CMD_ENC_SEQ_TIME_SCALE (W5_REG_BASE + 0x178)
+#define W5_CMD_ENC_SEQ_NUM_TICKS_POC_DIFF_ONE (W5_REG_BASE + 0x17C)
+
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU04 (W5_REG_BASE + 0x184)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU08 (W5_REG_BASE + 0x188)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU16 (W5_REG_BASE + 0x18C)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU32 (W5_REG_BASE + 0x190)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU08 (W5_REG_BASE + 0x194)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU16 (W5_REG_BASE + 0x198)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU32 (W5_REG_BASE + 0x19C)
+#define W5_CMD_ENC_SEQ_NR_PARAM (W5_REG_BASE + 0x1A0)
+#define W5_CMD_ENC_SEQ_NR_WEIGHT (W5_REG_BASE + 0x1A4)
+#define W5_CMD_ENC_SEQ_BG_PARAM (W5_REG_BASE + 0x1A8)
+#define W5_CMD_ENC_SEQ_CUSTOM_LAMBDA_ADDR (W5_REG_BASE + 0x1AC)
+#define W5_CMD_ENC_SEQ_USER_SCALING_LIST_ADDR (W5_REG_BASE + 0x1B0)
+#define W5_CMD_ENC_SEQ_VUI_HRD_PARAM (W5_REG_BASE + 0x180)
+#define W5_CMD_ENC_SEQ_VUI_RBSP_ADDR (W5_REG_BASE + 0x1B8)
+#define W5_CMD_ENC_SEQ_HRD_RBSP_ADDR (W5_REG_BASE + 0x1BC)
+
+/************************************************************************/
+/* ENCODER - ENC_SET_PARAM (CUSTOM_GOP) */
+/************************************************************************/
+#define W5_CMD_ENC_CUSTOM_GOP_PARAM (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_0 (W5_REG_BASE + 0x120)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_1 (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_2 (W5_REG_BASE + 0x128)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_3 (W5_REG_BASE + 0x12C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_4 (W5_REG_BASE + 0x130)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_5 (W5_REG_BASE + 0x134)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_6 (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_7 (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_8 (W5_REG_BASE + 0x140)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_9 (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_10 (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_11 (W5_REG_BASE + 0x14C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_12 (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_13 (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_14 (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_15 (W5_REG_BASE + 0x15C)
+
+/************************************************************************/
+/* ENCODER - ENC_PIC */
+/************************************************************************/
+#define W5_CMD_ENC_BS_START_ADDR (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_BS_SIZE (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_PIC_USE_SEC_AXI (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_PIC_REPORT_PARAM (W5_REG_BASE + 0x128)
+
+#define W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_PARAM (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_ADDR (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_PIC_SRC_PIC_IDX (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_PIC_SRC_ADDR_Y (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_PIC_SRC_ADDR_U (W5_REG_BASE + 0x14C)
+#define W5_CMD_ENC_PIC_SRC_ADDR_V (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_PIC_SRC_STRIDE (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_PIC_SRC_FORMAT (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_PIC_SRC_AXI_SEL (W5_REG_BASE + 0x160)
+#define W5_CMD_ENC_PIC_CODE_OPTION (W5_REG_BASE + 0x164)
+#define W5_CMD_ENC_PIC_PIC_PARAM (W5_REG_BASE + 0x168)
+#define W5_CMD_ENC_PIC_LONGTERM_PIC (W5_REG_BASE + 0x16C)
+#define W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_Y (W5_REG_BASE + 0x170)
+#define W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_C (W5_REG_BASE + 0x174)
+#define W5_CMD_ENC_PIC_WP_PIXEL_MEAN_Y (W5_REG_BASE + 0x178)
+#define W5_CMD_ENC_PIC_WP_PIXEL_MEAN_C (W5_REG_BASE + 0x17C)
+#define W5_CMD_ENC_PIC_CF50_Y_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x190)
+#define W5_CMD_ENC_PIC_CF50_CB_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x194)
+#define W5_CMD_ENC_PIC_CF50_CR_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x198)
+#define W5_CMD_ENC_PIC_PREFIX_SEI_NAL_ADDR (W5_REG_BASE + 0x180)
+#define W5_CMD_ENC_PIC_PREFIX_SEI_INFO (W5_REG_BASE + 0x184)
+#define W5_CMD_ENC_PIC_SUFFIX_SEI_NAL_ADDR (W5_REG_BASE + 0x188)
+#define W5_CMD_ENC_PIC_SUFFIX_SEI_INFO (W5_REG_BASE + 0x18c)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_RESULT) */
+/************************************************************************/
+#define W5_RET_ENC_NUM_REQUIRED_FB (W5_REG_BASE + 0x11C)
+#define W5_RET_ENC_MIN_SRC_BUF_NUM (W5_REG_BASE + 0x120)
+#define W5_RET_ENC_PIC_TYPE (W5_REG_BASE + 0x124)
+/*
+ * #define W5_RET_ENC_PIC_POC (W5_REG_BASE + 0x128)
+ * => picture order count value of current encoded picture
+ */
+#define W5_RET_ENC_PIC_IDX (W5_REG_BASE + 0x12C)
+/*
+ * #define W5_RET_ENC_PIC_SLICE_NUM (W5_REG_BASE + 0x130)
+ * reg_val & 0xffff = total independent slice segment number (16 bits)
+ * (reg_val >> 16) & 0xffff = total dependent slice segment number (16 bits)
+ *
+ * #define W5_RET_ENC_PIC_SKIP (W5_REG_BASE + 0x134)
+ * reg_val & 0xfe = picture skip flag (7 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_INTRA (W5_REG_BASE + 0x138)
+ * => number of intra blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_MERGE (W5_REG_BASE + 0x13C)
+ * => number of merge blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_SKIP (W5_REG_BASE + 0x144)
+ * => number of skip blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_AVG_CTU_QP (W5_REG_BASE + 0x148)
+ * => Average CTU QP value (32 bits)
+ */
+#define W5_RET_ENC_PIC_BYTE (W5_REG_BASE + 0x14C)
+/*
+ * #define W5_RET_ENC_GOP_PIC_IDX (W5_REG_BASE + 0x150)
+ * => picture index in group of pictures
+ */
+#define W5_RET_ENC_USED_SRC_IDX (W5_REG_BASE + 0x154)
+/*
+ * #define W5_RET_ENC_PIC_NUM (W5_REG_BASE + 0x158)
+ * => encoded picture number
+ */
+#define W5_RET_ENC_VCL_NUT (W5_REG_BASE + 0x15C)
+/*
+ * Only for H264:
+ * #define W5_RET_ENC_PIC_DIST_LOW (W5_REG_BASE + 0x164)
+ * => lower 32 bits of the sum of squared difference between source Y picture
+ * and reconstructed Y picture
+ * #define W5_RET_ENC_PIC_DIST_HIGH (W5_REG_BASE + 0x168)
+ * => upper 32 bits of the sum of squared difference between source Y picture
+ * and reconstructed Y picture
+ */
+#define W5_RET_ENC_PIC_MAX_LATENCY_PICS (W5_REG_BASE + 0x16C)
+
+#define W5_RET_ENC_HOST_CMD_TICK (W5_REG_BASE + 0x1B8)
+/*
+ * #define W5_RET_ENC_PREPARE_START_TICK (W5_REG_BASE + 0x1BC)
+ * #define W5_RET_ENC_PREPARE_END_TICK (W5_REG_BASE + 0x1C0)
+ * => Start and end ticks for preparing slices of the picture
+ * #define W5_RET_ENC_PROCESSING_START_TICK (W5_REG_BASE + 0x1C4)
+ * #define W5_RET_ENC_PROCESSING_END_TICK (W5_REG_BASE + 0x1C8)
+ * => Start and end ticks for processing slices of the picture
+ * #define W5_RET_ENC_ENCODING_START_TICK (W5_REG_BASE + 0x1CC)
+ * => Start tick for encoding slices of the picture
+ */
+#define W5_RET_ENC_ENCODING_END_TICK (W5_REG_BASE + 0x1D0)
+
+#define W5_RET_ENC_WARN_INFO (W5_REG_BASE + 0x1D4)
+#define W5_RET_ENC_ERR_INFO (W5_REG_BASE + 0x1D8)
+#define W5_RET_ENC_ENCODING_SUCCESS (W5_REG_BASE + 0x1DC)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_BS_WR_PTR) */
+/************************************************************************/
+#define W5_RET_ENC_RD_PTR (W5_REG_BASE + 0x114)
+#define W5_RET_ENC_WR_PTR (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_REASON_SEL (W5_REG_BASE + 0x11C)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_BW_REPORT) */
+/************************************************************************/
+#define RET_QUERY_BW_PRP_AXI_READ (W5_REG_BASE + 0x118)
+#define RET_QUERY_BW_PRP_AXI_WRITE (W5_REG_BASE + 0x11C)
+#define RET_QUERY_BW_FBD_Y_AXI_READ (W5_REG_BASE + 0x120)
+#define RET_QUERY_BW_FBC_Y_AXI_WRITE (W5_REG_BASE + 0x124)
+#define RET_QUERY_BW_FBD_C_AXI_READ (W5_REG_BASE + 0x128)
+#define RET_QUERY_BW_FBC_C_AXI_WRITE (W5_REG_BASE + 0x12C)
+#define RET_QUERY_BW_PRI_AXI_READ (W5_REG_BASE + 0x130)
+#define RET_QUERY_BW_PRI_AXI_WRITE (W5_REG_BASE + 0x134)
+#define RET_QUERY_BW_SEC_AXI_READ (W5_REG_BASE + 0x138)
+#define RET_QUERY_BW_SEC_AXI_WRITE (W5_REG_BASE + 0x13C)
+#define RET_QUERY_BW_PROC_AXI_READ (W5_REG_BASE + 0x140)
+#define RET_QUERY_BW_PROC_AXI_WRITE (W5_REG_BASE + 0x144)
+#define RET_QUERY_BW_BWB_AXI_WRITE (W5_REG_BASE + 0x148)
+#define W5_CMD_BW_OPTION (W5_REG_BASE + 0x14C)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_SRC_FLAG) */
+/************************************************************************/
+#define W5_RET_RELEASED_SRC_INSTANCE (W5_REG_BASE + 0x1EC)
+
+#define W5_ENC_PIC_SUB_FRAME_SYNC_IF (W5_REG_BASE + 0x0300)
+
+#endif /* __WAVE5_REGISTER_DEFINE_H__ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vdi.c b/drivers/media/platform/chips-media/wave5/wave5-vdi.c
new file mode 100644
index 000000000000..8c7c090b4fc2
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vdi.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - low level access functions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include <linux/bug.h>
+#include "wave5-vdi.h"
+#include "wave5-vpu.h"
+#include "wave5-regdefine.h"
+#include <linux/delay.h>
+
+#define VDI_SRAM_BASE_ADDR 0x00
+
+#define VDI_SYSTEM_ENDIAN VDI_LITTLE_ENDIAN
+#define VDI_128BIT_BUS_SYSTEM_ENDIAN VDI_128BIT_LITTLE_ENDIAN
+
+static int wave5_vdi_allocate_common_memory(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ if (!vpu_dev->common_mem.vaddr) {
+ int ret;
+
+ vpu_dev->common_mem.size = SIZE_COMMON;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vpu_dev->common_mem);
+ if (ret) {
+ dev_err(dev, "unable to allocate common buffer\n");
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "[VDI] common_mem: daddr=%pad size=%zu vaddr=0x%p\n",
+ &vpu_dev->common_mem.daddr, vpu_dev->common_mem.size, vpu_dev->common_mem.vaddr);
+
+ return 0;
+}
+
+int wave5_vdi_init(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = wave5_vdi_allocate_common_memory(dev);
+ if (ret < 0) {
+ dev_err(dev, "[VDI] failed to get vpu common buffer from driver\n");
+ return ret;
+ }
+
+ if (!PRODUCT_CODE_W_SERIES(vpu_dev->product_code)) {
+ WARN_ONCE(1, "unsupported product code: 0x%x\n", vpu_dev->product_code);
+ return 0;
+ }
+
+ // if the v_cpu firmware is not running, clear the command register area
+ if (wave5_vdi_readl(vpu_dev, W5_VCPU_CUR_PC) == 0) {
+ int i;
+
+ for (i = 0; i < 64; i++)
+ wave5_vdi_write_register(vpu_dev, (i * 4) + 0x100, 0x0);
+ }
+
+ dev_dbg(dev, "[VDI] driver initialized successfully\n");
+
+ return 0;
+}
+
+int wave5_vdi_release(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ vpu_dev->vdb_register = NULL;
+ wave5_vdi_free_dma_memory(vpu_dev, &vpu_dev->common_mem);
+
+ return 0;
+}
+
+void wave5_vdi_write_register(struct vpu_device *vpu_dev, u32 addr, u32 data)
+{
+ writel(data, vpu_dev->vdb_register + addr);
+}
+
+unsigned int wave5_vdi_readl(struct vpu_device *vpu_dev, u32 addr)
+{
+ return readl(vpu_dev->vdb_register + addr);
+}
+
+int wave5_vdi_clear_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ if (!vb || !vb->vaddr) {
+ dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(vb->vaddr, 0, vb->size);
+ return vb->size;
+}
+
+static void wave5_swap_endian(struct vpu_device *vpu_dev, u8 *data, size_t len,
+ unsigned int endian);
+
+int wave5_vdi_write_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb, size_t offset,
+ u8 *data, size_t len, unsigned int endian)
+{
+ if (!vb || !vb->vaddr) {
+ dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ if (offset > vb->size || len > vb->size || offset + len > vb->size) {
+ dev_err(vpu_dev->dev, "%s: buffer too small\n", __func__);
+ return -ENOSPC;
+ }
+
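+ /* note: wave5_swap_endian() converts the caller's buffer in place */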
+ wave5_swap_endian(vpu_dev, data, len, endian);
+ memcpy(vb->vaddr + offset, data, len);
+
+ return len;
+}
+
+int wave5_vdi_allocate_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ void *vaddr;
+ dma_addr_t daddr;
+
+ if (!vb->size) {
+ dev_err(vpu_dev->dev, "%s: requested size==0\n", __func__);
+ return -EINVAL;
+ }
+
+ vaddr = dma_alloc_coherent(vpu_dev->dev, vb->size, &daddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+ vb->vaddr = vaddr;
+ vb->daddr = daddr;
+
+ return 0;
+}
+
+void wave5_vdi_free_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ if (vb->size == 0)
+ return;
+
+ if (!vb->vaddr)
+ dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__);
+ else
+ dma_free_coherent(vpu_dev->dev, vb->size, vb->vaddr, vb->daddr);
+
+ memset(vb, 0, sizeof(*vb));
+}
+
+unsigned int wave5_vdi_convert_endian(struct vpu_device *vpu_dev, unsigned int endian)
+{
+ if (PRODUCT_CODE_W_SERIES(vpu_dev->product_code)) {
+ switch (endian) {
+ case VDI_LITTLE_ENDIAN:
+ endian = 0x00;
+ break;
+ case VDI_BIG_ENDIAN:
+ endian = 0x0f;
+ break;
+ case VDI_32BIT_LITTLE_ENDIAN:
+ endian = 0x04;
+ break;
+ case VDI_32BIT_BIG_ENDIAN:
+ endian = 0x03;
+ break;
+ }
+ }
+
+ return (endian & 0x0f);
+}
+
+static void byte_swap(unsigned char *data, size_t len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i += 2)
+ swap(data[i], data[i + 1]);
+}
+
+static void word_swap(unsigned char *data, size_t len)
+{
+ u16 *ptr = (u16 *)data;
+ unsigned int i;
+ size_t size = len / sizeof(u16);
+
+ for (i = 0; i < size; i += 2)
+ swap(ptr[i], ptr[i + 1]);
+}
+
+static void dword_swap(unsigned char *data, size_t len)
+{
+ u32 *ptr = (u32 *)data;
+ size_t size = len / sizeof(u32);
+ unsigned int i;
+
+ for (i = 0; i < size; i += 2)
+ swap(ptr[i], ptr[i + 1]);
+}
+
+static void lword_swap(unsigned char *data, size_t len)
+{
+ u64 *ptr = (u64 *)data;
+ size_t size = len / sizeof(u64);
+ unsigned int i;
+
+ for (i = 0; i < size; i += 2)
+ swap(ptr[i], ptr[i + 1]);
+}
+
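+/*
+ * The four swap primitives above compose: e.g. converting between
+ * VDI_128BIT_LITTLE_ENDIAN (0x0 after conversion) and
+ * VDI_128BIT_BIG_ENDIAN (0xf) sets all four change bits below, so
+ * byte, word, dword and lword swaps are all applied.
+ */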
+static void wave5_swap_endian(struct vpu_device *vpu_dev, u8 *data, size_t len,
+ unsigned int endian)
+{
+ int changes;
+ unsigned int sys_endian = VDI_128BIT_BUS_SYSTEM_ENDIAN;
+ bool byte_change, word_change, dword_change, lword_change;
+
+ if (!PRODUCT_CODE_W_SERIES(vpu_dev->product_code)) {
+ dev_err(vpu_dev->dev, "unknown product id: %08x\n", vpu_dev->product_code);
+ return;
+ }
+
+ endian = wave5_vdi_convert_endian(vpu_dev, endian);
+ sys_endian = wave5_vdi_convert_endian(vpu_dev, sys_endian);
+ if (endian == sys_endian)
+ return;
+
+ changes = endian ^ sys_endian;
+ byte_change = changes & 0x01;
+ word_change = changes & 0x02;
+ dword_change = changes & 0x04;
+ lword_change = changes & 0x08;
+
+ if (byte_change)
+ byte_swap(data, len);
+ if (word_change)
+ word_swap(data, len);
+ if (dword_change)
+ dword_swap(data, len);
+ if (lword_change)
+ lword_swap(data, len);
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vdi.h b/drivers/media/platform/chips-media/wave5/wave5-vdi.h
new file mode 100644
index 000000000000..780be5747332
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vdi.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - low level access functions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef _VDI_H_
+#define _VDI_H_
+
+#include "wave5-vpuconfig.h"
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+/************************************************************************/
+/* COMMON REGISTERS */
+/************************************************************************/
+#define VPU_PRODUCT_CODE_REGISTER 0x1044
+
+/* system register write */
+#define vpu_write_reg(VPU_INST, ADDR, DATA) wave5_vdi_write_register(VPU_INST, ADDR, DATA)
+/* system register read */
+#define vpu_read_reg(CORE, ADDR) wave5_vdi_readl(CORE, ADDR)
+
+struct vpu_buf {
+ size_t size;
+ dma_addr_t daddr;
+ void *vaddr;
+};
+
+struct dma_vpu_buf {
+ size_t size;
+ dma_addr_t daddr;
+};
+
+enum endian_mode {
+ VDI_LITTLE_ENDIAN = 0, /* 64bit LE */
+ VDI_BIG_ENDIAN, /* 64bit BE */
+ VDI_32BIT_LITTLE_ENDIAN,
+ VDI_32BIT_BIG_ENDIAN,
+ /* WAVE PRODUCTS */
+ VDI_128BIT_LITTLE_ENDIAN = 16,
+ VDI_128BIT_LE_BYTE_SWAP,
+ VDI_128BIT_LE_WORD_SWAP,
+ VDI_128BIT_LE_WORD_BYTE_SWAP,
+ VDI_128BIT_LE_DWORD_SWAP,
+ VDI_128BIT_LE_DWORD_BYTE_SWAP,
+ VDI_128BIT_LE_DWORD_WORD_SWAP,
+ VDI_128BIT_LE_DWORD_WORD_BYTE_SWAP,
+ VDI_128BIT_BE_DWORD_WORD_BYTE_SWAP,
+ VDI_128BIT_BE_DWORD_WORD_SWAP,
+ VDI_128BIT_BE_DWORD_BYTE_SWAP,
+ VDI_128BIT_BE_DWORD_SWAP,
+ VDI_128BIT_BE_WORD_BYTE_SWAP,
+ VDI_128BIT_BE_WORD_SWAP,
+ VDI_128BIT_BE_BYTE_SWAP,
+ VDI_128BIT_BIG_ENDIAN = 31,
+ VDI_ENDIAN_MAX
+};
+
+#define VDI_128BIT_ENDIAN_MASK 0xf
+
+int wave5_vdi_init(struct device *dev);
+int wave5_vdi_release(struct device *dev); /* may only be called at system shutdown */
+
+#endif /* _VDI_H_ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
new file mode 100644
index 000000000000..526ced543aab
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -0,0 +1,1441 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - decoder interface
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+#define VPU_DEC_DEV_NAME "C&M Wave5 VPU decoder"
+#define VPU_DEC_DRV_NAME "wave5-dec"
+#define V4L2_CID_VPU_THUMBNAIL_MODE (V4L2_CID_USER_BASE + 0x1001)
+
+static const struct vpu_format dec_fmt_list[FMT_TYPES][MAX_FMTS] = {
+ [VPU_FMT_TYPE_CODEC] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_HEVC,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_H264,
+ .max_width = 8192,
+ .min_width = 32,
+ .max_height = 4320,
+ .min_height = 32,
+ },
+ },
+ [VPU_FMT_TYPE_RAW] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ }
+};
+
+static enum wave_std wave5_to_vpu_codstd(unsigned int v4l2_pix_fmt)
+{
+ switch (v4l2_pix_fmt) {
+ case V4L2_PIX_FMT_H264:
+ return W_AVC_DEC;
+ case V4L2_PIX_FMT_HEVC:
+ return W_HEVC_DEC;
+ default:
+ return STD_UNKNOWN;
+ }
+}
+
+static void wave5_handle_bitstream_buffer(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
+ v4l2_m2m_for_each_src_buf_safe(inst->v4l2_fh.m2m_ctx, buf, n) {
+ struct vb2_v4l2_buffer *vbuf = &buf->vb;
+ struct vpu_buffer *vpu_buf = wave5_to_vpu_buf(vbuf);
+ size_t src_size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ void *src_buf = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ dma_addr_t rd_ptr = 0;
+ dma_addr_t wr_ptr = 0;
+ size_t remain_size = 0;
+ size_t offset;
+
+ if (vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "already consumed src buf (%u)\n",
+ vbuf->vb2_buf.index);
+ continue;
+ }
+
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Acquiring kernel pointer to src buf (%u), fail\n",
+ __func__, vbuf->vb2_buf.index);
+ break;
+ }
+
+ ret = wave5_vpu_dec_get_bitstream_buffer(inst, &rd_ptr, &wr_ptr, &remain_size);
+ if (ret) {
+ dev_err(inst->dev->dev, "Getting the bitstream buffer, fail: %d\n",
+ ret);
+ return;
+ }
+
+ if (remain_size < src_size) {
+ dev_dbg(inst->dev->dev,
+ "%s: remaining size: %zu < source size: %zu for src buf (%u)\n",
+ __func__, remain_size, src_size, vbuf->vb2_buf.index);
+ break;
+ }
+
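+ /*
+ * The bitstream buffer is used as a ring buffer: if the write
+ * would run past its end, split it into a tail copy up to the
+ * end of the buffer and a second copy that wraps around to the
+ * start.
+ */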
+ offset = wr_ptr - inst->bitstream_vbuf.daddr;
+ if (wr_ptr + src_size > inst->bitstream_vbuf.daddr + inst->bitstream_vbuf.size) {
+ size_t size;
+
+ size = inst->bitstream_vbuf.daddr + inst->bitstream_vbuf.size - wr_ptr;
+ ret = wave5_vdi_write_memory(inst->dev, &inst->bitstream_vbuf, offset,
+ (u8 *)src_buf, size, VDI_128BIT_LITTLE_ENDIAN);
+ if (ret < 0) {
+ dev_dbg(inst->dev->dev,
+ "%s: 1/2 write src buf (%u) into bitstream buf, fail: %d\n",
+ __func__, vbuf->vb2_buf.index, ret);
+ break;
+ }
+ ret = wave5_vdi_write_memory(inst->dev, &inst->bitstream_vbuf, 0,
+ (u8 *)src_buf + size, src_size - size,
+ VDI_128BIT_LITTLE_ENDIAN);
+ if (ret < 0) {
+ dev_dbg(inst->dev->dev,
+ "%s: 2/2 write src buf (%u) into bitstream buf, fail: %d\n",
+ __func__, vbuf->vb2_buf.index, ret);
+ break;
+ }
+ } else {
+ ret = wave5_vdi_write_memory(inst->dev, &inst->bitstream_vbuf, offset,
+ (u8 *)src_buf, src_size,
+ VDI_128BIT_LITTLE_ENDIAN);
+ if (ret < 0) {
+ dev_dbg(inst->dev->dev,
+ "%s: write src buf (%u) into bitstream buf, fail: %d",
+ __func__, vbuf->vb2_buf.index, ret);
+ break;
+ }
+ }
+
+ ret = wave5_vpu_dec_update_bitstream_buffer(inst, src_size);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "vpu_dec_update_bitstream_buffer fail: %d for src buf (%u)\n",
+ ret, vbuf->vb2_buf.index);
+ break;
+ }
+
+ vpu_buf->consumed = true;
+ }
+}
+
+static void wave5_handle_src_buffer(struct vpu_instance *inst)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(inst->v4l2_fh.m2m_ctx);
+ if (src_buf) {
+ struct vpu_buffer *vpu_buf = wave5_to_vpu_buf(src_buf);
+
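+ /*
+ * A source buffer whose bitstream has been fully copied to the
+ * ring buffer can be returned to user space; its timestamp is
+ * kept so it can be carried over to the matching capture buffer.
+ */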
+ if (vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "%s: already consumed buffer\n", __func__);
+ src_buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx);
+ inst->timestamp = src_buf->vb2_buf.timestamp;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+ }
+}
+
+static void wave5_update_pix_fmt(struct v4l2_pix_format_mplane *pix_mp, unsigned int width,
+ unsigned int height)
+{
+ switch (pix_mp->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[1].sizeimage = width * height / 4;
+ pix_mp->plane_fmt[2].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[2].sizeimage = width * height / 4;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[1].sizeimage = width * height / 2;
+ break;
+ default:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ break;
+ }
+}
+
+static void wave5_vpu_dec_start_decode(struct vpu_instance *inst)
+{
+ struct dec_param pic_param;
+ int ret;
+ u32 fail_res = 0;
+
+ memset(&pic_param, 0, sizeof(struct dec_param));
+
+ if (inst->state == VPU_INST_STATE_INIT_SEQ) {
+ u32 non_linear_num = inst->dst_buf_count;
+ u32 linear_num = inst->dst_buf_count;
+ u32 stride = inst->dst_fmt.width;
+
+ ret = wave5_vpu_dec_register_frame_buffer_ex(inst, non_linear_num, linear_num,
+ stride, inst->dst_fmt.height,
+ COMPRESSED_FRAME_MAP);
+ if (ret)
+ dev_dbg(inst->dev->dev, "%s: vpu_dec_register_frame_buffer_ex fail: %d",
+ __func__, ret);
+ }
+
+ ret = wave5_vpu_dec_start_one_frame(inst, &pic_param, &fail_res);
+ if (ret && fail_res != WAVE5_SYSERR_QUEUEING_FAIL) {
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx);
+ inst->state = VPU_INST_STATE_STOP;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void wave5_vpu_dec_stop_decode(struct vpu_instance *inst)
+{
+ unsigned int i;
+ int ret;
+
+ inst->state = VPU_INST_STATE_STOP;
+
+ ret = wave5_vpu_dec_update_bitstream_buffer(inst, 0);
+ if (ret) {
+ dev_warn(inst->dev->dev,
+ "Setting EOS for the bitstream, fail: %d\n", ret);
+ }
+
+ for (i = 0; i < inst->dst_buf_count; i++) {
+ ret = wave5_vpu_dec_clr_disp_flag(inst, i);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Clearing the display flag of buffer index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
+ }
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, inst->v4l2_fh.m2m_ctx);
+}
+
+static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
+{
+ struct dec_output_info dec_output_info;
+ int ret;
+ u32 irq_status;
+
+ if (kfifo_out(&inst->irq_status, &irq_status, sizeof(irq_status)))
+ wave5_vpu_clear_interrupt_ex(inst, irq_status);
+
+ ret = wave5_vpu_dec_get_output_info(inst, &dec_output_info);
+ if (ret) {
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, inst->v4l2_fh.m2m_ctx);
+ return;
+ }
+ if (dec_output_info.index_frame_decoded == DECODED_IDX_FLAG_NO_FB &&
+ dec_output_info.index_frame_display == DISPLAY_IDX_FLAG_NO_FB) {
+ dev_dbg(inst->dev->dev, "%s: no more frame buffer\n", __func__);
+ } else {
+ wave5_handle_src_buffer(inst);
+
+ if (dec_output_info.index_frame_display >= 0) {
+ struct vb2_v4l2_buffer *dst_buf =
+ v4l2_m2m_dst_buf_remove_by_idx(inst->v4l2_fh.m2m_ctx,
+ dec_output_info.index_frame_display);
+ int stride = dec_output_info.disp_frame.stride;
+ int height = dec_output_info.disp_pic_height -
+ dec_output_info.rc_display.bottom;
+
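+ /*
+ * Derive the plane payloads from the stride reported by the
+ * firmware and the visible height (decoded height minus the
+ * bottom crop of the display rectangle).
+ */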
+ if (inst->dst_fmt.num_planes == 1) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ (stride * height * 3 / 2));
+ } else if (inst->dst_fmt.num_planes == 2) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ (stride * height));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1,
+ ((stride / 2) * height));
+ } else if (inst->dst_fmt.num_planes == 3) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ (stride * height));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1,
+ ((stride / 2) * (height / 2)));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 2,
+ ((stride / 2) * (height / 2)));
+ }
+
+ dst_buf->vb2_buf.timestamp = inst->timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ dev_dbg(inst->dev->dev, "%s: frame_cycle %8u\n",
+ __func__, dec_output_info.frame_cycle);
+ } else if (dec_output_info.index_frame_display == DISPLAY_IDX_FLAG_SEQ_END &&
+ !inst->eos) {
+ static const struct v4l2_event vpu_event_eos = {
+ .type = V4L2_EVENT_EOS
+ };
+ struct vb2_v4l2_buffer *dst_buf =
+ v4l2_m2m_dst_buf_remove(inst->v4l2_fh.m2m_ctx);
+
+ if (!dst_buf)
+ return;
+
+ if (inst->dst_fmt.num_planes == 1) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_plane_size(&dst_buf->vb2_buf, 0));
+ } else if (inst->dst_fmt.num_planes == 2) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_plane_size(&dst_buf->vb2_buf, 0));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1,
+ vb2_plane_size(&dst_buf->vb2_buf, 1));
+ } else if (inst->dst_fmt.num_planes == 3) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_plane_size(&dst_buf->vb2_buf, 0));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1,
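+ /*
+ * Without a usable interrupt line the driver falls back to
+ * polling the VPU status from an hrtimer; start the timer only
+ * if it is not already running.
+ */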
+ vb2_plane_size(&dst_buf->vb2_buf, 1));
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 2,
+ vb2_plane_size(&dst_buf->vb2_buf, 2));
+ }
+
+ dst_buf->vb2_buf.timestamp = inst->timestamp;
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ dst_buf->field = V4L2_FIELD_NONE;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ inst->eos = true;
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_eos);
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, inst->v4l2_fh.m2m_ctx);
+ }
+ }
+}
+
+static int wave5_vpu_dec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, VPU_DEC_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VPU_DEC_DRV_NAME, sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:" VPU_DEC_DRV_NAME, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_framesizes(struct file *f, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ const struct vpu_format *vpu_fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = vpu_fmt->min_width;
+ fsize->stepwise.max_width = vpu_fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = vpu_fmt->min_height;
+ fsize->stepwise.max_height = vpu_fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ const struct vpu_format *vpu_fmt;
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u nm planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->dst_fmt.width, inst->dst_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+ const struct v4l2_format_info *info = v4l2_format_info(vpu_fmt->v4l2_pix_fmt);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = info->mem_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.hsv_enc = inst->hsv_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+ memset(&f->fmt.pix_mp.reserved, 0, sizeof(f->fmt.pix_mp.reserved));
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_dec_try_fmt_cap(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->dst_fmt.width = f->fmt.pix_mp.width;
+ inst->dst_fmt.height = f->fmt.pix_mp.height;
+ inst->dst_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->dst_fmt.field = f->fmt.pix_mp.field;
+ inst->dst_fmt.flags = f->fmt.pix_mp.flags;
+ inst->dst_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->dst_fmt.num_planes; i++) {
+ inst->dst_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->dst_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV12 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV12M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = false;
+ } else if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV21 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV21M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = true;
+ } else {
+ inst->cbcr_interleave = false;
+ inst->nv21 = false;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->dst_fmt.width;
+ f->fmt.pix_mp.height = inst->dst_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->dst_fmt.field;
+ f->fmt.pix_mp.flags = inst->dst_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->dst_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->dst_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.hsv_enc = inst->hsv_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->src_fmt.width, inst->src_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = 1;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ memset(&f->fmt.pix_mp.reserved, 0, sizeof(f->fmt.pix_mp.reserved));
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_dec_try_fmt_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->src_fmt.width = f->fmt.pix_mp.width;
+ inst->src_fmt.height = f->fmt.pix_mp.height;
+ inst->src_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->src_fmt.field = f->fmt.pix_mp.field;
+ inst->src_fmt.flags = f->fmt.pix_mp.flags;
+ inst->src_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->src_fmt.num_planes; i++) {
+ inst->src_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->src_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ inst->colorspace = f->fmt.pix_mp.colorspace;
+ inst->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ inst->hsv_enc = f->fmt.pix_mp.hsv_enc;
+ inst->quantization = f->fmt.pix_mp.quantization;
+ inst->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ wave5_update_pix_fmt(&inst->dst_fmt, f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ return 0;
+}
+
+static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ if (inst->state > VPU_INST_STATE_OPEN) {
+ s->r.width = inst->conf_win_width;
+ s->r.height = inst->conf_win_height;
+ } else {
+ s->r.width = inst->src_fmt.width;
+ s->r.height = inst->src_fmt.height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (s->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ dev_dbg(inst->dev->dev, "V4L2_SEL_TGT_COMPOSE w: %u h: %u\n",
+ s->r.width, s->r.height);
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int ret;
+
+ dev_dbg(inst->dev->dev, "decoder command: %u\n", dc->cmd);
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_both_queues_are_streaming(inst))
+ return 0;
+
+ switch (dc->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ inst->state = VPU_INST_STATE_STOP;
+
+ ret = wave5_vpu_dec_update_bitstream_buffer(inst, 0);
+ if (ret) {
+ dev_err(inst->dev->dev,
+ "Setting EOS for the bitstream, fail: %d\n", ret);
+ return ret;
+ }
+ break;
+ case V4L2_DEC_CMD_START:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops wave5_vpu_dec_ioctl_ops = {
+ .vidioc_querycap = wave5_vpu_dec_querycap,
+ .vidioc_enum_framesizes = wave5_vpu_dec_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = wave5_vpu_dec_enum_fmt_cap,
+ .vidioc_s_fmt_vid_cap_mplane = wave5_vpu_dec_s_fmt_cap,
+ .vidioc_g_fmt_vid_cap_mplane = wave5_vpu_dec_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap_mplane = wave5_vpu_dec_try_fmt_cap,
+
+ .vidioc_enum_fmt_vid_out = wave5_vpu_dec_enum_fmt_out,
+ .vidioc_s_fmt_vid_out_mplane = wave5_vpu_dec_s_fmt_out,
+ .vidioc_g_fmt_vid_out_mplane = wave5_vpu_g_fmt_out,
+ .vidioc_try_fmt_vid_out_mplane = wave5_vpu_dec_try_fmt_out,
+
+ .vidioc_g_selection = wave5_vpu_dec_g_selection,
+ .vidioc_s_selection = wave5_vpu_dec_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = wave5_vpu_dec_decoder_cmd,
+
+ .vidioc_subscribe_event = wave5_vpu_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int wave5_vpu_dec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_instance *inst = wave5_ctrl_to_vpu_inst(ctrl);
+
+ dev_dbg(inst->dev->dev, "%s: name: %s | value: %d\n",
+ __func__, ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VPU_THUMBNAIL_MODE:
+ inst->thumbnail_mode = ctrl->val;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops wave5_vpu_dec_ctrl_ops = {
+ .s_ctrl = wave5_vpu_dec_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config wave5_vpu_thumbnail_mode = {
+ .ops = &wave5_vpu_dec_ctrl_ops,
+ .id = V4L2_CID_VPU_THUMBNAIL_MODE,
+ .name = "thumbnail mode",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .def = 0,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_WRITE_ONLY,
+};
+
+static void wave5_set_default_dec_openparam(struct dec_open_param *open_param)
+{
+ open_param->bitstream_mode = BS_MODE_INTERRUPT;
+ open_param->stream_endian = VPU_STREAM_ENDIAN;
+ open_param->frame_endian = VPU_FRAME_ENDIAN;
+}
+
+static int wave5_vpu_dec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane inst_format =
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(inst->dev->dev, "%s: num_buffers: %u | num_planes: %u | type: %u\n", __func__,
+ *num_buffers, *num_planes, q->type);
+
+ if (*num_planes) {
+ if (inst_format.num_planes != *num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++) {
+ if (sizes[i] < inst_format.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *num_planes = inst_format.num_planes;
+
+ if (*num_planes == 1) {
+ sizes[0] = inst_format.width * inst_format.height * 3 / 2;
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sizes[0] = inst_format.plane_fmt[0].sizeimage;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u\n", __func__, sizes[0]);
+ } else if (*num_planes == 2) {
+ sizes[0] = inst_format.width * inst_format.height;
+ sizes[1] = inst_format.width * inst_format.height / 2;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u | size[1]: %u\n",
+ __func__, sizes[0], sizes[1]);
+ } else if (*num_planes == 3) {
+ sizes[0] = inst_format.width * inst_format.height;
+ sizes[1] = inst_format.width * inst_format.height / 4;
+ sizes[2] = inst_format.width * inst_format.height / 4;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u | size[1]: %u | size[2]: %u\n",
+ __func__, sizes[0], sizes[1], sizes[2]);
+ }
+ }
+
+ if (inst->state == VPU_INST_STATE_NONE && q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct dec_open_param open_param;
+
+ memset(&open_param, 0, sizeof(struct dec_open_param));
+ wave5_set_default_dec_openparam(&open_param);
+
+ inst->bitstream_vbuf.size = ALIGN(inst->src_fmt.plane_fmt[0].sizeimage, 1024) * 4;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &inst->bitstream_vbuf);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: alloc bitstream of size %zu fail: %d\n",
+ __func__, inst->bitstream_vbuf.size, ret);
+ return ret;
+ }
+
+ inst->std = wave5_to_vpu_codstd(inst->src_fmt.pixelformat);
+ if (inst->std == STD_UNKNOWN) {
+ dev_warn(inst->dev->dev, "unsupported pixelformat: %.4s\n",
+ (char *)&inst->src_fmt.pixelformat);
+ ret = -EINVAL;
+ goto free_bitstream_vbuf;
+ }
+ open_param.bitstream_buffer = inst->bitstream_vbuf.daddr;
+ open_param.bitstream_buffer_size = inst->bitstream_vbuf.size;
+
+ ret = wave5_vpu_dec_open(inst, &open_param);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_dec_open, fail: %d\n",
+ __func__, ret);
+ goto free_bitstream_vbuf;
+ }
+
+ inst->state = VPU_INST_STATE_OPEN;
+
+ if (inst->thumbnail_mode)
+ wave5_vpu_dec_give_command(inst, ENABLE_DEC_THUMBNAIL_MODE, NULL);
+
+ } else if (inst->state == VPU_INST_STATE_INIT_SEQ &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ u32 non_linear_num;
+ u32 fb_stride, fb_height;
+ u32 luma_size, chroma_size;
+
+ if (*num_buffers > inst->min_dst_buf_count &&
+ *num_buffers < WAVE5_MAX_FBS)
+ inst->dst_buf_count = *num_buffers;
+
+ *num_buffers = inst->dst_buf_count;
+ non_linear_num = inst->dst_buf_count;
+
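+ /*
+ * Allocate one compressed (FBC) reference frame buffer per
+ * capture buffer: a full-stride luma plane followed by a
+ * half-stride, 16-byte-aligned chroma plane.
+ */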
+ for (i = 0; i < non_linear_num; i++) {
+ struct frame_buffer *frame = &inst->frame_buf[i];
+ struct vpu_buf *vframe = &inst->frame_vbuf[i];
+
+ fb_stride = inst->dst_fmt.width;
+ fb_height = ALIGN(inst->dst_fmt.height, 32);
+ luma_size = fb_stride * fb_height;
+ chroma_size = ALIGN(fb_stride / 2, 16) * fb_height;
+
+ vframe->size = luma_size + chroma_size;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, vframe);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Allocating FBC buf of size %zu, fail: %d\n",
+ __func__, vframe->size, ret);
+ return ret;
+ }
+
+ frame->buf_y = vframe->daddr;
+ frame->buf_cb = vframe->daddr + luma_size;
+ frame->buf_cr = (dma_addr_t)-1;
+ frame->size = vframe->size;
+ frame->width = inst->src_fmt.width;
+ frame->stride = fb_stride;
+ frame->map_type = COMPRESSED_FRAME_MAP;
+ frame->update_fb_info = true;
+ }
+ } else if (inst->state == VPU_INST_STATE_STOP &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ *num_buffers = 0;
+ }
+
+ return 0;
+
+free_bitstream_vbuf:
+ wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
+ return ret;
+}
+
+static int wave5_vpu_dec_start_streaming_open(struct vpu_instance *inst)
+{
+ struct dec_initial_info initial_info;
+ int ret = 0;
+
+ memset(&initial_info, 0, sizeof(struct dec_initial_info));
+
+ ret = wave5_vpu_dec_issue_seq_init(inst);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_dec_issue_seq_init, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+ dev_dbg(inst->dev->dev, "%s: failed to call vpu_wait_interrupt()\n", __func__);
+
+ ret = wave5_vpu_dec_complete_seq_init(inst, &initial_info);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: vpu_dec_complete_seq_init, fail: %d, reason: %u\n",
+ __func__, ret, initial_info.seq_init_err_reason);
+ } else {
+ static const struct v4l2_event vpu_event_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(inst->dev->dev, "%s: width: %u height: %u profile: %u | minbuffer: %u\n",
+ __func__, initial_info.pic_width, initial_info.pic_height,
+ initial_info.profile, initial_info.min_frame_buffer_count);
+
+ inst->state = VPU_INST_STATE_INIT_SEQ;
+ inst->min_dst_buf_count = initial_info.min_frame_buffer_count + 1;
+ inst->dst_buf_count = inst->min_dst_buf_count;
+
+ inst->conf_win_width = initial_info.pic_width - initial_info.pic_crop_rect.right;
+ inst->conf_win_height = initial_info.pic_height - initial_info.pic_crop_rect.bottom;
+
+ ctrl = v4l2_ctrl_find(&inst->v4l2_ctrl_hdl,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->min_dst_buf_count);
+
+ if (initial_info.pic_width != inst->src_fmt.width ||
+ initial_info.pic_height != inst->src_fmt.height) {
+ wave5_update_pix_fmt(&inst->src_fmt, initial_info.pic_width,
+ initial_info.pic_height);
+ wave5_update_pix_fmt(&inst->dst_fmt, initial_info.pic_width,
+ initial_info.pic_height);
+ }
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_src_ch);
+
+ wave5_handle_src_buffer(inst);
+ }
+
+ return ret;
+}
+
+static int wave5_vpu_dec_start_streaming_seek(struct vpu_instance *inst)
+{
+ struct dec_initial_info initial_info;
+ struct dec_param pic_param;
+ struct dec_output_info dec_output_info;
+ int ret = 0;
+ u32 fail_res = 0;
+
+ memset(&pic_param, 0, sizeof(struct dec_param));
+
+ ret = wave5_vpu_dec_start_one_frame(inst, &pic_param, &fail_res);
+ if (ret && fail_res != WAVE5_SYSERR_QUEUEING_FAIL) {
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx);
+ inst->state = VPU_INST_STATE_STOP;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_dec_start_one_frame\n", __func__);
+ return ret;
+ }
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+ dev_dbg(inst->dev->dev, "%s: failed to call vpu_wait_interrupt()\n", __func__);
+
+ ret = wave5_vpu_dec_get_output_info(inst, &dec_output_info);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_dec_get_output_info, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (dec_output_info.sequence_changed) {
+ static const struct v4l2_event vpu_event_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ struct v4l2_ctrl *ctrl;
+
+ wave5_vpu_dec_give_command(inst, DEC_RESET_FRAMEBUF_INFO, NULL);
+ wave5_vpu_dec_give_command(inst, DEC_GET_SEQ_INFO, &initial_info);
+
+ dev_dbg(inst->dev->dev, "%s: width: %u height: %u profile: %u | minbuffer: %u\n",
+ __func__, initial_info.pic_width, initial_info.pic_height,
+ initial_info.profile, initial_info.min_frame_buffer_count);
+
+ inst->min_dst_buf_count = initial_info.min_frame_buffer_count + 1;
+ inst->dst_buf_count = inst->min_dst_buf_count;
+
+ inst->conf_win_width = initial_info.pic_width - initial_info.pic_crop_rect.right;
+ inst->conf_win_height = initial_info.pic_height - initial_info.pic_crop_rect.bottom;
+
+ ctrl = v4l2_ctrl_find(&inst->v4l2_ctrl_hdl,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->min_dst_buf_count);
+
+ if (initial_info.pic_width != inst->src_fmt.width ||
+ initial_info.pic_height != inst->src_fmt.height) {
+ wave5_update_pix_fmt(&inst->src_fmt, initial_info.pic_width,
+ initial_info.pic_height);
+ wave5_update_pix_fmt(&inst->dst_fmt, initial_info.pic_width,
+ initial_info.pic_height);
+ }
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_src_ch);
+
+ wave5_handle_src_buffer(inst);
+ }
+
+ return ret;
+}
+
+static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpu_buffer *vpu_buf = wave5_to_vpu_buf(vbuf);
+
+ vpu_buf->consumed = false;
+ vbuf->sequence = inst->queued_src_buf_num++;
+
+ if (inst->state == VPU_INST_STATE_PIC_RUN) {
+ wave5_handle_bitstream_buffer(inst);
+ inst->ops->start_process(inst);
+ }
+}
+
+static void wave5_vpu_dec_buf_queue_dst(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ int ret;
+
+ vbuf->sequence = inst->queued_dst_buf_num++;
+ ret = wave5_vpu_dec_clr_disp_flag(inst, vb->index);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Clearing the display flag of buffer index: %u, fail: %d\n",
+ __func__, vb->index, ret);
+ }
+
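+ /*
+ * While the sequence is being initialized, register each queued
+ * capture buffer as a linear frame buffer. The first
+ * dst_buf_count entries of frame_buf[] hold the compressed
+ * (non-linear) reference buffers, so the linear entries are
+ * stored right behind them.
+ */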
+ if (inst->state == VPU_INST_STATE_INIT_SEQ) {
+ dma_addr_t buf_addr_y = 0, buf_addr_cb = 0, buf_addr_cr = 0;
+ u32 buf_size = 0;
+ u32 non_linear_num = inst->dst_buf_count;
+ u32 fb_stride = inst->dst_fmt.width;
+ u32 luma_size = fb_stride * inst->dst_fmt.height;
+ u32 chroma_size = (fb_stride / 2) * (inst->dst_fmt.height / 2);
+
+ if (inst->dst_fmt.num_planes == 1) {
+ buf_size = vb2_plane_size(&vbuf->vb2_buf, 0);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ buf_addr_cb = buf_addr_y + luma_size;
+ buf_addr_cr = buf_addr_cb + chroma_size;
+ } else if (inst->dst_fmt.num_planes == 2) {
+ buf_size = vb2_plane_size(&vbuf->vb2_buf, 0) +
+ vb2_plane_size(&vbuf->vb2_buf, 1);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ buf_addr_cb = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 1);
+ buf_addr_cr = buf_addr_cb + chroma_size;
+ } else if (inst->dst_fmt.num_planes == 3) {
+ buf_size = vb2_plane_size(&vbuf->vb2_buf, 0) +
+ vb2_plane_size(&vbuf->vb2_buf, 1) +
+ vb2_plane_size(&vbuf->vb2_buf, 2);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ buf_addr_cb = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 1);
+ buf_addr_cr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 2);
+ }
+ inst->frame_buf[vb->index + non_linear_num].buf_y = buf_addr_y;
+ inst->frame_buf[vb->index + non_linear_num].buf_cb = buf_addr_cb;
+ inst->frame_buf[vb->index + non_linear_num].buf_cr = buf_addr_cr;
+ inst->frame_buf[vb->index + non_linear_num].size = buf_size;
+ inst->frame_buf[vb->index + non_linear_num].width = inst->src_fmt.width;
+ inst->frame_buf[vb->index + non_linear_num].stride = fb_stride;
+ inst->frame_buf[vb->index + non_linear_num].map_type = LINEAR_FRAME_MAP;
+ inst->frame_buf[vb->index + non_linear_num].update_fb_info = true;
+ }
+
+ if (!vb2_is_streaming(vb->vb2_queue))
+ return;
+
+ if (inst->state == VPU_INST_STATE_STOP && !inst->eos)
+ inst->ops->start_process(inst);
+}
+
+static void wave5_vpu_dec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(inst->dev->dev, "%s: type: %4u index: %4u size: ([0]=%4lu, [1]=%4lu, [2]=%4lu)\n",
+ __func__, vb->type, vb->index, vb2_plane_size(&vbuf->vb2_buf, 0),
+ vb2_plane_size(&vbuf->vb2_buf, 1), vb2_plane_size(&vbuf->vb2_buf, 2));
+
+ v4l2_m2m_buf_queue(inst->v4l2_fh.m2m_ctx, vbuf);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ wave5_vpu_dec_buf_queue_src(vb);
+ else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ wave5_vpu_dec_buf_queue_dst(vb);
+}
+
+static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ int ret = 0;
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ wave5_handle_bitstream_buffer(inst);
+ if (inst->state == VPU_INST_STATE_OPEN)
+ ret = wave5_vpu_dec_start_streaming_open(inst);
+ else if (inst->state == VPU_INST_STATE_INIT_SEQ)
+ ret = wave5_vpu_dec_start_streaming_seek(inst);
+
+ if (ret) {
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4d | index %4d\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *buf;
+ bool check_cmd = true;
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
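+ /*
+ * Drain the firmware before releasing any buffers: keep
+ * fetching decoder output until both the instance command
+ * queue and the report queue are empty.
+ */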
+ while (check_cmd) {
+ struct queue_status_info q_status;
+ struct dec_output_info dec_output_info;
+
+ wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
+
+ if (q_status.instance_queue_count + q_status.report_queue_count == 0)
+ break;
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+ break;
+
+ if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
+ dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n");
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+ inst->queued_src_buf_num = 0;
+ } else {
+ unsigned int i;
+ int ret;
+ dma_addr_t rd_ptr, wr_ptr;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->v4l2_fh.m2m_ctx))) {
+ u32 plane;
+
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+
+ for (plane = 0; plane < inst->dst_fmt.num_planes; plane++)
+ vb2_set_plane_payload(&buf->vb2_buf, plane, 0);
+
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ for (i = 0; i < inst->dst_buf_count; i++) {
+ ret = wave5_vpu_dec_set_disp_flag(inst, i);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Setting display flag of buf index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
+ }
+
+ ret = wave5_vpu_dec_get_bitstream_buffer(inst, &rd_ptr, &wr_ptr, NULL);
+ if (ret) {
+ dev_err(inst->dev->dev,
+ "Getting bitstream buf, fail: %d\n", ret);
+ return;
+ }
+ ret = wave5_vpu_dec_set_rd_ptr(inst, wr_ptr, true);
+ if (ret) {
+ dev_err(inst->dev->dev,
+ "Setting read pointer for the decoder, fail: %d\n", ret);
+ return;
+ }
+ if (inst->eos) {
+ inst->eos = false;
+ inst->state = VPU_INST_STATE_INIT_SEQ;
+ }
+ inst->queued_dst_buf_num = 0;
+ }
+}
+
+static const struct vb2_ops wave5_vpu_dec_vb2_ops = {
+ .queue_setup = wave5_vpu_dec_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_queue = wave5_vpu_dec_buf_queue,
+ .start_streaming = wave5_vpu_dec_start_streaming,
+ .stop_streaming = wave5_vpu_dec_stop_streaming,
+};
+
+static void wave5_set_default_format(struct v4l2_pix_format_mplane *src_fmt,
+ struct v4l2_pix_format_mplane *dst_fmt)
+{
+ unsigned int dst_pix_fmt = dec_fmt_list[VPU_FMT_TYPE_RAW][0].v4l2_pix_fmt;
+ const struct v4l2_format_info *dst_fmt_info = v4l2_format_info(dst_pix_fmt);
+
+ src_fmt->pixelformat = dec_fmt_list[VPU_FMT_TYPE_CODEC][0].v4l2_pix_fmt;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->flags = 0;
+ src_fmt->num_planes = 1;
+ wave5_update_pix_fmt(src_fmt, 720, 480);
+
+ dst_fmt->pixelformat = dst_pix_fmt;
+ dst_fmt->field = V4L2_FIELD_NONE;
+ dst_fmt->flags = 0;
+ dst_fmt->num_planes = dst_fmt_info->mem_planes;
+ wave5_update_pix_fmt(dst_fmt, 736, 480);
+}
+
+static int wave5_vpu_dec_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ return wave5_vpu_queue_init(priv, src_vq, dst_vq, &wave5_vpu_dec_vb2_ops);
+}
+
+static const struct vpu_instance_ops wave5_vpu_dec_inst_ops = {
+ .start_process = wave5_vpu_dec_start_decode,
+ .stop_process = wave5_vpu_dec_stop_decode,
+ .finish_process = wave5_vpu_dec_finish_decode,
+};
+
+static void wave5_vpu_dec_device_run(void *priv)
+{
+ struct vpu_instance *inst = priv;
+
+ inst->ops->start_process(inst);
+
+ inst->state = VPU_INST_STATE_PIC_RUN;
+}
+
+static void wave5_vpu_dec_job_abort(void *priv)
+{
+ struct vpu_instance *inst = priv;
+
+ inst->ops->stop_process(inst);
+}
+
+static const struct v4l2_m2m_ops wave5_vpu_dec_m2m_ops = {
+ .device_run = wave5_vpu_dec_device_run,
+ .job_abort = wave5_vpu_dec_job_abort,
+};
+
+static int wave5_vpu_open_dec(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct vpu_device *dev = video_drvdata(filp);
+ struct vpu_instance *inst = NULL;
+ int ret = 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->type = VPU_INST_TYPE_DEC;
+ inst->ops = &wave5_vpu_dec_inst_ops;
+
+ v4l2_fh_init(&inst->v4l2_fh, vdev);
+ filp->private_data = &inst->v4l2_fh;
+ v4l2_fh_add(&inst->v4l2_fh);
+
+ INIT_LIST_HEAD(&inst->list);
+ list_add_tail(&inst->list, &dev->instances);
+
+ inst->v4l2_m2m_dev = v4l2_m2m_init(&wave5_vpu_dec_m2m_ops);
+ if (IS_ERR(inst->v4l2_m2m_dev)) {
+ ret = PTR_ERR(inst->v4l2_m2m_dev);
+ dev_err(inst->dev->dev, "v4l2_m2m_init, fail: %d\n", ret);
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.m2m_ctx =
+ v4l2_m2m_ctx_init(inst->v4l2_m2m_dev, inst, wave5_vpu_dec_queue_init);
+ if (IS_ERR(inst->v4l2_fh.m2m_ctx)) {
+ ret = PTR_ERR(inst->v4l2_fh.m2m_ctx);
+ goto cleanup_inst;
+ }
+
+ v4l2_ctrl_handler_init(&inst->v4l2_ctrl_hdl, 10);
+ v4l2_ctrl_new_custom(&inst->v4l2_ctrl_hdl, &wave5_vpu_thumbnail_mode, NULL);
+ v4l2_ctrl_new_std(&inst->v4l2_ctrl_hdl, &wave5_vpu_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1);
+
+ if (inst->v4l2_ctrl_hdl.error) {
+ ret = -ENODEV;
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.ctrl_handler = &inst->v4l2_ctrl_hdl;
+ v4l2_ctrl_handler_setup(&inst->v4l2_ctrl_hdl);
+
+ wave5_set_default_format(&inst->src_fmt, &inst->dst_fmt);
+ inst->colorspace = V4L2_COLORSPACE_REC709;
+ inst->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ inst->hsv_enc = 0;
+ inst->quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ init_completion(&inst->irq_done);
+
+ if (inst->dev->irq < 0) {
+ ret = mutex_lock_interruptible(&inst->dev->dev_lock);
+ if (ret)
+ goto cleanup_inst;
+
+ if (!hrtimer_active(&inst->dev->hrtimer))
+ hrtimer_start(&inst->dev->hrtimer, ns_to_ktime(0), HRTIMER_MODE_REL_PINNED);
+
+ mutex_unlock(&inst->dev->dev_lock);
+ }
+
+ ret = kfifo_alloc(&inst->irq_status, 16 * sizeof(int), GFP_KERNEL);
+ if (ret) {
+ dev_err(inst->dev->dev, "failed to allocate fifo\n");
+ goto cleanup_inst;
+ }
+
+ inst->id = ida_alloc(&inst->dev->inst_ida, GFP_KERNEL);
+ if (inst->id < 0) {
+ dev_warn(inst->dev->dev, "Allocating instance ID, fail: %d\n", inst->id);
+ ret = inst->id;
+ goto cleanup_inst;
+ }
+
+ return 0;
+
+cleanup_inst:
+ wave5_cleanup_instance(inst);
+ return ret;
+}
+
+static int wave5_vpu_dec_release(struct file *filp)
+{
+ return wave5_vpu_release_device(filp, wave5_vpu_dec_close, "decoder");
+}
+
+static const struct v4l2_file_operations wave5_vpu_dec_fops = {
+ .owner = THIS_MODULE,
+ .open = wave5_vpu_open_dec,
+ .release = wave5_vpu_dec_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+int wave5_vpu_dec_register_device(struct vpu_device *dev)
+{
+ struct video_device *vdev_dec;
+ int ret;
+
+ vdev_dec = devm_kzalloc(dev->v4l2_dev.dev, sizeof(*vdev_dec), GFP_KERNEL);
+ if (!vdev_dec)
+ return -ENOMEM;
+
+ dev->video_dev_dec = vdev_dec;
+
+ strscpy(vdev_dec->name, VPU_DEC_DEV_NAME, sizeof(vdev_dec->name));
+ vdev_dec->fops = &wave5_vpu_dec_fops;
+ vdev_dec->ioctl_ops = &wave5_vpu_dec_ioctl_ops;
+ vdev_dec->release = video_device_release_empty;
+ vdev_dec->v4l2_dev = &dev->v4l2_dev;
+ vdev_dec->vfl_dir = VFL_DIR_M2M;
+ vdev_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vdev_dec->lock = &dev->dev_lock;
+
+ ret = video_register_device(vdev_dec, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ video_set_drvdata(vdev_dec, dev);
+
+ return 0;
+}
+
+void wave5_vpu_dec_unregister_device(struct vpu_device *dev)
+{
+ video_unregister_device(dev->video_dev_dec);
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
new file mode 100644
index 000000000000..80943249395b
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -0,0 +1,1758 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - encoder interface
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+#define VPU_ENC_DEV_NAME "C&M Wave5 VPU encoder"
+#define VPU_ENC_DRV_NAME "wave5-enc"
+
+static const struct vpu_format enc_fmt_list[FMT_TYPES][MAX_FMTS] = {
+ [VPU_FMT_TYPE_CODEC] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_HEVC,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_H264,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ },
+ [VPU_FMT_TYPE_RAW] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ }
+};
+
+static enum wave_std wave5_to_vpu_wavestd(unsigned int v4l2_pix_fmt)
+{
+ switch (v4l2_pix_fmt) {
+ case V4L2_PIX_FMT_H264:
+ return W_AVC_ENC;
+ case V4L2_PIX_FMT_HEVC:
+ return W_HEVC_ENC;
+ default:
+ return STD_UNKNOWN;
+ }
+}
+
+static struct vb2_v4l2_buffer *wave5_get_valid_src_buf(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_buffer *v4l2_m2m_buf;
+
+ v4l2_m2m_for_each_src_buf(inst->v4l2_fh.m2m_ctx, v4l2_m2m_buf) {
+ struct vb2_v4l2_buffer *vb2_v4l2_buf;
+ struct vpu_buffer *vpu_buf = NULL;
+
+ vb2_v4l2_buf = &v4l2_m2m_buf->vb;
+ vpu_buf = wave5_to_vpu_buf(vb2_v4l2_buf);
+
+ if (!vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "%s: src buf (index: %u) has not been consumed\n",
+ __func__, vb2_v4l2_buf->vb2_buf.index);
+ return vb2_v4l2_buf;
+ }
+ }
+
+ return NULL;
+}
+
+static struct vb2_v4l2_buffer *wave5_get_valid_dst_buf(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_buffer *v4l2_m2m_buf;
+
+ v4l2_m2m_for_each_dst_buf(inst->v4l2_fh.m2m_ctx, v4l2_m2m_buf) {
+ struct vb2_v4l2_buffer *vb2_v4l2_buf;
+ struct vpu_buffer *vpu_buf = NULL;
+
+ vb2_v4l2_buf = &v4l2_m2m_buf->vb;
+ vpu_buf = wave5_to_vpu_buf(vb2_v4l2_buf);
+
+ if (!vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "%s: dst buf (index: %u) has not been consumed\n",
+ __func__, vb2_v4l2_buf->vb2_buf.index);
+ return vb2_v4l2_buf;
+ }
+ }
+
+ return NULL;
+}
+
+static void wave5_update_pix_fmt(struct v4l2_pix_format_mplane *pix_mp, unsigned int width,
+ unsigned int height)
+{
+ switch (pix_mp->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[1].sizeimage = round_up(width, 32) * height / 4;
+ pix_mp->plane_fmt[2].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[2].sizeimage = round_up(width, 32) * height / 4;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[1].sizeimage = round_up(width, 32) * height / 2;
+ break;
+ default:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ break;
+ }
+}
+
+static void wave5_vpu_enc_start_encode(struct vpu_instance *inst)
+{
+ u32 max_cmd_q = 0;
+
+ max_cmd_q = min_t(u32, inst->src_buf_count, COMMAND_QUEUE_DEPTH);
+
+ if (inst->state == VPU_INST_STATE_STOP)
+ max_cmd_q = 1;
+
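+ /*
+ * Issue as many encode commands as there are unconsumed source
+ * buffers, capped by the hardware command queue depth. In the
+ * STOP state only one command is issued per run; once the source
+ * queue is empty it carries src_end_flag to signal end of stream.
+ */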
+ while (max_cmd_q) {
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct vpu_buffer *src_vbuf;
+ struct vpu_buffer *dst_vbuf;
+ struct frame_buffer frame_buf;
+ struct enc_param pic_param;
+ u32 stride = ALIGN(inst->dst_fmt.width, 32);
+ u32 luma_size = (stride * inst->dst_fmt.height);
+ u32 chroma_size = ((stride / 2) * (inst->dst_fmt.height / 2));
+ u32 fail_res;
+ int ret;
+
+ memset(&pic_param, 0, sizeof(struct enc_param));
+ memset(&frame_buf, 0, sizeof(struct frame_buffer));
+
+ src_buf = wave5_get_valid_src_buf(inst);
+ dst_buf = wave5_get_valid_dst_buf(inst);
+
+ if (!dst_buf) {
+ dev_dbg(inst->dev->dev, "%s: No valid dst buf\n", __func__);
+ break;
+ }
+
+ dst_vbuf = wave5_to_vpu_buf(dst_buf);
+ pic_param.pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pic_param.pic_stream_buffer_size =
+ vb2_plane_size(&dst_buf->vb2_buf, 0);
+
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev, "%s: No valid src buf\n", __func__);
+ if (inst->state == VPU_INST_STATE_STOP)
+ pic_param.src_end_flag = true;
+ else
+ break;
+ } else {
+ src_vbuf = wave5_to_vpu_buf(src_buf);
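+ /*
+ * For single-plane (contiguous) raw formats the chroma
+ * planes follow the luma plane at stride-derived offsets;
+ * multiplanar formats carry a separate DMA address per
+ * plane.
+ */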
+ if (inst->src_fmt.num_planes == 1) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb = frame_buf.buf_y + luma_size;
+ frame_buf.buf_cr = frame_buf.buf_cb + chroma_size;
+ } else if (inst->src_fmt.num_planes == 2) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ frame_buf.buf_cr = frame_buf.buf_cb + chroma_size;
+ } else if (inst->src_fmt.num_planes == 3) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ frame_buf.buf_cr =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 2);
+ }
+ frame_buf.stride = stride;
+ pic_param.src_idx = src_buf->vb2_buf.index;
+ }
+
+ pic_param.source_frame = &frame_buf;
+ pic_param.code_option.implicit_header_encode = 1;
+ ret = wave5_vpu_enc_start_one_frame(inst, &pic_param, &fail_res);
+ if (ret) {
+ if (fail_res == WAVE5_SYSERR_QUEUEING_FAIL)
+ break;
+
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_start_one_frame fail: %d\n",
+ __func__, ret);
+ src_buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx);
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Removing src buf failed, the queue is empty\n",
+ __func__);
+ continue;
+ }
+ dst_buf = v4l2_m2m_dst_buf_remove(inst->v4l2_fh.m2m_ctx);
+ if (!dst_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Removing dst buf failed, the queue is empty\n",
+ __func__);
+ continue;
+ }
+ inst->state = VPU_INST_STATE_STOP;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_start_one_frame success\n",
+ __func__);
+ if (src_buf)
+ src_vbuf->consumed = true;
+ if (dst_buf)
+ dst_vbuf->consumed = true;
+ }
+
+ max_cmd_q--;
+ }
+}
+
+static void wave5_vpu_enc_stop_encode(struct vpu_instance *inst)
+{
+ inst->state = VPU_INST_STATE_STOP;
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, inst->v4l2_fh.m2m_ctx);
+}
+
+static void wave5_vpu_enc_finish_encode(struct vpu_instance *inst)
+{
+ int ret;
+ struct enc_output_info enc_output_info;
+ u32 irq_status;
+ struct vb2_v4l2_buffer *dst_buf = NULL;
+ struct v4l2_m2m_buffer *v4l2_m2m_buf = NULL;
+
+ if (kfifo_out(&inst->irq_status, &irq_status, sizeof(irq_status)))
+ wave5_vpu_clear_interrupt_ex(inst, irq_status);
+
+ ret = wave5_vpu_enc_get_output_info(inst, &enc_output_info);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: vpu_enc_get_output_info fail: %d reason: %u | info: %u\n",
+ __func__, ret, enc_output_info.error_reason, enc_output_info.warn_info);
+ return;
+ }
+
+ v4l2_m2m_for_each_dst_buf(inst->v4l2_fh.m2m_ctx, v4l2_m2m_buf) {
+ dst_buf = &v4l2_m2m_buf->vb;
+ if (enc_output_info.bitstream_buffer ==
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0))
+ break;
+ }
+
+ if (enc_output_info.enc_src_idx >= 0) {
+ struct vb2_v4l2_buffer *src_buf =
+ v4l2_m2m_src_buf_remove_by_idx(inst->v4l2_fh.m2m_ctx,
+ enc_output_info.enc_src_idx);
+
+ inst->timestamp = src_buf->vb2_buf.timestamp;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (enc_output_info.recon_frame_index == RECON_IDX_FLAG_ENC_END) {
+ static const struct v4l2_event vpu_event_eos = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->vb2_buf.timestamp = inst->timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_dst_buf_remove_by_buf(inst->v4l2_fh.m2m_ctx, dst_buf);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ inst->state = VPU_INST_STATE_PIC_RUN;
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_eos);
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, inst->v4l2_fh.m2m_ctx);
+ } else {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_output_info.bitstream_size);
+
+ dst_buf->vb2_buf.timestamp = inst->timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
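+ /*
+ * Mark only IDR pictures as keyframes: for HEVC, VCL NAL unit
+ * types 19 (IDR_W_RADL) and 20 (IDR_N_LP) identify IDR
+ * pictures; other intra pictures get the P-frame flag.
+ */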
+ if (enc_output_info.pic_type == PIC_TYPE_I) {
+ if (enc_output_info.enc_vcl_nut == 19 ||
+ enc_output_info.enc_vcl_nut == 20)
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ } else if (enc_output_info.pic_type == PIC_TYPE_P) {
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ } else if (enc_output_info.pic_type == PIC_TYPE_B) {
+ dst_buf->flags |= V4L2_BUF_FLAG_BFRAME;
+ }
+
+ v4l2_m2m_dst_buf_remove_by_buf(inst->v4l2_fh.m2m_ctx, dst_buf);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ dev_dbg(inst->dev->dev, "%s: frame_cycle %8u\n",
+ __func__, enc_output_info.frame_cycle);
+ }
+}
+
+static int wave5_vpu_enc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, VPU_ENC_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VPU_ENC_DRV_NAME, sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:" VPU_ENC_DRV_NAME, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_framesizes(struct file *f, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ const struct vpu_format *vpu_fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = vpu_fmt->min_width;
+ fsize->stepwise.max_width = vpu_fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = vpu_fmt->min_height;
+ fsize->stepwise.max_height = vpu_fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->dst_fmt.width, inst->dst_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = 1;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.hsv_enc = inst->hsv_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+ memset(&f->fmt.pix_mp.reserved, 0, sizeof(f->fmt.pix_mp.reserved));
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_enc_try_fmt_cap(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->dst_fmt.width = f->fmt.pix_mp.width;
+ inst->dst_fmt.height = f->fmt.pix_mp.height;
+ inst->dst_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->dst_fmt.field = f->fmt.pix_mp.field;
+ inst->dst_fmt.flags = f->fmt.pix_mp.flags;
+ inst->dst_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->dst_fmt.num_planes; i++) {
+ inst->dst_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->dst_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->dst_fmt.width;
+ f->fmt.pix_mp.height = inst->dst_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->dst_fmt.field;
+ f->fmt.pix_mp.flags = inst->dst_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->dst_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->dst_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.hsv_enc = inst->hsv_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->src_fmt.width, inst->src_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+ const struct v4l2_format_info *info = v4l2_format_info(vpu_fmt->v4l2_pix_fmt);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = info->mem_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ memset(&f->fmt.pix_mp.reserved, 0, sizeof(f->fmt.pix_mp.reserved));
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_enc_try_fmt_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->src_fmt.width = f->fmt.pix_mp.width;
+ inst->src_fmt.height = f->fmt.pix_mp.height;
+ inst->src_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->src_fmt.field = f->fmt.pix_mp.field;
+ inst->src_fmt.flags = f->fmt.pix_mp.flags;
+ inst->src_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->src_fmt.num_planes; i++) {
+ inst->src_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->src_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ if (inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV12 ||
+ inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV12M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = false;
+ } else if (inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV21 ||
+ inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV21M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = true;
+ } else {
+ inst->cbcr_interleave = false;
+ inst->nv21 = false;
+ }
+
+ inst->colorspace = f->fmt.pix_mp.colorspace;
+ inst->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ inst->hsv_enc = f->fmt.pix_mp.hsv_enc;
+ inst->quantization = f->fmt.pix_mp.quantization;
+ inst->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ wave5_update_pix_fmt(&inst->dst_fmt, f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+ dev_dbg(inst->dev->dev, "%s: V4L2_SEL_TGT_CROP width: %u | height: %u\n",
+ __func__, s->r.width, s->r.height);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ dev_dbg(inst->dev->dev, "%s: V4L2_SEL_TGT_CROP width: %u | height: %u\n",
+ __func__, s->r.width, s->r.height);
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->src_fmt.width;
+ s->r.height = inst->src_fmt.height;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_both_queues_are_streaming(inst))
+ return 0;
+
+ switch (ec->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ inst->state = VPU_INST_STATE_STOP;
+ inst->ops->start_process(inst);
+ break;
+ case V4L2_ENC_CMD_START:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe.numerator = 1;
+ a->parm.output.timeperframe.denominator = inst->frame_rate;
+
+ dev_dbg(inst->dev->dev, "%s: numerator: %u | denominator: %u\n",
+ __func__, a->parm.output.timeperframe.numerator,
+ a->parm.output.timeperframe.denominator);
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
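+ /* timeperframe is the inverse of the frame rate */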
+ if (a->parm.output.timeperframe.denominator && a->parm.output.timeperframe.numerator) {
+ inst->frame_rate = a->parm.output.timeperframe.denominator /
+ a->parm.output.timeperframe.numerator;
+ } else {
+ a->parm.output.timeperframe.numerator = 1;
+ a->parm.output.timeperframe.denominator = inst->frame_rate;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: numerator: %u | denominator: %u\n",
+ __func__, a->parm.output.timeperframe.numerator,
+ a->parm.output.timeperframe.denominator);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops wave5_vpu_enc_ioctl_ops = {
+ .vidioc_querycap = wave5_vpu_enc_querycap,
+ .vidioc_enum_framesizes = wave5_vpu_enc_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = wave5_vpu_enc_enum_fmt_cap,
+ .vidioc_s_fmt_vid_cap_mplane = wave5_vpu_enc_s_fmt_cap,
+ .vidioc_g_fmt_vid_cap_mplane = wave5_vpu_enc_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap_mplane = wave5_vpu_enc_try_fmt_cap,
+
+ .vidioc_enum_fmt_vid_out = wave5_vpu_enc_enum_fmt_out,
+ .vidioc_s_fmt_vid_out_mplane = wave5_vpu_enc_s_fmt_out,
+ .vidioc_g_fmt_vid_out_mplane = wave5_vpu_g_fmt_out,
+ .vidioc_try_fmt_vid_out_mplane = wave5_vpu_enc_try_fmt_out,
+
+ .vidioc_g_selection = wave5_vpu_enc_g_selection,
+ .vidioc_s_selection = wave5_vpu_enc_s_selection,
+
+ .vidioc_g_parm = wave5_vpu_enc_g_parm,
+ .vidioc_s_parm = wave5_vpu_enc_s_parm,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = wave5_vpu_enc_encoder_cmd,
+
+ .vidioc_subscribe_event = wave5_vpu_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int wave5_vpu_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_instance *inst = wave5_ctrl_to_vpu_inst(ctrl);
+
+ dev_dbg(inst->dev->dev, "%s: name: %s | value: %d\n", __func__, ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
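+ /* bit 1 of mirror_direction requests horizontal, bit 0 vertical mirroring */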
+ case V4L2_CID_HFLIP:
+ inst->mirror_direction |= (ctrl->val << 1);
+ break;
+ case V4L2_CID_VFLIP:
+ inst->mirror_direction |= ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ inst->rot_angle = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ inst->vbv_buf_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
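+ /* map the V4L2 bitrate mode onto the firmware rc_mode: 0 = VBR, 1 = CBR */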
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+ inst->rc_mode = 0;
+ break;
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+ inst->rc_mode = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ inst->bit_rate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ inst->enc_param.avc_idr_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ inst->enc_param.independ_slice_mode = ctrl->val;
+ inst->enc_param.avc_slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ inst->enc_param.independ_slice_mode_arg = ctrl->val;
+ inst->enc_param.avc_slice_arg = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ inst->rc_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ inst->enc_param.mb_level_rc_enable = ctrl->val;
+ inst->enc_param.cu_level_rc_enable = ctrl->val;
+ inst->enc_param.hvs_qp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ inst->enc_param.profile = HEVC_PROFILE_MAIN;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ inst->enc_param.profile = HEVC_PROFILE_STILLPICTURE;
+ inst->enc_param.en_still_picture = 1;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ inst->enc_param.profile = HEVC_PROFILE_MAIN10;
+ inst->bit_depth = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
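+ /*
+ * The firmware takes the HEVC general_level_idc, which is 30x
+ * the level number, hence the "* 3" on the 10x level values.
+ */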
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ inst->enc_param.level = 10 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ inst->enc_param.level = 20 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ inst->enc_param.level = 21 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ inst->enc_param.level = 30 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ inst->enc_param.level = 31 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ inst->enc_param.level = 40 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ inst->enc_param.level = 41 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ inst->enc_param.level = 50 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ inst->enc_param.level = 51 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ inst->enc_param.level = 52 * 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ inst->enc_param.min_qp_i = ctrl->val;
+ inst->enc_param.min_qp_p = ctrl->val;
+ inst->enc_param.min_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ inst->enc_param.max_qp_i = ctrl->val;
+ inst->enc_param.max_qp_p = ctrl->val;
+ inst->enc_param.max_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ inst->enc_param.intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED:
+ inst->enc_param.disable_deblk = 1;
+ inst->enc_param.sao_enable = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.sao_enable = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.sao_enable = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2:
+ inst->enc_param.beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2:
+ inst->enc_param.tc_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE:
+ inst->enc_param.decoding_refresh_type = 0;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA:
+ inst->enc_param.decoding_refresh_type = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR:
+ inst->enc_param.decoding_refresh_type = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD:
+ inst->enc_param.intra_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU:
+ inst->enc_param.lossless_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED:
+ inst->enc_param.const_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT:
+ inst->enc_param.wpp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING:
+ inst->enc_param.strong_intra_smooth_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1:
+ inst->enc_param.max_num_merge = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION:
+ inst->enc_param.tmvp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ inst->enc_param.profile = H264_PROFILE_BP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ inst->enc_param.profile = H264_PROFILE_MP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ inst->enc_param.profile = H264_PROFILE_EXTENDED;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ inst->enc_param.profile = H264_PROFILE_HP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+ inst->enc_param.profile = H264_PROFILE_HIGH10;
+ inst->bit_depth = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+ inst->enc_param.profile = H264_PROFILE_HIGH422;
+ inst->bit_depth = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+ inst->enc_param.profile = H264_PROFILE_HIGH444;
+ inst->bit_depth = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
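+ /*
+ * The firmware takes the H.264 level_idc: 10x the level number,
+ * with level 1b encoded as the special value 9.
+ */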
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ inst->enc_param.level = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ inst->enc_param.level = 9;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ inst->enc_param.level = 11;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ inst->enc_param.level = 12;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ inst->enc_param.level = 13;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ inst->enc_param.level = 20;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ inst->enc_param.level = 21;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ inst->enc_param.level = 22;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ inst->enc_param.level = 30;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ inst->enc_param.level = 31;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ inst->enc_param.level = 32;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ inst->enc_param.level = 40;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ inst->enc_param.level = 41;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ inst->enc_param.level = 42;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ inst->enc_param.level = 50;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ inst->enc_param.level = 51;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ inst->enc_param.min_qp_i = ctrl->val;
+ inst->enc_param.min_qp_p = ctrl->val;
+ inst->enc_param.min_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ inst->enc_param.max_qp_i = ctrl->val;
+ inst->enc_param.max_qp_p = ctrl->val;
+ inst->enc_param.max_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ inst->enc_param.intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
+ inst->enc_param.disable_deblk = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ inst->enc_param.beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ inst->enc_param.tc_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ inst->enc_param.transform8x8_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ inst->enc_param.const_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
+ inst->enc_param.chroma_cb_qp_offset = ctrl->val;
+ inst->enc_param.chroma_cr_qp_offset = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ inst->enc_param.intra_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ inst->enc_param.entropy_coding_mode = ctrl->val;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops wave5_vpu_enc_ctrl_ops = {
+ .s_ctrl = wave5_vpu_enc_s_ctrl,
+};
+
+static void wave5_set_enc_openparam(struct enc_open_param *open_param,
+ struct vpu_instance *inst)
+{
+ struct enc_wave_param input = inst->enc_param;
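+ /*
+ * HEVC encodes in 64x64 CTUs and H.264 in 16x16 macroblocks; the
+ * row counts scale the intra refresh arguments further down.
+ */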
+ u32 num_ctu_row = ALIGN(inst->dst_fmt.height, 64) / 64;
+ u32 num_mb_row = ALIGN(inst->dst_fmt.height, 16) / 16;
+
+ open_param->wave_param.gop_preset_idx = PRESET_IDX_IPP_SINGLE;
+ open_param->wave_param.hvs_qp_scale = 2;
+ open_param->wave_param.hvs_max_delta_qp = 10;
+ open_param->wave_param.skip_intra_trans = 1;
+ open_param->wave_param.intra_nx_n_enable = 1;
+ open_param->wave_param.nr_intra_weight_y = 7;
+ open_param->wave_param.nr_intra_weight_cb = 7;
+ open_param->wave_param.nr_intra_weight_cr = 7;
+ open_param->wave_param.nr_inter_weight_y = 4;
+ open_param->wave_param.nr_inter_weight_cb = 4;
+ open_param->wave_param.nr_inter_weight_cr = 4;
+ open_param->wave_param.rdo_skip = 1;
+ open_param->wave_param.lambda_scaling_enable = 1;
+
+ open_param->stream_endian = VPU_STREAM_ENDIAN;
+ open_param->source_endian = VPU_SOURCE_ENDIAN;
+ open_param->line_buf_int_en = true;
+ open_param->pic_width = inst->dst_fmt.width;
+ open_param->pic_height = inst->dst_fmt.height;
+ open_param->frame_rate_info = inst->frame_rate;
+ open_param->rc_enable = inst->rc_enable;
+ if (inst->rc_enable) {
+ open_param->wave_param.initial_rc_qp = -1;
+ open_param->wave_param.rc_weight_param = 16;
+ open_param->wave_param.rc_weight_buf = 128;
+ }
+ open_param->wave_param.mb_level_rc_enable = input.mb_level_rc_enable;
+ open_param->wave_param.cu_level_rc_enable = input.cu_level_rc_enable;
+ open_param->wave_param.hvs_qp_enable = input.hvs_qp_enable;
+ open_param->bit_rate = inst->bit_rate;
+ open_param->vbv_buffer_size = inst->vbv_buf_size;
+ if (inst->rc_mode == 0)
+ open_param->vbv_buffer_size = 3000;
+ open_param->wave_param.profile = input.profile;
+ open_param->wave_param.en_still_picture = input.en_still_picture;
+ open_param->wave_param.level = input.level;
+ open_param->wave_param.internal_bit_depth = inst->bit_depth;
+ open_param->wave_param.intra_qp = input.intra_qp;
+ open_param->wave_param.min_qp_i = input.min_qp_i;
+ open_param->wave_param.max_qp_i = input.max_qp_i;
+ open_param->wave_param.min_qp_p = input.min_qp_p;
+ open_param->wave_param.max_qp_p = input.max_qp_p;
+ open_param->wave_param.min_qp_b = input.min_qp_b;
+ open_param->wave_param.max_qp_b = input.max_qp_b;
+ open_param->wave_param.disable_deblk = input.disable_deblk;
+ open_param->wave_param.lf_cross_slice_boundary_enable =
+ input.lf_cross_slice_boundary_enable;
+ open_param->wave_param.tc_offset_div2 = input.tc_offset_div2;
+ open_param->wave_param.beta_offset_div2 = input.beta_offset_div2;
+ open_param->wave_param.decoding_refresh_type = input.decoding_refresh_type;
+ open_param->wave_param.intra_period = input.intra_period;
+ if (inst->std == W_HEVC_ENC) {
+ if (input.intra_period == 0) {
+ open_param->wave_param.decoding_refresh_type = DEC_REFRESH_TYPE_IDR;
+ open_param->wave_param.intra_period = input.avc_idr_period;
+ }
+ } else {
+ open_param->wave_param.avc_idr_period = input.avc_idr_period;
+ }
+ open_param->wave_param.entropy_coding_mode = input.entropy_coding_mode;
+ open_param->wave_param.lossless_enable = input.lossless_enable;
+ open_param->wave_param.const_intra_pred_flag = input.const_intra_pred_flag;
+ open_param->wave_param.wpp_enable = input.wpp_enable;
+ open_param->wave_param.strong_intra_smooth_enable = input.strong_intra_smooth_enable;
+ open_param->wave_param.max_num_merge = input.max_num_merge;
+ open_param->wave_param.tmvp_enable = input.tmvp_enable;
+ open_param->wave_param.transform8x8_enable = input.transform8x8_enable;
+ open_param->wave_param.chroma_cb_qp_offset = input.chroma_cb_qp_offset;
+ open_param->wave_param.chroma_cr_qp_offset = input.chroma_cr_qp_offset;
+ open_param->wave_param.independ_slice_mode = input.independ_slice_mode;
+ open_param->wave_param.independ_slice_mode_arg = input.independ_slice_mode_arg;
+ open_param->wave_param.avc_slice_mode = input.avc_slice_mode;
+ open_param->wave_param.avc_slice_arg = input.avc_slice_arg;
+ open_param->wave_param.intra_mb_refresh_mode = input.intra_mb_refresh_mode;
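+ /*
+ * Convert the refresh arguments from a period to rows per picture
+ * (MB rows for H.264, CTU rows for HEVC), refreshing all rows per
+ * picture when the argument exceeds the row count.
+ */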
+ if (input.intra_mb_refresh_mode != REFRESH_MB_MODE_NONE) {
+ if (num_mb_row >= input.intra_mb_refresh_arg)
+ open_param->wave_param.intra_mb_refresh_arg =
+ num_mb_row / input.intra_mb_refresh_arg;
+ else
+ open_param->wave_param.intra_mb_refresh_arg = num_mb_row;
+ }
+ open_param->wave_param.intra_refresh_mode = input.intra_refresh_mode;
+ if (input.intra_refresh_mode != 0) {
+ if (num_ctu_row >= input.intra_refresh_arg)
+ open_param->wave_param.intra_refresh_arg =
+ num_ctu_row / input.intra_refresh_arg;
+ else
+ open_param->wave_param.intra_refresh_arg = num_ctu_row;
+ }
+}
+
+static int wave5_vpu_enc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane inst_format =
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(inst->dev->dev, "%s: num_buffers: %u | num_planes: %u | type: %u\n", __func__,
+ *num_buffers, *num_planes, q->type);
+
+ if (*num_planes) {
+ if (inst_format.num_planes != *num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++) {
+ if (sizes[i] < inst_format.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *num_planes = inst_format.num_planes;
+ for (i = 0; i < *num_planes; i++) {
+ sizes[i] = inst_format.plane_fmt[i].sizeimage;
+ dev_dbg(inst->dev->dev, "%s: size[%u]: %u\n", __func__, i, sizes[i]);
+ }
+ }
+
+ dev_dbg(inst->dev->dev, "%s: size: %u\n", __func__, sizes[0]);
+
+ if (inst->state == VPU_INST_STATE_NONE && q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ unsigned int non_linear_num = 0;
+ u32 fb_stride = 0;
+ u32 fb_height = 0;
+ struct enc_open_param open_param;
+ struct enc_initial_info initial_info;
+ struct v4l2_ctrl *ctrl;
+
+ memset(&open_param, 0, sizeof(struct enc_open_param));
+
+ inst->std = wave5_to_vpu_wavestd(inst->dst_fmt.pixelformat);
+ if (inst->std == STD_UNKNOWN) {
+ dev_warn(inst->dev->dev, "unsupported pixelformat: %.4s\n",
+ (char *)&inst->dst_fmt.pixelformat);
+ return -EINVAL;
+ }
+
+ wave5_set_enc_openparam(&open_param, inst);
+
+ ret = wave5_vpu_enc_open(inst, &open_param);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_open, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ inst->state = VPU_INST_STATE_OPEN;
+
+ if (inst->mirror_direction) {
+ wave5_vpu_enc_give_command(inst, ENABLE_MIRRORING, NULL);
+ wave5_vpu_enc_give_command(inst, SET_MIRROR_DIRECTION,
+ &inst->mirror_direction);
+ }
+ if (inst->rot_angle) {
+ wave5_vpu_enc_give_command(inst, ENABLE_ROTATION, NULL);
+ wave5_vpu_enc_give_command(inst, SET_ROTATION_ANGLE, &inst->rot_angle);
+ }
+
+ ret = wave5_vpu_enc_issue_seq_init(inst);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_issue_seq_init, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_ENC_TIMEOUT) < 0) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_wait_interrupt failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = wave5_vpu_enc_complete_seq_init(inst, &initial_info);
+ if (ret)
+ return ret;
+
+ dev_dbg(inst->dev->dev, "%s: min_frame_buffer: %u | min_source_buffer: %u\n",
+ __func__, initial_info.min_frame_buffer_count,
+ initial_info.min_src_frame_count);
+ inst->state = VPU_INST_STATE_INIT_SEQ;
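+ /*
+ * Ask for enough source buffers to keep the firmware command
+ * queue fully populated on top of the firmware's own minimum.
+ */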
+ inst->min_src_buf_count = initial_info.min_src_frame_count +
+ COMMAND_QUEUE_DEPTH;
+
+ ctrl = v4l2_ctrl_find(&inst->v4l2_ctrl_hdl,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->min_src_buf_count);
+
+ inst->min_dst_buf_count = initial_info.min_frame_buffer_count;
+ inst->src_buf_count = inst->min_src_buf_count;
+
+ if (*num_buffers > inst->src_buf_count)
+ inst->src_buf_count = *num_buffers;
+
+ *num_buffers = inst->src_buf_count;
+ non_linear_num = inst->min_dst_buf_count;
+
+ fb_stride = ALIGN(inst->dst_fmt.width, 32);
+ fb_height = ALIGN(inst->dst_fmt.height, 32);
+
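+ /*
+ * Allocate the compressed (FBC) reference frame buffers: luma is
+ * stride x height, chroma half the stride aligned to 16 bytes.
+ */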
+ for (i = 0; i < non_linear_num; i++) {
+ u32 luma_size = fb_stride * fb_height;
+ u32 chroma_size = ALIGN(fb_stride / 2, 16) * fb_height;
+
+ inst->frame_vbuf[i].size = luma_size + chroma_size;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &inst->frame_vbuf[i]);
+ if (ret < 0) {
+ dev_dbg(inst->dev->dev, "%s: failed to allocate FBC buffer %zu\n",
+ __func__, inst->frame_vbuf[i].size);
+ goto free_buffers;
+ }
+
+ inst->frame_buf[i].buf_y = inst->frame_vbuf[i].daddr;
+ inst->frame_buf[i].buf_cb = (dma_addr_t)-1;
+ inst->frame_buf[i].buf_cr = (dma_addr_t)-1;
+ inst->frame_buf[i].update_fb_info = true;
+ inst->frame_buf[i].size = inst->frame_vbuf[i].size;
+ }
+
+ ret = wave5_vpu_enc_register_frame_buffer(inst, non_linear_num, fb_stride,
+ fb_height, COMPRESSED_FRAME_MAP);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: wave5_vpu_enc_register_frame_buffer, fail: %d\n",
+ __func__, ret);
+ goto free_buffers;
+ }
+
+ inst->state = VPU_INST_STATE_PIC_RUN;
+ }
+
+ if (inst->state == VPU_INST_STATE_INIT_SEQ &&
+ q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ *num_buffers = inst->src_buf_count;
+ dev_dbg(inst->dev->dev, "%s: src buf num: %u", __func__, *num_buffers);
+ }
+
+ return 0;
+
+free_buffers:
+ for (i = 0; i < inst->min_dst_buf_count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[i]);
+ return ret;
+}
+
+static void wave5_vpu_enc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpu_buffer *vpu_buf = wave5_to_vpu_buf(vbuf);
+
+ dev_dbg(inst->dev->dev, "%s: type: %4u index: %4u size: ([0]=%4lu, [1]=%4lu, [2]=%4lu)\n",
+ __func__, vb->type, vb->index, vb2_plane_size(&vbuf->vb2_buf, 0),
+ vb2_plane_size(&vbuf->vb2_buf, 1), vb2_plane_size(&vbuf->vb2_buf, 2));
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf->sequence = inst->queued_src_buf_num++;
+ else
+ vbuf->sequence = inst->queued_dst_buf_num++;
+
+ vpu_buf->consumed = false;
+ v4l2_m2m_buf_queue(inst->v4l2_fh.m2m_ctx, vbuf);
+
+ if (vb2_start_streaming_called(vb->vb2_queue))
+ inst->ops->start_process(inst);
+}
+
+static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *buf;
+ bool check_cmd = true;
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
+ if (wave5_vpu_both_queues_are_streaming(inst))
+ inst->state = VPU_INST_STATE_STOP;
+
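+ /*
+ * Drain every command still queued in the firmware before
+ * returning the buffers, so no results are reported afterwards.
+ */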
+ while (check_cmd) {
+ struct queue_status_info q_status;
+ struct enc_output_info enc_output_info;
+
+ wave5_vpu_enc_give_command(inst, ENC_GET_QUEUE_STATUS, &q_status);
+
+ if (q_status.instance_queue_count + q_status.report_queue_count == 0)
+ break;
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_ENC_TIMEOUT) < 0)
+ break;
+
+ if (wave5_vpu_enc_get_output_info(inst, &enc_output_info))
+ dev_dbg(inst->dev->dev, "Getting encoding results from fw, fail\n");
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(inst->v4l2_fh.m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->v4l2_fh.m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ vb2_set_plane_payload(&buf->vb2_buf, 0, 0);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+ }
+}
+
+static const struct vb2_ops wave5_vpu_enc_vb2_ops = {
+ .queue_setup = wave5_vpu_enc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_queue = wave5_vpu_enc_buf_queue,
+ .stop_streaming = wave5_vpu_enc_stop_streaming,
+};
+
+static void wave5_set_default_format(struct v4l2_pix_format_mplane *src_fmt,
+ struct v4l2_pix_format_mplane *dst_fmt)
+{
+ unsigned int src_pix_fmt = enc_fmt_list[VPU_FMT_TYPE_RAW][0].v4l2_pix_fmt;
+ const struct v4l2_format_info *src_fmt_info = v4l2_format_info(src_pix_fmt);
+
+ src_fmt->pixelformat = src_pix_fmt;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->flags = 0;
+ src_fmt->num_planes = src_fmt_info->mem_planes;
+ wave5_update_pix_fmt(src_fmt, 416, 240);
+
+ dst_fmt->pixelformat = enc_fmt_list[VPU_FMT_TYPE_CODEC][0].v4l2_pix_fmt;
+ dst_fmt->field = V4L2_FIELD_NONE;
+ dst_fmt->flags = 0;
+ dst_fmt->num_planes = 1;
+ wave5_update_pix_fmt(dst_fmt, 416, 240);
+}
+
+static int wave5_vpu_enc_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ return wave5_vpu_queue_init(priv, src_vq, dst_vq, &wave5_vpu_enc_vb2_ops);
+}
+
+static const struct vpu_instance_ops wave5_vpu_enc_inst_ops = {
+ .start_process = wave5_vpu_enc_start_encode,
+ .stop_process = wave5_vpu_enc_stop_encode,
+ .finish_process = wave5_vpu_enc_finish_encode,
+};
+
+static void wave5_vpu_enc_device_run(void *priv)
+{
+ struct vpu_instance *inst = priv;
+
+ inst->ops->start_process(inst);
+}
+
+static int wave5_vpu_enc_job_ready(void *priv)
+{
+ struct vpu_instance *inst = priv;
+
+ if (inst->state == VPU_INST_STATE_STOP)
+ return 0;
+
+ return 1;
+}
+
+static void wave5_vpu_enc_job_abort(void *priv)
+{
+ struct vpu_instance *inst = priv;
+
+ inst->ops->stop_process(inst);
+}
+
+static const struct v4l2_m2m_ops wave5_vpu_enc_m2m_ops = {
+ .device_run = wave5_vpu_enc_device_run,
+ .job_ready = wave5_vpu_enc_job_ready,
+ .job_abort = wave5_vpu_enc_job_abort,
+};
+
+static int wave5_vpu_open_enc(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct vpu_device *dev = video_drvdata(filp);
+ struct vpu_instance *inst = NULL;
+ struct v4l2_ctrl_handler *v4l2_ctrl_hdl;
+ int ret = 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ v4l2_ctrl_hdl = &inst->v4l2_ctrl_hdl;
+
+ inst->dev = dev;
+ inst->type = VPU_INST_TYPE_ENC;
+ inst->ops = &wave5_vpu_enc_inst_ops;
+
+ v4l2_fh_init(&inst->v4l2_fh, vdev);
+ filp->private_data = &inst->v4l2_fh;
+ v4l2_fh_add(&inst->v4l2_fh);
+
+ INIT_LIST_HEAD(&inst->list);
+ list_add_tail(&inst->list, &dev->instances);
+
+ inst->v4l2_m2m_dev = v4l2_m2m_init(&wave5_vpu_enc_m2m_ops);
+ if (IS_ERR(inst->v4l2_m2m_dev)) {
+ ret = PTR_ERR(inst->v4l2_m2m_dev);
+ dev_err(inst->dev->dev, "v4l2_m2m_init, fail: %d\n", ret);
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.m2m_ctx =
+ v4l2_m2m_ctx_init(inst->v4l2_m2m_dev, inst, wave5_vpu_enc_queue_init);
+ if (IS_ERR(inst->v4l2_fh.m2m_ctx)) {
+ ret = PTR_ERR(inst->v4l2_fh.m2m_ctx);
+ goto cleanup_inst;
+ }
+
+ v4l2_ctrl_handler_init(v4l2_ctrl_hdl, 50);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10, 0,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 0,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ 0, 63, 1, 8);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
+ 0, 63, 1, 51);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
+ 0, 63, 1, 30);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR, 0,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1,
+ 1, 2, 1, 2);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION,
+ 0, 1, 1, 1);
+
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE, 0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 63, 1, 8);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 63, 1, 51);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ 0, 63, 1, 30);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET,
+ -12, 12, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+ 10, 3000, 1, 3000);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 0, 700000000, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB, 0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ 0, 0xFFFF, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 1);
+
+ if (v4l2_ctrl_hdl->error) {
+ ret = -ENODEV;
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.ctrl_handler = v4l2_ctrl_hdl;
+ v4l2_ctrl_handler_setup(v4l2_ctrl_hdl);
+
+ wave5_set_default_format(&inst->src_fmt, &inst->dst_fmt);
+ inst->colorspace = V4L2_COLORSPACE_REC709;
+ inst->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ inst->hsv_enc = 0;
+ inst->quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ inst->frame_rate = 30;
+
+ init_completion(&inst->irq_done);
+
+ if (inst->dev->irq < 0) {
+ ret = mutex_lock_interruptible(&inst->dev->dev_lock);
+ if (ret)
+ goto cleanup_inst;
+
+ if (!hrtimer_active(&inst->dev->hrtimer))
+ hrtimer_start(&inst->dev->hrtimer, ns_to_ktime(0), HRTIMER_MODE_REL_PINNED);
+
+ mutex_unlock(&inst->dev->dev_lock);
+ }
+
+ ret = kfifo_alloc(&inst->irq_status, 16 * sizeof(int), GFP_KERNEL);
+ if (ret) {
+ dev_err(inst->dev->dev, "Allocating fifo, fail: %d\n", ret);
+ goto cleanup_inst;
+ }
+
+ inst->id = ida_alloc(&inst->dev->inst_ida, GFP_KERNEL);
+ if (inst->id < 0) {
+ dev_warn(inst->dev->dev, "Allocating instance ID, fail: %d\n", inst->id);
+ ret = inst->id;
+ goto cleanup_inst;
+ }
+
+ return 0;
+
+cleanup_inst:
+ wave5_cleanup_instance(inst);
+ return ret;
+}
+
+static int wave5_vpu_enc_release(struct file *filp)
+{
+ return wave5_vpu_release_device(filp, wave5_vpu_enc_close, "encoder");
+}
+
+static const struct v4l2_file_operations wave5_vpu_enc_fops = {
+ .owner = THIS_MODULE,
+ .open = wave5_vpu_open_enc,
+ .release = wave5_vpu_enc_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+int wave5_vpu_enc_register_device(struct vpu_device *dev)
+{
+ struct video_device *vdev_enc;
+ int ret;
+
+ vdev_enc = devm_kzalloc(dev->v4l2_dev.dev, sizeof(*vdev_enc), GFP_KERNEL);
+ if (!vdev_enc)
+ return -ENOMEM;
+
+ dev->video_dev_enc = vdev_enc;
+
+ strscpy(vdev_enc->name, VPU_ENC_DEV_NAME, sizeof(vdev_enc->name));
+ vdev_enc->fops = &wave5_vpu_enc_fops;
+ vdev_enc->ioctl_ops = &wave5_vpu_enc_ioctl_ops;
+ vdev_enc->release = video_device_release_empty;
+ vdev_enc->v4l2_dev = &dev->v4l2_dev;
+ vdev_enc->vfl_dir = VFL_DIR_M2M;
+ vdev_enc->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vdev_enc->lock = &dev->dev_lock;
+
+ ret = video_register_device(vdev_enc, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ video_set_drvdata(vdev_enc, dev);
+
+ return 0;
+}
+
+void wave5_vpu_enc_unregister_device(struct vpu_device *dev)
+{
+ video_unregister_device(dev->video_dev_enc);
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
new file mode 100644
index 000000000000..11388a6ad856
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - platform driver
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include "wave5-vpu.h"
+#include "wave5-regdefine.h"
+#include "wave5-vpuconfig.h"
+#include "wave5.h"
+
+#define VPU_PLATFORM_DEVICE_NAME "vdec"
+#define VPU_CLK_NAME "vcodec"
+
+#define WAVE5_IS_ENC BIT(0)
+#define WAVE5_IS_DEC BIT(1)
+
+struct wave5_match_data {
+ int flags;
+ const char *fw_name;
+};
+
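+/* polling period in ms for the hrtimer-based interrupt emulation */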
+static int vpu_poll_interval = 5;
+module_param(vpu_poll_interval, int, 0644);
+
+int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&inst->irq_done,
+ msecs_to_jiffies(timeout));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ reinit_completion(&inst->irq_done);
+
+ return 0;
+}
+
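+/*
+ * Demultiplex the global interrupt status: acknowledge this instance's
+ * done bits in the shared registers and queue the reason into the
+ * instance's kfifo for the threaded handler to consume.
+ */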
+static void wave5_vpu_get_interrupt_for_inst(struct vpu_instance *inst, u32 status)
+{
+ struct vpu_device *dev = inst->dev;
+ u32 seq_done;
+ u32 cmd_done;
+ int val;
+
+ seq_done = wave5_vdi_readl(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+ cmd_done = wave5_vdi_readl(dev, W5_RET_QUEUE_CMD_DONE_INST);
+
+ if (status & BIT(INT_WAVE5_INIT_SEQ)) {
+ if (seq_done & BIT(inst->id)) {
+ seq_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO, seq_done);
+ val = BIT(INT_WAVE5_INIT_SEQ);
+ kfifo_in(&inst->irq_status, &val, sizeof(int));
+ }
+ }
+ if (status & BIT(INT_WAVE5_ENC_SET_PARAM)) {
+ if (seq_done & BIT(inst->id)) {
+ seq_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO, seq_done);
+ val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ kfifo_in(&inst->irq_status, &val, sizeof(int));
+ }
+ }
+ if (status & BIT(INT_WAVE5_DEC_PIC) ||
+ status & BIT(INT_WAVE5_ENC_PIC)) {
+ if (cmd_done & BIT(inst->id)) {
+ cmd_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST, cmd_done);
+ val = BIT(INT_WAVE5_DEC_PIC);
+ kfifo_in(&inst->irq_status, &val, sizeof(int));
+ }
+ }
+}
+
+static irqreturn_t wave5_vpu_irq(int irq, void *dev_id)
+{
+ struct vpu_device *dev = dev_id;
+
+ if (wave5_vdi_readl(dev, W5_VPU_VPU_INT_STS)) {
+ struct vpu_instance *inst;
+ u32 irq_status = wave5_vdi_readl(dev, W5_VPU_VINT_REASON);
+
+ list_for_each_entry(inst, &dev->instances, list) {
+ wave5_vpu_get_interrupt_for_inst(inst, irq_status);
+ }
+
+ wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_status);
+ wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
+{
+ struct vpu_device *dev = dev_id;
+ struct vpu_instance *inst;
+ int irq_status, ret;
+ u32 val;
+
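+ /*
+ * When an m2m job is running for the instance, let finish_process
+ * consume the result; otherwise clear the user reason bits and
+ * wake up whoever waits in wave5_vpu_wait_interrupt().
+ */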
+ list_for_each_entry(inst, &dev->instances, list) {
+ while (kfifo_len(&inst->irq_status)) {
+ struct vpu_instance *curr;
+
+ curr = v4l2_m2m_get_curr_priv(inst->v4l2_m2m_dev);
+ if (curr) {
+ inst->ops->finish_process(inst);
+ } else {
+ ret = kfifo_out(&inst->irq_status, &irq_status, sizeof(int));
+ if (!ret)
+ break;
+
+ val = wave5_vdi_readl(dev, W5_VPU_VINT_REASON_USR);
+ val &= ~irq_status;
+ wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_USR, val);
+ complete(&inst->irq_done);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void wave5_vpu_irq_work_fn(struct kthread_work *work)
+{
+ struct vpu_device *dev = container_of(work, struct vpu_device, work);
+ struct vpu_instance *inst;
+ int irq_status, ret;
+ u32 val;
+
+ list_for_each_entry(inst, &dev->instances, list) {
+ while (kfifo_len(&inst->irq_status)) {
+ struct vpu_instance *curr;
+
+ curr = v4l2_m2m_get_curr_priv(inst->v4l2_m2m_dev);
+ if (curr) {
+ inst->ops->finish_process(inst);
+ } else {
+ ret = kfifo_out(&inst->irq_status, &irq_status, sizeof(int));
+ if (!ret)
+ break;
+
+ val = wave5_vdi_readl(dev, W5_VPU_VINT_REASON_USR);
+ val &= ~irq_status;
+ wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_USR, val);
+ complete(&inst->irq_done);
+ }
+ }
+ }
+}
+
+static enum hrtimer_restart wave5_vpu_timer_callback(struct hrtimer *timer)
+{
+ irqreturn_t ret;
+ struct vpu_device *dev =
+ container_of(timer, struct vpu_device, hrtimer);
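+ /*
+ * No interrupt line is available: poll the VPU status registers
+ * and re-arm the timer every vpu_poll_interval milliseconds.
+ */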
+ ret = wave5_vpu_irq(0, dev);
+
+ if (ret == IRQ_WAKE_THREAD)
+ kthread_queue_work(dev->worker, &dev->work);
+
+ hrtimer_forward_now(timer, ns_to_ktime(vpu_poll_interval * NSEC_PER_MSEC));
+
+ return HRTIMER_RESTART;
+}
+
+static int wave5_vpu_load_firmware(struct device *dev, const char *fw_name)
+{
+ const struct firmware *fw;
+ int ret;
+ u32 revision;
+ unsigned int product_id;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "request_firmware, fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = wave5_vpu_init_with_bitcode(dev, (u8 *)fw->data, fw->size);
+ if (ret) {
+ dev_err(dev, "vpu_init_with_bitcode, fail: %d\n", ret);
+ goto release_fw;
+ }
+ release_firmware(fw);
+
+ ret = wave5_vpu_get_version_info(dev, &revision, &product_id);
+ if (ret) {
+ dev_err(dev, "vpu_get_version_info fail: %d\n", ret);
+ goto err_without_release;
+ }
+
+ dev_dbg(dev, "%s: enum product_id: %08x, fw revision: %u\n",
+ __func__, product_id, revision);
+
+ return 0;
+
+release_fw:
+ release_firmware(fw);
+err_without_release:
+ return ret;
+}
+
+static int wave5_vpu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct vpu_device *dev;
+ struct device_node *np;
+ const struct wave5_match_data *match_data;
+ struct resource sram;
+
+ match_data = device_get_match_data(&pdev->dev);
+ if (!match_data) {
+ dev_err(&pdev->dev, "missing device match data\n");
+ return -EINVAL;
+ }
+
+ /* physical addresses limited to 48 bits */
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(48));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48));
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdb_register = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->vdb_register))
+ return PTR_ERR(dev->vdb_register);
+ ida_init(&dev->inst_ida);
+
+ mutex_init(&dev->dev_lock);
+ mutex_init(&dev->hw_lock);
+ dev_set_drvdata(&pdev->dev, dev);
+ dev->dev = &pdev->dev;
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dev->clks);
+
+ /* continue without clock, assume externally managed */
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Getting clocks, fail: %d\n", ret);
+ ret = 0;
+ }
+ dev->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
+ if (ret) {
+ dev_err(&pdev->dev, "Enabling clocks, fail: %d\n", ret);
+ return ret;
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "sram", 0);
+ if (!np) {
+ dev_warn(&pdev->dev, "sram node not found\n");
+ } else {
+ ret = of_address_to_resource(np, 0, &sram);
+ if (ret) {
+ dev_err(&pdev->dev, "sram resource not available\n");
+ goto err_put_node;
+ }
+ dev->sram_buf.daddr = sram.start;
+ dev->sram_buf.size = resource_size(&sram);
+ dev_dbg(&pdev->dev, "%s: sram daddr: %pad, size: 0x%lx\n",
+ __func__, &dev->sram_buf.daddr, dev->sram_buf.size);
+ }
+
+ dev->product_code = wave5_vdi_readl(dev, VPU_PRODUCT_CODE_REGISTER);
+ ret = wave5_vdi_init(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "wave5_vdi_init, fail: %d\n", ret);
+ goto err_clk_dis;
+ }
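+ /* store bits [47:32] of the common memory address for the VPU's extended addressing */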
+ dev->ext_addr = ((dev->common_mem.daddr >> 32) & 0xFFFF);
+ dev->product = wave5_vpu_get_product_id(dev);
+
+ INIT_LIST_HEAD(&dev->instances);
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "v4l2_device_register, fail: %d\n", ret);
+ goto err_vdi_release;
+ }
+
+ if (match_data->flags & WAVE5_IS_DEC) {
+ ret = wave5_vpu_dec_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_dec_register_device, fail: %d\n", ret);
+ goto err_v4l2_unregister;
+ }
+ }
+ if (match_data->flags & WAVE5_IS_ENC) {
+ ret = wave5_vpu_enc_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_enc_register_device, fail: %d\n", ret);
+ goto err_dec_unreg;
+ }
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ ret = mutex_lock_interruptible(&dev->dev_lock);
+ if (ret)
+ goto err_enc_unreg;
+
+ hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ dev->hrtimer.function = &wave5_vpu_timer_callback;
+ dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+ if (IS_ERR(dev->worker)) {
+ dev_err(&pdev->dev, "failed to create vpu irq worker\n");
+ mutex_unlock(&dev->dev_lock);
+ ret = PTR_ERR(dev->worker);
+ goto err_enc_unreg;
+ }
+
+ kthread_init_work(&dev->work, wave5_vpu_irq_work_fn);
+ mutex_unlock(&dev->dev_lock);
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, dev->irq, wave5_vpu_irq,
+ wave5_vpu_irq_thread, 0, "vpu_irq", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
+ goto err_enc_unreg;
+ }
+ }
+
+ ret = wave5_vpu_load_firmware(&pdev->dev, match_data->fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_load_firmware, fail: %d\n", ret);
+ goto err_enc_unreg;
+ }
+
+ dev_dbg(&pdev->dev, "Added wave5 driver with caps: %s %s and product code: 0x%x\n",
+ (match_data->flags & WAVE5_IS_ENC) ? "'ENCODE'" : "",
+ (match_data->flags & WAVE5_IS_DEC) ? "'DECODE'" : "",
+ dev->product_code);
+ return 0;
+
+err_enc_unreg:
+ if (match_data->flags & WAVE5_IS_ENC)
+ wave5_vpu_enc_unregister_device(dev);
+err_dec_unreg:
+ if (match_data->flags & WAVE5_IS_DEC)
+ wave5_vpu_dec_unregister_device(dev);
+err_v4l2_unregister:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_vdi_release:
+ wave5_vdi_release(&pdev->dev);
+err_clk_dis:
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+err_put_node:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int wave5_vpu_remove(struct platform_device *pdev)
+{
+ struct vpu_device *dev = dev_get_drvdata(&pdev->dev);
+
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+ wave5_vpu_enc_unregister_device(dev);
+ wave5_vpu_dec_unregister_device(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ wave5_vdi_release(&pdev->dev);
+ ida_destroy(&dev->inst_ida);
+
+ if (dev->irq < 0) {
+ kthread_destroy_worker(dev->worker);
+ hrtimer_cancel(&dev->hrtimer);
+ }
+
+ return 0;
+}
+
+static const struct wave5_match_data wave511_data = {
+ .flags = WAVE5_IS_DEC,
+ .fw_name = "wave511_dec_fw.bin",
+};
+
+static const struct wave5_match_data wave521_data = {
+ .flags = WAVE5_IS_ENC,
+ .fw_name = "wave521_enc_fw.bin",
+};
+
+static const struct wave5_match_data wave521c_data = {
+ .flags = WAVE5_IS_ENC | WAVE5_IS_DEC,
+ .fw_name = "wave521c_codec_fw.bin",
+};
+
+static const struct wave5_match_data default_match_data = {
+ .flags = WAVE5_IS_ENC | WAVE5_IS_DEC,
+ .fw_name = "chagall.bin",
+};
+
+static const struct of_device_id wave5_dt_ids[] = {
+ { .compatible = "cnm,cm511-vpu", .data = &wave511_data },
+ { .compatible = "cnm,cm517-vpu", .data = &default_match_data },
+ { .compatible = "cnm,cm521-vpu", .data = &wave521_data },
+ { .compatible = "cnm,cm521c-vpu", .data = &wave521c_data },
+ { .compatible = "cnm,cm521c-dual-vpu", .data = &wave521c_data },
+ { .compatible = "cnm,cm521e1-vpu", .data = &default_match_data },
+ { .compatible = "cnm,cm537-vpu", .data = &default_match_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, wave5_dt_ids);
+
+static struct platform_driver wave5_vpu_driver = {
+ .driver = {
+ .name = VPU_PLATFORM_DEVICE_NAME,
+ .of_match_table = of_match_ptr(wave5_dt_ids),
+ },
+ .probe = wave5_vpu_probe,
+ .remove = wave5_vpu_remove,
+};
+
+module_platform_driver(wave5_vpu_driver);
+MODULE_DESCRIPTION("chips&media VPU V4L2 driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.h b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
new file mode 100644
index 000000000000..7156d8c3fee4
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - basic types
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+#ifndef __VPU_DRV_H__
+#define __VPU_DRV_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+#include "wave5-vpuconfig.h"
+#include "wave5-vpuapi.h"
+
+#define VPU_BUF_SYNC_TO_DEVICE 0
+#define VPU_BUF_SYNC_FROM_DEVICE 1
+
+struct vpu_buffer {
+ struct v4l2_m2m_buffer v4l2_m2m_buf;
+ bool consumed;
+};
+
+enum vpu_fmt_type {
+ VPU_FMT_TYPE_CODEC = 0,
+ VPU_FMT_TYPE_RAW = 1
+};
+
+struct vpu_format {
+ unsigned int v4l2_pix_fmt;
+ unsigned int max_width;
+ unsigned int min_width;
+ unsigned int max_height;
+ unsigned int min_height;
+};
+
+static inline struct vpu_instance *wave5_to_vpu_inst(struct v4l2_fh *vfh)
+{
+ return container_of(vfh, struct vpu_instance, v4l2_fh);
+}
+
+static inline struct vpu_instance *wave5_ctrl_to_vpu_inst(struct v4l2_ctrl *vctrl)
+{
+ return container_of(vctrl->handler, struct vpu_instance, v4l2_ctrl_hdl);
+}
+
+static inline struct vpu_buffer *wave5_to_vpu_buf(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vpu_buffer, v4l2_m2m_buf.vb);
+}
+
+int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout);
+
+int wave5_vpu_dec_register_device(struct vpu_device *dev);
+void wave5_vpu_dec_unregister_device(struct vpu_device *dev);
+int wave5_vpu_enc_register_device(struct vpu_device *dev);
+void wave5_vpu_enc_unregister_device(struct vpu_device *dev);
+static inline bool wave5_vpu_both_queues_are_streaming(struct vpu_instance *inst)
+{
+ struct vb2_queue *vq_cap =
+ v4l2_m2m_get_vq(inst->v4l2_fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ struct vb2_queue *vq_out =
+ v4l2_m2m_get_vq(inst->v4l2_fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ return vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out);
+}
+
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
new file mode 100644
index 000000000000..1458c5587dbc
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
@@ -0,0 +1,1040 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - helper functions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#include <linux/bug.h>
+#include "wave5-vpuapi.h"
+#include "wave5-regdefine.h"
+#include "wave5.h"
+
+#define DECODE_ALL_TEMPORAL_LAYERS 0
+#define DECODE_ALL_SPATIAL_LAYERS 0
+
+void wave5_vpu_clear_interrupt_ex(struct vpu_instance *inst, u32 intr_flag)
+{
+ wave5_vpu_clear_interrupt(inst, intr_flag);
+}
+
+static int wave5_initialize_vpu(struct device *dev, u8 *code, size_t size)
+{
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
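+	/* a VPU that already booted is only re-initialized; -EBUSY reports that state */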
+ if (wave5_vpu_is_init(vpu_dev)) {
+ wave5_vpu_re_init(dev, (void *)code, size);
+ ret = -EBUSY;
+ goto err_out;
+ }
+
+ ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
+ if (ret)
+ goto err_out;
+
+ ret = wave5_vpu_init(dev, (void *)code, size);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+}
+
+int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size)
+{
+ if (!bitcode || size == 0)
+ return -EINVAL;
+
+ return wave5_initialize_vpu(dev, bitcode, size);
+}
+
+int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id)
+{
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ if (product_id)
+ *product_id = vpu_dev->product;
+ ret = wave5_vpu_get_version(vpu_dev, revision);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+}
+
+static int wave5_check_dec_open_param(struct vpu_instance *inst, struct dec_open_param *param)
+{
+ struct vpu_attr *p_attr = &inst->dev->attr;
+
+ if (inst->id >= MAX_NUM_INSTANCE) {
+ dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
+ inst->id, MAX_NUM_INSTANCE);
+ return -EOPNOTSUPP;
+ }
+
+ if (param->bitstream_buffer % 8) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer must be aligned to a multiple of 8\n");
+ return -EINVAL;
+ }
+
+ if (param->bitstream_mode == BS_MODE_INTERRUPT &&
+ (param->bitstream_buffer_size % 1024 ||
+ param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE)) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer size must be aligned to a multiple of 1024 and have a minimum size of %d\n",
+ MIN_BITSTREAM_BUFFER_SIZE);
+ return -EINVAL;
+ }
+
+	if (!(BIT(param->bitstream_mode) & p_attr->support_bitstream_mode)) {
+ dev_err(inst->dev->dev,
+ "Bitstream mode only configurable with the 'support_bitstream_mode' flag");
+ return -EINVAL;
+ }
+
+	if (!(BIT(param->frame_endian) & p_attr->support_endian_mask)) {
+ dev_err(inst->dev->dev,
+ "Frame endianness only configurable with the 'support_endian_mask' flag");
+ return -EINVAL;
+ }
+
+	if (!(BIT(param->stream_endian) & p_attr->support_endian_mask)) {
+ dev_err(inst->dev->dev,
+ "Stream endianness only configurable with the 'support_endian_mask' flag");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ dma_addr_t buffer_addr;
+ size_t buffer_size;
+
+ ret = wave5_check_dec_open_param(inst, open_param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENODEV;
+ }
+
+ inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
+ if (!inst->codec_info) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENOMEM;
+ }
+
+ p_dec_info = &inst->codec_info->dec_info;
+	p_dec_info->open_param = *open_param;
+
+ buffer_addr = open_param->bitstream_buffer;
+ buffer_size = open_param->bitstream_buffer_size;
+ p_dec_info->stream_wr_ptr = buffer_addr;
+ p_dec_info->stream_rd_ptr = buffer_addr;
+ p_dec_info->stream_buf_start_addr = buffer_addr;
+ p_dec_info->stream_buf_size = buffer_size;
+ p_dec_info->stream_buf_end_addr = buffer_addr + buffer_size;
+	p_dec_info->reorder_enable = true;
+ p_dec_info->mirror_direction = MIRDIR_NONE;
+ p_dec_info->temp_id_select_mode = TEMPORAL_ID_MODE_ABSOLUTE;
+ p_dec_info->target_temp_id = DECODE_ALL_TEMPORAL_LAYERS;
+ p_dec_info->target_spatial_id = DECODE_ALL_SPATIAL_LAYERS;
+
+ ret = wave5_vpu_build_up_dec_param(inst, open_param);
+ if (ret)
+ goto free_codec_info;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return 0;
+
+free_codec_info:
+ kfree(inst->codec_info);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ int i;
+
+ *fail_res = 0;
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_dec_finish_seq(inst, fail_res);
+ if (ret) {
+ dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
+
+ if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING) {
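+			/* instance still busy on the VPU; buffers stay allocated for a later close retry */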
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+ }
+ }
+
+ dev_dbg(inst->dev->dev, "%s: dec_finish_seq complete\n", __func__);
+
+ if (p_dec_info->vb_work.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
+
+	for (i = 0; i < MAX_REG_FRAME; i++) {
+ if (p_dec_info->vb_mv[i].size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_mv[i]);
+ if (p_dec_info->vb_fbc_y_tbl[i].size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_fbc_y_tbl[i]);
+ if (p_dec_info->vb_fbc_c_tbl[i].size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_fbc_c_tbl[i]);
+ }
+
+ if (p_dec_info->vb_task.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_task);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ kfree(inst->codec_info);
+
+ return 0;
+}
+
+int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst)
+{
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_dec_init_seq(inst);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_dec_get_seq_info(inst, info);
+ if (!ret)
+ p_dec_info->initial_info_obtained = true;
+
+ info->rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+
+ p_dec_info->initial_info = *info;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
+ int num_of_display_fbs, int stride, int height,
+ int map_type)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct frame_buffer *fb;
+
+ if (num_of_decoding_fbs >= WAVE5_MAX_FBS || num_of_display_fbs >= WAVE5_MAX_FBS)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+ p_dec_info->num_of_decoding_fbs = num_of_decoding_fbs;
+ p_dec_info->num_of_display_fbs = num_of_display_fbs;
+ p_dec_info->stride = stride;
+
+ if (!p_dec_info->initial_info_obtained)
+ return -EINVAL;
+
+ if (stride < p_dec_info->initial_info.pic_width || (stride % 8 != 0) ||
+ height < p_dec_info->initial_info.pic_height)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ fb = inst->frame_buf;
+ ret = wave5_vpu_dec_register_framebuffer(inst, &fb[p_dec_info->num_of_decoding_fbs],
+ LINEAR_FRAME_MAP, p_dec_info->num_of_display_fbs);
+ if (ret)
+ goto err_out;
+
+ ret = wave5_vpu_dec_register_framebuffer(inst, &fb[0], COMPRESSED_FRAME_MAP,
+ p_dec_info->num_of_decoding_fbs);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
+ dma_addr_t *pwr_ptr, size_t *size)
+{
+ struct dec_info *p_dec_info;
+ dma_addr_t rd_ptr;
+ dma_addr_t wr_ptr;
+ int room;
+ struct vpu_device *vpu_dev = inst->dev;
+ int ret;
+
+ p_dec_info = &inst->codec_info->dec_info;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ wr_ptr = p_dec_info->stream_wr_ptr;
+
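+	/*
+	 * in ring-buffer (interrupt) mode the free space wraps around the
+	 * buffer end; one byte is kept unused so that wr_ptr == rd_ptr
+	 * always means "empty" rather than "full"
+	 */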
+ if (p_dec_info->open_param.bitstream_mode != BS_MODE_PIC_END) {
+ if (wr_ptr < rd_ptr)
+ room = rd_ptr - wr_ptr;
+ else
+ room = (p_dec_info->stream_buf_end_addr - wr_ptr) +
+ (rd_ptr - p_dec_info->stream_buf_start_addr);
+ room--;
+ } else {
+ room = (p_dec_info->stream_buf_end_addr - wr_ptr);
+ }
+
+ if (prd_ptr)
+ *prd_ptr = rd_ptr;
+ if (pwr_ptr)
+ *pwr_ptr = wr_ptr;
+ if (size)
+ *size = room;
+
+ return 0;
+}
+
+int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size)
+{
+ struct dec_info *p_dec_info;
+ dma_addr_t wr_ptr;
+ dma_addr_t rd_ptr;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+ wr_ptr = p_dec_info->stream_wr_ptr;
+ rd_ptr = p_dec_info->stream_rd_ptr;
+
+ if (size > 0) {
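+		/* a write of 'size' bytes must not overtake the read pointer */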
+ if (wr_ptr < rd_ptr && rd_ptr <= wr_ptr + size)
+ return -EINVAL;
+
+ wr_ptr += size;
+
+ if (p_dec_info->open_param.bitstream_mode != BS_MODE_PIC_END) {
+ if (wr_ptr > p_dec_info->stream_buf_end_addr) {
+ u32 room = wr_ptr - p_dec_info->stream_buf_end_addr;
+
+ wr_ptr = p_dec_info->stream_buf_start_addr;
+ wr_ptr += room;
+ } else if (wr_ptr == p_dec_info->stream_buf_end_addr) {
+ wr_ptr = p_dec_info->stream_buf_start_addr;
+ }
+ }
+
+ p_dec_info->stream_wr_ptr = wr_ptr;
+ p_dec_info->stream_rd_ptr = rd_ptr;
+ }
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
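+	/* an update of size 0 signals end-of-stream to the VPU */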
+ ret = wave5_vpu_dec_set_bitstream_flag(inst, (size == 0));
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, struct dec_param *param, u32 *res_fail)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (p_dec_info->stride == 0) // this means frame buffers have not been registered.
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_decode(inst, param, res_fail);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_dec_set_rd_ptr(inst, addr);
+
+ p_dec_info->stream_rd_ptr = addr;
+ if (update_wr_ptr)
+ p_dec_info->stream_wr_ptr = addr;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_rect rect_info;
+ u32 val;
+ u32 decoded_index;
+ u32 disp_idx;
+ u32 max_dec_index;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct dec_output_info *disp_info;
+
+ if (!info)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ memset(info, 0, sizeof(*info));
+
+ ret = wave5_vpu_dec_get_result(inst, info);
+ if (ret) {
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+ goto err_out;
+ }
+
+ decoded_index = info->index_frame_decoded;
+
+	// calculate display frame region
+	val = 0;
+	// default value
+ rect_info.left = 0;
+ rect_info.right = 0;
+ rect_info.top = 0;
+ rect_info.bottom = 0;
+ if (decoded_index < WAVE5_MAX_FBS) {
+ if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC)
+ rect_info = p_dec_info->initial_info.pic_crop_rect;
+
+ if (inst->std == W_HEVC_DEC)
+ p_dec_info->dec_out_info[decoded_index].decoded_poc = info->decoded_poc;
+
+ if (inst->std == W_AVS2_DEC)
+ p_dec_info->dec_out_info[decoded_index].avs2_info.decoded_poi =
+ info->avs2_info.decoded_poi;
+
+ p_dec_info->dec_out_info[decoded_index].rc_decoded = rect_info;
+ }
+ info->rc_decoded = rect_info;
+
+ disp_idx = info->index_frame_display;
+ disp_info = &p_dec_info->dec_out_info[disp_idx];
+ if (info->index_frame_display >= 0 && info->index_frame_display < WAVE5_MAX_FBS) {
+ u32 width = info->dec_pic_width;
+ u32 height = info->dec_pic_height;
+
+ if (info->index_frame_display != info->index_frame_decoded) {
+ /*
+ * when index_frame_decoded < 0, and index_frame_display >= 0
+ * info->dec_pic_width and info->dec_pic_height are still valid
+ * but those of p_dec_info->dec_out_info[disp_idx] are invalid in VP9
+ */
+ width = disp_info->dec_pic_width;
+ height = disp_info->dec_pic_height;
+ }
+ // TODO no rotation/mirror v4l2 cmd implemented for the decoder
+ if (p_dec_info->rotation_enable || p_dec_info->mirror_enable)
+ if (p_dec_info->rotation_angle == 90 || p_dec_info->rotation_angle == 270)
+ swap(width, height);
+
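+		/*
+		 * remap the crop rectangle into the rotated coordinate system:
+		 * for 90/270 degrees the horizontal and vertical edges swap roles
+		 */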
+ if (p_dec_info->rotation_enable) {
+ switch (p_dec_info->rotation_angle) {
+ case 90:
+ info->rc_display.left = disp_info->rc_decoded.top;
+ info->rc_display.right = disp_info->rc_decoded.bottom;
+ info->rc_display.top = disp_info->rc_decoded.right;
+ info->rc_display.bottom = disp_info->rc_decoded.left;
+ break;
+ case 270:
+ info->rc_display.left = disp_info->rc_decoded.bottom;
+ info->rc_display.right = disp_info->rc_decoded.top;
+ info->rc_display.top = disp_info->rc_decoded.left;
+ info->rc_display.bottom = disp_info->rc_decoded.right;
+ break;
+ case 180:
+ info->rc_display.left = disp_info->rc_decoded.right;
+ info->rc_display.right = disp_info->rc_decoded.left;
+ info->rc_display.top = disp_info->rc_decoded.bottom;
+ info->rc_display.bottom = disp_info->rc_decoded.top;
+ break;
+ default:
+ info->rc_display = disp_info->rc_decoded;
+ break;
+ }
+ } else {
+ info->rc_display = disp_info->rc_decoded;
+ }
+
+ if (p_dec_info->mirror_enable) {
+ if (p_dec_info->mirror_direction & MIRDIR_VER)
+ swap(info->rc_display.top, info->rc_display.bottom);
+ if (p_dec_info->mirror_direction & MIRDIR_HOR)
+ swap(info->rc_display.left, info->rc_display.right);
+ }
+
+ switch (inst->std) {
+ case W_AVS2_DEC:
+ info->avs2_info.display_poi =
+ disp_info->avs2_info.decoded_poi;
+ break;
+ default:
+ break;
+ }
+
+ info->disp_pic_width = width;
+ info->disp_pic_height = height;
+ } else {
+ info->rc_display.left = 0;
+ info->rc_display.right = 0;
+ info->rc_display.top = 0;
+ info->rc_display.bottom = 0;
+
+ if (p_dec_info->rotation_enable || p_dec_info->mirror_enable ||
+ p_dec_info->dering_enable) {
+ info->disp_pic_width = info->dec_pic_width;
+ info->disp_pic_height = info->dec_pic_height;
+ } else {
+ info->disp_pic_width = 0;
+ info->disp_pic_height = 0;
+ }
+ }
+
+ p_dec_info->stream_rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
+ p_dec_info->frame_display_flag = vpu_read_reg(vpu_dev, W5_RET_DEC_DISP_IDC);
+ if (inst->std == W_VP9_DEC)
+ p_dec_info->frame_display_flag &= 0xFFFF;
+
+ if (p_dec_info->dering_enable || p_dec_info->mirror_enable || p_dec_info->rotation_enable) {
+ info->disp_frame = p_dec_info->rotator_output;
+ info->disp_frame.stride = p_dec_info->rotator_stride;
+ } else {
+		val = p_dec_info->num_of_decoding_fbs; // fb_offset
+
+		max_dec_index = max(p_dec_info->num_of_decoding_fbs,
+				    p_dec_info->num_of_display_fbs);
+
+ if (info->index_frame_display >= 0 &&
+ info->index_frame_display < (int)max_dec_index)
+ info->disp_frame = inst->frame_buf[val + info->index_frame_display];
+ }
+
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+ info->frame_display_flag = p_dec_info->frame_display_flag;
+
+ info->sequence_no = p_dec_info->initial_info.sequence_no;
+ if (decoded_index < WAVE5_MAX_FBS)
+ p_dec_info->dec_out_info[decoded_index] = *info;
+
+ if (disp_idx < WAVE5_MAX_FBS)
+ info->disp_frame.sequence_no = info->sequence_no;
+
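+	/* adopt the new sequence info unless only the inter-frame resolution changed (VP9) */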
+ if (info->sequence_changed &&
+ !(info->sequence_changed & SEQ_CHANGE_INTER_RES_CHANGE)) {
+		p_dec_info->initial_info = p_dec_info->new_seq_info;
+ p_dec_info->initial_info.sequence_no++;
+ }
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret = 0;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (index >= p_dec_info->num_of_display_fbs)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ ret = wave5_dec_clr_disp_flag(inst, index);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret = 0;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (index >= p_dec_info->num_of_display_fbs)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ ret = wave5_dec_set_disp_flag(inst, index);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ switch (cmd) {
+ case DEC_GET_QUEUE_STATUS: {
+ struct queue_status_info *queue_info = parameter;
+
+ queue_info->instance_queue_count = p_dec_info->instance_queue_count;
+ queue_info->report_queue_count = p_dec_info->report_queue_count;
+ break;
+ }
+ case ENABLE_DEC_THUMBNAIL_MODE:
+ p_dec_info->thumbnail_mode = true;
+ break;
+ case DEC_RESET_FRAMEBUF_INFO: {
+ int i;
+
+ for (i = 0; i < inst->dst_buf_count; i++) {
+ wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[i]);
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[i]);
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[i]);
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[i]);
+ }
+
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
+ break;
+ }
+ case DEC_GET_SEQ_INFO: {
+ struct dec_initial_info *seq_info = parameter;
+
+ *seq_info = p_dec_info->initial_info;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param)
+{
+ struct enc_info *p_enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = wave5_vpu_enc_check_open_param(inst, open_param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENODEV;
+ }
+
+ inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
+ if (!inst->codec_info) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENOMEM;
+ }
+
+ p_enc_info = &inst->codec_info->enc_info;
+ p_enc_info->open_param = *open_param;
+
+ ret = wave5_vpu_build_up_enc_param(vpu_dev->dev, inst, open_param);
+ if (ret)
+ goto free_codec_info;
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return 0;
+
+free_codec_info:
+ kfree(inst->codec_info);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct enc_info *p_enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ *fail_res = 0;
+
+ if (!inst->codec_info)
+ return -EINVAL;
+ p_enc_info = &inst->codec_info->enc_info;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_finish_seq(inst, fail_res);
+ if (ret) {
+ dev_warn(inst->dev->dev, "enc seq end timed out\n");
+
+ if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+ }
+ }
+
+	dev_dbg(inst->dev->dev, "%s: enc_finish_seq complete\n", __func__);
+
+ if (p_enc_info->vb_work.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work);
+
+ if (inst->std == W_HEVC_ENC || inst->std == W_AVC_ENC) {
+ if (p_enc_info->vb_sub_sam_buf.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_sub_sam_buf);
+
+ if (p_enc_info->vb_mv.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_mv);
+
+ if (p_enc_info->vb_fbc_y_tbl.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_y_tbl);
+
+ if (p_enc_info->vb_fbc_c_tbl.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_c_tbl);
+ }
+
+ if (p_enc_info->vb_task.size)
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_task);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ kfree(inst->codec_info);
+
+ return 0;
+}
+
+int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
+ unsigned int stride, int height,
+ enum tiled_map_type map_type)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ unsigned int size_luma, size_chroma;
+ int i;
+
+ if (p_enc_info->stride)
+ return -EINVAL;
+
+ if (!p_enc_info->initial_info_obtained)
+ return -EINVAL;
+
+ if (num < p_enc_info->initial_info.min_frame_buffer_count)
+ return -EINVAL;
+
+ if (stride == 0 || stride % 8 != 0)
+ return -EINVAL;
+
+ if (height <= 0)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ p_enc_info->num_frame_buffers = num;
+ p_enc_info->stride = stride;
+
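+	/* half-width chroma rows aligned to 16 bytes; this likely covers both chroma planes */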
+ size_luma = stride * height;
+ size_chroma = ALIGN(stride / 2, 16) * height;
+
+ for (i = 0; i < num; i++) {
+ if (!inst->frame_buf[i].update_fb_info)
+ continue;
+
+ inst->frame_buf[i].update_fb_info = false;
+ inst->frame_buf[i].stride = stride;
+ inst->frame_buf[i].height = height;
+ inst->frame_buf[i].map_type = COMPRESSED_FRAME_MAP;
+ inst->frame_buf[i].endian = VDI_128BIT_LITTLE_ENDIAN;
+ inst->frame_buf[i].buf_y_size = size_luma;
+ inst->frame_buf[i].buf_cb = inst->frame_buf[i].buf_y + size_luma;
+ inst->frame_buf[i].buf_cb_size = size_chroma;
+ inst->frame_buf[i].buf_cr_size = 0;
+ }
+
+ ret = wave5_vpu_enc_register_framebuffer(inst->dev->dev, inst, &inst->frame_buf[0],
+ COMPRESSED_FRAME_MAP,
+ p_enc_info->num_frame_buffers);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+static int wave5_check_enc_param(struct vpu_instance *inst, struct enc_param *param)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->skip_picture && !param->source_frame)
+ return -EINVAL;
+
+ if (p_enc_info->open_param.bit_rate == 0 && inst->std == W_HEVC_ENC) {
+ if (param->force_pic_qp_enable &&
+ (param->force_pic_qp_i > MAX_INTRA_QP || param->force_pic_qp_p > MAX_INTRA_QP ||
+ param->force_pic_qp_b > MAX_INTRA_QP))
+ return -EINVAL;
+ if (!p_enc_info->ring_buffer_enable &&
+ (param->pic_stream_buffer_addr % 16 || param->pic_stream_buffer_size == 0))
+ return -EINVAL;
+ }
+ if (!p_enc_info->ring_buffer_enable &&
+ (param->pic_stream_buffer_addr % 8 || param->pic_stream_buffer_size == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static uint64_t wave5_get_timestamp(struct vpu_instance *inst)
+{
+ struct enc_info *p_enc_info;
+ u64 pts;
+ u32 fps;
+
+ if (!inst->codec_info)
+ return 0;
+
+ p_enc_info = &inst->codec_info->enc_info;
+ fps = p_enc_info->open_param.frame_rate_info;
+ if (fps == 0)
+ fps = 30;
+
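+	/*
+	 * timestamps run on the 90 kHz MPEG clock: at the default 30 fps
+	 * every frame advances the PTS by 90000 / 30 = 3000 ticks
+	 */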
+ pts = p_enc_info->cur_pts;
+	p_enc_info->cur_pts += 90000 / fps; /* 90 kHz / fps */
+
+ return pts;
+}
+
+int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param, u32 *fail_res)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ *fail_res = 0;
+
+ if (p_enc_info->stride == 0) // this means frame buffers have not been registered.
+ return -EINVAL;
+
+ ret = wave5_check_enc_param(inst, param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
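+	/*
+	 * stash the timestamp under the source index; it is read back in
+	 * wave5_vpu_enc_get_output_info() once this frame comes out encoded
+	 */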
+ p_enc_info->pts_map[param->src_idx] = p_enc_info->open_param.enable_pts ?
+ wave5_get_timestamp(inst) : param->pts;
+
+ ret = wave5_vpu_encode(inst, param, fail_res);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_get_result(inst, info);
+ if (ret) {
+ info->pts = 0;
+ goto unlock;
+ }
+
+ if (info->recon_frame_index >= 0)
+ info->pts = p_enc_info->pts_map[info->enc_src_idx];
+
+unlock:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ switch (cmd) {
+ case ENABLE_ROTATION:
+ p_enc_info->rotation_enable = true;
+ break;
+ case ENABLE_MIRRORING:
+ p_enc_info->mirror_enable = true;
+ break;
+ case SET_MIRROR_DIRECTION: {
+ enum mirror_direction mir_dir;
+
+ mir_dir = *(enum mirror_direction *)parameter;
+ if (mir_dir != MIRDIR_NONE && mir_dir != MIRDIR_HOR &&
+ mir_dir != MIRDIR_VER && mir_dir != MIRDIR_HOR_VER)
+ return -EINVAL;
+ p_enc_info->mirror_direction = mir_dir;
+ break;
+ }
+ case SET_ROTATION_ANGLE: {
+ int angle;
+
+ angle = *(int *)parameter;
+ if (angle && angle != 90 && angle != 180 && angle != 270)
+ return -EINVAL;
+ if (p_enc_info->initial_info_obtained && (angle == 90 || angle == 270))
+ return -EINVAL;
+ p_enc_info->rotation_angle = angle;
+ break;
+ }
+ case ENC_GET_QUEUE_STATUS: {
+ struct queue_status_info *queue_info = parameter;
+
+ queue_info->instance_queue_count = p_enc_info->instance_queue_count;
+ queue_info->report_queue_count = p_enc_info->report_queue_count;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst)
+{
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_init_seq(inst);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (!info)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_get_seq_info(inst, info);
+ if (ret) {
+ p_enc_info->initial_info_obtained = false;
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+ }
+
+ p_enc_info->initial_info_obtained = true;
+ p_enc_info->initial_info = *info;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
new file mode 100644
index 000000000000..4808121c16f8
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
@@ -0,0 +1,1138 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - helper definitions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef VPUAPI_H_INCLUDED
+#define VPUAPI_H_INCLUDED
+
+#include <linux/kfifo.h>
+#include <linux/idr.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ctrls.h>
+#include "wave5-vpuerror.h"
+#include "wave5-vpuconfig.h"
+#include "wave5-vdi.h"
+
+enum product_id {
+ PRODUCT_ID_521,
+ PRODUCT_ID_511,
+ PRODUCT_ID_517,
+ PRODUCT_ID_NONE,
+};
+
+struct vpu_attr;
+
+enum vpu_instance_type {
+ VPU_INST_TYPE_DEC = 0,
+ VPU_INST_TYPE_ENC = 1
+};
+
+enum vpu_instance_state {
+ VPU_INST_STATE_NONE = 0,
+ VPU_INST_STATE_OPEN = 1,
+ VPU_INST_STATE_INIT_SEQ = 2,
+ VPU_INST_STATE_PIC_RUN = 3,
+ VPU_INST_STATE_STOP = 4
+};
+
+#define WAVE5_MAX_FBS 32
+
+#define MAX_REG_FRAME (WAVE5_MAX_FBS * 2)
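+/* room for two register sets: compressed decode buffers plus linear display buffers */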
+
+#define WAVE5_DEC_HEVC_BUF_SIZE(_w, _h) (DIV_ROUND_UP(_w, 64) * DIV_ROUND_UP(_h, 64) * 256 + 64)
+#define WAVE5_DEC_AVC_BUF_SIZE(_w, _h) ((((ALIGN(_w, 256) / 16) * (ALIGN(_h, 16) / 16)) + 16) * 80)
+#define WAVE5_DEC_VP9_BUF_SIZE(_w, _h) (((ALIGN(_w, 64) * ALIGN(_h, 64)) >> 2))
+#define WAVE5_DEC_AVS2_BUF_SIZE(_w, _h) (((ALIGN(_w, 64) * ALIGN(_h, 64)) >> 5))
+// AV1 BUF SIZE : MFMV + segment ID + CDF probs table + film grain param Y + film grain param C
+#define WAVE5_DEC_AV1_BUF_SZ_1(_w, _h) \
+ (((ALIGN(_w, 64) / 64) * (ALIGN(_h, 64) / 64) * 512) + 41984 + 8192 + 4864)
+#define WAVE5_DEC_AV1_BUF_SZ_2(_w1, _w2, _h) \
+ (((ALIGN(_w1, 64) / 64) * 256 + (ALIGN(_w2, 256) / 64) * 128) * (ALIGN(_h, 64) / 64))
+
+#define WAVE5_FBC_LUMA_TABLE_SIZE(_w, _h) (ALIGN(_h, 64) * ALIGN(_w, 256) / 32)
+#define WAVE5_FBC_CHROMA_TABLE_SIZE(_w, _h) (ALIGN((_h), 64) * ALIGN((_w) / 2, 256) / 32)
+#define WAVE5_ENC_AVC_BUF_SIZE(_w, _h) (ALIGN(_w, 64) * ALIGN(_h, 64) / 32)
+#define WAVE5_ENC_HEVC_BUF_SIZE(_w, _h) (ALIGN(_w, 64) / 64 * ALIGN(_h, 64) / 64 * 128)
+
+/*
+ * common struct and definition
+ */
+enum cod_std {
+ STD_AVC = 0,
+ STD_VC1 = 1,
+ STD_MPEG2 = 2,
+ STD_MPEG4 = 3,
+ STD_H263 = 4,
+ STD_DIV3 = 5,
+ STD_RV = 6,
+ STD_AVS = 7,
+ STD_THO = 9,
+ STD_VP3 = 10,
+ STD_VP8 = 11,
+ STD_HEVC = 12,
+ STD_VP9 = 13,
+ STD_AVS2 = 14,
+ STD_AV1 = 16,
+ STD_MAX
+};
+
+enum wave_std {
+ W_HEVC_DEC = 0x00,
+ W_HEVC_ENC = 0x01,
+ W_AVC_DEC = 0x02,
+ W_AVC_ENC = 0x03,
+ W_VP9_DEC = 0x16,
+ W_AVS2_DEC = 0x18,
+ W_AV1_DEC = 0x1A,
+ STD_UNKNOWN = 0xFF
+};
+
+enum SET_PARAM_OPTION {
+ OPT_COMMON = 0, /* SET_PARAM command option for encoding sequence */
+ OPT_CUSTOM_GOP = 1, /* SET_PARAM command option for setting custom GOP */
+ OPT_CUSTOM_HEADER = 2, /* SET_PARAM command option for setting custom VPS/SPS/PPS */
+ OPT_VUI = 3, /* SET_PARAM command option for encoding VUI */
+ OPT_CHANGE_PARAM = 0x10,
+};
+
+enum DEC_PIC_HDR_OPTION {
+ INIT_SEQ_NORMAL = 0x01,
+ INIT_SEQ_W_THUMBNAIL = 0x11,
+};
+
+enum DEC_PIC_OPTION {
+	DEC_PIC_NORMAL = 0x00, /* the normal mode of the DEC_PIC command */
+	DEC_PIC_W_THUMBNAIL = 0x10, /* thumbnail mode (skip non-IRAP without reference reg) */
+	SKIP_NON_IRAP = 0x11, /* it skips decoding non-IRAP pictures */
+ SKIP_NON_REF_PIC = 0x13
+};
+
+/************************************************************************/
+/* PROFILE & LEVEL */
+/************************************************************************/
+/* HEVC */
+#define HEVC_PROFILE_MAIN 1
+#define HEVC_PROFILE_MAIN10 2
+#define HEVC_PROFILE_STILLPICTURE 3
+#define HEVC_PROFILE_MAIN10_STILLPICTURE 2
+
+/* H.264 profile for encoder*/
+#define H264_PROFILE_BP 1
+#define H264_PROFILE_MP 2
+#define H264_PROFILE_EXTENDED 3
+#define H264_PROFILE_HP 4
+#define H264_PROFILE_HIGH10 5
+#define H264_PROFILE_HIGH422 6
+#define H264_PROFILE_HIGH444 7
+
+/************************************************************************/
+/* error codes */
+/************************************************************************/
+
+/************************************************************************/
+/* utility macros */
+/************************************************************************/
+
+/* bit_alloc_mode */
+#define BIT_ALLOC_MODE_FIXED_RATIO 2
+
+/* bit_rate */
+#define MAX_BIT_RATE 700000000
+
+/* decoding_refresh_type */
+#define DEC_REFRESH_TYPE_NON_IRAP 0
+#define DEC_REFRESH_TYPE_CRA 1
+#define DEC_REFRESH_TYPE_IDR 2
+
+/* depend_slice_mode */
+#define DEPEND_SLICE_MODE_RECOMMENDED 1
+#define DEPEND_SLICE_MODE_BOOST 2
+#define DEPEND_SLICE_MODE_FAST 3
+
+/* hvs_max_delta_qp */
+#define MAX_HVS_MAX_DELTA_QP 51
+
+/* intra_refresh_mode */
+#define REFRESH_MODE_CTU_ROWS 1
+#define REFRESH_MODE_CTU_COLUMNS 2
+#define REFRESH_MODE_CTU_STEP_SIZE 3
+#define REFRESH_MODE_CTUS 4
+
+/* intra_mb_refresh_mode */
+#define REFRESH_MB_MODE_NONE 0
+#define REFRESH_MB_MODE_CTU_ROWS 1
+#define REFRESH_MB_MODE_CTU_COLUMNS 2
+#define REFRESH_MB_MODE_CTU_STEP_SIZE 3
+
+/* intra_qp */
+#define MAX_INTRA_QP 63
+
+/* nr_inter_weight_* */
+#define MAX_INTER_WEIGHT 31
+
+/* nr_intra_weight_* */
+#define MAX_INTRA_WEIGHT 31
+
+/* nr_noise_sigma_* */
+#define MAX_NOISE_SIGMA 255
+
+/* bitstream_buffer_size */
+#define MIN_BITSTREAM_BUFFER_SIZE 1024
+#define MIN_BITSTREAM_BUFFER_SIZE_WAVE521 (1024 * 64)
+
+/* vbv_buffer_size */
+#define MIN_VBV_BUFFER_SIZE 10
+#define MAX_VBV_BUFFER_SIZE 3000
+
+/* Bitstream buffer option: Explicit End
+ * When set to 1 the VPU assumes that the bitstream has at least one frame and
+ * will read until the end of the bitstream buffer.
+ * When set to 0 the VPU will not read the last few bytes.
+ * This option can be set anytime but cannot be cleared during processing.
+ * It can be set to force finish decoding even though there is not enough
+ * bitstream data for a full frame.
+ */
+#define BS_EXPLICIT_END_MODE_ON 1
+
+#define BUFFER_MARGIN 4096
+
+/************************************************************************/
+/* */
+/************************************************************************/
+/**
+ * \brief parameters of DEC_SET_SEQ_CHANGE_MASK
+ */
+#define SEQ_CHANGE_ENABLE_PROFILE BIT(5)
+#define SEQ_CHANGE_CHROMA_FORMAT_IDC BIT(15) /* AV1 */
+#define SEQ_CHANGE_ENABLE_SIZE BIT(16)
+#define SEQ_CHANGE_INTER_RES_CHANGE BIT(17) /* VP9 */
+#define SEQ_CHANGE_ENABLE_BITDEPTH BIT(18)
+#define SEQ_CHANGE_ENABLE_DPB_COUNT BIT(19)
+
+#define SEQ_CHANGE_ENABLE_ALL_VP9 (SEQ_CHANGE_ENABLE_PROFILE | \
+ SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_INTER_RES_CHANGE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define SEQ_CHANGE_ENABLE_ALL_HEVC (SEQ_CHANGE_ENABLE_PROFILE | \
+ SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define SEQ_CHANGE_ENABLE_ALL_AVS2 (SEQ_CHANGE_ENABLE_PROFILE | \
+ SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define SEQ_CHANGE_ENABLE_ALL_AVC (SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define SEQ_CHANGE_ENABLE_ALL_AV1 (SEQ_CHANGE_ENABLE_PROFILE | \
+ SEQ_CHANGE_CHROMA_FORMAT_IDC | \
+ SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define DISPLAY_IDX_FLAG_SEQ_END -1
+#define DISPLAY_IDX_FLAG_NO_FB -3
+#define DECODED_IDX_FLAG_NO_FB -1
+#define DECODED_IDX_FLAG_SKIP -2
+
+#define RECON_IDX_FLAG_ENC_END -1
+#define RECON_IDX_FLAG_ENC_DELAY -2
+#define RECON_IDX_FLAG_HEADER_ONLY -3
+#define RECON_IDX_FLAG_CHANGE_PARAM -4
+
+enum codec_command {
+ ENABLE_ROTATION,
+ ENABLE_MIRRORING,
+ SET_MIRROR_DIRECTION,
+ SET_ROTATION_ANGLE,
+ ENABLE_DEC_THUMBNAIL_MODE,
+ DEC_GET_QUEUE_STATUS,
+ ENC_GET_QUEUE_STATUS,
+ DEC_RESET_FRAMEBUF_INFO,
+ DEC_GET_SEQ_INFO,
+};
+
+enum error_conceal_mode {
+ ERROR_CONCEAL_MODE_OFF = 0, /* conceal off */
+ ERROR_CONCEAL_MODE_INTRA_ONLY = 1, /* intra conceal in intra-picture, inter-picture */
+ ERROR_CONCEAL_MODE_INTRA_INTER = 2
+};
+
+enum error_conceal_unit {
+ ERROR_CONCEAL_UNIT_PICTURE = 0, /* picture-level error conceal */
+ ERROR_CONCEAL_UNIT_SLICE_TILE = 1, /* slice/tile-level error conceal */
+ ERROR_CONCEAL_UNIT_BLOCK_ROW = 2, /* block-row-level error conceal */
+ ERROR_CONCEAL_UNIT_BLOCK = 3 /* block-level conceal */
+};
+
+enum cb_cr_order {
+ CBCR_ORDER_NORMAL,
+ CBCR_ORDER_REVERSED
+};
+
+enum mirror_direction {
+ MIRDIR_NONE, /* no mirroring */
+ MIRDIR_VER, /* vertical mirroring */
+ MIRDIR_HOR, /* horizontal mirroring */
+ MIRDIR_HOR_VER /* horizontal and vertical mirroring */
+};
+
+enum frame_buffer_format {
+ FORMAT_ERR = -1,
+ FORMAT_420 = 0, /* 8bit */
+ FORMAT_422, /* 8bit */
+ FORMAT_224, /* 8bit */
+ FORMAT_444, /* 8bit */
+ FORMAT_400, /* 8bit */
+
+ /* little endian perspective */
+ /* | addr 0 | addr 1 | */
+ FORMAT_420_P10_16BIT_MSB = 5, /* lsb |000000xx|xxxxxxxx | msb */
+	FORMAT_420_P10_16BIT_LSB, /* lsb |xxxxxxxx |xx000000 | msb */
+ FORMAT_420_P10_32BIT_MSB, /* lsb |00xxxxxxxxxxxxxxxxxxxxxxxxxxx| msb */
+ FORMAT_420_P10_32BIT_LSB, /* lsb |xxxxxxxxxxxxxxxxxxxxxxxxxxx00| msb */
+
+ /* 4:2:2 packed format */
+ /* little endian perspective */
+ /* | addr 0 | addr 1 | */
+ FORMAT_422_P10_16BIT_MSB, /* lsb |000000xx |xxxxxxxx | msb */
+ FORMAT_422_P10_16BIT_LSB, /* lsb |xxxxxxxx |xx000000 | msb */
+ FORMAT_422_P10_32BIT_MSB, /* lsb |00xxxxxxxxxxxxxxxxxxxxxxxxxxx| msb */
+ FORMAT_422_P10_32BIT_LSB, /* lsb |xxxxxxxxxxxxxxxxxxxxxxxxxxx00| msb */
+
+ FORMAT_YUYV, /* 8bit packed format : Y0U0Y1V0 Y2U1Y3V1 ... */
+ FORMAT_YUYV_P10_16BIT_MSB,
+ FORMAT_YUYV_P10_16BIT_LSB,
+ FORMAT_YUYV_P10_32BIT_MSB,
+ FORMAT_YUYV_P10_32BIT_LSB,
+
+ FORMAT_YVYU, /* 8bit packed format : Y0V0Y1U0 Y2V1Y3U1 ... */
+ FORMAT_YVYU_P10_16BIT_MSB,
+ FORMAT_YVYU_P10_16BIT_LSB,
+ FORMAT_YVYU_P10_32BIT_MSB,
+ FORMAT_YVYU_P10_32BIT_LSB,
+
+ FORMAT_UYVY, /* 8bit packed format : U0Y0V0Y1 U1Y2V1Y3 ... */
+ FORMAT_UYVY_P10_16BIT_MSB,
+ FORMAT_UYVY_P10_16BIT_LSB,
+ FORMAT_UYVY_P10_32BIT_MSB,
+ FORMAT_UYVY_P10_32BIT_LSB,
+
+ FORMAT_VYUY, /* 8bit packed format : V0Y0U0Y1 V1Y2U1Y3 ... */
+ FORMAT_VYUY_P10_16BIT_MSB,
+ FORMAT_VYUY_P10_16BIT_LSB,
+ FORMAT_VYUY_P10_32BIT_MSB,
+ FORMAT_VYUY_P10_32BIT_LSB,
+
+ FORMAT_MAX,
+};
+
+enum packed_format_num {
+ NOT_PACKED = 0,
+ PACKED_YUYV,
+ PACKED_YVYU,
+ PACKED_UYVY,
+ PACKED_VYUY,
+};
+
+enum wave5_interrupt_bit {
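+	/* several bit positions are shared; their meaning depends on decode vs. encode context */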
+ INT_WAVE5_INIT_VPU = 0,
+ INT_WAVE5_WAKEUP_VPU = 1,
+ INT_WAVE5_SLEEP_VPU = 2,
+ INT_WAVE5_CREATE_INSTANCE = 3,
+ INT_WAVE5_FLUSH_INSTANCE = 4,
+ INT_WAVE5_DESTROY_INSTANCE = 5,
+ INT_WAVE5_INIT_SEQ = 6,
+ INT_WAVE5_SET_FRAMEBUF = 7,
+ INT_WAVE5_DEC_PIC = 8,
+ INT_WAVE5_ENC_PIC = 8,
+ INT_WAVE5_ENC_SET_PARAM = 9,
+ INT_WAVE5_DEC_QUERY = 14,
+ INT_WAVE5_BSBUF_EMPTY = 15,
+ INT_WAVE5_BSBUF_FULL = 15,
+};
+
+enum pic_type {
+ PIC_TYPE_I = 0, /* I picture */
+ PIC_TYPE_KEY = 0, /* KEY frame for AV1*/
+ PIC_TYPE_P = 1, /* P picture */
+ PIC_TYPE_INTER = 1, /* inter frame for AV1*/
+ PIC_TYPE_B = 2, /* B picture (except VC1) */
+ PIC_TYPE_REPEAT = 2, /* repeat frame (VP9 only) */
+ PIC_TYPE_AV1_INTRA = 2, /* intra only frame (AV1 only) */
+ PIC_TYPE_VC1_BI = 2, /* VC1 BI picture (VC1 only) */
+ PIC_TYPE_VC1_B = 3, /* VC1 B picture (VC1 only) */
+ PIC_TYPE_D = 3,
+ PIC_TYPE_S = 3,
+ PIC_TYPE_AVS2_F = 3, /* F picture in AVS2 */
+ PIC_TYPE_AV1_SWITCH = 3, /* switch frame (AV1 only) */
+ PIC_TYPE_VC1_P_SKIP = 4, /* VC1 P skip picture (VC1 only) */
+ PIC_TYPE_MP4_P_SKIP_NOT_CODED = 4, /* not coded P picture in MPEG4 packed mode */
+ PIC_TYPE_AVS2_S = 4, /* S picture in AVS2 */
+ PIC_TYPE_IDR = 5, /* H.264/H.265 IDR picture */
+ PIC_TYPE_AVS2_G = 5, /* G picture in AVS2 */
+ PIC_TYPE_AVS2_GB = 6, /* GB picture in AVS2 */
+ PIC_TYPE_MAX /* no meaning */
+};
+
+enum bit_stream_mode {
+ BS_MODE_INTERRUPT,
+	BS_MODE_RESERVED, /* reserved for future use */
+ BS_MODE_PIC_END,
+};
+
+enum sw_reset_mode {
+ SW_RESET_SAFETY,
+ SW_RESET_FORCE,
+ SW_RESET_ON_BOOT
+};
+
+enum tiled_map_type {
+ LINEAR_FRAME_MAP = 0, /* linear frame map type */
+ COMPRESSED_FRAME_MAP = 17, /* compressed frame map type*/
+};
+
+enum temporal_id_mode {
+ TEMPORAL_ID_MODE_ABSOLUTE,
+ TEMPORAL_ID_MODE_RELATIVE,
+};
+
+struct vpu_attr {
+ u32 product_id; /* the product ID */
+ char product_name[8]; /* the product name in ascii code */
+ u32 product_version; /* the product version number */
+ u32 fw_version; /* the F/W version */
+ u32 customer_id; /* customer ID number */
+ u32 support_decoders; /* bitmask: see <<vpuapi_h_cod_std>> */
+ u32 support_encoders; /* bitmask: see <<vpuapi_h_cod_std>> */
+	u32 support_endian_mask; /* bitmask of the endianness modes the product supports */
+ u32 support_bitstream_mode;
+ u32 support_backbone: 1;
+ u32 support_avc10bit_enc: 1;
+ u32 support_hevc10bit_enc: 1;
+ u32 support_dual_core: 1; /* this indicates whether a product has two vcores */
+ u32 support_vcore_backbone: 1;
+ u32 support_vcpu_backbone: 1;
+};
+
+struct frame_buffer {
+ dma_addr_t buf_y;
+ dma_addr_t buf_cb;
+ dma_addr_t buf_cr;
+ unsigned int buf_y_size;
+ unsigned int buf_cb_size;
+ unsigned int buf_cr_size;
+ unsigned int endian;
+ enum tiled_map_type map_type;
+ unsigned int stride; /* A horizontal stride for given frame buffer */
+ unsigned int width; /* A width for given frame buffer */
+ unsigned int height; /* A height for given frame buffer */
+ size_t size; /* A size for given frame buffer */
+ unsigned int sequence_no;
+ bool update_fb_info;
+};
+
+struct vpu_rect {
+ unsigned int left; /* horizontal pixel offset from left edge */
+ unsigned int top; /* vertical pixel offset from top edge */
+ unsigned int right; /* horizontal pixel offset from right edge */
+ unsigned int bottom; /* vertical pixel offset from bottom edge */
+};
+
+/*
+ * decode struct and definition
+ */
+
+struct dec_open_param {
+ dma_addr_t bitstream_buffer;
+ size_t bitstream_buffer_size;
+ enum cb_cr_order cbcr_order;
+ unsigned int frame_endian;
+ unsigned int stream_endian;
+ enum bit_stream_mode bitstream_mode;
+ u32 av1_format;
+ enum error_conceal_unit error_conceal_unit;
+ enum error_conceal_mode error_conceal_mode;
+ u32 pri_axprot;
+ u32 pri_axcache;
+ u32 enable_non_ref_fbc_write: 1;
+};
+
+struct dec_initial_info {
+ u32 pic_width;
+ u32 pic_height;
+ s32 f_rate_numerator; /* the numerator part of frame rate fraction */
+ s32 f_rate_denominator; /* the denominator part of frame rate fraction */
+ struct vpu_rect pic_crop_rect;
+	u32 min_frame_buffer_count; /* between 1 and 16 */
+ u32 frame_buf_delay;
+
+ u32 max_temporal_layers; /* it indicates the max number of temporal sub-layers */
+ u32 profile;
+ u32 level;
+ u32 tier;
+ bool is_ext_sar;
+ u32 aspect_rate_info;
+ u32 bit_rate;
+ u32 user_data_header;
+ u32 user_data_size;
+ bool user_data_buf_full;
+ u32 chroma_format_idc;/* A chroma format indicator */
+ u32 luma_bitdepth; /* A bit-depth of luma sample */
+ u32 chroma_bitdepth; /* A bit-depth of chroma sample */
+ u32 seq_init_err_reason;
+ u32 warn_info;
+ dma_addr_t rd_ptr; /* A read pointer of bitstream buffer */
+ dma_addr_t wr_ptr; /* A write pointer of bitstream buffer */
+ u32 sequence_no;
+ u32 output_bit_depth;
+ u32 vlc_buf_size; /* the size of vlc buffer */
+ u32 param_buf_size; /* the size of param buffer */
+};
+
+#define WAVE_SKIPMODE_WAVE_NONE 0
+#define WAVE_SKIPMODE_NON_IRAP 1
+#define WAVE_SKIPMODE_NON_REF 2
+
+struct dec_param {
+ u32 skipframe_mode: 2;
+ u32 cra_as_bla_flag: 1;
+ u32 disable_film_grain: 1;
+};
+
+struct avs2_info {
+ s32 decoded_poi;
+ int display_poi;
+};
+
+struct dec_output_info {
+	/**
+	 * this is a frame buffer index for the picture to be displayed at the moment among
+	 * frame buffers which are registered using vpu_dec_register_frame_buffer(). frame
+	 * data to be displayed is stored in the frame buffer with this index.
+	 * when there is no display delay, this index is always the same as
+	 * index_frame_decoded; however, if a display delay does exist (for display
+	 * reordering in AVC or B-frames in VC1), this index might differ from
+	 * index_frame_decoded. by checking this index, the HOST application can easily
+	 * know whether sequence decoding has finished or not.
+	 *
+	 * -3(0xFFFD) or -2(0xFFFE) : when a display output cannot be given due to picture
+	 * reordering or the skip option
+	 * -1(0xFFFF) : when there is no more output for display at the end of sequence
+	 * decoding
+	 */
+ s32 index_frame_display;
+	/**
+	 * this is a frame buffer index of the decoded picture among the frame buffers which
+	 * were registered using vpu_dec_register_frame_buffer(). the currently decoded frame
+	 * is stored in the frame buffer specified by this index.
+	 *
+	 * -2 : it indicates that no decoded output is generated because the decoder met an
+	 * EOS (end of sequence) or a skip
+	 * -1 : it indicates that the decoder failed to decode a picture because no frame
+	 * buffer was available
+	 */
+ s32 index_frame_decoded;
+ s32 index_frame_decoded_for_tiled;
+ u32 nal_type;
+ unsigned int pic_type;
+ struct vpu_rect rc_display;
+ unsigned int disp_pic_width;
+ unsigned int disp_pic_height;
+ struct vpu_rect rc_decoded;
+ u32 dec_pic_width;
+ u32 dec_pic_height;
+ struct avs2_info avs2_info;
+ s32 decoded_poc;
+ int temporal_id; /* A temporal ID of the picture */
+ dma_addr_t rd_ptr; /* A stream buffer read pointer for the current decoder instance */
+ dma_addr_t wr_ptr; /* A stream buffer write pointer for the current decoder instance */
+ struct frame_buffer disp_frame;
+ u32 frame_display_flag; /* it reports a frame buffer flag to be displayed */
+	unsigned int frame_cycle; /* reports the number of cycles for processing a frame */
+	u32 sequence_no;
+
+	u32 dec_host_cmd_tick; /* tick of DEC_PIC command for the picture */
+	u32 dec_decode_end_tick; /* end tick of decoding slices of the picture */
+
+	/**
+	 * this variable reports that the sequence has changed while decoding an H.264/AVC
+	 * stream. if it is 1, the HOST application can get the new sequence information by
+	 * calling vpu_dec_get_initial_info() or wave5_vpu_dec_issue_seq_init().
+	 *
+	 * for the H.265/HEVC decoder, each bit has a different meaning as follows.
+	 *
+	 * sequence_changed[5] : it indicates that the profile_idc has changed
+	 * sequence_changed[16] : it indicates that the resolution has changed
+	 * sequence_changed[19] : it indicates that the required number of frame buffers
+	 * has changed.
+	 */
+	u32 sequence_changed;
+};
+
+struct queue_status_info {
+ u32 instance_queue_count;
+ u32 report_queue_count;
+};
+
+/*
+ * encode struct and definition
+ */
+
+#define MAX_NUM_TEMPORAL_LAYER 7
+#define MAX_NUM_SPATIAL_LAYER 3
+#define MAX_GOP_NUM 8
+
+struct custom_gop_pic_param {
+ u32 pic_type; /* A picture type of nth picture in the custom GOP */
+ u32 poc_offset; /* A POC of nth picture in the custom GOP */
+ u32 pic_qp; /* A quantization parameter of nth picture in the custom GOP */
+ u32 use_multi_ref_p; /* use multiref pic for P picture. valid only if PIC_TYPE is P */
+ u32 ref_poc_l0; /* A POC of reference L0 of nth picture in the custom GOP */
+ u32 ref_poc_l1; /* A POC of reference L1 of nth picture in the custom GOP */
+ s32 temporal_id; /* A temporal ID of nth picture in the custom GOP */
+};
+
+struct custom_gop_param {
+ u32 custom_gop_size; /* the size of custom GOP (0~8) */
+ struct custom_gop_pic_param pic_param[MAX_GOP_NUM];
+};
+
+struct wave_custom_map_opt {
+ u32 roi_avg_qp; /* it sets an average QP of ROI map */
+ u32 addr_custom_map;
+ u32 custom_roi_map_enable: 1; /* it enables ROI map */
+ u32 custom_lambda_map_enable: 1; /* it enables custom lambda map */
+ u32 custom_mode_map_enable: 1;
+ u32 custom_coef_drop_enable: 1;
+};
+
+struct enc_wave_param {
+	/*
+	 * A profile indicator (HEVC only)
+	 *
+	 * 0 : the firmware determines a profile according to internal_bit_depth
+	 * 1 : main profile
+	 * 2 : main10 profile
+	 * 3 : main still picture profile
+	 * in the AVC encoder, a profile cannot be set by the host application; the
+	 * firmware decides it based on internal_bit_depth: HIGH profile for a bitdepth
+	 * of 8 and HIGH10 profile for a bitdepth of 10.
+	 */
+ u32 profile;
+ u32 level; /* A level indicator (level * 10) */
+ u32 internal_bit_depth: 4; /* 8/10 */
+ u32 gop_preset_idx: 4; /* 0 - 9 */
+ u32 decoding_refresh_type: 2; /* 0=non-IRAP, 1=CRA, 2=IDR */
+ u32 intra_qp; /* A quantization parameter of intra picture */
+ u32 intra_period; /* A period of intra picture in GOP size */
+ u32 forced_idr_header_enable: 2;
+ u32 conf_win_top; /* A top offset of conformance window */
+ u32 conf_win_bot; /* A bottom offset of conformance window */
+ u32 conf_win_left; /* A left offset of conformance window */
+ u32 conf_win_right; /* A right offset of conformance window */
+ u32 independ_slice_mode_arg;
+ u32 depend_slice_mode_arg;
+ u32 intra_refresh_mode: 3;
+ /*
+ * it specifies an intra CTU refresh interval. depending on intra_refresh_mode,
+ * it can mean one of the following.
+ *
+ * the number of consecutive CTU rows for intra_ctu_refresh_mode of 1
+ * the number of consecutive CTU columns for intra_ctu_refresh_mode of 2
+ * A step size in CTU for intra_ctu_refresh_mode of 3
+	 * the number of intra CTUs to be encoded in a picture for intra_ctu_refresh_mode of 4
+ */
+ u32 intra_refresh_arg;
+	u32 depend_slice_mode : 2;
+	/*
+	 * 0 : custom setting
+	 * 1 : recommended encoder parameters (slow encoding speed, highest picture quality)
+	 * 2 : boost mode (normal encoding speed, moderate picture quality)
+	 * 3 : fast mode (fast encoding speed, low picture quality)
+	 */
+	u32 use_recommend_enc_param: 2;
+ u32 max_num_merge: 2;
+ u32 scaling_list_enable: 2;
+ u32 bit_alloc_mode: 2; /* 0=ref-pic-priority, 1=uniform, 2=fixed_bit_ratio */
+ s32 beta_offset_div2: 4; /* it sets beta_offset_div2 for deblocking filter */
+	s32 tc_offset_div2: 4; /* it sets tc_offset_div2 for deblocking filter */
+ u32 hvs_qp_scale: 4; /* QP scaling factor for CU QP adjust if hvs_qp_scale_enable is 1 */
+ u32 hvs_max_delta_qp; /* A maximum delta QP for HVS */
+	/*
+	 * A fixed bit ratio (1 ~ 255) for each picture of the GOP's bit
+	 * allocation
+	 *
+	 * N = 0 ~ (MAX_GOP_SIZE - 1)
+	 * MAX_GOP_SIZE = 8
+	 *
+	 * for instance, when MAX_GOP_SIZE is 3, fixed_bit_ratio[0], fixed_bit_ratio[1],
+	 * and fixed_bit_ratio[2] can be set to 2, 1, and 1 respectively for
+	 * a fixed bit ratio of 2:1:1. this is only valid when bit_alloc_mode is 2.
+	 */
+ u8 fixed_bit_ratio[MAX_GOP_NUM];
+ struct custom_gop_param gop_param; /* <<vpuapi_h_custom_gop_param>> */
+ u32 num_units_in_tick;
+ u32 time_scale;
+ u32 num_ticks_poc_diff_one;
+ s32 chroma_cb_qp_offset; /* the value of chroma(cb) QP offset */
+ s32 chroma_cr_qp_offset; /* the value of chroma(cr) QP offset */
+ s32 initial_rc_qp;
+ u32 nr_intra_weight_y;
+ u32 nr_intra_weight_cb; /* A weight to cb noise level for intra picture (0 ~ 31) */
+ u32 nr_intra_weight_cr; /* A weight to cr noise level for intra picture (0 ~ 31) */
+ u32 nr_inter_weight_y;
+ u32 nr_inter_weight_cb; /* A weight to cb noise level for inter picture (0 ~ 31) */
+ u32 nr_inter_weight_cr; /* A weight to cr noise level for inter picture (0 ~ 31) */
+ u32 nr_noise_sigma_y; /* Y noise standard deviation if nr_noise_est_enable is 0 */
+ u32 nr_noise_sigma_cb;/* cb noise standard deviation if nr_noise_est_enable is 0 */
+ u32 nr_noise_sigma_cr;/* cr noise standard deviation if nr_noise_est_enable is 0 */
+ u32 bg_thr_diff;
+ u32 bg_thr_mean_diff;
+ u32 bg_lambda_qp;
+ u32 bg_delta_qp;
+ u32 pu04_delta_rate: 8; /* added to the total cost of 4x4 blocks */
+ u32 pu08_delta_rate: 8; /* added to the total cost of 8x8 blocks */
+ u32 pu16_delta_rate: 8; /* added to the total cost of 16x16 blocks */
+ u32 pu32_delta_rate: 8; /* added to the total cost of 32x32 blocks */
+ u32 pu04_intra_planar_delta_rate: 8;
+ u32 pu04_intra_dc_delta_rate: 8;
+ u32 pu04_intra_angle_delta_rate: 8;
+ u32 pu08_intra_planar_delta_rate: 8;
+ u32 pu08_intra_dc_delta_rate: 8;
+ u32 pu08_intra_angle_delta_rate: 8;
+ u32 pu16_intra_planar_delta_rate: 8;
+ u32 pu16_intra_dc_delta_rate: 8;
+ u32 pu16_intra_angle_delta_rate: 8;
+ u32 pu32_intra_planar_delta_rate: 8;
+ u32 pu32_intra_dc_delta_rate: 8;
+ u32 pu32_intra_angle_delta_rate: 8;
+ u32 cu08_intra_delta_rate: 8;
+ u32 cu08_inter_delta_rate: 8;
+ u32 cu08_merge_delta_rate: 8;
+ u32 cu16_intra_delta_rate: 8;
+ u32 cu16_inter_delta_rate: 8;
+ u32 cu16_merge_delta_rate: 8;
+ u32 cu32_intra_delta_rate: 8;
+ u32 cu32_inter_delta_rate: 8;
+ u32 cu32_merge_delta_rate: 8;
+ u32 coef_clear_disable: 8;
+ u32 min_qp_i; /* A minimum QP of I picture for rate control */
+ u32 max_qp_i; /* A maximum QP of I picture for rate control */
+ u32 min_qp_p; /* A minimum QP of P picture for rate control */
+ u32 max_qp_p; /* A maximum QP of P picture for rate control */
+ u32 min_qp_b; /* A minimum QP of B picture for rate control */
+ u32 max_qp_b; /* A maximum QP of B picture for rate control */
+ u32 custom_lambda_addr; /* it specifies the address of custom lambda map */
+ u32 user_scaling_list_addr; /* it specifies the address of user scaling list file */
+ u32 avc_idr_period;/* A period of IDR picture (0 ~ 1024). 0 - implies an infinite period */
+ u32 avc_slice_arg; /* the number of MB for a slice when avc_slice_mode is set with 1 */
+ u32 intra_mb_refresh_mode: 2; /* 0=none, 1=row, 2=column, 3=step-size-in-mb */
+ /**
+ * it specifies an intra MB refresh interval. depending on intra_mb_refresh_mode,
+ * it can mean one of the following.
+ *
+ * the number of consecutive MB rows for intra_mb_refresh_mode of 1
+ * the number of consecutive MB columns for intra_mb_refresh_mode of 2
+ * A step size in MB for intra_mb_refresh_mode of 3
+ */
+ u32 intra_mb_refresh_arg;
+ u32 rc_weight_param;
+ u32 rc_weight_buf;
+
+ /* flags */
+ u32 en_still_picture: 1; /* still picture profile */
+ u32 tier: 1; /* 0=main, 1=high */
+ u32 independ_slice_mode : 1; /* 0=no-multi-slice, 1=slice-in-ctu-number*/
+ u32 avc_slice_mode: 1; /* 0=none, 1=slice-in-mb-number */
+ u32 entropy_coding_mode: 1; /* 0=CAVLC, 1=CABAC */
+ u32 lossless_enable: 1; /* enables lossless coding */
+ u32 const_intra_pred_flag: 1; /* enables constrained intra prediction */
+ u32 tmvp_enable: 1; /* enables temporal motion vector prediction */
+ u32 wpp_enable: 1;
+ u32 disable_deblk: 1; /* it disables in-loop deblocking filtering */
+ u32 lf_cross_slice_boundary_enable: 1;
+ u32 skip_intra_trans: 1;
+ u32 sao_enable: 1; /* it enables SAO (sample adaptive offset) */
+	u32 intra_nx_n_enable: 1; /* it enables intra NxN PUs */
+	u32 cu_level_rc_enable: 1; /* it enables CU-level rate control */
+ u32 hvs_qp_enable: 1; /* enable CU QP adjustment for subjective quality enhancement */
+ u32 roi_enable: 1; /* it enables ROI map. NOTE: it is valid when rate control is on */
+ u32 nr_y_enable: 1; /* it enables noise reduction algorithm to Y component */
+ u32 nr_noise_est_enable: 1;
+ u32 nr_cb_enable: 1; /* it enables noise reduction algorithm to cb component */
+ u32 nr_cr_enable: 1; /* it enables noise reduction algorithm to cr component */
+ u32 use_long_term: 1; /* it enables long-term reference function */
+ u32 monochrome_enable: 1; /* it enables monochrome encoding mode */
+ u32 strong_intra_smooth_enable: 1; /* it enables strong intra smoothing */
+ u32 weight_pred_enable: 1; /* it enables weighted prediction */
+ u32 bg_detect_enable: 1; /* it enables background detection */
+ u32 custom_lambda_enable: 1; /* it enables custom lambda table */
+ u32 custom_md_enable: 1; /* it enables custom mode decision */
+ u32 rdo_skip: 1; /* it skips RDO (rate-distortion optimization) */
+ u32 lambda_scaling_enable: 1; /* it enables lambda scaling using custom GOP */
+ u32 transform8x8_enable: 1; /* it enables 8x8 intra prediction and 8x8 transform */
+ u32 mb_level_rc_enable: 1; /* it enables MB-level rate control */
+ u32 s2fme_disable: 1; /* it disables s2me_fme (only for AVC encoder) */
+};
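The intra_mb_refresh_mode/intra_mb_refresh_arg pairing documented above is easy to misread, so here is a minimal sketch of how the two fields combine for row-based refresh; the helper name and the policy are illustrative assumptions, not driver code.

static void example_intra_refresh_setup(struct enc_wave_param *p,
					u32 mb_rows_per_interval)
{
	/*
	 * In mode 1 the argument is a count of consecutive MB rows;
	 * mode 2 would make it a column count, mode 3 a step size in MBs.
	 */
	p->intra_mb_refresh_mode = 1;
	p->intra_mb_refresh_arg = mb_rows_per_interval;
}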
+
+struct enc_sub_frame_sync_config {
+ u32 sub_frame_sync_mode; /* 0=wire-based, 1=register-based */
+ u32 sub_frame_sync_on;
+};
+
+struct enc_open_param {
+ dma_addr_t bitstream_buffer;
+ unsigned int bitstream_buffer_size;
+ u32 pic_width; /* the width of the picture to be encoded, in samples */
+ u32 pic_height; /* the height of the picture to be encoded, in samples */
+ u32 frame_rate_info; /* desired frame rate in fps */
+ u32 vbv_buffer_size;
+ u32 bit_rate; /* target bitrate in bps */
+ struct enc_wave_param wave_param;
+ enum cb_cr_order cbcr_order;
+ unsigned int stream_endian;
+ unsigned int source_endian;
+ enum packed_format_num packed_format; /* <<vpuapi_h_packed_format_num>> */
+ enum frame_buffer_format src_format;
+ /* enum frame_buffer_format output_format; not used yet */
+ u32 enc_hrd_rbsp_in_vps; /* it encodes the HRD syntax rbsp into VPS */
+ u32 hrd_rbsp_data_size; /* the bit size of the HRD rbsp data */
+ u32 hrd_rbsp_data_addr; /* the address of the HRD rbsp data */
+ u32 encode_vui_rbsp;
+ u32 vui_rbsp_data_size; /* the bit size of the VUI rbsp data */
+ u32 vui_rbsp_data_addr; /* the address of the VUI rbsp data */
+ u32 pri_axprot;
+ u32 pri_axcache;
+ bool ring_buffer_enable;
+ bool line_buf_int_en;
+ bool enable_pts; /* enable reporting of the PTS (presentation timestamp) */
+ u32 rc_enable : 1; /* rate control */
+ u32 enable_non_ref_fbc_write: 1;
+ u32 sub_frame_sync_enable: 1;
+ u32 sub_frame_sync_mode: 1;
+};
+
+struct enc_initial_info {
+ u32 min_frame_buffer_count; /* minimum number of frame buffers */
+ u32 min_src_frame_count; /* minimum number of source buffers */
+ u32 max_latency_pictures; /* maximum picture latency */
+ u32 seq_init_err_reason; /* error information */
+ u32 warn_info; /* warning information */
+ u32 vlc_buf_size; /* the size of the VLC task buffer */
+ u32 param_buf_size; /* the size of the parameter task buffer */
+};
+
+struct enc_code_opt {
+ u32 implicit_header_encode: 1;
+ u32 encode_vcl: 1; /* A flag to encode VCL nal unit explicitly */
+ u32 encode_vps: 1; /* A flag to encode VPS nal unit explicitly */
+ u32 encode_sps: 1; /* A flag to encode SPS nal unit explicitly */
+ u32 encode_pps: 1; /* A flag to encode PPS nal unit explicitly */
+ u32 encode_aud: 1; /* A flag to encode AUD nal unit explicitly */
+ u32 encode_eos: 1;
+ u32 encode_eob: 1;
+ u32 encode_vui: 1; /* A flag to encode VUI nal unit explicitly */
+};
+
+struct enc_param {
+ struct frame_buffer *source_frame;
+ dma_addr_t pic_stream_buffer_addr;
+ u64 pic_stream_buffer_size;
+ u32 force_pic_qp_i;
+ u32 force_pic_qp_p;
+ u32 force_pic_qp_b;
+ u32 force_pic_type: 2;
+ u32 src_idx; /* A source frame buffer index */
+ struct enc_code_opt code_option;
+ u32 use_cur_src_as_longterm_pic;
+ u32 use_longterm_ref;
+ u64 pts; /* the presentation timestamp (PTS) of input source */
+ struct wave_custom_map_opt custom_map_opt;
+ u32 wp_pix_sigma_y; /* pixel variance of Y component for weighted prediction */
+ u32 wp_pix_sigma_cb; /* pixel variance of cb component for weighted prediction */
+ u32 wp_pix_sigma_cr; /* pixel variance of cr component for weighted prediction */
+ u32 wp_pix_mean_y; /* pixel mean value of Y component for weighted prediction */
+ u32 wp_pix_mean_cb; /* pixel mean value of cb component for weighted prediction */
+ u32 wp_pix_mean_cr; /* pixel mean value of cr component for weighted prediction */
+ bool src_end_flag;
+ u32 skip_picture: 1;
+ u32 force_pic_qp_enable: 1; /* flag used to force picture quantization parameter */
+ u32 force_pic_type_enable: 1; /* A flag to force the picture type */
+ u32 force_all_ctu_coef_drop_enable: 1; /* forces all coefficients to be zero after TQ */
+};
+
+struct enc_output_info {
+ dma_addr_t bitstream_buffer;
+ u32 bitstream_size; /* the byte size of encoded bitstream */
+ u32 pic_type: 2; /* <<vpuapi_h_pic_type>> */
+ s32 recon_frame_index;
+ dma_addr_t rd_ptr;
+ dma_addr_t wr_ptr;
+ u32 enc_pic_byte; /* the number of encoded picture bytes */
+ s32 enc_src_idx; /* the source buffer index of the currently encoded picture */
+ u32 enc_vcl_nut;
+ u32 error_reason; /* the error reason of the currently encoded picture */
+ u32 warn_info; /* the warning information of the currently encoded picture */
+ unsigned int frame_cycle; /* the number of cycles taken to encode one frame */
+ u64 pts;
+ u32 enc_host_cmd_tick; /* tick of ENC_PIC command for the picture */
+ u32 enc_encode_end_tick; /* end tick of encoding slices of the picture */
+};
+
+enum ENC_PIC_CODE_OPTION {
+ CODEOPT_ENC_HEADER_IMPLICIT = BIT(0),
+ CODEOPT_ENC_VCL = BIT(1), /* A flag to encode VCL nal unit explicitly */
+};
+
+enum GOP_PRESET_IDX {
+ PRESET_IDX_CUSTOM_GOP = 0, /* user defined GOP structure */
+ PRESET_IDX_ALL_I = 1, /* all intra, gopsize = 1 */
+ PRESET_IDX_IPP = 2, /* consecutive P, cyclic gopsize = 1 */
+ PRESET_IDX_IBBB = 3, /* consecutive B, cyclic gopsize = 1 */
+ PRESET_IDX_IBPBP = 4, /* gopsize = 2 */
+ PRESET_IDX_IBBBP = 5, /* gopsize = 4 */
+ PRESET_IDX_IPPPP = 6, /* consecutive P, cyclic gopsize = 4 */
+ PRESET_IDX_IBBBB = 7, /* consecutive B, cyclic gopsize = 4 */
+ PRESET_IDX_RA_IB = 8, /* random access, cyclic gopsize = 8 */
+ PRESET_IDX_IPP_SINGLE = 9, /* consecutive P, cyclic gopsize = 1, with single ref */
+};
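As a usage sketch (not part of the driver), the presets trade latency against compression efficiency; the selection policy below is an assumption for illustration.

static enum GOP_PRESET_IDX example_pick_gop_preset(bool low_latency)
{
	/* IPP references only past frames, so there is no reordering
	 * latency; RA_IB accepts latency for better compression.
	 */
	return low_latency ? PRESET_IDX_IPP : PRESET_IDX_RA_IB;
}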
+
+struct sec_axi_info {
+ struct {
+ u32 use_ip_enable;
+ u32 use_bit_enable;
+ u32 use_lf_row_enable: 1;
+ u32 use_enc_rdo_enable: 1;
+ u32 use_enc_lf_enable: 1;
+ } wave;
+ unsigned int buf_size;
+ dma_addr_t buf_base;
+};
+
+struct dec_info {
+ struct dec_open_param open_param;
+ struct dec_initial_info initial_info;
+ struct dec_initial_info new_seq_info; /* temporary new sequence information */
+ dma_addr_t stream_wr_ptr;
+ dma_addr_t stream_rd_ptr;
+ u32 frame_display_flag;
+ dma_addr_t stream_buf_start_addr;
+ dma_addr_t stream_buf_end_addr;
+ u32 stream_buf_size;
+ struct vpu_buf vb_mv[MAX_REG_FRAME];
+ struct vpu_buf vb_fbc_y_tbl[MAX_REG_FRAME];
+ struct vpu_buf vb_fbc_c_tbl[MAX_REG_FRAME];
+ unsigned int num_of_decoding_fbs;
+ unsigned int num_of_display_fbs;
+ unsigned int stride;
+ enum mirror_direction mirror_direction;
+ unsigned int rotation_angle;
+ struct frame_buffer rotator_output;
+ unsigned int rotator_stride;
+ struct sec_axi_info sec_axi_info;
+ dma_addr_t user_data_buf_addr;
+ u32 user_data_enable;
+ u32 user_data_buf_size;
+ struct vpu_buf vb_work;
+ struct vpu_buf vb_task;
+ struct dec_output_info dec_out_info[WAVE5_MAX_FBS];
+ u32 seq_change_mask;
+ enum temporal_id_mode temp_id_select_mode;
+ u32 target_temp_id;
+ u32 target_spatial_id;
+ u32 instance_queue_count;
+ u32 report_queue_count;
+ u32 cycle_per_tick;
+ u32 product_code;
+ u32 vlc_buf_size;
+ u32 param_buf_size;
+ bool rotation_enable;
+ bool mirror_enable;
+ bool dering_enable;
+ bool initial_info_obtained;
+ bool reorder_enable;
+ bool thumbnail_mode;
+ bool first_cycle_check;
+ u32 stream_endflag: 1;
+};
+
+struct enc_info {
+ struct enc_open_param open_param;
+ struct enc_initial_info initial_info;
+ dma_addr_t stream_rd_ptr;
+ dma_addr_t stream_wr_ptr;
+ dma_addr_t stream_buf_start_addr;
+ dma_addr_t stream_buf_end_addr;
+ u32 stream_buf_size;
+ unsigned int num_frame_buffers;
+ unsigned int stride;
+ bool rotation_enable;
+ bool mirror_enable;
+ enum mirror_direction mirror_direction;
+ unsigned int rotation_angle;
+ bool initial_info_obtained;
+ bool ring_buffer_enable;
+ struct sec_axi_info sec_axi_info;
+ struct enc_sub_frame_sync_config sub_frame_sync_config;
+ bool line_buf_int_en;
+ struct vpu_buf vb_work;
+ struct vpu_buf vb_mv; /* col_mv buffer */
+ struct vpu_buf vb_fbc_y_tbl; /* FBC luma table buffer */
+ struct vpu_buf vb_fbc_c_tbl; /* FBC chroma table buffer */
+ struct vpu_buf vb_sub_sam_buf; /* sub-sampled buffer for ME */
+ struct vpu_buf vb_task;
+ u64 cur_pts; /* current timestamp in 90 kHz units */
+ u64 pts_map[32]; /* PTS mapped with source frame index */
+ u32 instance_queue_count;
+ u32 report_queue_count;
+ bool first_cycle_check;
+ u32 cycle_per_tick;
+ u32 product_code;
+ u32 vlc_buf_size;
+ u32 param_buf_size;
+};
+
+struct vpu_device {
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct list_head instances;
+ struct video_device *video_dev_dec;
+ struct video_device *video_dev_enc;
+ struct mutex dev_lock; /* the lock for the src/dst v4l2 queues */
+ struct mutex hw_lock; /* lock hw configurations */
+ int irq;
+ enum product_id product;
+ struct vpu_attr attr;
+ struct vpu_buf common_mem;
+ u32 last_performance_cycles;
+ struct dma_vpu_buf sram_buf;
+ void __iomem *vdb_register;
+ u32 product_code;
+ u32 ext_addr;
+ struct ida inst_ida;
+ struct clk_bulk_data *clks;
+ struct hrtimer hrtimer;
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ int num_clks;
+};
+
+struct vpu_instance;
+
+struct vpu_instance_ops {
+ void (*start_process)(struct vpu_instance *inst);
+ void (*stop_process)(struct vpu_instance *inst);
+ void (*finish_process)(struct vpu_instance *inst);
+};
+
+struct vpu_instance {
+ struct list_head list;
+ struct v4l2_fh v4l2_fh;
+ struct v4l2_ctrl_handler v4l2_ctrl_hdl;
+ struct vpu_device *dev;
+ struct v4l2_m2m_dev *v4l2_m2m_dev;
+ struct kfifo irq_status;
+ struct completion irq_done;
+
+ struct v4l2_pix_format_mplane src_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_hsv_encoding hsv_enc;
+
+ enum vpu_instance_state state;
+ enum vpu_instance_type type;
+ const struct vpu_instance_ops *ops;
+
+ enum wave_std std;
+ s32 id;
+ union {
+ struct enc_info enc_info;
+ struct dec_info dec_info;
+ } *codec_info;
+ struct frame_buffer frame_buf[MAX_REG_FRAME];
+ struct vpu_buf frame_vbuf[MAX_REG_FRAME];
+ u32 min_dst_buf_count;
+ u32 dst_buf_count;
+ u32 queued_src_buf_num;
+ u32 queued_dst_buf_num;
+ u32 conf_win_width;
+ u32 conf_win_height;
+ u64 timestamp;
+ bool cbcr_interleave;
+ bool nv21;
+ bool eos;
+
+ struct vpu_buf bitstream_vbuf;
+ bool thumbnail_mode;
+
+ unsigned int min_src_buf_count;
+ unsigned int src_buf_count;
+ unsigned int rot_angle;
+ unsigned int mirror_direction;
+ unsigned int bit_depth;
+ unsigned int frame_rate;
+ unsigned int vbv_buf_size;
+ unsigned int rc_mode;
+ unsigned int rc_enable;
+ unsigned int bit_rate;
+ struct enc_wave_param enc_param;
+};
+
+void wave5_vdi_write_register(struct vpu_device *vpu_dev, u32 addr, u32 data);
+u32 wave5_vdi_readl(struct vpu_device *vpu_dev, u32 addr);
+int wave5_vdi_clear_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+int wave5_vdi_allocate_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+int wave5_vdi_write_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb, size_t offset,
+ u8 *data, size_t len, unsigned int endian);
+unsigned int wave5_vdi_convert_endian(struct vpu_device *vpu_dev, unsigned int endian);
+void wave5_vdi_free_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+
+int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size);
+void wave5_vpu_clear_interrupt_ex(struct vpu_instance *inst, u32 intr_flag);
+int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id);
+int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param);
+int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res);
+int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst);
+int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info);
+int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
+ int num_of_display_fbs, int stride, int height,
+ int map_type);
+int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, struct dec_param *param,
+ u32 *res_fail);
+int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info);
+int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr);
+int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter);
+int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
+ dma_addr_t *pwr_ptr, size_t *size);
+int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size);
+int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index);
+int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index);
+
+int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param);
+int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res);
+int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst);
+int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info);
+int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
+ unsigned int stride, int height,
+ enum tiled_map_type map_type);
+int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param,
+ u32 *fail_res);
+int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info);
+int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter);
+
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
new file mode 100644
index 000000000000..4527eaf88848
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - product config definitions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef _VPU_CONFIG_H_
+#define _VPU_CONFIG_H_
+
+#define WAVE517_CODE 0x5170
+#define WAVE537_CODE 0x5370
+#define WAVE511_CODE 0x5110
+#define WAVE521_CODE 0x5210
+#define WAVE521C_CODE 0x521c
+#define WAVE521C_DUAL_CODE 0x521d // wave521 dual core
+#define WAVE521E1_CODE 0x5211
+
+#define PRODUCT_CODE_W_SERIES(x) ({ \
+ int c = x; \
+ ((c) == WAVE517_CODE || (c) == WAVE537_CODE || \
+ (c) == WAVE511_CODE || (c) == WAVE521_CODE || \
+ (c) == WAVE521E1_CODE || (c) == WAVE521C_CODE || \
+ (c) == WAVE521C_DUAL_CODE); \
+})
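A minimal sketch of how such a predicate is typically used when probing the hardware; the helper and its error policy are assumptions for illustration.

static int example_validate_product(u32 product_code)
{
	if (!PRODUCT_CODE_W_SERIES(product_code))
		return -EINVAL; /* not a recognized WAVE5xx part */
	return 0;
}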
+
+#define WAVE517_WORKBUF_SIZE (2 * 1024 * 1024)
+#define WAVE521ENC_WORKBUF_SIZE (128 * 1024) // HEVC: 128K, AVC: 40K
+#define WAVE521DEC_WORKBUF_SIZE (1784 * 1024)
+
+#define MAX_NUM_INSTANCE 32
+
+#define W5_MIN_ENC_PIC_WIDTH 256
+#define W5_MIN_ENC_PIC_HEIGHT 128
+#define W5_MAX_ENC_PIC_WIDTH 8192
+#define W5_MAX_ENC_PIC_HEIGHT 8192
+
+// application-specific configuration
+#define VPU_ENC_TIMEOUT 60000
+#define VPU_DEC_TIMEOUT 60000
+
+#define HOST_ENDIAN VDI_128BIT_LITTLE_ENDIAN
+#define VPU_FRAME_ENDIAN HOST_ENDIAN
+#define VPU_STREAM_ENDIAN HOST_ENDIAN
+#define VPU_USER_DATA_ENDIAN HOST_ENDIAN
+#define VPU_SOURCE_ENDIAN HOST_ENDIAN
+
+// for WAVE encoder
+#define USE_SRC_PRP_AXI 0
+#define USE_SRC_PRI_AXI 1
+#define DEFAULT_SRC_AXI USE_SRC_PRP_AXI
+
+/************************************************************************/
+/* VPU COMMON MEMORY */
+/************************************************************************/
+#define VLC_BUF_NUM (3)
+
+#define COMMAND_QUEUE_DEPTH (4)
+
+#define W5_REMAP_INDEX0 0
+#define W5_REMAP_INDEX1 1
+#define W5_REMAP_MAX_SIZE (1024 * 1024)
+
+#define WAVE5_MAX_CODE_BUF_SIZE (2 * 1024 * 1024)
+#define WAVE5_TEMPBUF_OFFSET WAVE5_MAX_CODE_BUF_SIZE
+#define WAVE5_TEMPBUF_SIZE (1024 * 1024)
+
+#define SIZE_COMMON (WAVE5_MAX_CODE_BUF_SIZE + WAVE5_TEMPBUF_SIZE)
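These defines imply a common-memory layout where the temp buffer starts immediately after the 2 MiB code buffer; a sketch of the resulting address arithmetic, with common_base a hypothetical base address for illustration:

static dma_addr_t example_tempbuf_addr(dma_addr_t common_base)
{
	/* the temp buffer follows the code buffer, 2 MiB into the region */
	return common_base + WAVE5_TEMPBUF_OFFSET;
}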
+
+/************************************************************************/
+/* VPU REPORT MEMORY */
+/************************************************************************/
+
+#define WAVE5_UPPER_PROC_AXI_ID 0x0
+
+#define WAVE5_PROC_AXI_ID 0x0
+#define WAVE5_PRP_AXI_ID 0x0
+#define WAVE5_FBD_Y_AXI_ID 0x0
+#define WAVE5_FBC_Y_AXI_ID 0x0
+#define WAVE5_FBD_C_AXI_ID 0x0
+#define WAVE5_FBC_C_AXI_ID 0x0
+#define WAVE5_SEC_AXI_ID 0x0
+#define WAVE5_PRI_AXI_ID 0x0
+
+#define WAVE5_PROC_AXI_AXPROT 0x0
+#define WAVE5_PROC_AXI_AXCACHE 0x0
+#define WAVE5_PROC_AXI_EXT_ADDR 0x0
+#define WAVE5_SEC_AXI_AXPROT 0x0
+#define WAVE5_SEC_AXI_AXCACHE 0x0
+#define WAVE5_SEC_AXI_EXT_ADDR 0x0
+
+#endif /* _VPU_CONFIG_H_ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h b/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h
new file mode 100644
index 000000000000..6d8c7bb0e8b2
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - error values
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef ERROR_CODE_H_INCLUDED
+#define ERROR_CODE_H_INCLUDED
+
+/*
+ * WAVE5
+ */
+
+/************************************************************************/
+/* WAVE5 COMMON SYSTEM ERROR (FAIL_REASON) */
+/************************************************************************/
+#define WAVE5_SYSERR_QUEUEING_FAIL 0x00000001
+#define WAVE5_SYSERR_ACCESS_VIOLATION_HW 0x00000040
+#define WAVE5_SYSERR_BUS_ERROR 0x00000200
+#define WAVE5_SYSERR_DOUBLE_FAULT 0x00000400
+#define WAVE5_SYSERR_RESULT_NOT_READY 0x00000800
+#define WAVE5_SYSERR_VPU_STILL_RUNNING 0x00001000
+#define WAVE5_SYSERR_UNKNOWN_CMD 0x00002000
+#define WAVE5_SYSERR_UNKNOWN_CODEC_STD 0x00004000
+#define WAVE5_SYSERR_UNKNOWN_QUERY_OPTION 0x00008000
+#define WAVE5_SYSERR_VLC_BUF_FULL 0x00010000
+#define WAVE5_SYSERR_WATCHDOG_TIMEOUT 0x00020000
+#define WAVE5_SYSERR_VCPU_TIMEOUT 0x00080000
+#define WAVE5_SYSERR_TEMP_SEC_BUF_OVERFLOW 0x00200000
+#define WAVE5_SYSERR_NEED_MORE_TASK_BUF 0x00400000
+#define WAVE5_SYSERR_PRESCAN_ERR 0x00800000
+#define WAVE5_SYSERR_ENC_GBIN_OVERCONSUME 0x01000000
+#define WAVE5_SYSERR_ENC_MAX_ZERO_DETECT 0x02000000
+#define WAVE5_SYSERR_ENC_LVL_FIRST_ERROR 0x04000000
+#define WAVE5_SYSERR_ENC_EG_RANGE_OVER 0x08000000
+#define WAVE5_SYSERR_ENC_IRB_FRAME_DROP 0x10000000
+#define WAVE5_SYSERR_INPLACE_V 0x20000000
+#define WAVE5_SYSERR_FATAL_VPU_HANGUP 0xf0000000
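The FAIL_REASON values above are bit flags, so several conditions can be reported at once; a sketch of how a caller might test them (the choice of which bits count as fatal is an assumption):

static bool example_syserr_is_fatal(u32 fail_reason)
{
	return (fail_reason & WAVE5_SYSERR_FATAL_VPU_HANGUP) ||
	       (fail_reason & WAVE5_SYSERR_DOUBLE_FAULT) ||
	       (fail_reason & WAVE5_SYSERR_BUS_ERROR);
}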
+
+/************************************************************************/
+/* WAVE5 COMMAND QUEUE ERROR (FAIL_REASON) */
+/************************************************************************/
+#define WAVE5_CMDQ_ERR_NOT_QUEABLE_CMD 0x00000001
+#define WAVE5_CMDQ_ERR_SKIP_MODE_ENABLE 0x00000002
+#define WAVE5_CMDQ_ERR_INST_FLUSHING 0x00000003
+#define WAVE5_CMDQ_ERR_INST_INACTIVE 0x00000004
+#define WAVE5_CMDQ_ERR_QUEUE_FAIL 0x00000005
+#define WAVE5_CMDQ_ERR_CMD_BUF_FULL 0x00000006
+
+/************************************************************************/
+/* WAVE5 ERROR ON DECODER (ERR_INFO) */
+/************************************************************************/
+// HEVC
+#define HEVC_SPSERR_SEQ_PARAMETER_SET_ID 0x00001000
+#define HEVC_SPSERR_CHROMA_FORMAT_IDC 0x00001001
+#define HEVC_SPSERR_PIC_WIDTH_IN_LUMA_SAMPLES 0x00001002
+#define HEVC_SPSERR_PIC_HEIGHT_IN_LUMA_SAMPLES 0x00001003
+#define HEVC_SPSERR_CONF_WIN_LEFT_OFFSET 0x00001004
+#define HEVC_SPSERR_CONF_WIN_RIGHT_OFFSET 0x00001005
+#define HEVC_SPSERR_CONF_WIN_TOP_OFFSET 0x00001006
+#define HEVC_SPSERR_CONF_WIN_BOTTOM_OFFSET 0x00001007
+#define HEVC_SPSERR_BIT_DEPTH_LUMA_MINUS8 0x00001008
+#define HEVC_SPSERR_BIT_DEPTH_CHROMA_MINUS8 0x00001009
+#define HEVC_SPSERR_LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4 0x0000100A
+#define HEVC_SPSERR_SPS_MAX_DEC_PIC_BUFFERING 0x0000100B
+#define HEVC_SPSERR_SPS_MAX_NUM_REORDER_PICS 0x0000100C
+#define HEVC_SPSERR_SPS_MAX_LATENCY_INCREASE 0x0000100D
+#define HEVC_SPSERR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3 0x0000100E
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE 0x0000100F
+#define HEVC_SPSERR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2 0x00001010
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE 0x00001011
+#define HEVC_SPSERR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER 0x00001012
+#define HEVC_SPSERR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA 0x00001013
+#define HEVC_SPSERR_SCALING_LIST 0x00001014
+#define HEVC_SPSERR_LOG2_DIFF_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3 0x00001015
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE 0x00001016
+#define HEVC_SPSERR_NUM_SHORT_TERM_REF_PIC_SETS 0x00001017
+#define HEVC_SPSERR_NUM_LONG_TERM_REF_PICS_SPS 0x00001018
+#define HEVC_SPSERR_GBU_PARSING_ERROR 0x00001019
+#define HEVC_SPSERR_EXTENSION_FLAG 0x0000101A
+#define HEVC_SPSERR_VUI_ERROR 0x0000101B
+#define HEVC_SPSERR_ACTIVATE_SPS 0x0000101C
+#define HEVC_SPSERR_PROFILE_SPACE 0x0000101D
+#define HEVC_PPSERR_PPS_PIC_PARAMETER_SET_ID 0x00002000
+#define HEVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID 0x00002001
+#define HEVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1 0x00002002
+#define HEVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1 0x00002003
+#define HEVC_PPSERR_INIT_QP_MINUS26 0x00002004
+#define HEVC_PPSERR_DIFF_CU_QP_DELTA_DEPTH 0x00002005
+#define HEVC_PPSERR_PPS_CB_QP_OFFSET 0x00002006
+#define HEVC_PPSERR_PPS_CR_QP_OFFSET 0x00002007
+#define HEVC_PPSERR_NUM_TILE_COLUMNS_MINUS1 0x00002008
+#define HEVC_PPSERR_NUM_TILE_ROWS_MINUS1 0x00002009
+#define HEVC_PPSERR_COLUMN_WIDTH_MINUS1 0x0000200A
+#define HEVC_PPSERR_ROW_HEIGHT_MINUS1 0x0000200B
+#define HEVC_PPSERR_PPS_BETA_OFFSET_DIV2 0x0000200C
+#define HEVC_PPSERR_PPS_TC_OFFSET_DIV2 0x0000200D
+#define HEVC_PPSERR_SCALING_LIST 0x0000200E
+#define HEVC_PPSERR_LOG2_PARALLEL_MERGE_LEVEL_MINUS2 0x0000200F
+#define HEVC_PPSERR_NUM_TILE_COLUMNS_RANGE_OUT 0x00002010
+#define HEVC_PPSERR_NUM_TILE_ROWS_RANGE_OUT 0x00002011
+#define HEVC_PPSERR_MORE_RBSP_DATA_ERROR 0x00002012
+#define HEVC_PPSERR_PPS_PIC_PARAMETER_SET_ID_RANGE_OUT 0x00002013
+#define HEVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID_RANGE_OUT 0x00002014
+#define HEVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002015
+#define HEVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002016
+#define HEVC_PPSERR_PPS_CB_QP_OFFSET_RANGE_OUT 0x00002017
+#define HEVC_PPSERR_PPS_CR_QP_OFFSET_RANGE_OUT 0x00002018
+#define HEVC_PPSERR_COLUMN_WIDTH_MINUS1_RANGE_OUT 0x00002019
+#define HEVC_PPSERR_ROW_HEIGHT_MINUS1_RANGE_OUT 0x00002020
+#define HEVC_PPSERR_PPS_BETA_OFFSET_DIV2_RANGE_OUT 0x00002021
+#define HEVC_PPSERR_PPS_TC_OFFSET_DIV2_RANGE_OUT 0x00002022
+#define HEVC_SHERR_SLICE_PIC_PARAMETER_SET_ID 0x00003000
+#define HEVC_SHERR_ACTIVATE_PPS 0x00003001
+#define HEVC_SHERR_ACTIVATE_SPS 0x00003002
+#define HEVC_SHERR_SLICE_TYPE 0x00003003
+#define HEVC_SHERR_FIRST_SLICE_IS_DEPENDENT_SLICE 0x00003004
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET_SPS_FLAG 0x00003005
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET 0x00003006
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET_IDX 0x00003007
+#define HEVC_SHERR_NUM_LONG_TERM_SPS 0x00003008
+#define HEVC_SHERR_NUM_LONG_TERM_PICS 0x00003009
+#define HEVC_SHERR_LT_IDX_SPS_IS_OUT_OF_RANGE 0x0000300A
+#define HEVC_SHERR_DELTA_POC_MSB_CYCLE_LT 0x0000300B
+#define HEVC_SHERR_NUM_REF_IDX_L0_ACTIVE_MINUS1 0x0000300C
+#define HEVC_SHERR_NUM_REF_IDX_L1_ACTIVE_MINUS1 0x0000300D
+#define HEVC_SHERR_COLLOCATED_REF_IDX 0x0000300E
+#define HEVC_SHERR_PRED_WEIGHT_TABLE 0x0000300F
+#define HEVC_SHERR_FIVE_MINUS_MAX_NUM_MERGE_CAND 0x00003010
+#define HEVC_SHERR_SLICE_QP_DELTA 0x00003011
+#define HEVC_SHERR_SLICE_QP_DELTA_IS_OUT_OF_RANGE 0x00003012
+#define HEVC_SHERR_SLICE_CB_QP_OFFSET 0x00003013
+#define HEVC_SHERR_SLICE_CR_QP_OFFSET 0x00003014
+#define HEVC_SHERR_SLICE_BETA_OFFSET_DIV2 0x00003015
+#define HEVC_SHERR_SLICE_TC_OFFSET_DIV2 0x00003016
+#define HEVC_SHERR_NUM_ENTRY_POINT_OFFSETS 0x00003017
+#define HEVC_SHERR_OFFSET_LEN_MINUS1 0x00003018
+#define HEVC_SHERR_SLICE_SEGMENT_HEADER_EXTENSION_LENGTH 0x00003019
+#define HEVC_SHERR_WRONG_POC_IN_STILL_PICTURE_PROFILE 0x0000301A
+#define HEVC_SHERR_SLICE_TYPE_ERROR_IN_STILL_PICTURE_PROFILE 0x0000301B
+#define HEVC_SHERR_PPS_ID_NOT_EQUAL_PREV_VALUE 0x0000301C
+#define HEVC_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define HEVC_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define HEVC_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define HEVC_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define HEVC_SPECERR_OVER_BUFFER_OVER_FLOW 0x00004004
+#define HEVC_SPECERR_OVER_WRONG_BUFFER_ACCESS 0x00004005
+#define HEVC_ETCERR_INIT_SEQ_SPS_NOT_FOUND 0x00005000
+#define HEVC_ETCERR_DEC_PIC_VCL_NOT_FOUND 0x00005001
+#define HEVC_ETCERR_NO_VALID_SLICE_IN_AU 0x00005002
+#define HEVC_ETCERR_INPLACE_V 0x0000500F
+
+// AVC
+#define AVC_SPSERR_SEQ_PARAMETER_SET_ID 0x00001000
+#define AVC_SPSERR_CHROMA_FORMAT_IDC 0x00001001
+#define AVC_SPSERR_PIC_WIDTH_IN_LUMA_SAMPLES 0x00001002
+#define AVC_SPSERR_PIC_HEIGHT_IN_LUMA_SAMPLES 0x00001003
+#define AVC_SPSERR_CONF_WIN_LEFT_OFFSET 0x00001004
+#define AVC_SPSERR_CONF_WIN_RIGHT_OFFSET 0x00001005
+#define AVC_SPSERR_CONF_WIN_TOP_OFFSET 0x00001006
+#define AVC_SPSERR_CONF_WIN_BOTTOM_OFFSET 0x00001007
+#define AVC_SPSERR_BIT_DEPTH_LUMA_MINUS8 0x00001008
+#define AVC_SPSERR_BIT_DEPTH_CHROMA_MINUS8 0x00001009
+#define AVC_SPSERR_SPS_MAX_DEC_PIC_BUFFERING 0x0000100B
+#define AVC_SPSERR_SPS_MAX_NUM_REORDER_PICS 0x0000100C
+#define AVC_SPSERR_SCALING_LIST 0x00001014
+#define AVC_SPSERR_GBU_PARSING_ERROR 0x00001019
+#define AVC_SPSERR_VUI_ERROR 0x0000101B
+#define AVC_SPSERR_ACTIVATE_SPS 0x0000101C
+#define AVC_PPSERR_PPS_PIC_PARAMETER_SET_ID 0x00002000
+#define AVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID 0x00002001
+#define AVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1 0x00002002
+#define AVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1 0x00002003
+#define AVC_PPSERR_INIT_QP_MINUS26 0x00002004
+#define AVC_PPSERR_PPS_CB_QP_OFFSET 0x00002006
+#define AVC_PPSERR_PPS_CR_QP_OFFSET 0x00002007
+#define AVC_PPSERR_SCALING_LIST 0x0000200E
+#define AVC_PPSERR_MORE_RBSP_DATA_ERROR 0x00002012
+#define AVC_PPSERR_PPS_PIC_PARAMETER_SET_ID_RANGE_OUT 0x00002013
+#define AVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID_RANGE_OUT 0x00002014
+#define AVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002015
+#define AVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002016
+#define AVC_PPSERR_PPS_CB_QP_OFFSET_RANGE_OUT 0x00002017
+#define AVC_PPSERR_PPS_CR_QP_OFFSET_RANGE_OUT 0x00002018
+#define AVC_SHERR_SLICE_PIC_PARAMETER_SET_ID 0x00003000
+#define AVC_SHERR_ACTIVATE_PPS 0x00003001
+#define AVC_SHERR_ACTIVATE_SPS 0x00003002
+#define AVC_SHERR_SLICE_TYPE 0x00003003
+#define AVC_SHERR_FIRST_MB_IN_SLICE 0x00003004
+#define AVC_SHERR_RPLM 0x00003006
+#define AVC_SHERR_LT_IDX_SPS_IS_OUT_OF_RANGE 0x0000300A
+#define AVC_SHERR_NUM_REF_IDX_L0_ACTIVE_MINUS1 0x0000300C
+#define AVC_SHERR_NUM_REF_IDX_L1_ACTIVE_MINUS1 0x0000300D
+#define AVC_SHERR_PRED_WEIGHT_TABLE 0x0000300F
+#define AVC_SHERR_SLICE_QP_DELTA 0x00003011
+#define AVC_SHERR_SLICE_BETA_OFFSET_DIV2 0x00003015
+#define AVC_SHERR_SLICE_TC_OFFSET_DIV2 0x00003016
+#define AVC_SHERR_DISABLE_DEBLOCK_FILTER_IDC 0x00003017
+#define AVC_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define AVC_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define AVC_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define AVC_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define AVC_SPECERR_OVER_BUFFER_OVER_FLOW 0x00004004
+#define AVC_SPECERR_OVER_WRONG_BUFFER_ACCESS 0x00004005
+#define AVC_ETCERR_INIT_SEQ_SPS_NOT_FOUND 0x00005000
+#define AVC_ETCERR_DEC_PIC_VCL_NOT_FOUND 0x00005001
+#define AVC_ETCERR_NO_VALID_SLICE_IN_AU 0x00005002
+#define AVC_ETCERR_ASO 0x00005004
+#define AVC_ETCERR_FMO 0x00005005
+#define AVC_ETCERR_INPLACE_V 0x0000500F
+
+// AV1
+#define AV1_SPSERR_MAX_FRAME_WIDTH 0x00001001
+#define AV1_SPSERR_MAX_FRAME_HEIGHT 0x00001002
+#define AV1_SPSERR_ID_LEN_EXCEED_16 0x00001004
+#define AV1_SPSERR_NOT_FOUND_FIRST_SPS 0x0000100A
+#define AV1_SPSERR_SEQ_PROFILE 0x0000100B
+#define AV1_SPSERR_STILL_PICTURE 0x0000100C
+#define AV1_PPSERR_FRAME_SIZE_WIDTH 0x00002001
+#define AV1_PPSERR_FRAME_SIZE_HEIGHT 0x00002002
+#define AV1_PPSERR_SEEN_FRAME_HEADER 0x00002003
+#define AV1_PPSERR_REF_VALID 0x00002007
+#define AV1_PPSERR_LAST_ORDER_HINT 0x0000200B
+#define AV1_PPSERR_GOLD_ORDER_HINT 0x0000200C
+#define AV1_PPSERR_CODED_LOSSLESS_DELTA_Q 0x0000200E
+#define AV1_PPSERR_FILM_GRAIN_PARAM_REF_IDX 0x0000200F
+#define AV1_PPSERR_SEQ_CHANGE_BIT_DEPTH 0x00002010
+#define AV1_PPSERR_SEQ_CHANGE_PROFILE 0x00002012
+#define AV1_PPSERR_SEQ_CHANGE_DETECTED_INTER 0x00002013
+#define AV1_PPSERR_NUM_Y_POINTS 0x00002014
+#define AV1_PPSERR_POINT_Y_VALUE 0x00002015
+#define AV1_PPSERR_NUM_CB_POINTS 0x00002016
+#define AV1_PPSERR_POINT_CB_VALUE 0x00002017
+#define AV1_PPSERR_NUM_CR_POINTS 0x00002018
+#define AV1_PPSERR_POINT_CR_VALUE 0x00002019
+#define AV1_PPSERR_SUBSAMPLING_FORMAT 0x0000201A
+#define AV1_FRAMEERR_TILE_START_END_PRESENT 0x00003001
+#define AV1_FRAMEERR_SHOW_EXISING_FRAME 0x00003002
+#define AV1_TGERR_NUM_TILES_ZERO 0x00004001
+#define AV1_TGERR_START_NOT_TILE_NUM 0x00004002
+#define AV1_TGERR_END_LESS_THAN_TG_START 0x00004003
+#define AV1_TGERR_TILE_SIZE_GREATER_THAN_32M 0x00004004
+#define AV1_SPECERR_OVER_MAX_H_SIZE 0x00005001
+#define AV1_SPECERR_OVER_MAX_V_SIZE 0x00005002
+#define AV1_SPECERR_OVER_MAX_TILE_COLS 0x00005004
+#define AV1_SPECERR_OVER_MAX_TILE_ROWS 0x00005005
+#define AV1_SPECERR_OVER_TILE_SIZE 0x00005006
+#define AV1_SPECERR_OVER_NUMTILES_GT_MAX_TILES 0x00005007
+#define AV1_ETCERR_OBU_HEADER 0x00006001
+#define AV1_ETCERR_OBU_SIZE 0x00006003
+#define AV1_ETCERR_OVERCONSUME 0x00006004
+#define AV1_ETCERR_NOT_SUPPORTED_FEATURE 0x00006005
+#define AV1_ETCERR_RESILIENCE_FAIL 0x00006006
+
+// VP9
+#define VP9_PICERR_FRAME_MARKER 0x00001000
+#define VP9_PICERR_PROFILE 0x00001001
+#define VP9_PICERR_SYNC_CODE 0x00001002
+#define VP9_PICERR_PROFILE_COLOR_SAMPLE 0x00001003
+#define VP9_PICERR_FRAME_SIZE 0x00001004
+#define VP9_PICERR_SEGMENT 0x00001005
+#define VP9_PICERR_TILE 0x00001006
+#define VP9_PICERR_PROFILE_COMP_MISMATCH_WITH_REF 0x00001007
+#define VP9_PICERR_COMP_DAT_OVER_CS 0x00001008
+#define VP9_PICERR_COMP_TRAILING_BIT_ERR 0x00001009
+#define VP9_PICERR_MARKER 0x0000100A
+#define VP9_PICERR_NOT_EXIST_REF_FRAME 0x0000100B
+#define VP9_PICERR_UNINIT_CTX 0x0000100C
+#define VP9_PICERR_FRAME_SIZE_LIMIT_BY_REF 0x0000100D
+#define VP9_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define VP9_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define VP9_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define VP9_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define VP9_ETCERR_INIT_KEY_FRAME_NOT_FOUND 0x00005000
+#define VP9_ETCERR_FORBIDDEN_BS_MODE 0x00005004
+#define VP9_ETCERR_SPP_OVER_CS_AU 0x00005005
+
+// AVS2
+#define AVS2_SPSERR_PROFILE_ID 0x00001000
+#define AVS2_SPSERR_LEVEL_ID 0x00001001
+#define AVS2_SPSERR_HORIZONTAL_SIZE 0x00001002
+#define AVS2_SPSERR_VERTICAL_SIZE 0x00001003
+#define AVS2_SPSERR_CHROMA_FORMAT 0x00001004
+#define AVS2_SPSERR_SAMPLE_PRECISION 0x00001005
+#define AVS2_SPSERR_ENCODING_PRECISION 0x00001006
+#define AVS2_SPSERR_LCU_SIZE 0x00001007
+#define AVS2_SPSERR_WEIGHT_QUANT_MATRIX 0x00001008
+#define AVS2_SPSERR_NUM_OF_RCS 0x00001009
+#define AVS2_SPSERR_REFERENCE_CONFIGURATION_SET 0x0000100A
+#define AVS2_SPSERR_OUTPUT_REORDER_DELAY 0x0000100B
+#define AVS2_PPSERR_BBV_DELAY 0x00002000
+#define AVS2_PPSERR_TIME_CODE 0x00002001
+#define AVS2_PPSERR_DECODE_ORDER_INDEX 0x00002002
+#define AVS2_PPSERR_TEMPORAL_ID 0x00002003
+#define AVS2_PPSERR_PICTURE_OUTPUT_DELAY 0x00002004
+#define AVS2_PPSERR_RCS_INDEX 0x00002005
+#define AVS2_PPSERR_REFERENCE_CONFIGURATION_SET 0x00002006
+#define AVS2_PPSERR_BBV_CHECK_TIMES 0x00002007
+#define AVS2_PPSERR_PICTURE_QP 0x00002008
+#define AVS2_PPSERR_ALPHA_C_OFFSET 0x00002009
+#define AVS2_PPSERR_BETA_OFFSET 0x0000200A
+#define AVS2_PPSERR_CHROMA_QUANT_PARAM_DELTA_CB 0x0000200B
+#define AVS2_PPSERR_CHROMA_QUANT_PARAM_DELTA_CR 0x0000200C
+#define AVS2_PPSERR_WEIGHT_QUANT_PARAM_DELTA1 0x0000200D
+#define AVS2_PPSERR_WEIGHT_QUANT_PARAM_DELTA2 0x0000200E
+#define AVS2_PPSERR_PICTURE_CODING_TYPE 0x0000200F
+#define AVS2_PPSERR_ALF_FILTER_NUM_MINUS1 0x00002010
+#define AVS2_PPSERR_ALF_REGION_DISTANCE 0x00002011
+#define AVS2_PPSERR_ALF_COEFF_LUMA 0x00002012
+#define AVS2_PPSERR_ALF_COEFF_CHROMA_CB 0x00002013
+#define AVS2_PPSERR_ALF_COEFF_CHROMA_CR 0x00002014
+#define AVS2_SHERR_SLICE_VERTICAL_POSITION 0x00003000
+#define AVS2_SHERR_SLICE_VERTICAL_POSITION_EXTENSION 0x00003001
+#define AVS2_SHERR_SLICE_HORIZONTAL_POSITION 0x00003002
+#define AVS2_SHERR_SLICE_HORIZONTAL_POSITION_EXTENSION 0x00003003
+#define AVS2_SHERR_FIXED_SLICE_QP 0x00003004
+#define AVS2_SHERR_SLICE_QP 0x00003005
+#define AVS2_SHERR_SLICE_SAO_ENABLE_FLAG 0x00003006
+#define AVS2_SHERR_AEC_BYTE_ALIGNMENT_BIT 0x00003007
+#define AVS2_SHERR_STREAM_END 0x00003008
+#define AVS2_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define AVS2_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define AVS2_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define AVS2_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define AVS2_SPECERR_OVER_REF_TEMPORAL_ID 0x00004004
+#define AVS2_ETCERR_SPS_NOT_FOUND 0x00005000
+#define AVS2_ETCERR_DEC_PIC_VCL_NOT_FOUND 0x00005001
+#define AVS2_ETCERR_NO_VALID_SLICE_IN_AU 0x00005002
+#define AVS2_ETCERR_PPS_ERROR 0x00005003
+#define AVS2_ETCERR_SLICE_NUM_OVERFLOW 0x00005004
+
+/************************************************************************/
+/* WAVE5 WARNING ON DECODER (WARN_INFO) */
+/************************************************************************/
+// HEVC
+#define HEVC_SPSWARN_MAX_SUB_LAYERS_MINUS1 0x00000001
+#define HEVC_SPSWARN_GENERAL_RESERVED_ZERO_44BITS 0x00000002
+#define HEVC_SPSWARN_RESERVED_ZERO_2BITS 0x00000004
+#define HEVC_SPSWARN_SUB_LAYER_RESERVED_ZERO_44BITS 0x00000008
+#define HEVC_SPSWARN_GENERAL_LEVEL_IDC 0x00000010
+#define HEVC_SPSWARN_SPS_MAX_DEC_PIC_BUFFERING_VALUE_OVER 0x00000020
+#define HEVC_SPSWARN_RBSP_TRAILING_BITS 0x00000040
+#define HEVC_SPSWARN_ST_RPS_UE_ERROR 0x00000080
+#define HEVC_SPSWARN_EXTENSION_FLAG 0x01000000
+#define HEVC_SPSWARN_REPLACED_WITH_PREV_SPS 0x02000000
+#define HEVC_PPSWARN_RBSP_TRAILING_BITS 0x00000100
+#define HEVC_PPSWARN_REPLACED_WITH_PREV_PPS 0x00000200
+#define HEVC_SHWARN_FIRST_SLICE_SEGMENT_IN_PIC_FLAG 0x00001000
+#define HEVC_SHWARN_NO_OUTPUT_OF_PRIOR_PICS_FLAG 0x00002000
+#define HEVC_SHWARN_PIC_OUTPUT_FLAG 0x00004000
+#define HEVC_SHWARN_DUPLICATED_SLICE_SEGMENT 0x00008000
+#define HEVC_ETCWARN_INIT_SEQ_VCL_NOT_FOUND 0x00010000
+#define HEVC_ETCWARN_MISSING_REFERENCE_PICTURE 0x00020000
+#define HEVC_ETCWARN_WRONG_TEMPORAL_ID 0x00040000
+#define HEVC_ETCWARN_ERROR_PICTURE_IS_REFERENCED 0x00080000
+#define HEVC_SPECWARN_OVER_PROFILE 0x00100000
+#define HEVC_SPECWARN_OVER_LEVEL 0x00200000
+#define HEVC_PRESWARN_PARSING_ERR 0x04000000
+#define HEVC_PRESWARN_MVD_OUT_OF_RANGE 0x08000000
+#define HEVC_PRESWARN_CU_QP_DELTA_VAL_OUT_OF_RANGE 0x09000000
+#define HEVC_PRESWARN_COEFF_LEVEL_REMAINING_OUT_OF_RANGE 0x0A000000
+#define HEVC_PRESWARN_PCM_ERR 0x0B000000
+#define HEVC_PRESWARN_OVERCONSUME 0x0C000000
+#define HEVC_PRESWARN_END_OF_SUBSET_ONE_BIT_ERR 0x10000000
+#define HEVC_PRESWARN_END_OF_SLICE_SEGMENT_FLAG 0x20000000
+
+// AVC
+#define AVC_SPSWARN_RESERVED_ZERO_2BITS 0x00000004
+#define AVC_SPSWARN_GENERAL_LEVEL_IDC 0x00000010
+#define AVC_SPSWARN_RBSP_TRAILING_BITS 0x00000040
+#define AVC_PPSWARN_RBSP_TRAILING_BITS 0x00000100
+#define AVC_SHWARN_NO_OUTPUT_OF_PRIOR_PICS_FLAG 0x00002000
+#define AVC_ETCWARN_INIT_SEQ_VCL_NOT_FOUND 0x00010000
+#define AVC_ETCWARN_MISSING_REFERENCE_PICTURE 0x00020000
+#define AVC_ETCWARN_ERROR_PICTURE_IS_REFERENCED 0x00080000
+#define AVC_SPECWARN_OVER_PROFILE 0x00100000
+#define AVC_SPECWARN_OVER_LEVEL 0x00200000
+#define AVC_PRESWARN_MVD_RANGE_OUT 0x00400000
+#define AVC_PRESWARN_MB_QPD_RANGE_OUT 0x00500000
+#define AVC_PRESWARN_COEFF_RANGE_OUT 0x00600000
+#define AVC_PRESWARN_MV_RANGE_OUT 0x00700000
+#define AVC_PRESWARN_MB_SKIP_RUN_RANGE_OUT 0x00800000
+#define AVC_PRESWARN_MB_TYPE_RANGE_OUT 0x00900000
+#define AVC_PRESWARN_SUB_MB_TYPE_RANGE_OUT 0x00A00000
+#define AVC_PRESWARN_CBP_RANGE_OUT 0x00B00000
+#define AVC_PRESWARN_INTRA_CHROMA_PRED_MODE_RANGE_OUT 0x00C00000
+#define AVC_PRESWARN_REF_IDX_RANGE_OUT 0x00D00000
+#define AVC_PRESWARN_COEFF_TOKEN_RANGE_OUT 0x00E00000
+#define AVC_PRESWARN_TOTAL_ZERO_RANGE_OUT 0x00F00000
+#define AVC_PRESWARN_RUN_BEFORE_RANGE_OUT 0x01000000
+#define AVC_PRESWARN_OVERCONSUME 0x01100000
+#define AVC_PRESWARN_MISSING_SLICE 0x01200000
+
+// AV1
+#define AV1_SPSWARN_OBU_EXTENSION_FLAG_ZERO 0x00001000
+#define AV1_SPSWARN_DUPLICATE_OPERATING_POINT_IDX 0x00001001
+#define AV1_SPSWARN_MC_IDENTIY_SUBSAMPLING_X 0x00001002
+#define AV1_PPSWARN_MC_IDENTIY_SUBSAMPLING_Y 0x00001003
+#define AV1_SPSWARN_NUM_UNITS_IN_DISPLAY_TICK 0x00001004
+#define AV1_SPSWARN_TIME_SCALE_ZERO 0x00001005
+#define AV1_SPSWARN_NUM_TICKS_PER_PICTURE 0x00001006
+#define AV1_PPSWARN_TILE_WIDTH 0x00002001
+#define AV1_PPSWARN_TILE_HEIGHT 0x00002002
+#define AV1_PPSWARN_SHOW_EXISTING_KEY_FRAME_OUTPUT 0x00002004
+#define AV1_PPSWARN_DIFF_FRAME_ID 0x00002008
+#define AV1_PPSWARN_CURRENT_FRAME_ID 0x00002010
+#define AV1_PPSWARN_REFRESH_FRAME_FLAGS 0x00002020
+#define AV1_PPSWARN_DISPLAY_ID 0x00002040
+#define AV1_PPSWARN_PREV_FRAME_SHOWABLE_FLAG_ZERO 0x00002080
+#define AV1_PPSWARN_EXPECTED_FRAME_ID 0x00002100
+#define AV1_SPECWARN_OVER_MAX_TILE_AREA_SB 0x00005000
+#define AV1_SPECWARN_OVER_MAX_PIC_SIZE 0x00005001
+#define AV1_ETCWARN_OBU_EXTENSION_FLAG 0x00006000
+#define AV1_TGWARN_TRAIL_BIT_POS 0x00400000
+#define AV1_TGWARN_TRAIL_PAD_BIT 0x00800000
+#define AV1_TGWARN_SYM_MAX_OVER 0x01000000
+#define AV1_TGWARN_EXP_GOLB_OVER 0x02000000
+#define AV1_TGWARN_MV_NOT_VALID 0x04000000
+
+// VP9
+#define VP9_PICWARN_COLOR_SPACE_MISMATCH_WITH_REF 0x00001000
+#define VP9_PRESWARN_OVERCONSUME 0x00400000
+#define VP9_PRESWARN_TRAILING_BITS 0x00800000
+#define VP9_PRESWARN_MARKER 0x01000000
+#define VP9_PRESWARN_MV_RANGE_OVER 0x02000000
+#define VP9_PRESWARN_MISIZE_SEG_LVL_ACTIVE 0x04000000
+
+// AVS2
+#define AVS2_ETCWARN_INIT_SEQ_VCL_NOT_FOUND 0x00010000
+#define AVS2_ETCWARN_MISSING_REFERENCE_PICTURE 0x00020000
+#define AVS2_ETCWARN_WRONG_TEMPORAL_ID 0x00040000
+#define AVS2_ETCWARN_ERROR_PICTURE_IS_REFERENCED 0x00080000
+#define AVS2_ETCWARN_REF_WRONG_TEMPORAL_ID 0x00080001
+#define AVS2_ETCWARN_SPS_ERROR 0x00080002
+
+/************************************************************************/
+/* WAVE5 ERROR ON ENCODER (ERR_INFO) */
+/************************************************************************/
+
+/************************************************************************/
+/* WAVE5 WARNING ON ENCODER (WARN_INFO) */
+/************************************************************************/
+#define WAVE5_ETCWARN_FORCED_SPLIT_BY_CU8X8 0x00000001
+
+/************************************************************************/
+/* WAVE5 debug info (PRI_REASON) */
+/************************************************************************/
+#define WAVE5_DEC_VCORE_VCE_HANGUP 0x0001
+#define WAVE5_DEC_VCORE_UNDETECTED_SYNTAX_ERR 0x0002
+#define WAVE5_DEC_VCORE_MIB_BUSY 0x0003
+#define WAVE5_DEC_VCORE_VLC_BUSY 0x0004
+
+#endif /* ERROR_CODE_H_INCLUDED */
diff --git a/drivers/media/platform/chips-media/wave5/wave5.h b/drivers/media/platform/chips-media/wave5/wave5.h
new file mode 100644
index 000000000000..d3afb541e356
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - wave5 backend definitions
+ *
+ * Copyright (C) 2021 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE5_FUNCTION_H__
+#define __WAVE5_FUNCTION_H__
+
+#define WAVE5_SUBSAMPLED_ONE_SIZE(_w, _h) (ALIGN((_w) / 4, 16) * ALIGN((_h) / 4, 8))
+#define WAVE5_SUBSAMPLED_ONE_SIZE_AVC(_w, _h) (ALIGN((_w) / 4, 32) * ALIGN((_h) / 4, 4))
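To make the alignment arithmetic concrete, a worked example (illustrative only) for a 1920x1080 frame:

/*
 * WAVE5_SUBSAMPLED_ONE_SIZE(1920, 1080)
 *   = ALIGN(1920 / 4, 16) * ALIGN(1080 / 4, 8)
 *   = 480 * 272
 *   = 130560 bytes for one sub-sampled frame
 */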
+
+#define BSOPTION_ENABLE_EXPLICIT_END BIT(0)
+
+#define WTL_RIGHT_JUSTIFIED 0
+#define WTL_LEFT_JUSTIFIED 1
+#define WTL_PIXEL_8BIT 0
+#define WTL_PIXEL_16BIT 1
+#define WTL_PIXEL_32BIT 2
+
+/* Mirror & rotation modes of the PRP (pre-processing) module */
+#define NONE_ROTATE 0x0
+#define ROT_CLOCKWISE_90 0x3
+#define ROT_CLOCKWISE_180 0x5
+#define ROT_CLOCKWISE_270 0x7
+#define MIR_HOR_FLIP 0x11
+#define MIR_VER_FLIP 0x9
+#define MIR_HOR_VER_FLIP (MIR_HOR_FLIP | MIR_VER_FLIP)
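The two mirror values are already defined as OR-able bits (see MIR_HOR_VER_FLIP above); assuming rotation and mirror compose the same way, a sketch:

static u32 example_prp_mode(void)
{
	/* 90-degree clockwise rotation plus a horizontal flip (0x13) */
	return ROT_CLOCKWISE_90 | MIR_HOR_FLIP;
}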
+
+bool wave5_vpu_is_init(struct vpu_device *vpu_dev);
+
+unsigned int wave5_vpu_get_product_id(struct vpu_device *vpu_dev);
+
+void wave5_bit_issue_command(struct vpu_instance *inst, u32 cmd);
+
+int wave5_vpu_get_version(struct vpu_device *vpu_dev, u32 *revision);
+
+int wave5_vpu_init(struct device *dev, u8 *fw, size_t size);
+
+int wave5_vpu_reset(struct device *dev, enum sw_reset_mode reset_mode);
+
+int wave5_vpu_build_up_dec_param(struct vpu_instance *inst, struct dec_open_param *param);
+
+int wave5_vpu_dec_set_bitstream_flag(struct vpu_instance *inst, bool eos);
+
+int wave5_vpu_dec_register_framebuffer(struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count);
+
+int wave5_vpu_re_init(struct device *dev, u8 *fw, size_t size);
+
+int wave5_vpu_dec_init_seq(struct vpu_instance *inst);
+
+int wave5_vpu_dec_get_seq_info(struct vpu_instance *inst, struct dec_initial_info *info);
+
+int wave5_vpu_decode(struct vpu_instance *inst, struct dec_param *option, u32 *fail_res);
+
+int wave5_vpu_dec_get_result(struct vpu_instance *inst, struct dec_output_info *result);
+
+int wave5_vpu_dec_finish_seq(struct vpu_instance *inst, u32 *fail_res);
+
+int wave5_dec_clr_disp_flag(struct vpu_instance *inst, unsigned int index);
+
+int wave5_dec_set_disp_flag(struct vpu_instance *inst, unsigned int index);
+
+int wave5_vpu_clear_interrupt(struct vpu_instance *inst, u32 flags);
+
+dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst);
+
+int wave5_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr);
+
+/* WAVE5 encoder */
+
+int wave5_vpu_build_up_enc_param(struct device *dev, struct vpu_instance *inst,
+ struct enc_open_param *open_param);
+
+int wave5_vpu_enc_init_seq(struct vpu_instance *inst);
+
+int wave5_vpu_enc_get_seq_info(struct vpu_instance *inst, struct enc_initial_info *info);
+
+int wave5_vpu_enc_register_framebuffer(struct device *dev, struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count);
+
+int wave5_vpu_encode(struct vpu_instance *inst, struct enc_param *option, u32 *fail_res);
+
+int wave5_vpu_enc_get_result(struct vpu_instance *inst, struct enc_output_info *result);
+
+int wave5_vpu_enc_finish_seq(struct vpu_instance *inst, u32 *fail_res);
+
+int wave5_vpu_enc_check_open_param(struct vpu_instance *inst, struct enc_open_param *open_param);
+
+#endif /* __WAVE5_FUNCTION_H__ */
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 808b490c1910..922df8cb30e5 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -524,7 +524,7 @@ static int fimc_capture_release(struct file *file)
mutex_lock(&fimc->lock);
if (close && vc->streaming) {
- media_pipeline_stop(&vc->ve.vdev.entity);
+ media_pipeline_stop(vc->ve.vdev.entity.pads);
vc->streaming = false;
}
@@ -1184,7 +1184,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
if (fimc_capture_active(fimc))
return -EBUSY;
- ret = media_pipeline_start(entity, &vc->ve.pipe->mp);
+ ret = media_pipeline_start(entity->pads, &vc->ve.pipe->mp);
if (ret < 0)
return ret;
@@ -1218,7 +1218,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
}
err_p_stop:
- media_pipeline_stop(entity);
+ media_pipeline_stop(entity->pads);
return ret;
}
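The hunks in this file track the media-controller API change that anchors pipelines on a pad rather than an entity; a minimal sketch of the new calling convention, assuming vdev is a registered video_device:

static int example_pipeline_start(struct video_device *vdev,
				  struct media_pipeline *pipe)
{
	/* pipelines are now started from the entity's first pad */
	return media_pipeline_start(vdev->entity.pads, pipe);
}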
@@ -1234,7 +1234,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
return ret;
if (vc->streaming) {
- media_pipeline_stop(&vc->ve.vdev.entity);
+ media_pipeline_stop(vc->ve.vdev.entity.pads);
vc->streaming = false;
}
@@ -1454,7 +1454,7 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
}
static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct fimc_fmt *fmt;
@@ -1467,7 +1467,7 @@ static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1476,7 +1476,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1508,7 +1508,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1531,7 +1531,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = V4L2_COLORSPACE_JPEG;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
return 0;
}
@@ -1574,7 +1574,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1601,10 +1601,10 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
@@ -1630,7 +1630,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
@@ -1648,10 +1648,10 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 83688a7982f7..e2862b3dcdfc 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -312,7 +312,7 @@ static int isp_video_release(struct file *file)
is_singular_file = v4l2_fh_is_singular_file(file);
if (is_singular_file && ivc->streaming) {
- media_pipeline_stop(entity);
+ media_pipeline_stop(entity->pads);
ivc->streaming = 0;
}
@@ -493,7 +493,7 @@ static int isp_video_streamon(struct file *file, void *priv,
struct media_entity *me = &ve->vdev.entity;
int ret;
- ret = media_pipeline_start(me, &ve->pipe->mp);
+ ret = media_pipeline_start(me->pads, &ve->pipe->mp);
if (ret < 0)
return ret;
@@ -508,7 +508,7 @@ static int isp_video_streamon(struct file *file, void *priv,
isp->video_capture.streaming = 1;
return 0;
p_stop:
- media_pipeline_stop(me);
+ media_pipeline_stop(me->pads);
return ret;
}
@@ -523,7 +523,7 @@ static int isp_video_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_pipeline_stop(&video->ve.vdev.entity);
+ media_pipeline_stop(video->ve.vdev.entity.pads);
video->streaming = 0;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 74b49d30901e..80274e29ccc5 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -106,7 +106,7 @@ static const struct media_entity_operations fimc_is_subdev_media_ops = {
};
static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -119,14 +119,14 @@ static int fimc_is_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *mf = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
return 0;
}
@@ -156,7 +156,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
}
static void __isp_subdev_try_format(struct fimc_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *mf = &fmt->format;
@@ -172,8 +172,9 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
} else {
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- format = v4l2_subdev_get_try_format(&isp->subdev, cfg,
- FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_get_try_format(&isp->subdev,
+ sd_state,
+ FIMC_ISP_SD_PAD_SINK);
else
format = &isp->sink_fmt;
@@ -191,7 +192,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
}
static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_isp *isp = v4l2_get_subdevdata(sd);
@@ -203,10 +204,10 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
__func__, fmt->pad, mf->code, mf->width, mf->height);
mutex_lock(&isp->subdev_lock);
- __isp_subdev_try_format(isp, cfg, fmt);
+ __isp_subdev_try_format(isp, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
/* Propagate format to the source pads */
@@ -217,20 +218,23 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
for (pad = FIMC_ISP_SD_PAD_SRC_FIFO;
pad < FIMC_ISP_SD_PADS_NUM; pad++) {
format.pad = pad;
- __isp_subdev_try_format(isp, cfg, &format);
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ __isp_subdev_try_format(isp, sd_state,
+ &format);
+ mf = v4l2_subdev_get_try_format(sd, sd_state,
+ pad);
*mf = format.format;
}
}
} else {
- if (sd->entity.stream_count == 0) {
+ if (sd->entity.pads->stream_count == 0) {
if (fmt->pad == FIMC_ISP_SD_PAD_SINK) {
struct v4l2_subdev_format format = *fmt;
isp->sink_fmt = *mf;
format.pad = FIMC_ISP_SD_PAD_SRC_DMA;
- __isp_subdev_try_format(isp, cfg, &format);
+ __isp_subdev_try_format(isp, sd_state,
+ &format);
isp->src_fmt = format.format;
__is_set_frame_size(is, &isp->src_fmt);
@@ -370,15 +374,18 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
.field = V4L2_FIELD_NONE,
};
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SINK);
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_FIFO);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SRC_FIFO);
fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->pad, FIMC_ISP_SD_PAD_SRC_DMA);
+ format = v4l2_subdev_get_try_format(sd, fh->state,
+ FIMC_ISP_SD_PAD_SRC_DMA);
*format = fmt;
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index d279f282d592..3bc63884a748 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -516,7 +516,7 @@ static int fimc_lite_release(struct file *file)
if (v4l2_fh_is_singular_file(file) &&
atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
if (fimc->streaming) {
- media_pipeline_stop(entity);
+ media_pipeline_stop(entity->pads);
fimc->streaming = false;
}
fimc_lite_stop_capture(fimc, false);
@@ -550,7 +550,7 @@ static const struct v4l2_file_operations fimc_lite_fops = {
*/
static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct flite_drvdata *dd = fimc->dd;
@@ -574,14 +574,16 @@ static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
struct v4l2_rect *rect;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev, cfg,
- FLITE_SD_PAD_SINK);
+ sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev,
+ sd_state,
+ FLITE_SD_PAD_SINK);
mf->code = sink_fmt->code;
mf->colorspace = sink_fmt->colorspace;
- rect = v4l2_subdev_get_try_crop(&fimc->subdev, cfg,
- FLITE_SD_PAD_SINK);
+ rect = v4l2_subdev_get_try_crop(&fimc->subdev,
+ sd_state,
+ FLITE_SD_PAD_SINK);
} else {
mf->code = sink->fmt->mbus_code;
mf->colorspace = sink->fmt->colorspace;
@@ -820,7 +822,7 @@ static int fimc_lite_streamon(struct file *file, void *priv,
if (fimc_lite_active(fimc))
return -EBUSY;
- ret = media_pipeline_start(entity, &fimc->ve.pipe->mp);
+ ret = media_pipeline_start(entity->pads, &fimc->ve.pipe->mp);
if (ret < 0)
return ret;
@@ -837,7 +839,7 @@ static int fimc_lite_streamon(struct file *file, void *priv,
}
err_p_stop:
- media_pipeline_stop(entity);
+ media_pipeline_stop(entity->pads);
return 0;
}
@@ -851,7 +853,7 @@ static int fimc_lite_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_pipeline_stop(&fimc->ve.vdev.entity);
+ media_pipeline_stop(fimc->ve.vdev.entity.pads);
fimc->streaming = false;
return 0;
}
@@ -1002,7 +1004,7 @@ static const struct media_entity_operations fimc_lite_subdev_media_ops = {
};
static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct fimc_fmt *fmt;
@@ -1016,16 +1018,16 @@ static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad)
+ struct v4l2_subdev_state *sd_state, unsigned int pad)
{
if (pad != FLITE_SD_PAD_SINK)
pad = FLITE_SD_PAD_SOURCE_DMA;
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
}
static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1033,7 +1035,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
struct flite_frame *f = &fimc->inp_frame;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1056,7 +1058,7 @@ static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1071,24 +1073,25 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
if ((atomic_read(&fimc->out_path) == FIMC_IO_ISP &&
- sd->entity.stream_count > 0) ||
+ sd->entity.pads->stream_count > 0) ||
(atomic_read(&fimc->out_path) == FIMC_IO_DMA &&
vb2_is_busy(&fimc->vb_queue))) {
mutex_unlock(&fimc->lock);
return -EBUSY;
}
- ffmt = fimc_lite_subdev_try_fmt(fimc, cfg, fmt);
+ ffmt = fimc_lite_subdev_try_fmt(fimc, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *src_fmt;
- mf = __fimc_lite_subdev_get_try_fmt(sd, cfg, fmt->pad);
+ mf = __fimc_lite_subdev_get_try_fmt(sd, sd_state, fmt->pad);
*mf = fmt->format;
if (fmt->pad == FLITE_SD_PAD_SINK) {
unsigned int pad = FLITE_SD_PAD_SOURCE_DMA;
- src_fmt = __fimc_lite_subdev_get_try_fmt(sd, cfg, pad);
+ src_fmt = __fimc_lite_subdev_get_try_fmt(sd, sd_state,
+ pad);
*src_fmt = *mf;
}
@@ -1116,7 +1119,7 @@ static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1128,7 +1131,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
return 0;
}
@@ -1151,7 +1154,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
@@ -1165,7 +1168,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
fimc_lite_try_crop(fimc, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
spin_lock_irqsave(&fimc->slock, flags);
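The conversion pattern repeated across this series: every subdev pad operation trades its struct v4l2_subdev_pad_config argument for a struct v4l2_subdev_state, and TRY data is fetched through that state. A minimal sketch of a converted get_fmt handler follows; the demo_* names are placeholders, not code from this tree:

#include <media/v4l2-subdev.h>

struct demo_dev {
	struct v4l2_mbus_framefmt fmt[2];	/* one ACTIVE format per pad */
};

static int demo_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *fmt)
{
	struct demo_dev *demo = v4l2_get_subdevdata(sd);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		/* TRY formats now live in the subdev state. */
		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
							  fmt->pad);
		return 0;
	}

	fmt->format = demo->fmt[fmt->pad];	/* ACTIVE format */
	return 0;
}

The ACTIVE path is untouched by the conversion; only the TRY bookkeeping moves into the state object.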
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index bd37011fb671..8230dd040ffe 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1161,7 +1161,7 @@ static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable)
static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
struct media_graph *graph)
{
- struct media_entity *entity_err = entity;
+ struct media_pad *pad, *pad_err = entity->pads;
int ret;
/*
@@ -1170,13 +1170,13 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
* through active links. This is needed as we cannot power on/off the
* subdevs in random order.
*/
- media_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, pad_err);
- while ((entity = media_graph_walk_next(graph))) {
- if (!is_media_entity_v4l2_video_device(entity))
+ while ((pad = media_graph_walk_next(graph))) {
+ if (!is_media_entity_v4l2_video_device(pad->entity))
continue;
- ret = __fimc_md_modify_pipeline(entity, enable);
+ ret = __fimc_md_modify_pipeline(pad->entity, enable);
if (ret < 0)
goto err;
@@ -1185,15 +1185,15 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
return 0;
err:
- media_graph_walk_start(graph, entity_err);
+ media_graph_walk_start(graph, pad_err);
- while ((entity_err = media_graph_walk_next(graph))) {
- if (!is_media_entity_v4l2_video_device(entity_err))
+ while ((pad_err = media_graph_walk_next(graph))) {
+ if (!is_media_entity_v4l2_video_device(pad_err->entity))
continue;
- __fimc_md_modify_pipeline(entity_err, !enable);
+ __fimc_md_modify_pipeline(pad_err->entity, !enable);
- if (entity_err == entity)
+ if (pad_err == pad)
break;
}
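media-dev.c is also rebased onto this tree's pad-based graph walk, where media_graph_walk_start() takes a struct media_pad and media_graph_walk_next() hands pads back (mainline walks entities instead). A sketch of the resulting loop shape, under that assumption:

#include <media/media-entity.h>

static void demo_walk_pipeline(struct media_pad *origin,
			       struct media_graph *graph)
{
	struct media_pad *pad;

	media_graph_walk_start(graph, origin);
	while ((pad = media_graph_walk_next(graph))) {
		/* Entity-level checks now go through pad->entity. */
		if (!is_media_entity_v4l2_video_device(pad->entity))
			continue;
		/* ... per-video-device work ... */
	}
}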
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index ebf39c856894..32b23329b033 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -537,7 +537,7 @@ unlock:
}
static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5pcsis_formats))
@@ -565,23 +565,25 @@ static struct csis_pix_format const *s5pcsis_try_format(
}
static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
- struct csis_state *state, struct v4l2_subdev_pad_config *cfg,
+ struct csis_state *state, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return cfg ? v4l2_subdev_get_try_format(&state->sd, cfg, 0) : NULL;
+ return sd_state ? v4l2_subdev_get_try_format(&state->sd,
+ sd_state, 0) : NULL;
return &state->format;
}
-static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5pcsis_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, cfg, fmt->which);
+ mf = __s5pcsis_get_format(state, sd_state, fmt->which);
if (fmt->pad == CSIS_PAD_SOURCE) {
if (mf) {
@@ -602,13 +604,14 @@ static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
return 0;
}
-static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int s5pcsis_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = __s5pcsis_get_format(state, cfg, fmt->which);
+ mf = __s5pcsis_get_format(state, sd_state, fmt->which);
if (!mf)
return -EINVAL;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index e56c5e56e824..37e96f591d84 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1350,6 +1350,9 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
struct mcam_format_struct *f;
struct v4l2_pix_format *pix = &fmt->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1358,7 +1361,7 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
f = mcam_find_format(pix->pixelformat);
pix->pixelformat = f->pixelformat;
v4l2_fill_mbus_format(&format.format, pix, f->mbus_code);
- ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(pix, &format.format);
pix->bytesperline = pix->width * f->bpp;
switch (f->pixelformat) {
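Video-device drivers that negotiate a TRY format directly with a sensor cannot allocate a full subdev state, so they wrap an on-stack pad config in a one-entry state, exactly as the hunk above does; pxa_camera further down gets the same treatment. A sketch of the idiom, with a hypothetical demo_negotiate() helper:

#include <media/v4l2-subdev.h>

/* Ask a sensor to size a TRY format; mf is updated in place. */
static int demo_negotiate(struct v4l2_subdev *sensor,
			  struct v4l2_mbus_framefmt *mf)
{
	struct v4l2_subdev_pad_config pad_cfg = { };
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg,
	};
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
		.format = *mf,
	};
	int ret;

	ret = v4l2_subdev_call(sensor, pad, set_fmt, &pad_state, &format);
	if (ret < 0)
		return ret;
	*mf = format.format;	/* what the sensor can actually do */
	return 0;
}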
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1311b4996ece..bab1f37c3778 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -930,7 +930,7 @@ static int isp_pipeline_is_last(struct media_entity *me)
struct isp_pipeline *pipe;
struct media_pad *pad;
- if (!me->pipe)
+ if (!me->pads->pipe)
return 0;
pipe = to_isp_pipeline(me);
if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 0fbb2aa6dd2c..bd8f4f4bc8f3 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -29,7 +29,8 @@
#define CCDC_MIN_HEIGHT 32
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which);
static const unsigned int ccdc_fmts[] = {
@@ -1937,21 +1938,25 @@ static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccdc->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ccdc->subdev, sd_state,
+ pad);
else
return &ccdc->formats[pad];
}
static struct v4l2_rect *
-__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+__ccdc_get_crop(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&ccdc->subdev, cfg, CCDC_PAD_SOURCE_OF);
+ return v4l2_subdev_get_try_crop(&ccdc->subdev, sd_state,
+ CCDC_PAD_SOURCE_OF);
else
return &ccdc->crop;
}
@@ -1964,7 +1969,8 @@ __ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
* @fmt: Format
*/
static void
-ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg,
+ccdc_try_format(struct isp_ccdc_device *ccdc,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -2000,7 +2006,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
case CCDC_PAD_SOURCE_OF:
pixelcode = fmt->code;
field = fmt->field;
- *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ which);
/* In SYNC mode the bridge converts YUV formats from 2X8 to
* 1X16. In BT.656 no such conversion occurs. As we don't know
@@ -2025,7 +2032,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
}
/* Hardcode the output size to the crop rectangle size. */
- crop = __ccdc_get_crop(ccdc, cfg, which);
+ crop = __ccdc_get_crop(ccdc, sd_state, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -2042,7 +2049,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_pad_config *cfg
break;
case CCDC_PAD_SOURCE_VP:
- *fmt = *__ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, which);
+ *fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ which);
/* The video port interface truncates the data to 10 bits. */
info = omap3isp_video_format_info(fmt->code);
@@ -2119,7 +2127,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
* return -EINVAL or zero on success
*/
static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2134,7 +2142,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
break;
case CCDC_PAD_SOURCE_OF:
- format = __ccdc_get_format(ccdc, cfg, code->pad,
+ format = __ccdc_get_format(ccdc, sd_state, code->pad,
code->which);
if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
@@ -2165,7 +2173,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccdc_get_format(ccdc, cfg, code->pad,
+ format = __ccdc_get_format(ccdc, sd_state, code->pad,
code->which);
/* A pixel code equal to 0 means that the video port doesn't
@@ -2185,7 +2193,7 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2197,7 +2205,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -2207,7 +2215,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccdc_try_format(ccdc, cfg, fse->pad, &format, fse->which);
+ ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -2225,7 +2233,8 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2241,12 +2250,13 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
+ sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
break;
default:
@@ -2267,7 +2277,8 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
*
* Return 0 on success or a negative error code otherwise.
*/
-static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
@@ -2286,17 +2297,19 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__ccdc_get_crop(ccdc, cfg, sel->which);
+ sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
return 0;
}
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SINK, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK, sel->which);
ccdc_try_crop(ccdc, format, &sel->r);
- *__ccdc_get_crop(ccdc, cfg, sel->which) = sel->r;
+ *__ccdc_get_crop(ccdc, sd_state, sel->which) = sel->r;
/* Update the source format. */
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, sel->which);
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format, sel->which);
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF,
+ sel->which);
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
+ sel->which);
return 0;
}
@@ -2310,13 +2323,14 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2333,24 +2347,25 @@ static int ccdc_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
-static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccdc_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __ccdc_get_format(ccdc, cfg, fmt->pad, fmt->which);
+ format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccdc_try_format(ccdc, cfg, fmt->pad, &fmt->format, fmt->which);
+ ccdc_try_format(ccdc, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCDC_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __ccdc_get_crop(ccdc, cfg, fmt->which);
+ crop = __ccdc_get_crop(ccdc, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2359,16 +2374,16 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
ccdc_try_crop(ccdc, &fmt->format, crop);
/* Update the source formats. */
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_OF,
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_OF, format,
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
fmt->which);
- format = __ccdc_get_format(ccdc, cfg, CCDC_PAD_SOURCE_VP,
+ format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ccdc_try_format(ccdc, cfg, CCDC_PAD_SOURCE_VP, format,
+ ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -2455,7 +2470,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccdc_set_format(sd, fh ? fh->pad : NULL, &format);
+ ccdc_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
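All the omap3isp modules keep their long-standing sink-to-source propagation in set_fmt(); the conversion only threads sd_state through. A sketch of the shape, where __demo_get_format() and demo_try_format() stand in for the driver-local helper pairs seen above:

enum { DEMO_PAD_SINK, DEMO_PAD_SOURCE };

static struct v4l2_mbus_framefmt *
__demo_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which);
static void demo_try_format(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state,
			    unsigned int pad, struct v4l2_mbus_framefmt *fmt,
			    enum v4l2_subdev_format_whence which);

static int demo_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *fmt)
{
	struct v4l2_mbus_framefmt *format;

	format = __demo_get_format(sd, sd_state, fmt->pad, fmt->which);
	if (!format)
		return -EINVAL;

	demo_try_format(sd, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source. */
	if (fmt->pad == DEMO_PAD_SINK) {
		format = __demo_get_format(sd, sd_state, DEMO_PAD_SOURCE,
					   fmt->which);
		*format = fmt->format;
		demo_try_format(sd, sd_state, DEMO_PAD_SOURCE, format,
				fmt->which);
	}
	return 0;
}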
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index d0a49cdfd22d..366da6fb8b4f 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -618,11 +618,13 @@ static const unsigned int ccp2_fmts[] = {
* return format structure or NULL on error
*/
static struct v4l2_mbus_framefmt *
-__ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *cfg,
+__ccp2_get_format(struct isp_ccp2_device *ccp2,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccp2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ccp2->subdev, sd_state,
+ pad);
else
return &ccp2->formats[pad];
}
@@ -636,7 +638,8 @@ __ccp2_get_format(struct isp_ccp2_device *ccp2, struct v4l2_subdev_pad_config *c
* @which : wanted subdev format
*/
static void ccp2_try_format(struct isp_ccp2_device *ccp2,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -670,7 +673,8 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
* When CCP2 write to memory feature will be added this
* should be changed properly.
*/
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK, which);
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
break;
@@ -688,7 +692,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
* return -EINVAL or zero on success
*/
static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -703,7 +707,7 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SINK,
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
code->which);
code->code = format->code;
}
@@ -712,7 +716,7 @@ static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
@@ -724,7 +728,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -734,7 +738,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ccp2_try_format(ccp2, cfg, fse->pad, &format, fse->which);
+ ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -748,13 +752,14 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccp2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -769,25 +774,27 @@ static int ccp2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* @fmt : pointer to v4l2 subdev format structure
* returns zero
*/
-static int ccp2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ccp2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ccp2_get_format(ccp2, cfg, fmt->pad, fmt->which);
+ format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ccp2_try_format(ccp2, cfg, fmt->pad, &fmt->format, fmt->which);
+ ccp2_try_format(ccp2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CCP2_PAD_SINK) {
- format = __ccp2_get_format(ccp2, cfg, CCP2_PAD_SOURCE,
+ format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- ccp2_try_format(ccp2, cfg, CCP2_PAD_SOURCE, format, fmt->which);
+ ccp2_try_format(ccp2, sd_state, CCP2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -812,7 +819,7 @@ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ccp2_set_format(sd, fh ? fh->pad : NULL, &format);
+ ccp2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
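The enum_frame_size() handlers in these modules all discover limits the same way: run the constraint function once on a 1x1 format (clamped up to the minimum) and once on the largest representable size (a width and height of -1 wrap to UINT_MAX and get clamped down to the maximum). A sketch, reusing the demo_try_format() stand-in from the previous sketch:

static void demo_try_format(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state,
			    unsigned int pad, struct v4l2_mbus_framefmt *fmt,
			    enum v4l2_subdev_format_whence which);

static int demo_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;		/* clamped up to the minimum */
	format.height = 1;
	demo_try_format(sd, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	format.code = fse->code;
	format.width = -1;		/* clamped down to the maximum */
	format.height = -1;
	demo_try_format(sd, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}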
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index fd493c5e4e24..6302e0c94034 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -827,17 +827,20 @@ static const struct isp_video_operations csi2_ispvideo_ops = {
*/
static struct v4l2_mbus_framefmt *
-__csi2_get_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+__csi2_get_format(struct isp_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
else
return &csi2->formats[pad];
}
static void
-csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg,
+csi2_try_format(struct isp_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -867,7 +870,8 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -893,7 +897,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_pad_config *cfg
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -906,7 +910,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
code->which);
switch (code->index) {
case 0:
@@ -930,7 +934,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -942,7 +946,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -952,7 +956,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -966,13 +970,14 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -987,25 +992,27 @@ static int csi2_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -1030,7 +1037,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh ? fh->pad : NULL, &format);
+ csi2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
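Open-time defaults used to be programmed through fh->pad; the file handle now carries a complete subdev state, and a NULL fh still selects the ACTIVE format. A sketch with the 4096x4096 SGRBG10 default these modules use (demo_set_fmt() is a stand-in, as before):

static int demo_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *fmt);

static int demo_init_formats(struct v4l2_subdev *sd,
			     struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format = {
		.pad = 0,
		.which = fh ? V4L2_SUBDEV_FORMAT_TRY
			    : V4L2_SUBDEV_FORMAT_ACTIVE,
		.format = {
			.code = MEDIA_BUS_FMT_SGRBG10_1X10,
			.width = 4096,
			.height = 4096,
		},
	};

	/* fh->state replaces the old fh->pad pointer. */
	return demo_set_fmt(sd, fh ? fh->state : NULL, &format);
}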
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 607b7685c982..53aedec7990d 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1679,21 +1679,25 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
}
static struct v4l2_mbus_framefmt *
-__preview_get_format(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+__preview_get_format(struct isp_prev_device *prev,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&prev->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&prev->subdev, sd_state,
+ pad);
else
return &prev->formats[pad];
}
static struct v4l2_rect *
-__preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_pad_config *cfg,
+__preview_get_crop(struct isp_prev_device *prev,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&prev->subdev, cfg, PREV_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&prev->subdev, sd_state,
+ PREV_PAD_SINK);
else
return &prev->crop;
}
@@ -1729,7 +1733,8 @@ static const unsigned int preview_output_fmts[] = {
* engine limits and the format and crop rectangles on other pads.
*/
static void preview_try_format(struct isp_prev_device *prev,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1770,7 +1775,8 @@ static void preview_try_format(struct isp_prev_device *prev,
case PREV_PAD_SOURCE:
pixelcode = fmt->code;
- *fmt = *__preview_get_format(prev, cfg, PREV_PAD_SINK, which);
+ *fmt = *__preview_get_format(prev, sd_state, PREV_PAD_SINK,
+ which);
switch (pixelcode) {
case MEDIA_BUS_FMT_YUYV8_1X16:
@@ -1788,7 +1794,7 @@ static void preview_try_format(struct isp_prev_device *prev,
* is not supported yet, hardcode the output size to the crop
* rectangle size.
*/
- crop = __preview_get_crop(prev, cfg, which);
+ crop = __preview_get_crop(prev, sd_state, which);
fmt->width = crop->width;
fmt->height = crop->height;
@@ -1862,7 +1868,7 @@ static void preview_try_crop(struct isp_prev_device *prev,
* return -EINVAL or zero on success
*/
static int preview_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -1886,7 +1892,7 @@ static int preview_enum_mbus_code(struct v4l2_subdev *sd,
}
static int preview_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1898,7 +1904,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1908,7 +1914,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- preview_try_format(prev, cfg, fse->pad, &format, fse->which);
+ preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1926,7 +1932,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1942,13 +1948,13 @@ static int preview_get_selection(struct v4l2_subdev *sd,
sel->r.width = INT_MAX;
sel->r.height = INT_MAX;
- format = __preview_get_format(prev, cfg, PREV_PAD_SINK,
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
sel->which);
preview_try_crop(prev, format, &sel->r);
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ sel->r = *__preview_get_crop(prev, sd_state, sel->which);
break;
default:
@@ -1969,7 +1975,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int preview_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
@@ -1988,17 +1994,20 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* rectangle.
*/
if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
- sel->r = *__preview_get_crop(prev, cfg, sel->which);
+ sel->r = *__preview_get_crop(prev, sd_state, sel->which);
return 0;
}
- format = __preview_get_format(prev, cfg, PREV_PAD_SINK, sel->which);
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
+ sel->which);
preview_try_crop(prev, format, &sel->r);
- *__preview_get_crop(prev, cfg, sel->which) = sel->r;
+ *__preview_get_crop(prev, sd_state, sel->which) = sel->r;
/* Update the source format. */
- format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE, sel->which);
- preview_try_format(prev, cfg, PREV_PAD_SOURCE, format, sel->which);
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
+ sel->which);
+ preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
+ sel->which);
return 0;
}
@@ -2010,13 +2019,14 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int preview_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -2031,24 +2041,25 @@ static int preview_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int preview_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __preview_get_format(prev, cfg, fmt->pad, fmt->which);
+ format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- preview_try_format(prev, cfg, fmt->pad, &fmt->format, fmt->which);
+ preview_try_format(prev, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == PREV_PAD_SINK) {
/* Reset the crop rectangle. */
- crop = __preview_get_crop(prev, cfg, fmt->which);
+ crop = __preview_get_crop(prev, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -2057,9 +2068,9 @@ static int preview_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
preview_try_crop(prev, &fmt->format, crop);
/* Update the source format. */
- format = __preview_get_format(prev, cfg, PREV_PAD_SOURCE,
+ format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
fmt->which);
- preview_try_format(prev, cfg, PREV_PAD_SOURCE, format,
+ preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
fmt->which);
}
@@ -2086,7 +2097,7 @@ static int preview_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- preview_set_format(sd, fh ? fh->pad : NULL, &format);
+ preview_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
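Selection rectangles follow the same TRY/ACTIVE split as formats, keyed on the which argument; only the accessor's second parameter changes type. A sketch of the crop-accessor shape, with assumed names:

#include <media/v4l2-subdev.h>

static struct v4l2_rect *
__demo_get_crop(struct v4l2_subdev *sd, struct v4l2_rect *active_crop,
		struct v4l2_subdev_state *sd_state,
		enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(sd, sd_state, 0 /* sink */);
	return active_crop;	/* device-owned ACTIVE rectangle */
}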
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 78d9dd7ea2da..ed2fb0c7a57e 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -114,11 +114,12 @@ static const struct isprsz_coef filter_coefs = {
* return zero
*/
static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+__resizer_get_format(struct isp_res_device *res,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&res->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&res->subdev, sd_state, pad);
else
return &res->formats[pad];
}
@@ -130,11 +131,13 @@ __resizer_get_format(struct isp_res_device *res, struct v4l2_subdev_pad_config *
* @which : wanted subdev crop rectangle
*/
static struct v4l2_rect *
-__resizer_get_crop(struct isp_res_device *res, struct v4l2_subdev_pad_config *cfg,
+__resizer_get_crop(struct isp_res_device *res,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&res->subdev, cfg, RESZ_PAD_SINK);
+ return v4l2_subdev_get_try_crop(&res->subdev, sd_state,
+ RESZ_PAD_SINK);
else
return &res->crop.request;
}
@@ -1220,7 +1223,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1231,9 +1234,9 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
if (sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
sel->which);
- format_source = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format_source = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
sel->which);
switch (sel->target) {
@@ -1248,7 +1251,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *__resizer_get_crop(res, cfg, sel->which);
+ sel->r = *__resizer_get_crop(res, sd_state, sel->which);
resizer_calc_ratios(res, &sel->r, format_source, &ratio);
break;
@@ -1273,7 +1276,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
* Return 0 on success or a negative error code otherwise.
*/
static int resizer_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1287,9 +1290,9 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
sel->pad != RESZ_PAD_SINK)
return -EINVAL;
- format_sink = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
sel->which);
- format_source = *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format_source = *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
sel->which);
dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1307,7 +1310,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
* stored the mangled rectangle.
*/
resizer_try_crop(format_sink, &format_source, &sel->r);
- *__resizer_get_crop(res, cfg, sel->which) = sel->r;
+ *__resizer_get_crop(res, sd_state, sel->which) = sel->r;
resizer_calc_ratios(res, &sel->r, &format_source, &ratio);
dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
@@ -1317,7 +1320,8 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
format_source.width, format_source.height);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
+ sel->which) =
format_source;
return 0;
}
@@ -1328,7 +1332,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
*/
spin_lock_irqsave(&res->lock, flags);
- *__resizer_get_format(res, cfg, RESZ_PAD_SOURCE, sel->which) =
+ *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, sel->which) =
format_source;
res->ratio = ratio;
@@ -1371,7 +1375,8 @@ static unsigned int resizer_max_in_width(struct isp_res_device *res)
* @which : wanted subdev format
*/
static void resizer_try_format(struct isp_res_device *res,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -1392,10 +1397,11 @@ static void resizer_try_format(struct isp_res_device *res,
break;
case RESZ_PAD_SOURCE:
- format = __resizer_get_format(res, cfg, RESZ_PAD_SINK, which);
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
+ which);
fmt->code = format->code;
- crop = *__resizer_get_crop(res, cfg, which);
+ crop = *__resizer_get_crop(res, sd_state, which);
resizer_calc_ratios(res, &crop, fmt, &ratio);
break;
}
@@ -1412,7 +1418,7 @@ static void resizer_try_format(struct isp_res_device *res,
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1427,7 +1433,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __resizer_get_format(res, cfg, RESZ_PAD_SINK,
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
code->which);
code->code = format->code;
}
@@ -1436,7 +1442,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
@@ -1448,7 +1454,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1458,7 +1464,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(res, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1472,13 +1478,14 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int resizer_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1493,33 +1500,34 @@ static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int resizer_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- format = __resizer_get_format(res, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- resizer_try_format(res, cfg, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(res, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == RESZ_PAD_SINK) {
/* reset crop rectangle */
- crop = __resizer_get_crop(res, cfg, fmt->which);
+ crop = __resizer_get_crop(res, sd_state, fmt->which);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
crop->height = fmt->format.height;
/* Propagate the format from sink to source */
- format = __resizer_get_format(res, cfg, RESZ_PAD_SOURCE,
+ format = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- resizer_try_format(res, cfg, RESZ_PAD_SOURCE, format,
+ resizer_try_format(res, sd_state, RESZ_PAD_SOURCE, format,
fmt->which);
}
@@ -1570,7 +1578,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh ? fh->pad : NULL, &format);
+ resizer_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 8811d6dd4ee7..87334477f223 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -222,8 +222,8 @@ static int isp_video_get_graph_data(struct isp_video *video,
struct isp_pipeline *pipe)
{
struct media_graph graph;
- struct media_entity *entity = &video->video.entity;
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_pad *pad = video->video.entity.pads;
+ struct media_device *mdev = video->video.entity.graph_obj.mdev;
struct isp_video *far_end = NULL;
int ret;
@@ -234,23 +234,24 @@ static int isp_video_get_graph_data(struct isp_video *video,
return ret;
}
- media_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, pad);
- while ((entity = media_graph_walk_next(&graph))) {
+ while ((pad = media_graph_walk_next(&graph))) {
struct isp_video *__video;
- media_entity_enum_set(&pipe->ent_enum, entity);
+ media_entity_enum_set(&pipe->ent_enum, pad->entity);
if (far_end != NULL)
continue;
- if (entity == &video->video.entity)
+ if (pad == video->video.entity.pads)
continue;
- if (!is_media_entity_v4l2_video_device(entity))
+ if (!is_media_entity_v4l2_video_device(pad->entity))
continue;
- __video = to_isp_video(media_entity_to_video_device(entity));
+ __video = to_isp_video(media_entity_to_video_device(
+ pad->entity));
if (__video->type != video->type)
far_end = __video;
}
@@ -1093,7 +1094,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
/* Start streaming on the pipeline. No link touching an entity in the
* pipeline can be activated or deactivated once streaming is started.
*/
- pipe = video->video.entity.pipe
+ pipe = video->video.entity.pads->pipe
? to_isp_pipeline(&video->video.entity) : &video->pipe;
ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
@@ -1104,7 +1105,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
pipe->max_rate = pipe->l3_ick;
- ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
+ ret = media_pipeline_start(video->video.entity.pads, &pipe->pipe);
if (ret < 0)
goto err_pipeline_start;
@@ -1161,7 +1162,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return 0;
err_check_format:
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
err_pipeline_start:
/* TODO: Implement PM QoS */
/* The DMA queue must be emptied here, otherwise CCDC interrupts that
@@ -1228,7 +1229,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
video->error = false;
/* TODO: Implement PM QoS */
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
media_entity_enum_cleanup(&pipe->ent_enum);
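In this tree media_pipeline_start()/media_pipeline_stop() are anchored on a struct media_pad rather than the entity, which is why every call site above dereferences .pads (the entity's first pad). The start/undo pairing keeps its shape; a sketch under that assumption, with a stubbed hardware-enable step:

#include <media/media-entity.h>
#include <media/v4l2-dev.h>

static int demo_hw_enable(void) { return 0; }	/* stub */

static int demo_start_streaming(struct video_device *vdev,
				struct media_pipeline *pipe)
{
	struct media_pad *pad = vdev->entity.pads;	/* first pad */
	int ret;

	ret = media_pipeline_start(pad, pipe);
	if (ret < 0)
		return ret;

	ret = demo_hw_enable();
	if (ret < 0)
		media_pipeline_stop(pad);	/* undo on failure */
	return ret;
}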
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index a0908670c0cf..4c9c5b719ec5 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -100,7 +100,7 @@ struct isp_pipeline {
};
#define to_isp_pipeline(__e) \
- container_of((__e)->pipe, struct isp_pipeline, pipe)
+ container_of((__e)->pads->pipe, struct isp_pipeline, pipe)
static inline int isp_pipeline_ready(struct isp_pipeline *pipe)
{
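With the pipeline pointer living on the pad, driver pipeline wrappers resolve through entity->pads->pipe, as the to_isp_pipeline() change above shows. A sketch of the same lookup with placeholder names:

#include <media/media-entity.h>

struct demo_pipeline {
	struct media_pipeline pipe;
	/* driver-specific pipeline state */
};

#define to_demo_pipeline(__e) \
	container_of((__e)->pads->pipe, struct demo_pipeline, pipe)

static bool demo_entity_streaming(struct media_entity *me)
{
	/* A non-NULL pad pipeline means the entity is part of a
	 * started pipeline. */
	return me->pads->pipe != NULL;
}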
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 4ee7d5327df0..60bb0ebe8e25 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1796,6 +1796,9 @@ static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
const struct pxa_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1820,7 +1823,7 @@ static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
v4l2_fill_mbus_format(mf, pix, xlate->code);
- ret = sensor_call(pcdev, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(pcdev, pad, set_fmt, &pad_state, &format);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 2ffcda06706b..573e51c9370f 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -762,12 +762,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__csid_get_format(struct csid_device *csid,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csid->subdev, sd_state,
+ pad);
return &csid->fmt[pad];
}
@@ -781,7 +782,7 @@ __csid_get_format(struct csid_device *csid,
* @which: wanted subdev format
*/
static void csid_try_format(struct csid_device *csid,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -814,7 +815,7 @@ static void csid_try_format(struct csid_device *csid,
/* keep pad formats in sync */
u32 code = fmt->code;
- *fmt = *__csid_get_format(csid, cfg,
+ *fmt = *__csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK, which);
fmt->code = csid_src_pad_code(csid, fmt->code, 0, code);
} else {
@@ -848,7 +849,7 @@ static void csid_try_format(struct csid_device *csid,
* return -EINVAL or zero on success
*/
static int csid_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
@@ -862,7 +863,7 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
if (csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = __csid_get_format(csid, cfg,
+ sink_fmt = __csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK,
code->which);
@@ -889,7 +890,7 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csid_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
@@ -901,7 +902,7 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -911,7 +912,7 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csid_try_format(csid, cfg, fse->pad, &format, fse->which);
+ csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -927,13 +928,13 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csid_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -951,26 +952,26 @@ static int csid_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csid_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
+ format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which);
+ csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_CSID_PAD_SINK) {
- format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC,
+ format = __csid_get_format(csid, sd_state, MSM_CSID_PAD_SRC,
fmt->which);
*format = fmt->format;
- csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format,
+ csid_try_format(csid, sd_state, MSM_CSID_PAD_SRC, format,
fmt->which);
}
@@ -999,7 +1000,7 @@ static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return csid_set_format(sd, fh ? fh->pad : NULL, &format);
+ return csid_set_format(sd, fh ? fh->state : NULL, &format);
}
static const char * const csid_test_pattern_menu[] = {
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 85b24054f35e..efd1517c6341 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -318,12 +318,13 @@ static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__csiphy_get_format(struct csiphy_device *csiphy,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csiphy->subdev, sd_state,
+ pad);
return &csiphy->fmt[pad];
}
@@ -337,7 +338,7 @@ __csiphy_get_format(struct csiphy_device *csiphy,
* @which: wanted subdev format
*/
static void csiphy_try_format(struct csiphy_device *csiphy,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -367,7 +368,8 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
case MSM_CSIPHY_PAD_SRC:
/* Set and return a format same as sink pad */
- *fmt = *__csiphy_get_format(csiphy, cfg, MSM_CSID_PAD_SINK,
+ *fmt = *__csiphy_get_format(csiphy, sd_state,
+ MSM_CSID_PAD_SINK,
which);
break;
@@ -382,7 +384,7 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
* return -EINVAL or zero on success
*/
static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
@@ -397,7 +399,8 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK,
+ format = __csiphy_get_format(csiphy, sd_state,
+ MSM_CSIPHY_PAD_SINK,
code->which);
code->code = format->code;
@@ -414,7 +417,7 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
@@ -426,7 +429,7 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -436,7 +439,7 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which);
+ csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -452,13 +455,13 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csiphy_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -476,26 +479,29 @@ static int csiphy_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int csiphy_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which);
+ format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- csiphy_try_format(csiphy, cfg, fmt->pad, &fmt->format, fmt->which);
+ csiphy_try_format(csiphy, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_CSIPHY_PAD_SINK) {
- format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC,
+ format = __csiphy_get_format(csiphy, sd_state,
+ MSM_CSIPHY_PAD_SRC,
fmt->which);
*format = fmt->format;
- csiphy_try_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, format,
+ csiphy_try_format(csiphy, sd_state, MSM_CSIPHY_PAD_SRC,
+ format,
fmt->which);
}
@@ -525,7 +531,7 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
}
};
- return csiphy_set_format(sd, fh ? fh->pad : NULL, &format);
+ return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index db94cfd6c508..ec2539df1df0 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -844,12 +844,13 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__ispif_get_format(struct ispif_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&line->subdev, sd_state,
+ pad);
return &line->fmt[pad];
}
@@ -863,7 +864,7 @@ __ispif_get_format(struct ispif_line *line,
* @which: wanted subdev format
*/
static void ispif_try_format(struct ispif_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -893,7 +894,7 @@ static void ispif_try_format(struct ispif_line *line,
case MSM_ISPIF_PAD_SRC:
/* Set and return the same format as the sink pad */
- *fmt = *__ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ *fmt = *__ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SINK,
which);
break;
@@ -910,7 +911,7 @@ static void ispif_try_format(struct ispif_line *line,
* return -EINVAL or zero on success
*/
static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
@@ -925,7 +926,8 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK,
+ format = __ispif_get_format(line, sd_state,
+ MSM_ISPIF_PAD_SINK,
code->which);
code->code = format->code;
@@ -942,7 +944,7 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int ispif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
@@ -954,7 +956,7 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -964,7 +966,7 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ispif_try_format(line, cfg, fse->pad, &format, fse->which);
+ ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -980,13 +982,13 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int ispif_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1004,26 +1006,26 @@ static int ispif_get_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int ispif_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ispif_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- ispif_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ ispif_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_ISPIF_PAD_SINK) {
- format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SRC,
+ format = __ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SRC,
fmt->which);
*format = fmt->format;
- ispif_try_format(line, cfg, MSM_ISPIF_PAD_SRC, format,
+ ispif_try_format(line, sd_state, MSM_ISPIF_PAD_SRC, format,
fmt->which);
}
@@ -1052,7 +1054,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return ispif_set_format(sd, fh ? fh->pad : NULL, &format);
+ return ispif_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index b7d2293a5004..c42165300637 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -1466,12 +1466,13 @@ static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
*/
static struct v4l2_mbus_framefmt *
__vfe_get_format(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&line->subdev, sd_state,
+ pad);
return &line->fmt[pad];
}
@@ -1486,11 +1487,11 @@ __vfe_get_format(struct vfe_line *line,
*/
static struct v4l2_rect *
__vfe_get_compose(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&line->subdev, cfg,
+ return v4l2_subdev_get_try_compose(&line->subdev, sd_state,
MSM_VFE_PAD_SINK);
return &line->compose;
@@ -1506,11 +1507,11 @@ __vfe_get_compose(struct vfe_line *line,
*/
static struct v4l2_rect *
__vfe_get_crop(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&line->subdev, cfg,
+ return v4l2_subdev_get_try_crop(&line->subdev, sd_state,
MSM_VFE_PAD_SRC);
return &line->crop;
@@ -1525,7 +1526,7 @@ __vfe_get_crop(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_format(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -1557,14 +1558,15 @@ static void vfe_try_format(struct vfe_line *line,
/* Set and return the same format as the sink pad */
code = fmt->code;
- *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+ *fmt = *__vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
+ which);
fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
if (line->id == VFE_LINE_PIX) {
struct v4l2_rect *rect;
- rect = __vfe_get_crop(line, cfg, which);
+ rect = __vfe_get_crop(line, sd_state, which);
fmt->width = rect->width;
fmt->height = rect->height;
@@ -1584,13 +1586,13 @@ static void vfe_try_format(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_compose(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
+ fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, which);
if (rect->width > fmt->width)
rect->width = fmt->width;
@@ -1623,13 +1625,13 @@ static void vfe_try_compose(struct vfe_line *line,
* @which: wanted subdev format
*/
static void vfe_try_crop(struct vfe_line *line,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_rect *compose;
- compose = __vfe_get_compose(line, cfg, which);
+ compose = __vfe_get_compose(line, sd_state, which);
if (rect->width > compose->width)
rect->width = compose->width;
@@ -1667,7 +1669,7 @@ static void vfe_try_crop(struct vfe_line *line,
* return -EINVAL or zero on success
*/
static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1680,7 +1682,7 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
} else {
struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
+ sink_fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
code->which);
code->code = vfe_src_pad_code(line, sink_fmt->code,
@@ -1701,7 +1703,7 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1713,7 +1715,7 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -1723,7 +1725,7 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- vfe_try_format(line, cfg, fse->pad, &format, fse->which);
+ vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -1739,13 +1741,13 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
@@ -1755,7 +1757,7 @@ static int vfe_get_format(struct v4l2_subdev *sd,
}
static int vfe_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel);
/*
@@ -1767,17 +1769,17 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
+ format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
- vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
+ vfe_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == MSM_VFE_PAD_SINK) {
@@ -1785,11 +1787,11 @@ static int vfe_set_format(struct v4l2_subdev *sd,
int ret;
/* Propagate the format from sink to source */
- format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
+ format = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SRC,
fmt->which);
*format = fmt->format;
- vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
+ vfe_try_format(line, sd_state, MSM_VFE_PAD_SRC, format,
fmt->which);
if (line->id != VFE_LINE_PIX)
@@ -1801,7 +1803,7 @@ static int vfe_set_format(struct v4l2_subdev *sd,
sel.target = V4L2_SEL_TGT_COMPOSE;
sel.r.width = fmt->format.width;
sel.r.height = fmt->format.height;
- ret = vfe_set_selection(sd, cfg, &sel);
+ ret = vfe_set_selection(sd, sd_state, &sel);
if (ret < 0)
return ret;
}
@@ -1818,7 +1820,7 @@ static int vfe_set_format(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1834,7 +1836,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
fmt.pad = sel->pad;
fmt.which = sel->which;
- ret = vfe_get_format(sd, cfg, &fmt);
+ ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
@@ -1844,7 +1846,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
sel->r.height = fmt.format.height;
break;
case V4L2_SEL_TGT_COMPOSE:
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1856,7 +1858,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
else if (sel->pad == MSM_VFE_PAD_SRC)
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1866,7 +1868,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
sel->r.height = rect->height;
break;
case V4L2_SEL_TGT_CROP:
- rect = __vfe_get_crop(line, cfg, sel->which);
+ rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
@@ -1888,7 +1890,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
* Return -EINVAL or zero on success
*/
static int vfe_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
@@ -1902,11 +1904,11 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
sel->pad == MSM_VFE_PAD_SINK) {
struct v4l2_subdev_selection crop = { 0 };
- rect = __vfe_get_compose(line, cfg, sel->which);
+ rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
- vfe_try_compose(line, cfg, &sel->r, sel->which);
+ vfe_try_compose(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source crop selection */
@@ -1914,28 +1916,28 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
crop.pad = MSM_VFE_PAD_SRC;
crop.target = V4L2_SEL_TGT_CROP;
crop.r = *rect;
- ret = vfe_set_selection(sd, cfg, &crop);
+ ret = vfe_set_selection(sd, sd_state, &crop);
} else if (sel->target == V4L2_SEL_TGT_CROP &&
sel->pad == MSM_VFE_PAD_SRC) {
struct v4l2_subdev_format fmt = { 0 };
- rect = __vfe_get_crop(line, cfg, sel->which);
+ rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
- vfe_try_crop(line, cfg, &sel->r, sel->which);
+ vfe_try_crop(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source pad format width and height */
fmt.which = sel->which;
fmt.pad = MSM_VFE_PAD_SRC;
- ret = vfe_get_format(sd, cfg, &fmt);
+ ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
fmt.format.width = rect->width;
fmt.format.height = rect->height;
- ret = vfe_set_format(sd, cfg, &fmt);
+ ret = vfe_set_format(sd, sd_state, &fmt);
} else {
ret = -EINVAL;
}
@@ -1965,7 +1967,7 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
}
};
- return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
+ return vfe_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 9333a7a33d4d..4299dba8245a 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -442,7 +442,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
struct v4l2_subdev *subdev;
int ret;
- ret = media_pipeline_start(&vdev->entity, &video->pipe);
+ ret = media_pipeline_start(vdev->entity.pads, &video->pipe);
if (ret < 0)
goto flush_buffers;
@@ -471,7 +471,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
error:
- media_pipeline_stop(&vdev->entity);
+ media_pipeline_stop(vdev->entity.pads);
flush_buffers:
video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
@@ -503,7 +503,7 @@ static void video_stop_streaming(struct vb2_queue *q)
v4l2_subdev_call(subdev, video, s_stream, 0);
}
- media_pipeline_stop(&vdev->entity);
+ media_pipeline_stop(vdev->entity.pads);
video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
}
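
From this point the media_pipeline_start()/media_pipeline_stop() call sites hand in a media_pad rather than a media_entity, since the pipeline pointer now lives on the pad. Assumed prototypes after this series (a sketch of the new signatures, not copied from media-entity.h):

int media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe);
void media_pipeline_stop(struct media_pad *pad);

Callers pass the entity's first pad, which is why vdev->entity.pads, i.e. &entity->pads[0], appears in every converted call; video devices have a single pad, so this is unambiguous.
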
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 34d003e0e9b9..8b59083b3c89 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -132,13 +132,17 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
return 0;
/*
- * Don't allow link changes if any entity in the graph is
- * streaming, modifying the CHSEL register fields can disrupt
- * running streams.
+ * Don't allow link changes if any stream in the graph is active, as
+ * modifying the CHSEL register fields can disrupt running streams.
*/
- media_device_for_each_entity(entity, &group->mdev)
- if (entity->stream_count)
- return -EBUSY;
+ media_device_for_each_entity(entity, &group->mdev) {
+ struct media_pad *iter;
+
+ media_entity_for_each_pad(entity, iter) {
+ if (iter->stream_count)
+ return -EBUSY;
+ }
+ }
mutex_lock(&group->lock);
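
With per-pad pipelines the stream count moves to the pads as well, so the busy check above has to walk every pad of every entity. A standalone equivalent (the helper name is illustrative; media_entity_for_each_pad() is the iterator used in the hunk):

static bool rvin_group_busy(struct media_device *mdev)
{
	struct media_entity *entity;
	struct media_pad *pad;

	media_device_for_each_entity(entity, mdev)
		media_entity_for_each_pad(entity, pad)
			if (pad->stream_count)	/* an active stream crosses this pad */
				return true;

	return false;
}
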
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 5e8e48a721a0..bfe92e9719af 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -717,7 +717,7 @@ out:
}
static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
@@ -729,7 +729,7 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
priv->mf = format->format;
} else {
- framefmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
*framefmt = format->format;
}
@@ -737,7 +737,7 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
}
static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
@@ -745,7 +745,7 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
return 0;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 692dea300b0d..4d2795b13e71 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -1235,7 +1235,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
sd = media_entity_to_v4l2_subdev(pad->entity);
if (!on) {
- media_pipeline_stop(&vin->vdev.entity);
+ media_pipeline_stop(vin->vdev.entity.pads);
return v4l2_subdev_call(sd, video, s_stream, 0);
}
@@ -1251,8 +1251,8 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
*/
mdev = vin->vdev.entity.graph_obj.mdev;
mutex_lock(&mdev->graph_mutex);
- pipe = sd->entity.pipe ? sd->entity.pipe : &vin->vdev.pipe;
- ret = __media_pipeline_start(&vin->vdev.entity, pipe);
+ pipe = sd->entity.pads->pipe ? sd->entity.pads->pipe : &vin->vdev.pipe;
+ ret = __media_pipeline_start(vin->vdev.entity.pads, pipe);
mutex_unlock(&mdev->graph_mutex);
if (ret)
return ret;
@@ -1261,7 +1261,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
if (ret == -ENOIOCTLCMD)
ret = 0;
if (ret)
- media_pipeline_stop(&vin->vdev.entity);
+ media_pipeline_stop(vin->vdev.entity.pads);
return ret;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0bbe6f9f9206..32563cffbf05 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -250,7 +250,8 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
- struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_state *sd_state;
+ static struct lock_class_key key;
struct v4l2_subdev_format format = {
.which = which,
.pad = vin->parallel->source_pad,
@@ -259,9 +260,9 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
u32 width, height;
int ret;
- pad_cfg = v4l2_subdev_alloc_pad_config(sd);
- if (pad_cfg == NULL)
- return -ENOMEM;
+ sd_state = __v4l2_subdev_state_alloc(sd, "rvin:state->lock", &key);
+ if (IS_ERR(sd_state))
+ return PTR_ERR(sd_state);
if (!rvin_format_from_pixel(vin, pix->pixelformat))
pix->pixelformat = RVIN_DEFAULT_FORMAT;
@@ -273,7 +274,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
width = pix->width;
height = pix->height;
- ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+ ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
ret = 0;
@@ -295,7 +296,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
rvin_format_align(vin, pix);
done:
- v4l2_subdev_free_pad_config(pad_cfg);
+ __v4l2_subdev_state_free(sd_state);
return ret;
}
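
rvin_try_format() used to allocate a throwaway pad config for TRY negotiation; it now allocates a complete subdev state, which additionally requires a lockdep class key and reports failure via ERR_PTR() rather than NULL. The alloc/call/free pairing, as a sketch built from the names in the hunk above:

static struct lock_class_key key;	/* lockdep class for the state lock */
struct v4l2_subdev_state *sd_state;
int ret;

sd_state = __v4l2_subdev_state_alloc(sd, "rvin:state->lock", &key);
if (IS_ERR(sd_state))
	return PTR_ERR(sd_state);

ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
/* ... consume format.format ... */

__v4l2_subdev_state_free(sd_state);
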
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index 4a633ad0e8fa..9cea9fab7bdf 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -794,6 +794,9 @@ static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt
struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
const struct ceu_fmt *ceu_fmt;
u32 mbus_code_old;
u32 mbus_code;
@@ -850,13 +853,13 @@ static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt
* time.
*/
sd_format.format.code = mbus_code;
- ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_cfg, &sd_format);
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_state, &sd_format);
if (ret) {
if (ret == -EINVAL) {
/* fallback */
sd_format.format.code = mbus_code_old;
ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
- &pad_cfg, &sd_format);
+ &pad_state, &sd_format);
}
if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 9ca49af29542..77ad4d3d0d48 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -848,13 +848,13 @@ static int s3c_camif_streamon(struct file *file, void *priv,
if (s3c_vp_active(vp))
return 0;
- ret = media_pipeline_start(sensor, camif->m_pipeline);
+ ret = media_pipeline_start(sensor->pads, camif->m_pipeline);
if (ret < 0)
return ret;
ret = camif_pipeline_validate(camif);
if (ret < 0) {
- media_pipeline_stop(sensor);
+ media_pipeline_stop(sensor->pads);
return ret;
}
@@ -878,7 +878,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv,
ret = vb2_streamoff(&vp->vb_queue, type);
if (ret == 0)
- media_pipeline_stop(&camif->sensor.sd->entity);
+ media_pipeline_stop(camif->sensor.sd->entity.pads);
return ret;
}
@@ -1199,7 +1199,7 @@ static const u32 camif_mbus_formats[] = {
*/
static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(camif_mbus_formats))
@@ -1210,14 +1210,14 @@ static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1278,7 +1278,7 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
}
static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1306,7 +1306,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
__camif_subdev_try_format(camif, mf, fmt->pad);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
mutex_unlock(&camif->lock);
return 0;
@@ -1345,7 +1345,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1358,7 +1358,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
return 0;
}
@@ -1432,7 +1432,7 @@ static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
}
static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
@@ -1446,7 +1446,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
__camif_try_crop(camif, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, cfg, sel->pad) = sel->r;
+ *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
unsigned int i;
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 233e4d3feacd..10430f44240b 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -607,7 +607,7 @@ static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
}
static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
- struct v4l2_subdev_pad_config *pad_cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct media_entity *entity = &dcmi->entity.source->entity;
@@ -649,7 +649,7 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
format->format.width, format->format.height);
fmt.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to set format 0x%x %ux%u on \"%s\":%d pad (%d)\n",
__func__, format->format.code,
@@ -737,7 +737,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_pm_put;
}
- ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
+ ret = media_pipeline_start(dcmi->vdev->entity.pads, &dcmi->pipeline);
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
__func__, ret);
@@ -834,7 +834,7 @@ err_pipeline_stop:
dcmi_pipeline_stop(dcmi);
err_media_pipeline_stop:
- media_pipeline_stop(&dcmi->vdev->entity);
+ media_pipeline_stop(dcmi->vdev->entity.pads);
err_pm_put:
pm_runtime_put(dcmi->dev);
@@ -860,7 +860,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
dcmi_pipeline_stop(dcmi);
- media_pipeline_stop(&dcmi->vdev->entity);
+ media_pipeline_stop(dcmi->vdev->entity.pads);
spin_lock_irq(&dcmi->irqlock);
@@ -967,6 +967,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
struct dcmi_framesize sd_fsize;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1002,7 +1005,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
@@ -1151,6 +1154,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
.which = V4L2_SUBDEV_FORMAT_TRY,
};
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
int ret;
sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
@@ -1164,7 +1170,7 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
- &pad_cfg, &format);
+ &pad_state, &format);
if (ret < 0)
return ret;
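
Drivers such as renesas-ceu and stm32-dcmi that only negotiate a TRY format on a single pad skip the allocation entirely and wrap an on-stack pad config in a state. The recurring pattern, as a sketch (sd stands for whichever source subdev is being queried):

struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_state pad_state = {
	.pads = &pad_cfg,	/* single-pad TRY state on the stack */
};
struct v4l2_subdev_format format = {
	.which = V4L2_SUBDEV_FORMAT_TRY,
};
int ret;

ret = v4l2_subdev_call(sd, pad, set_fmt, &pad_state, &format);
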
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index 2c39cd7f2862..be0defdf74f1 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -266,7 +266,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_clear_dma_queue;
}
- ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
+ ret = media_pipeline_start(csi->vdev.entity.pads, &csi->vdev.pipe);
if (ret < 0)
goto err_free_scratch_buffer;
@@ -330,7 +330,7 @@ err_disable_device:
sun4i_csi_capture_stop(csi);
err_disable_pipeline:
- media_pipeline_stop(&csi->vdev.entity);
+ media_pipeline_stop(csi->vdev.entity.pads);
err_free_scratch_buffer:
dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
@@ -359,7 +359,7 @@ static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
return_all_buffers(csi, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&csi->qlock, flags);
- media_pipeline_stop(&csi->vdev.entity);
+ media_pipeline_stop(csi->vdev.entity.pads);
dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
csi->scratch.paddr);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
index 8f4e254b6a41..99e0fe5f4156 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -273,25 +273,26 @@ static const struct v4l2_mbus_framefmt sun4i_csi_pad_fmt_default = {
};
static int sun4i_csi_subdev_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(subdev, cfg, CSI_SUBDEV_SINK);
+ fmt = v4l2_subdev_get_try_format(subdev, sd_state, CSI_SUBDEV_SINK);
*fmt = sun4i_csi_pad_fmt_default;
return 0;
}
static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
+ fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -301,14 +302,15 @@ static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
}
static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
+ fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -327,7 +329,7 @@ static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
static int
sun4i_csi_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *mbus)
{
if (mbus->index >= ARRAY_SIZE(sun4i_csi_formats))
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 3181d0781b61..537057a75eaa 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -141,7 +141,7 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
video->sequence = 0;
- ret = media_pipeline_start(&video->vdev.entity, &video->vdev.pipe);
+ ret = media_pipeline_start(video->vdev.entity.pads, &video->vdev.pipe);
if (ret < 0)
goto clear_dma_queue;
@@ -207,7 +207,7 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
stop_csi_stream:
sun6i_csi_set_stream(video->csi, false);
stop_media_pipeline:
- media_pipeline_stop(&video->vdev.entity);
+ media_pipeline_stop(video->vdev.entity.pads);
clear_dma_queue:
spin_lock_irqsave(&video->dma_queue_lock, flags);
list_for_each_entry(buf, &video->dma_queue, list)
@@ -231,7 +231,7 @@ static void sun6i_video_stop_streaming(struct vb2_queue *vq)
sun6i_csi_set_stream(video->csi, false);
- media_pipeline_stop(&video->vdev.entity);
+ media_pipeline_stop(video->vdev.entity.pads);
/* Release all active buffers */
spin_lock_irqsave(&video->dma_queue_lock, flags);
diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
deleted file mode 100644
index df472a175e83..000000000000
--- a/drivers/media/platform/ti-vpe/cal-video.c
+++ /dev/null
@@ -1,886 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TI Camera Access Layer (CAL) - Video Device
- *
- * Copyright (c) 2015-2020 Texas Instruments Inc.
- *
- * Authors:
- * Benoit Parrot <bparrot@ti.com>
- * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/delay.h>
-#include <linux/ioctl.h>
-#include <linux/pm_runtime.h>
-#include <linux/videodev2.h>
-
-#include <media/media-device.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "cal.h"
-
-/* ------------------------------------------------------------------
- * Format Handling
- * ------------------------------------------------------------------
- */
-
-static const struct cal_fmt cal_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_YVYU,
- .code = MEDIA_BUS_FMT_YVYU8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_VYUY,
- .code = MEDIA_BUS_FMT_VYUY8_2X8,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
- .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
- .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .bpp = 32,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG8,
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG8,
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB8,
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .bpp = 8,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR10,
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG10,
- .code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG10,
- .code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB10,
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .bpp = 10,
- }, {
- .fourcc = V4L2_PIX_FMT_SBGGR12,
- .code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SGBRG12,
- .code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SGRBG12,
- .code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .bpp = 12,
- }, {
- .fourcc = V4L2_PIX_FMT_SRGGB12,
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .bpp = 12,
- },
-};
-
-/* Print Four-character-code (FOURCC) */
-static char *fourcc_to_str(u32 fmt)
-{
- static char code[5];
-
- code[0] = (unsigned char)(fmt & 0xff);
- code[1] = (unsigned char)((fmt >> 8) & 0xff);
- code[2] = (unsigned char)((fmt >> 16) & 0xff);
- code[3] = (unsigned char)((fmt >> 24) & 0xff);
- code[4] = '\0';
-
- return code;
-}
-
-/* ------------------------------------------------------------------
- * V4L2 Video IOCTLs
- * ------------------------------------------------------------------
- */
-
-static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
- u32 pixelformat)
-{
- const struct cal_fmt *fmt;
- unsigned int k;
-
- for (k = 0; k < ctx->num_active_fmt; k++) {
- fmt = ctx->active_fmt[k];
- if (fmt->fourcc == pixelformat)
- return fmt;
- }
-
- return NULL;
-}
-
-static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
- u32 code)
-{
- const struct cal_fmt *fmt;
- unsigned int k;
-
- for (k = 0; k < ctx->num_active_fmt; k++) {
- fmt = ctx->active_fmt[k];
- if (fmt->code == code)
- return fmt;
- }
-
- return NULL;
-}
-
-static int cal_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct cal_ctx *ctx = video_drvdata(file);
-
- strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
- strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
-
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev_name(ctx->cal->dev));
- return 0;
-}
-
-static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
-
- if (f->index >= ctx->num_active_fmt)
- return -EINVAL;
-
- fmt = ctx->active_fmt[f->index];
-
- f->pixelformat = fmt->fourcc;
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- return 0;
-}
-
-static int __subdev_get_format(struct cal_ctx *ctx,
- struct v4l2_mbus_framefmt *fmt)
-{
- struct v4l2_subdev_format sd_fmt;
- struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
- int ret;
-
- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sd_fmt.pad = 0;
-
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, get_fmt, NULL, &sd_fmt);
- if (ret)
- return ret;
-
- *fmt = *mbus_fmt;
-
- ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
- fmt->width, fmt->height, fmt->code);
-
- return 0;
-}
-
-static int __subdev_set_format(struct cal_ctx *ctx,
- struct v4l2_mbus_framefmt *fmt)
-{
- struct v4l2_subdev_format sd_fmt;
- struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
- int ret;
-
- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sd_fmt.pad = 0;
- *mbus_fmt = *fmt;
-
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, set_fmt, NULL, &sd_fmt);
- if (ret)
- return ret;
-
- ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
- fmt->width, fmt->height, fmt->code);
-
- return 0;
-}
-
-static int cal_calc_format_size(struct cal_ctx *ctx,
- const struct cal_fmt *fmt,
- struct v4l2_format *f)
-{
- u32 bpl, max_width;
-
- if (!fmt) {
- ctx_dbg(3, ctx, "No cal_fmt provided!\n");
- return -EINVAL;
- }
-
- /*
- * Maximum width is bound by the DMA max width in bytes.
- * We need to recalculate the actual max width depending on the
- * number of bytes per pixel required.
- */
- max_width = MAX_WIDTH_BYTES / (ALIGN(fmt->bpp, 8) >> 3);
- v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
- &f->fmt.pix.height, 32, MAX_HEIGHT_LINES, 0, 0);
-
- bpl = (f->fmt.pix.width * ALIGN(fmt->bpp, 8)) >> 3;
- f->fmt.pix.bytesperline = ALIGN(bpl, 16);
-
- f->fmt.pix.sizeimage = f->fmt.pix.height *
- f->fmt.pix.bytesperline;
-
- ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
- __func__, fourcc_to_str(f->fmt.pix.pixelformat),
- f->fmt.pix.width, f->fmt.pix.height,
- f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
-
- return 0;
-}
-
-static int cal_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct cal_ctx *ctx = video_drvdata(file);
-
- *f = ctx->v_fmt;
-
- return 0;
-}
-
-static int cal_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
- struct v4l2_subdev_frame_size_enum fse;
- int ret, found;
-
- fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
- if (!fmt) {
- ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
- f->fmt.pix.pixelformat);
-
- /* Just get the first one enumerated */
- fmt = ctx->active_fmt[0];
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
-
- f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
-
- /* check for/find a valid width/height */
- ret = 0;
- found = false;
- fse.pad = 0;
- fse.code = fmt->code;
- fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- for (fse.index = 0; ; fse.index++) {
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_size,
- NULL, &fse);
- if (ret)
- break;
-
- if ((f->fmt.pix.width == fse.max_width) &&
- (f->fmt.pix.height == fse.max_height)) {
- found = true;
- break;
- } else if ((f->fmt.pix.width >= fse.min_width) &&
- (f->fmt.pix.width <= fse.max_width) &&
- (f->fmt.pix.height >= fse.min_height) &&
- (f->fmt.pix.height <= fse.max_height)) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- /* use existing values as default */
- f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
- f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
- }
-
- /*
- * Use current colorspace for now, it will get
- * updated properly during s_fmt
- */
- f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
- return cal_calc_format_size(ctx, fmt, f);
-}
-
-static int cal_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct cal_ctx *ctx = video_drvdata(file);
- struct vb2_queue *q = &ctx->vb_vidq;
- const struct cal_fmt *fmt;
- struct v4l2_mbus_framefmt mbus_fmt;
- int ret;
-
- if (vb2_is_busy(q)) {
- ctx_dbg(3, ctx, "%s device busy\n", __func__);
- return -EBUSY;
- }
-
- ret = cal_try_fmt_vid_cap(file, priv, f);
- if (ret < 0)
- return ret;
-
- fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
-
- v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
-
- ret = __subdev_set_format(ctx, &mbus_fmt);
- if (ret)
- return ret;
-
- /* Just double check nothing has gone wrong */
- if (mbus_fmt.code != fmt->code) {
- ctx_dbg(3, ctx,
- "%s subdev changed format on us, this should not happen\n",
- __func__);
- return -EINVAL;
- }
-
- v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
- ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
- cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
- ctx->fmt = fmt;
- ctx->m_fmt = mbus_fmt;
- *f = ctx->v_fmt;
-
- return 0;
-}
-
-static int cal_enum_framesizes(struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize)
-{
- struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
- struct v4l2_subdev_frame_size_enum fse;
- int ret;
-
- /* check for valid format */
- fmt = find_format_by_pix(ctx, fsize->pixel_format);
- if (!fmt) {
- ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
- fsize->pixel_format);
- return -EINVAL;
- }
-
- fse.index = fsize->index;
- fse.pad = 0;
- fse.code = fmt->code;
- fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_size, NULL,
- &fse);
- if (ret)
- return ret;
-
- ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
- __func__, fse.index, fse.code, fse.min_width, fse.max_width,
- fse.min_height, fse.max_height);
-
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = fse.max_width;
- fsize->discrete.height = fse.max_height;
-
- return 0;
-}
-
-static int cal_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- if (inp->index > 0)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- sprintf(inp->name, "Camera %u", inp->index);
- return 0;
-}
-
-static int cal_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int cal_s_input(struct file *file, void *priv, unsigned int i)
-{
- return i > 0 ? -EINVAL : 0;
-}
-
-/* timeperframe is arbitrary and continuous */
-static int cal_enum_frameintervals(struct file *file, void *priv,
- struct v4l2_frmivalenum *fival)
-{
- struct cal_ctx *ctx = video_drvdata(file);
- const struct cal_fmt *fmt;
- struct v4l2_subdev_frame_interval_enum fie = {
- .index = fival->index,
- .width = fival->width,
- .height = fival->height,
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- int ret;
-
- fmt = find_format_by_pix(ctx, fival->pixel_format);
- if (!fmt)
- return -EINVAL;
-
- fie.code = fmt->code;
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_frame_interval,
- NULL, &fie);
- if (ret)
- return ret;
- fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- fival->discrete = fie.interval;
-
- return 0;
-}
-
-static const struct v4l2_file_operations cal_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .read = vb2_fop_read,
- .poll = vb2_fop_poll,
- .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = vb2_fop_mmap,
-};
-
-static const struct v4l2_ioctl_ops cal_ioctl_ops = {
- .vidioc_querycap = cal_querycap,
- .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
- .vidioc_enum_framesizes = cal_enum_framesizes,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_expbuf = vb2_ioctl_expbuf,
- .vidioc_enum_input = cal_enum_input,
- .vidioc_g_input = cal_g_input,
- .vidioc_s_input = cal_s_input,
- .vidioc_enum_frameintervals = cal_enum_frameintervals,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-/* ------------------------------------------------------------------
- * videobuf2 Operations
- * ------------------------------------------------------------------
- */
-
-static int cal_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct cal_ctx *ctx = vb2_get_drv_priv(vq);
- unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;
-
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
-
- if (*nplanes) {
- if (sizes[0] < size)
- return -EINVAL;
- size = sizes[0];
- }
-
- *nplanes = 1;
- sizes[0] = size;
-
- ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
-
- return 0;
-}
-
-static int cal_buffer_prepare(struct vb2_buffer *vb)
-{
- struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct cal_buffer *buf = container_of(vb, struct cal_buffer,
- vb.vb2_buf);
- unsigned long size;
-
- if (WARN_ON(!ctx->fmt))
- return -EINVAL;
-
- size = ctx->v_fmt.fmt.pix.sizeimage;
- if (vb2_plane_size(vb, 0) < size) {
- ctx_err(ctx,
- "data will not fit into plane (%lu < %lu)\n",
- vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
- return 0;
-}
-
-static void cal_buffer_queue(struct vb2_buffer *vb)
-{
- struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct cal_buffer *buf = container_of(vb, struct cal_buffer,
- vb.vb2_buf);
- struct cal_dmaqueue *vidq = &ctx->vidq;
- unsigned long flags;
-
- /* recheck locking */
- spin_lock_irqsave(&ctx->slock, flags);
- list_add_tail(&buf->list, &vidq->active);
- spin_unlock_irqrestore(&ctx->slock, flags);
-}
-
-static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct cal_ctx *ctx = vb2_get_drv_priv(vq);
- struct cal_dmaqueue *dma_q = &ctx->vidq;
- struct cal_buffer *buf, *tmp;
- unsigned long addr;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&ctx->slock, flags);
- if (list_empty(&dma_q->active)) {
- spin_unlock_irqrestore(&ctx->slock, flags);
- ctx_dbg(3, ctx, "buffer queue is empty\n");
- return -EIO;
- }
-
- buf = list_entry(dma_q->active.next, struct cal_buffer, list);
- ctx->cur_frm = buf;
- ctx->next_frm = buf;
- list_del(&buf->list);
- spin_unlock_irqrestore(&ctx->slock, flags);
-
- addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
- ctx->sequence = 0;
-
- pm_runtime_get_sync(ctx->cal->dev);
-
- cal_ctx_csi2_config(ctx);
- cal_ctx_pix_proc_config(ctx);
- cal_ctx_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
- ctx->v_fmt.fmt.pix.height);
-
- cal_camerarx_enable_irqs(ctx->phy);
-
- ret = cal_camerarx_start(ctx->phy, ctx->fmt);
- if (ret)
- goto err;
-
- cal_ctx_wr_dma_addr(ctx, addr);
- cal_camerarx_ppi_enable(ctx->phy);
-
- if (cal_debug >= 4)
- cal_quickdump_regs(ctx->cal);
-
- return 0;
-
-err:
- spin_lock_irqsave(&ctx->slock, flags);
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- ctx->cur_frm = NULL;
- ctx->next_frm = NULL;
- list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
- spin_unlock_irqrestore(&ctx->slock, flags);
- return ret;
-}
-
-static void cal_stop_streaming(struct vb2_queue *vq)
-{
- struct cal_ctx *ctx = vb2_get_drv_priv(vq);
- struct cal_dmaqueue *dma_q = &ctx->vidq;
- struct cal_buffer *buf, *tmp;
- unsigned long timeout;
- unsigned long flags;
- bool dma_act;
-
- cal_camerarx_ppi_disable(ctx->phy);
-
- /* wait for stream and dma to finish */
- dma_act = true;
- timeout = jiffies + msecs_to_jiffies(500);
- while (dma_act && time_before(jiffies, timeout)) {
- msleep(50);
-
- spin_lock_irqsave(&ctx->slock, flags);
- dma_act = ctx->dma_act;
- spin_unlock_irqrestore(&ctx->slock, flags);
- }
-
- if (dma_act)
- ctx_err(ctx, "failed to disable dma cleanly\n");
-
- cal_camerarx_disable_irqs(ctx->phy);
- cal_camerarx_stop(ctx->phy);
-
- /* Release all active buffers */
- spin_lock_irqsave(&ctx->slock, flags);
- list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
-
- if (ctx->cur_frm == ctx->next_frm) {
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- } else {
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- ctx->cur_frm = NULL;
- ctx->next_frm = NULL;
- spin_unlock_irqrestore(&ctx->slock, flags);
-
- pm_runtime_put_sync(ctx->cal->dev);
-}
-
-static const struct vb2_ops cal_video_qops = {
- .queue_setup = cal_queue_setup,
- .buf_prepare = cal_buffer_prepare,
- .buf_queue = cal_buffer_queue,
- .start_streaming = cal_start_streaming,
- .stop_streaming = cal_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-/* ------------------------------------------------------------------
- * V4L2 Initialization and Registration
- * ------------------------------------------------------------------
- */
-
-static const struct video_device cal_videodev = {
- .name = CAL_MODULE_NAME,
- .fops = &cal_fops,
- .ioctl_ops = &cal_ioctl_ops,
- .minor = -1,
- .release = video_device_release_empty,
- .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE,
-};
-
-static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
-{
- struct v4l2_subdev_mbus_code_enum mbus_code;
- struct v4l2_mbus_framefmt mbus_fmt;
- const struct cal_fmt *fmt;
- unsigned int i, j, k;
- int ret = 0;
-
- /* Enumerate sub device formats and enable all matching local formats */
- ctx->active_fmt = devm_kcalloc(ctx->cal->dev, ARRAY_SIZE(cal_formats),
- sizeof(*ctx->active_fmt), GFP_KERNEL);
- ctx->num_active_fmt = 0;
-
- for (j = 0, i = 0; ret != -EINVAL; ++j) {
-
- memset(&mbus_code, 0, sizeof(mbus_code));
- mbus_code.index = j;
- mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(ctx->phy->sensor, pad, enum_mbus_code,
- NULL, &mbus_code);
- if (ret)
- continue;
-
- ctx_dbg(2, ctx,
- "subdev %s: code: %04x idx: %u\n",
- ctx->phy->sensor->name, mbus_code.code, j);
-
- for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
- const struct cal_fmt *fmt = &cal_formats[k];
-
- if (mbus_code.code == fmt->code) {
- ctx->active_fmt[i] = fmt;
- ctx_dbg(2, ctx,
- "matched fourcc: %s: code: %04x idx: %u\n",
- fourcc_to_str(fmt->fourcc),
- fmt->code, i);
- ctx->num_active_fmt = ++i;
- }
- }
- }
-
- if (i == 0) {
- ctx_err(ctx, "No suitable format reported by subdev %s\n",
- ctx->phy->sensor->name);
- return -EINVAL;
- }
-
- ret = __subdev_get_format(ctx, &mbus_fmt);
- if (ret)
- return ret;
-
- fmt = find_format_by_code(ctx, mbus_fmt.code);
- if (!fmt) {
- ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
- mbus_fmt.code);
- return -EINVAL;
- }
-
- /* Save current subdev format */
- v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
- ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
- cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
- ctx->fmt = fmt;
- ctx->m_fmt = mbus_fmt;
-
- return 0;
-}
-
-int cal_ctx_v4l2_register(struct cal_ctx *ctx)
-{
- struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;
- struct video_device *vfd = &ctx->vdev;
- int ret;
-
- ret = cal_ctx_v4l2_init_formats(ctx);
- if (ret)
- return ret;
-
- ret = v4l2_ctrl_add_handler(hdl, ctx->phy->sensor->ctrl_handler, NULL,
- true);
- if (ret < 0) {
- ctx_err(ctx, "Failed to add sensor ctrl handler\n");
- return ret;
- }
-
- ret = video_register_device(vfd, VFL_TYPE_VIDEO, cal_video_nr);
- if (ret < 0) {
- ctx_err(ctx, "Failed to register video device\n");
- return ret;
- }
-
- ctx_info(ctx, "V4L2 device registered as %s\n",
- video_device_node_name(vfd));
-
- return 0;
-}
-
-void cal_ctx_v4l2_unregister(struct cal_ctx *ctx)
-{
- ctx_dbg(1, ctx, "unregistering %s\n",
- video_device_node_name(&ctx->vdev));
-
- video_unregister_device(&ctx->vdev);
-}
-
-int cal_ctx_v4l2_init(struct cal_ctx *ctx)
-{
- struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;
- struct video_device *vfd = &ctx->vdev;
- struct vb2_queue *q = &ctx->vb_vidq;
- int ret;
-
- INIT_LIST_HEAD(&ctx->vidq.active);
- spin_lock_init(&ctx->slock);
- mutex_init(&ctx->mutex);
-
- /* Initialize the vb2 queue. */
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- q->drv_priv = ctx;
- q->buf_struct_size = sizeof(struct cal_buffer);
- q->ops = &cal_video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->lock = &ctx->mutex;
- q->min_buffers_needed = 3;
- q->dev = ctx->cal->dev;
-
- ret = vb2_queue_init(q);
- if (ret)
- return ret;
-
- /* Initialize the video device and media entity. */
- *vfd = cal_videodev;
- vfd->v4l2_dev = &ctx->cal->v4l2_dev;
- vfd->queue = q;
- snprintf(vfd->name, sizeof(vfd->name), "CAL output %u", ctx->index);
- vfd->lock = &ctx->mutex;
- video_set_drvdata(vfd, ctx);
-
- ctx->pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &ctx->pad);
- if (ret < 0)
- return ret;
-
- /* Initialize the control handler. */
- ret = v4l2_ctrl_handler_init(hdl, 11);
- if (ret < 0) {
- ctx_err(ctx, "Failed to init ctrl handler\n");
- goto error;
- }
-
- vfd->ctrl_handler = hdl;
-
- return 0;
-
-error:
- media_entity_cleanup(&vfd->entity);
- return ret;
-}
-
-void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx)
-{
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- media_entity_cleanup(&ctx->vdev.entity);
-}
diff --git a/drivers/media/platform/ti/Makefile b/drivers/media/platform/ti/Makefile
new file mode 100644
index 000000000000..17c9cfb74f66
--- /dev/null
+++ b/drivers/media/platform/ti/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += cal/
+obj-y += vpe/
+obj-y += j721e-csi2rx/
diff --git a/drivers/media/platform/ti/cal/Makefile b/drivers/media/platform/ti/cal/Makefile
new file mode 100644
index 000000000000..45ac35585f0b
--- /dev/null
+++ b/drivers/media/platform/ti/cal/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+ti-cal-y := cal.o cal-camerarx.o cal-video.o
diff --git a/drivers/media/platform/ti-vpe/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 806cbf175d39..3ccd06d37924 100644
--- a/drivers/media/platform/ti-vpe/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -45,22 +45,62 @@ static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val)
* ------------------------------------------------------------------
*/
-static s64 cal_camerarx_get_external_rate(struct cal_camerarx *phy)
+static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
{
- struct v4l2_ctrl *ctrl;
- s64 rate;
+ struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
+ u32 num_lanes = mipi_csi2->num_data_lanes;
+ struct v4l2_subdev_state *state;
+ u32 bpp;
+ s64 freq;
- ctrl = v4l2_ctrl_find(phy->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
- if (!ctrl) {
- phy_err(phy, "no pixel rate control in subdev: %s\n",
- phy->sensor->name);
- return -EPIPE;
+ /*
+ * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
+ * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
+ *
+ * With multistream input there is no single pixel rate, and thus we
+ * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp, which
+ * causes v4l2_get_link_freq() to return an error if it falls back to
+ * V4L2_CID_PIXEL_RATE.
+ */
+
+ state = v4l2_subdev_lock_active_state(&phy->subdev);
+
+ if (state->routing.num_routes == 0) {
+ v4l2_subdev_unlock_state(state);
+ return -EINVAL;
}
- rate = v4l2_ctrl_g_ctrl_int64(ctrl);
- phy_dbg(3, phy, "sensor Pixel Rate: %llu\n", rate);
+ if (state->routing.num_routes > 1) {
+ bpp = 0;
+ } else {
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_subdev_route *route = &state->routing.routes[0];
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_state_get_stream_format(state, route->sink_pad,
+ route->sink_stream);
+
+ fmtinfo = cal_format_by_code(fmt->code);
+ if (!fmtinfo) {
+ v4l2_subdev_unlock_state(state);
+ return -EINVAL;
+ }
+
+ bpp = fmtinfo->bpp;
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes);
+ if (freq < 0) {
+ phy_err(phy, "failed to get link freq for subdev '%s'\n",
+ phy->source->name);
+ return freq;
+ }
- return rate;
+ phy_dbg(3, phy, "Source Link Freq: %llu\n", freq);
+
+ return freq;
}
static void cal_camerarx_lane_config(struct cal_camerarx *phy)
@@ -116,35 +156,19 @@ void cal_camerarx_disable(struct cal_camerarx *phy)
#define TCLK_MISS 1
#define TCLK_SETTLE 14
-static void cal_camerarx_config(struct cal_camerarx *phy, s64 external_rate,
- const struct cal_fmt *fmt)
+static void cal_camerarx_config(struct cal_camerarx *phy, s64 link_freq)
{
unsigned int reg0, reg1;
unsigned int ths_term, ths_settle;
- unsigned int csi2_ddrclk_khz;
- struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
- &phy->endpoint.bus.mipi_csi2;
- u32 num_lanes = mipi_csi2->num_data_lanes;
/* DPHY timing configuration */
- /*
- * CSI-2 is DDR and we only count used lanes.
- *
- * csi2_ddrclk_khz = external_rate / 1000
- * / (2 * num_lanes) * fmt->bpp;
- */
- csi2_ddrclk_khz = div_s64(external_rate * fmt->bpp,
- 2 * num_lanes * 1000);
-
- phy_dbg(1, phy, "csi2_ddrclk_khz: %d\n", csi2_ddrclk_khz);
-
/* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */
- ths_term = 20 * csi2_ddrclk_khz / 1000000;
+ ths_term = div_s64(20 * link_freq, 1000 * 1000 * 1000);
phy_dbg(1, phy, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
/* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */
- ths_settle = (105 * csi2_ddrclk_khz / 1000000) + 4;
+ ths_settle = div_s64(105 * link_freq, 1000 * 1000 * 1000) + 4;
phy_dbg(1, phy, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
reg0 = camerarx_read(phy, CAL_CSI2_PHY_REG0);
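Note: once the link frequency is known, the DPHY timing values above reduce to integer arithmetic. A minimal standalone sketch of that math, assuming a 100 MHz pixel rate, 16 bpp and two data lanes (illustrative values, not from the patch); v4l2_get_link_freq()'s pixel-rate fallback computes rate * mul / div:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t pixel_rate = 100000000;	/* from V4L2_CID_PIXEL_RATE */
	unsigned int bpp = 16, num_lanes = 2;

	/* Fallback path of v4l2_get_link_freq(): rate * mul / div. */
	int64_t link_freq = pixel_rate * bpp / (2 * num_lanes);	/* 400 MHz */

	/* THS_TERM: floor(20 ns / DDRClk period) == 20e-9 * link_freq. */
	unsigned int ths_term = 20 * link_freq / 1000000000;	/* 8 */
	/* THS_SETTLE: floor(105 ns / DDRClk period) + 4. */
	unsigned int ths_settle = 105 * link_freq / 1000000000 + 4;	/* 46 */

	printf("link_freq=%lld ths_term=%u ths_settle=%u\n",
	       (long long)link_freq, ths_term, ths_settle);
	return 0;
}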
@@ -234,23 +258,82 @@ static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy)
phy_err(phy, "Timeout waiting for stop state\n");
}
-int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
+static void cal_camerarx_enable_irqs(struct cal_camerarx *phy)
+{
+ const u32 cio_err_mask =
+ CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
+ CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
+ const u32 vc_err_mask =
+ CAL_CSI2_VC_IRQ_CS_IRQ_MASK(0) |
+ CAL_CSI2_VC_IRQ_CS_IRQ_MASK(1) |
+ CAL_CSI2_VC_IRQ_CS_IRQ_MASK(2) |
+ CAL_CSI2_VC_IRQ_CS_IRQ_MASK(3) |
+ CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(0) |
+ CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(1) |
+ CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(2) |
+ CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(3);
+
+ /* Enable CIO & VC error IRQs. */
+ cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0),
+ CAL_HL_IRQ_CIO_MASK(phy->instance) |
+ CAL_HL_IRQ_VC_MASK(phy->instance));
+ cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance),
+ cio_err_mask);
+ cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance),
+ vc_err_mask);
+}
+
+static void cal_camerarx_disable_irqs(struct cal_camerarx *phy)
+{
+ /* Disable CIO error irqs */
+ cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0),
+ CAL_HL_IRQ_CIO_MASK(phy->instance) |
+ CAL_HL_IRQ_VC_MASK(phy->instance));
+ cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0);
+ cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), 0);
+}
+
+static void cal_camerarx_ppi_enable(struct cal_camerarx *phy)
{
- s64 external_rate;
+ cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
+ 1, CAL_CSI2_PPI_CTRL_ECC_EN_MASK);
+
+ cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
+ 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
+{
+ cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
+ 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static int cal_camerarx_start(struct cal_camerarx *phy)
+{
+ s64 link_freq;
u32 sscounter;
u32 val;
int ret;
- external_rate = cal_camerarx_get_external_rate(phy);
- if (external_rate < 0)
- return external_rate;
+ if (phy->enable_count > 0) {
+ phy->enable_count++;
+ return 0;
+ }
- ret = v4l2_subdev_call(phy->sensor, core, s_power, 1);
+ link_freq = cal_camerarx_get_ext_link_freq(phy);
+ if (link_freq < 0)
+ return link_freq;
+
+ ret = v4l2_subdev_call(phy->source, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
phy_err(phy, "power on failed in subdev\n");
return ret;
}
+ cal_camerarx_enable_irqs(phy);
+
/*
* CSI-2 PHY Link Initialization Sequence, according to the DRA74xP /
* DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x /
@@ -275,7 +358,7 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
* 2. CSI PHY and link initialization sequence.
*
* a. Deassert the CSI-2 PHY reset. Do not wait for reset completion
- * at this point, as it requires the external sensor to send the
+ * at this point, as it requires the external source to send the
* CSI-2 HS clock.
*/
cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance),
@@ -289,7 +372,7 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
camerarx_read(phy, CAL_CSI2_PHY_REG0);
/* Program the PHY timing parameters. */
- cal_camerarx_config(phy, external_rate, fmt);
+ cal_camerarx_config(phy, link_freq);
/*
* b. Assert the FORCERXMODE signal.
@@ -334,12 +417,13 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
cal_camerarx_power(phy, true);
/*
- * Start the sensor to enable the CSI-2 HS clock. We can now wait for
+ * Start the source to enable the CSI-2 HS clock. We can now wait for
* CSI-2 PHY reset to complete.
*/
- ret = v4l2_subdev_call(phy->sensor, video, s_stream, 1);
+ ret = v4l2_subdev_call(phy->source, video, s_stream, 1);
if (ret) {
- v4l2_subdev_call(phy->sensor, core, s_power, 0);
+ v4l2_subdev_call(phy->source, core, s_power, 0);
+ cal_camerarx_disable_irqs(phy);
phy_err(phy, "stream on failed in subdev\n");
return ret;
}
@@ -359,14 +443,25 @@ int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt)
* implemented.
*/
+ /* Finally, enable the PHY Protocol Interface (PPI). */
+ cal_camerarx_ppi_enable(phy);
+
+ phy->enable_count++;
+
return 0;
}
-void cal_camerarx_stop(struct cal_camerarx *phy)
+static void cal_camerarx_stop(struct cal_camerarx *phy)
{
- unsigned int i;
int ret;
+ if (--phy->enable_count > 0)
+ return;
+
+ cal_camerarx_ppi_disable(phy);
+
+ cal_camerarx_disable_irqs(phy);
+
cal_camerarx_power(phy, false);
/* Assert Complex IO Reset */
@@ -374,27 +469,17 @@ void cal_camerarx_stop(struct cal_camerarx *phy)
CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
- /* Wait for power down completion */
- for (i = 0; i < 10; i++) {
- if (cal_read_field(phy->cal,
- CAL_CSI2_COMPLEXIO_CFG(phy->instance),
- CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
- CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING)
- break;
- usleep_range(1000, 1100);
- }
- phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset (%d) %s\n",
+ phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset\n",
phy->instance,
- cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)), i,
- (i >= 10) ? "(timeout)" : "");
+ cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)));
/* Disable the phy */
cal_camerarx_disable(phy);
- if (v4l2_subdev_call(phy->sensor, video, s_stream, 0))
+ if (v4l2_subdev_call(phy->source, video, s_stream, 0))
phy_err(phy, "stream off failed in subdev\n");
- ret = v4l2_subdev_call(phy->sensor, core, s_power, 0);
+ ret = v4l2_subdev_call(phy->source, core, s_power, 0);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
phy_err(phy, "power off failed in subdev\n");
}
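Note: start and stop are now reference counted so that several contexts can share one PHY; the first caller powers the hardware up and the last one powers it down (in the driver, phy->mutex is held by the s_stream handler). A runnable sketch of the pattern, with printf() standing in for the real power paths:

#include <stdio.h>

static unsigned int enable_count;

static int phy_start(void)
{
	if (enable_count > 0) {
		enable_count++;		/* already running, take a reference */
		return 0;
	}
	printf("hw start\n");		/* stands in for the power-up path */
	enable_count++;
	return 0;
}

static void phy_stop(void)
{
	if (--enable_count > 0)
		return;			/* other users remain */
	printf("hw stop\n");		/* stands in for the power-down path */
}

int main(void)
{
	phy_start();	/* powers up */
	phy_start();	/* refcount only */
	phy_stop();	/* refcount only */
	phy_stop();	/* powers down */
	return 0;
}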
@@ -428,74 +513,6 @@ void cal_camerarx_i913_errata(struct cal_camerarx *phy)
camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10);
}
-/*
- * Enable the expected IRQ sources
- */
-void cal_camerarx_enable_irqs(struct cal_camerarx *phy)
-{
- u32 val;
-
- const u32 cio_err_mask =
- CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
- CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
-
- /* Enable CIO error irqs */
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0),
- CAL_HL_IRQ_CIO_MASK(phy->instance));
- cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance),
- cio_err_mask);
-
- /* Always enable OCPO error */
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
-
- /* Enable IRQ_WDMA_END 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(1), val);
- /* Enable IRQ_WDMA_START 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_SET(2), val);
- /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
- cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(0), 0xFF000000);
-}
-
-void cal_camerarx_disable_irqs(struct cal_camerarx *phy)
-{
- u32 val;
-
- /* Disable CIO error irqs */
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0),
- CAL_HL_IRQ_CIO_MASK(phy->instance));
- cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0);
-
- /* Disable IRQ_WDMA_END 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(1), val);
- /* Disable IRQ_WDMA_START 0/1 */
- val = 0;
- cal_set_field(&val, 1, CAL_HL_IRQ_MASK(phy->instance));
- cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(2), val);
- /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
- cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(0), 0);
-}
-
-void cal_camerarx_ppi_enable(struct cal_camerarx *phy)
-{
- cal_write(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), BIT(3));
- cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
- 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
-}
-
-void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
-{
- cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
- 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
-}
-
static int cal_camerarx_regmap_init(struct cal_dev *cal,
struct cal_camerarx *phy)
{
@@ -533,8 +550,8 @@ static int cal_camerarx_regmap_init(struct cal_dev *cal,
static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
{
struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint;
- struct device_node *ep_node;
char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES * 2];
+ struct device_node *ep_node;
unsigned int i;
int ret;
@@ -582,25 +599,339 @@ static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
endpoint->bus.mipi_csi2.flags);
/* Retrieve the connected device and store it for later use. */
- phy->sensor_node = of_graph_get_remote_port_parent(ep_node);
- if (!phy->sensor_node) {
+ phy->source_ep_node = of_graph_get_remote_endpoint(ep_node);
+ phy->source_node = of_graph_get_port_parent(phy->source_ep_node);
+ if (!phy->source_node) {
phy_dbg(3, phy, "Can't get remote parent\n");
+ of_node_put(phy->source_ep_node);
ret = -EINVAL;
goto done;
}
- phy_dbg(1, phy, "Found connected device %pOFn\n", phy->sensor_node);
+ phy_dbg(1, phy, "Found connected device %pOFn\n", phy->source_node);
done:
of_node_put(ep_node);
return ret;
}
+int cal_camerarx_get_remote_frame_desc(struct cal_camerarx *phy,
+ struct v4l2_mbus_frame_desc *desc)
+{
+ struct media_pad *pad;
+ int ret;
+
+ if (!phy->source)
+ return -EPIPE;
+
+ pad = media_entity_remote_pad(&phy->pads[CAL_CAMERARX_PAD_SINK]);
+ if (!pad)
+ return -EPIPE;
+
+ ret = v4l2_subdev_call(phy->source, pad, get_frame_desc, pad->index,
+ desc);
+ if (ret)
+ return ret;
+
+ if (desc->type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ dev_err(phy->cal->dev,
+ "Frame descriptor does not describe CSI-2 link");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ * V4L2 Subdev Operations
+ * ------------------------------------------------------------------
+ */
+
+static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct cal_camerarx, subdev);
+}
+
+struct cal_camerarx *
+cal_camerarx_get_phy_from_entity(struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ if (!sd)
+ return NULL;
+
+ return to_cal_camerarx(sd);
+}
+
+static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ int ret = 0;
+
+ mutex_lock(&phy->mutex);
+
+ if (enable)
+ ret = cal_camerarx_start(phy);
+ else
+ cal_camerarx_stop(phy);
+
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+
+static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ int ret = 0;
+
+ v4l2_subdev_lock_state(state);
+
+ /* No transcoding, source and sink codes must match. */
+ if (cal_rx_pad_is_source(code->pad)) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (code->index > 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fmt = v4l2_state_get_opposite_stream_format(state, code->pad,
+ code->stream);
+
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ code->code = fmt->code;
+ } else {
+ if (code->index >= cal_num_formats) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ code->code = cal_formats[code->index].code;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct cal_format_info *fmtinfo;
+ int ret = 0;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ v4l2_subdev_lock_state(state);
+
+ /* No transcoding, source and sink formats must match. */
+ if (cal_rx_pad_is_source(fse->pad)) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_state_get_opposite_stream_format(state, fse->pad,
+ fse->stream);
+
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fse->code != fmt->code) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fse->min_width = fmt->width;
+ fse->max_width = fmt->width;
+ fse->min_height = fmt->height;
+ fse->max_height = fmt->height;
+ } else {
+ fmtinfo = cal_format_by_code(fse->code);
+ if (!fmtinfo) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
+ fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
+ fse->min_height = CAL_MIN_HEIGHT_LINES;
+ fse->max_height = CAL_MAX_HEIGHT_LINES;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int bpp;
+ int ret = 0;
+
+ /* No transcoding, source and sink formats must match. */
+ if (cal_rx_pad_is_source(format->pad))
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ /*
+ * Default to the first format if the requested media bus code isn't
+ * supported.
+ */
+ fmtinfo = cal_format_by_code(format->format.code);
+ if (!fmtinfo)
+ fmtinfo = &cal_formats[0];
+
+ /* Clamp the size, update the code. The colorspace is accepted as-is. */
+ bpp = ALIGN(fmtinfo->bpp, 8);
+
+ format->format.width = clamp_t(unsigned int, format->format.width,
+ CAL_MIN_WIDTH_BYTES * 8 / bpp,
+ CAL_MAX_WIDTH_BYTES * 8 / bpp);
+ format->format.height = clamp_t(unsigned int, format->format.height,
+ CAL_MIN_HEIGHT_LINES,
+ CAL_MAX_HEIGHT_LINES);
+ format->format.code = fmtinfo->code;
+ format->format.field = V4L2_FIELD_NONE;
+
+ /* Store the format and propagate it to the source pad. */
+
+ v4l2_subdev_lock_state(state);
+
+ fmt = v4l2_state_get_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+ fmt = v4l2_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *fmt = format->format;
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int _cal_camerarx_sd_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ ret = v4l2_routing_simple_verify(routing);
+ if (ret)
+ return ret;
+
+ /* TODO: verify that all streams from a single RX port go to a single TX port */
+
+ v4l2_subdev_lock_state(state);
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+
+ v4l2_subdev_unlock_state(state);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ return _cal_camerarx_sd_set_routing(sd, state, routing);
+}
+
+static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = { {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = 1,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ } };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = 1,
+ .routes = routes,
+ };
+
+ /* Initialize routing to a single route to the first source pad */
+ return _cal_camerarx_sd_set_routing(sd, state, &routing);
+}
+
+static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
+ .s_stream = cal_camerarx_sd_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
+ .init_cfg = cal_camerarx_sd_init_cfg,
+ .enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
+ .enum_frame_size = cal_camerarx_sd_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = cal_camerarx_sd_set_fmt,
+ .set_routing = cal_camerarx_sd_set_routing,
+};
+
+static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
+ .video = &cal_camerarx_video_ops,
+ .pad = &cal_camerarx_pad_ops,
+};
+
+static struct media_entity_operations cal_camerarx_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = v4l2_subdev_has_route,
+};
+
+/* ------------------------------------------------------------------
+ * Create and Destroy
+ * ------------------------------------------------------------------
+ */
+
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
unsigned int instance)
{
struct platform_device *pdev = to_platform_device(cal->dev);
struct cal_camerarx *phy;
+ struct v4l2_subdev *sd;
+ unsigned int i;
int ret;
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
@@ -610,6 +941,9 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
phy->cal = cal;
phy->instance = instance;
+ spin_lock_init(&phy->vc_lock);
+ mutex_init(&phy->mutex);
+
phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
(instance == 0) ?
"cal_rx_core0" :
@@ -618,7 +952,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
if (IS_ERR(phy->base)) {
cal_err(cal, "failed to ioremap\n");
ret = PTR_ERR(phy->base);
- goto error;
+ goto err_free_phy;
}
cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
@@ -626,15 +960,45 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
ret = cal_camerarx_regmap_init(cal, phy);
if (ret)
- goto error;
+ goto err_free_phy;
ret = cal_camerarx_parse_dt(phy);
if (ret)
- goto error;
+ goto err_free_phy;
+
+ /* Initialize the V4L2 subdev and media entity. */
+ sd = &phy->subdev;
+ v4l2_subdev_init(sd, &cal_camerarx_subdev_ops);
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_MULTIPLEXED;
+ snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance);
+ sd->dev = cal->dev;
+
+ phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ for (i = CAL_CAMERARX_PAD_FIRST_SOURCE; i < CAL_CAMERARX_NUM_PADS; ++i)
+ phy->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.ops = &cal_camerarx_media_ops;
+ ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads),
+ phy->pads);
+ if (ret)
+ goto err_free_phy;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
+ if (ret)
+ goto err_free_state;
return phy;
-error:
+err_free_state:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&phy->subdev.entity);
+err_free_phy:
kfree(phy);
return ERR_PTR(ret);
}
@@ -644,6 +1008,13 @@ void cal_camerarx_destroy(struct cal_camerarx *phy)
if (!phy)
return;
- of_node_put(phy->sensor_node);
+ v4l2_device_unregister_subdev(&phy->subdev);
+
+ v4l2_subdev_cleanup(&phy->subdev);
+
+ media_entity_cleanup(&phy->subdev.entity);
+ of_node_put(phy->source_ep_node);
+ of_node_put(phy->source_node);
+ mutex_destroy(&phy->mutex);
kfree(phy);
}
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
new file mode 100644
index 000000000000..963e0bf3436d
--- /dev/null
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Camera Access Layer (CAL) - Video Device
+ *
+ * Copyright (c) 2015-2020 Texas Instruments Inc.
+ *
+ * Authors:
+ * Benoit Parrot <bparrot@ti.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/ioctl.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "cal.h"
+
+/* Print Four-character-code (FOURCC) */
+static char *fourcc_to_str(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
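Note: the helper above returns a static buffer and is therefore not reentrant, which is acceptable for its debug-print use here. A reentrant variant where the caller supplies the buffer (hypothetical, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static char *fourcc_to_str_buf(uint32_t fmt, char buf[5])
{
	buf[0] = fmt & 0xff;
	buf[1] = (fmt >> 8) & 0xff;
	buf[2] = (fmt >> 16) & 0xff;
	buf[3] = (fmt >> 24) & 0xff;
	buf[4] = '\0';
	return buf;
}

int main(void)
{
	char buf[5];

	/* 0x59565955 is 'U' 'Y' 'V' 'Y' packed little-endian (UYVY). */
	printf("%s\n", fourcc_to_str_buf(0x59565955, buf));
	return 0;
}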
+/* ------------------------------------------------------------------
+ * V4L2 Common IOCTLs
+ * ------------------------------------------------------------------
+ */
+
+static int cal_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(ctx->cal->dev));
+ return 0;
+}
+
+static int cal_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ * V4L2 Video Node Centric IOCTLs
+ * ------------------------------------------------------------------
+ */
+
+static const struct cal_format_info *find_format_by_pix(struct cal_ctx *ctx,
+ u32 pixelformat)
+{
+ const struct cal_format_info *fmtinfo;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmtinfo = ctx->active_fmt[k];
+ if (fmtinfo->fourcc == pixelformat)
+ return fmtinfo;
+ }
+
+ return NULL;
+}
+
+static const struct cal_format_info *find_format_by_code(struct cal_ctx *ctx,
+ u32 code)
+{
+ const struct cal_format_info *fmtinfo;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmtinfo = ctx->active_fmt[k];
+ if (fmtinfo->code == code)
+ return fmtinfo;
+ }
+
+ return NULL;
+}
+
+static int cal_legacy_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_format_info *fmtinfo;
+
+ if (f->index >= ctx->num_active_fmt)
+ return -EINVAL;
+
+ fmtinfo = ctx->active_fmt[f->index];
+
+ f->pixelformat = fmtinfo->fourcc;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return 0;
+}
+
+static int __subdev_get_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ struct v4l2_subdev *sd = ctx->phy->source;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ *fmt = *mbus_fmt;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static int __subdev_set_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ struct v4l2_subdev *sd = ctx->phy->source;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
+
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static void cal_calc_format_size(struct cal_ctx *ctx,
+ const struct cal_format_info *fmtinfo,
+ struct v4l2_format *f)
+{
+ u32 bpl, max_width;
+
+ /*
+ * Maximum width is bounded by the DMA max width in bytes.
+ * We need to recalculate the actual maximum width depending on the
+ * number of bytes per pixel required.
+ */
+ max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(fmtinfo->bpp, 8) >> 3);
+ v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
+ &f->fmt.pix.height, 32, CAL_MAX_HEIGHT_LINES,
+ 0, 0);
+
+ bpl = (f->fmt.pix.width * ALIGN(fmtinfo->bpp, 8)) >> 3;
+ f->fmt.pix.bytesperline = ALIGN(bpl, 16);
+
+ f->fmt.pix.sizeimage = f->fmt.pix.height *
+ f->fmt.pix.bytesperline;
+
+ ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+}
+
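Note: a worked example of cal_calc_format_size() above, assuming a 16 bpp format (e.g. UYVY) at 640x480; values are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int bpp = 16, width = 640, height = 480;
	unsigned int align_bpp = (bpp + 7) & ~7U;	/* ALIGN(bpp, 8) */
	unsigned int bpl = (width * align_bpp) >> 3;	/* 1280 */
	unsigned int bytesperline = (bpl + 15) & ~15U;	/* ALIGN(bpl, 16) */
	unsigned int sizeimage = height * bytesperline;	/* 614400 */

	printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);
	return 0;
}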
+static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_subdev_frame_size_enum fse;
+ int found;
+
+ fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+ if (!fmtinfo) {
+ ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
+ f->fmt.pix.pixelformat);
+
+ /* Just get the first one enumerated */
+ fmtinfo = ctx->active_fmt[0];
+ f->fmt.pix.pixelformat = fmtinfo->fourcc;
+ }
+
+ f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ found = false;
+ fse.pad = 0;
+ fse.code = fmtinfo->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if ((f->fmt.pix.width == fse.max_width) &&
+ (f->fmt.pix.height == fse.max_height)) {
+ found = true;
+ break;
+ } else if ((f->fmt.pix.width >= fse.min_width) &&
+ (f->fmt.pix.width <= fse.max_width) &&
+ (f->fmt.pix.height >= fse.min_height) &&
+ (f->fmt.pix.height <= fse.max_height)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
+ f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
+ }
+
+ /*
+ * Use the current colorspace for now; it will get
+ * updated properly during s_fmt.
+ */
+ f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
+ cal_calc_format_size(ctx, fmtinfo, f);
+ return 0;
+}
+
+static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = &ctx->phy->subdev;
+ struct vb2_queue *q = &ctx->vb_vidq;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CAL_CAMERARX_PAD_SINK,
+ };
+ const struct cal_format_info *fmtinfo;
+ int ret;
+
+ if (vb2_is_busy(q)) {
+ ctx_dbg(3, ctx, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = cal_legacy_try_fmt_vid_cap(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+
+ v4l2_fill_mbus_format(&sd_fmt.format, &f->fmt.pix, fmtinfo->code);
+
+ ret = __subdev_set_format(ctx, &sd_fmt.format);
+ if (ret)
+ return ret;
+
+ /* Just double check nothing has gone wrong */
+ if (sd_fmt.format.code != fmtinfo->code) {
+ ctx_dbg(3, ctx,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &sd_fmt.format);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
+ ctx->v_fmt.fmt.pix.field = sd_fmt.format.field;
+ cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
+
+ v4l2_subdev_call(sd, pad, set_fmt, v4l2_subdev_get_active_state(sd),
+ &sd_fmt);
+
+ ctx->fmtinfo = fmtinfo;
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int cal_legacy_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret;
+
+ /* check for valid format */
+ fmtinfo = find_format_by_pix(ctx, fsize->pixel_format);
+ if (!fmtinfo) {
+ ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = fmtinfo->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ v4l2_subdev_get_active_state(sd), &fse);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int cal_legacy_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index > 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ sprintf(inp->name, "Camera %u", inp->index);
+ return 0;
+}
+
+static int cal_legacy_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int cal_legacy_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i > 0 ? -EINVAL : 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int cal_legacy_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmtinfo = find_format_by_pix(ctx, fival->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fie.code = fmtinfo->code;
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_interval,
+ v4l2_subdev_get_active_state(sd), &fie);
+
+ if (ret)
+ return ret;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static int cal_legacy_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), ctx->phy->source, a);
+}
+
+static int cal_legacy_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), ctx->phy->source, a);
+}
+
+static const struct v4l2_ioctl_ops cal_ioctl_legacy_ops = {
+ .vidioc_querycap = cal_querycap,
+ .vidioc_enum_fmt_vid_cap = cal_legacy_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cal_legacy_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cal_legacy_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = cal_legacy_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_enum_input = cal_legacy_enum_input,
+ .vidioc_g_input = cal_legacy_g_input,
+ .vidioc_s_input = cal_legacy_s_input,
+ .vidioc_enum_frameintervals = cal_legacy_enum_frameintervals,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_parm = cal_legacy_g_parm,
+ .vidioc_s_parm = cal_legacy_s_parm,
+};
+
+/* ------------------------------------------------------------------
+ * V4L2 Media Controller Centric IOCTLs
+ * ------------------------------------------------------------------
+ */
+
+static int cal_mc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int i;
+ unsigned int idx;
+
+ if (f->index >= cal_num_formats)
+ return -EINVAL;
+
+ idx = 0;
+
+ for (i = 0; i < cal_num_formats; ++i) {
+ if (f->mbus_code && cal_formats[i].code != f->mbus_code)
+ continue;
+
+ if (idx == f->index) {
+ f->pixelformat = cal_formats[i].fourcc;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return 0;
+ }
+
+ idx++;
+ }
+
+ return -EINVAL;
+}
+
+static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f,
+ const struct cal_format_info **info)
+{
+ struct v4l2_pix_format *format = &f->fmt.pix;
+ const struct cal_format_info *fmtinfo;
+ unsigned int bpp;
+
+ /*
+ * Default to the first format if the requested pixel format code isn't
+ * supported.
+ */
+ fmtinfo = cal_format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!fmtinfo)
+ fmtinfo = &cal_formats[0];
+
+ /*
+ * Clamp the size, update the pixel format. The field and colorspace are
+ * accepted as-is, except for V4L2_FIELD_ANY that is turned into
+ * V4L2_FIELD_NONE.
+ */
+ bpp = ALIGN(fmtinfo->bpp, 8);
+
+ format->width = clamp_t(unsigned int, format->width,
+ CAL_MIN_WIDTH_BYTES * 8 / bpp,
+ CAL_MAX_WIDTH_BYTES * 8 / bpp);
+ format->height = clamp_t(unsigned int, format->height,
+ CAL_MIN_HEIGHT_LINES, CAL_MAX_HEIGHT_LINES);
+ format->pixelformat = fmtinfo->fourcc;
+
+ if (format->field == V4L2_FIELD_ANY)
+ format->field = V4L2_FIELD_NONE;
+
+ /*
+ * Calculate the number of bytes per line and the image size. The
+ * hardware stores the stride as a number of 16-byte words, in a
+ * signed 15-bit value. Only 14 bits are thus usable.
+ */
+ format->bytesperline = ALIGN(clamp(format->bytesperline,
+ format->width * bpp / 8,
+ ((1U << 14) - 1) * 16), 16);
+
+ format->sizeimage = format->height * format->bytesperline;
+
+ format->colorspace = ctx->v_fmt.fmt.pix.colorspace;
+
+ if (info)
+ *info = fmtinfo;
+
+ ctx_dbg(3, ctx, "%s: %s %ux%u (bytesperline %u sizeimage %u)\n",
+ __func__, fourcc_to_str(format->pixelformat),
+ format->width, format->height,
+ format->bytesperline, format->sizeimage);
+}
+
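Note: the stride clamp above caps bytesperline at (2^14 - 1) * 16 = 262128 bytes, since the hardware counts 16-byte words in a signed 15-bit field. A worked example assuming a 1280-wide, 16 bpp frame with userspace requesting bytesperline = 0:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1280, bpp = 16, requested = 0;
	unsigned int min_bpl = width * bpp / 8;		/* 2560 */
	unsigned int max_bpl = ((1U << 14) - 1) * 16;	/* 262128 */
	unsigned int bpl;

	bpl = requested < min_bpl ? min_bpl :
	      requested > max_bpl ? max_bpl : requested;
	bpl = (bpl + 15) & ~15U;			/* ALIGN(bpl, 16) */

	printf("bytesperline=%u\n", bpl);		/* 2560 */
	return 0;
}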
+static int cal_mc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ cal_mc_try_fmt(ctx, f, NULL);
+ return 0;
+}
+
+static int cal_mc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_format_info *fmtinfo;
+
+ if (vb2_is_busy(&ctx->vb_vidq)) {
+ ctx_dbg(3, ctx, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ cal_mc_try_fmt(ctx, f, &fmtinfo);
+
+ ctx->v_fmt = *f;
+ ctx->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int cal_mc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_format_info *fmtinfo;
+ unsigned int bpp;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ fmtinfo = cal_format_by_fourcc(fsize->pixel_format);
+ if (!fmtinfo) {
+ ctx_dbg(3, ctx, "Invalid pixel format 0x%08x\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ bpp = ALIGN(fmtinfo->bpp, 8);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = CAL_MIN_WIDTH_BYTES * 8 / bpp;
+ fsize->stepwise.max_width = CAL_MAX_WIDTH_BYTES * 8 / bpp;
+ fsize->stepwise.step_width = 64 / bpp;
+ fsize->stepwise.min_height = CAL_MIN_HEIGHT_LINES;
+ fsize->stepwise.max_height = CAL_MAX_HEIGHT_LINES;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops cal_ioctl_mc_ops = {
+ .vidioc_querycap = cal_querycap,
+ .vidioc_enum_fmt_vid_cap = cal_mc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cal_mc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cal_mc_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = cal_mc_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+/* ------------------------------------------------------------------
+ * videobuf2 Common Operations
+ * ------------------------------------------------------------------
+ */
+
+static int cal_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
+
+ return 0;
+}
+
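Note: a worked example of the queue_setup arithmetic above: with one buffer already allocated and userspace asking for one more, the request is raised so the queue reaches the three-buffer minimum (matching min_buffers_needed set at queue init):

#include <stdio.h>

int main(void)
{
	unsigned int num_buffers = 1;	/* vq->num_buffers */
	unsigned int nbuffers = 1;	/* *nbuffers from the vb2 core */

	if (num_buffers + nbuffers < 3)
		nbuffers = 3 - num_buffers;

	printf("nbuffers=%u\n", nbuffers);	/* 2, for a total of 3 */
	return 0;
}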
+static int cal_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ unsigned long size;
+
+ size = ctx->v_fmt.fmt.pix.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ ctx_err(ctx,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ return 0;
+}
+
+static void cal_buffer_queue(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ unsigned long flags;
+
+ /* recheck locking */
+ spin_lock_irqsave(&ctx->dma.lock, flags);
+ list_add_tail(&buf->list, &ctx->dma.queue);
+ spin_unlock_irqrestore(&ctx->dma.lock, flags);
+}
+
+static void cal_release_buffers(struct cal_ctx *ctx,
+ enum vb2_buffer_state state)
+{
+ struct cal_buffer *buf, *tmp;
+
+ /* Release all queued buffers. */
+ spin_lock_irq(&ctx->dma.lock);
+
+ list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+
+ if (ctx->dma.pending) {
+ vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
+ ctx->dma.pending = NULL;
+ }
+
+ if (ctx->dma.active) {
+ vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+ ctx->dma.active = NULL;
+ }
+
+ spin_unlock_irq(&ctx->dma.lock);
+}
+
+/* ------------------------------------------------------------------
+ * videobuf2 Operations
+ * ------------------------------------------------------------------
+ */
+
+static int cal_video_check_format(struct cal_ctx *ctx)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct media_pad *remote_pad;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ remote_pad = media_entity_remote_pad(&ctx->pad);
+ if (!remote_pad)
+ return -ENODEV;
+
+ state = v4l2_subdev_lock_active_state(&ctx->phy->subdev);
+
+ format = v4l2_state_get_stream_format(state,
+ remote_pad->index, 0);
+ if (!format) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ctx->fmtinfo->code != format->code ||
+ ctx->v_fmt.fmt.pix.height != format->height ||
+ ctx->v_fmt.fmt.pix.width != format->width ||
+ ctx->v_fmt.fmt.pix.field != format->field) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_buffer *buf;
+ dma_addr_t addr;
+ int ret;
+
+ if (cal_mc_api) {
+ struct v4l2_subdev_route *route = NULL;
+ struct media_pad *remote_pad;
+ unsigned int i;
+ struct v4l2_subdev_state *state;
+
+ /* Find the PHY connected to this video device */
+
+ remote_pad = media_entity_remote_pad(&ctx->pad);
+ if (!remote_pad) {
+ ctx_err(ctx, "Context not connected\n");
+ ret = -ENODEV;
+ goto error_release_buffers;
+ }
+
+ ctx->phy = cal_camerarx_get_phy_from_entity(remote_pad->entity);
+
+ state = v4l2_subdev_lock_active_state(&ctx->phy->subdev);
+
+ /* Find the stream */
+
+ for (i = 0; i < state->routing.num_routes; ++i) {
+ struct v4l2_subdev_route *r =
+ &state->routing.routes[i];
+
+ if (!(r->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (r->source_pad != remote_pad->index)
+ continue;
+
+ route = r;
+
+ break;
+ }
+
+ if (!route) {
+ v4l2_subdev_unlock_state(state);
+ ctx_err(ctx, "Failed to find route\n");
+ ret = -ENODEV;
+ goto error_release_buffers;
+ }
+
+ ctx->stream = route->sink_stream;
+
+ v4l2_subdev_unlock_state(state);
+ }
+
+ ret = media_pipeline_start(ctx->vdev.entity.pads, &ctx->phy->pipe);
+ if (ret < 0) {
+ ctx_err(ctx, "Failed to start media pipeline: %d\n", ret);
+ goto error_release_buffers;
+ }
+
+ /*
+ * Verify that the currently configured format matches the output of
+ * the connected CAMERARX.
+ */
+ ret = cal_video_check_format(ctx);
+ if (ret < 0) {
+ ctx_dbg(3, ctx,
+ "Format mismatch between CAMERARX and video node\n");
+ goto error_pipeline;
+ }
+
+ ret = cal_ctx_prepare(ctx);
+ if (ret) {
+ ctx_err(ctx, "Failed to prepare context: %d\n", ret);
+ goto error_pipeline;
+ }
+
+ spin_lock_irq(&ctx->dma.lock);
+ buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
+ ctx->dma.active = buf;
+ list_del(&buf->list);
+ spin_unlock_irq(&ctx->dma.lock);
+
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+
+ ret = pm_runtime_resume_and_get(ctx->cal->dev);
+ if (ret < 0)
+ goto error_pipeline;
+
+ cal_ctx_set_dma_addr(ctx, addr);
+ cal_ctx_start(ctx);
+
+ ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1);
+ if (ret)
+ goto error_stop;
+
+ if (cal_debug >= 4)
+ cal_quickdump_regs(ctx->cal);
+
+ return 0;
+
+error_stop:
+ cal_ctx_stop(ctx);
+ pm_runtime_put_sync(ctx->cal->dev);
+ cal_ctx_unprepare(ctx);
+
+error_pipeline:
+ media_pipeline_stop(ctx->vdev.entity.pads);
+error_release_buffers:
+ cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void cal_stop_streaming(struct vb2_queue *vq)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+
+ cal_ctx_stop(ctx);
+
+ v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0);
+
+ pm_runtime_put_sync(ctx->cal->dev);
+
+ cal_ctx_unprepare(ctx);
+
+ cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
+
+ media_pipeline_stop(ctx->vdev.entity.pads);
+
+ if (cal_mc_api)
+ ctx->phy = NULL;
+}
+
+static const struct vb2_ops cal_video_qops = {
+ .queue_setup = cal_queue_setup,
+ .buf_prepare = cal_buffer_prepare,
+ .buf_queue = cal_buffer_queue,
+ .start_streaming = cal_start_streaming,
+ .stop_streaming = cal_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* ------------------------------------------------------------------
+ * V4L2 Initialization and Registration
+ * ------------------------------------------------------------------
+ */
+
+static const struct v4l2_file_operations cal_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = vb2_fop_mmap,
+};
+
+static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
+{
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ const struct cal_format_info *fmtinfo;
+ unsigned int i, j, k;
+ int ret = 0;
+ struct v4l2_subdev *sd = ctx->phy->source;
+
+ /* Enumerate subdevice formats and enable all matching local formats */
+ ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats,
+ sizeof(*ctx->active_fmt), GFP_KERNEL);
+ ctx->num_active_fmt = 0;
+
+ for (j = 0, i = 0; ; ++j) {
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret == -EINVAL)
+ break;
+
+ if (ret) {
+ ctx_err(ctx, "Error enumerating mbus codes in subdev %s: %d\n",
+ sd->name, ret);
+ return ret;
+ }
+
+ ctx_dbg(2, ctx,
+ "subdev %s: code: %04x idx: %u\n",
+ sd->name, mbus_code.code, j);
+
+ for (k = 0; k < cal_num_formats; k++) {
+ fmtinfo = &cal_formats[k];
+
+ if (mbus_code.code == fmtinfo->code) {
+ ctx->active_fmt[i] = fmtinfo;
+ ctx_dbg(2, ctx,
+ "matched fourcc: %s: code: %04x idx: %u\n",
+ fourcc_to_str(fmtinfo->fourcc),
+ fmtinfo->code, i);
+ ctx->num_active_fmt = ++i;
+ }
+ }
+ }
+
+ if (i == 0) {
+ ctx_err(ctx, "No suitable format reported by subdev %s\n",
+ sd->name);
+ return -EINVAL;
+ }
+
+ ret = __subdev_get_format(ctx, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ fmtinfo = find_format_by_code(ctx, mbus_fmt.code);
+ if (!fmtinfo) {
+ ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current format */
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
+ cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
+ ctx->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int cal_ctx_v4l2_init_mc_format(struct cal_ctx *ctx)
+{
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;
+
+ fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_2X8);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ pix_fmt->width = 640;
+ pix_fmt->height = 480;
+ pix_fmt->field = V4L2_FIELD_NONE;
+ pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ pix_fmt->pixelformat = fmtinfo->fourcc;
+
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ /* Save current format */
+ cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
+ ctx->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+int cal_ctx_v4l2_register(struct cal_ctx *ctx)
+{
+ struct video_device *vfd = &ctx->vdev;
+ int ret;
+
+ if (!cal_mc_api) {
+ struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;
+
+ ret = cal_ctx_v4l2_init_formats(ctx);
+ if (ret) {
+ ctx_err(ctx, "Failed to init formats: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_ctrl_add_handler(hdl, ctx->phy->source->ctrl_handler,
+ NULL, true);
+ if (ret < 0) {
+ ctx_err(ctx, "Failed to add source ctrl handler\n");
+ return ret;
+ }
+ } else {
+ ret = cal_ctx_v4l2_init_mc_format(ctx);
+ if (ret) {
+ ctx_err(ctx, "Failed to init format: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, cal_video_nr);
+ if (ret < 0) {
+ ctx_err(ctx, "Failed to register video device\n");
+ return ret;
+ }
+
+ if (cal_mc_api) {
+ u16 phy_idx;
+ u16 pad_idx;
+
+ /* Create links from all video nodes to all PHYs */
+
+ for (phy_idx = 0; phy_idx < ctx->cal->data->num_csi2_phy;
+ ++phy_idx) {
+ for (pad_idx = 1; pad_idx < CAL_CAMERARX_NUM_PADS;
+ ++pad_idx) {
+ /*
+ * Enable only links from video0 to PHY0 pad 1,
+ * and video1 to PHY1 pad 1.
+ */
+ bool enable = (ctx->dma_ctx == 0 &&
+ phy_idx == 0 && pad_idx == 1) ||
+ (ctx->dma_ctx == 1 &&
+ phy_idx == 1 && pad_idx == 1);
+
+ ret = media_create_pad_link(
+ &ctx->cal->phy[phy_idx]->subdev.entity,
+ pad_idx, &vfd->entity, 0,
+ enable ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret) {
+ ctx_err(ctx,
+ "Failed to create media link for context %u\n",
+ ctx->dma_ctx);
+ video_unregister_device(vfd);
+ return ret;
+ }
+ }
+ }
+ } else {
+ ret = media_create_pad_link(
+ &ctx->phy->subdev.entity, CAL_CAMERARX_PAD_FIRST_SOURCE,
+ &vfd->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ ctx_err(ctx,
+ "Failed to create media link for context %u\n",
+ ctx->dma_ctx);
+ video_unregister_device(vfd);
+ return ret;
+ }
+ }
+
+ ctx_info(ctx, "V4L2 device registered as %s\n",
+ video_device_node_name(vfd));
+
+ return 0;
+}
+
+void cal_ctx_v4l2_unregister(struct cal_ctx *ctx)
+{
+ ctx_dbg(1, ctx, "unregistering %s\n",
+ video_device_node_name(&ctx->vdev));
+
+ video_unregister_device(&ctx->vdev);
+}
+
+int cal_ctx_v4l2_init(struct cal_ctx *ctx)
+{
+ struct video_device *vfd = &ctx->vdev;
+ struct vb2_queue *q = &ctx->vb_vidq;
+ int ret;
+
+ INIT_LIST_HEAD(&ctx->dma.queue);
+ spin_lock_init(&ctx->dma.lock);
+ mutex_init(&ctx->mutex);
+ init_waitqueue_head(&ctx->dma.wait);
+
+ /* Initialize the vb2 queue. */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ q->buf_struct_size = sizeof(struct cal_buffer);
+ q->ops = &cal_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ctx->mutex;
+ q->min_buffers_needed = 3;
+ q->dev = ctx->cal->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* Initialize the video device and media entity. */
+ vfd->fops = &cal_fops;
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | (cal_mc_api ? V4L2_CAP_IO_MC : 0);
+ vfd->v4l2_dev = &ctx->cal->v4l2_dev;
+ vfd->queue = q;
+ snprintf(vfd->name, sizeof(vfd->name), "CAL output %u", ctx->dma_ctx);
+ vfd->release = video_device_release_empty;
+ vfd->ioctl_ops = cal_mc_api ? &cal_ioctl_mc_ops : &cal_ioctl_legacy_ops;
+ vfd->lock = &ctx->mutex;
+ video_set_drvdata(vfd, ctx);
+
+ ctx->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &ctx->pad);
+ if (ret < 0)
+ return ret;
+
+ if (!cal_mc_api) {
+ /* Initialize the control handler. */
+ struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_init(hdl, 11);
+ if (ret < 0) {
+ ctx_err(ctx, "Failed to init ctrl handler\n");
+ goto error;
+ }
+
+ vfd->ctrl_handler = hdl;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&vfd->entity);
+ return ret;
+}
+
+void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx)
+{
+ if (!cal_mc_api)
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+ media_entity_cleanup(&ctx->vdev.entity);
+}
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti/cal/cal.c
index 2eef245c31a1..0399ac9f57d6 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -43,6 +43,143 @@ unsigned int cal_debug;
module_param_named(debug, cal_debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
+#ifdef CONFIG_VIDEO_TI_CAL_MC
+#define CAL_MC_API_DEFAULT 1
+#else
+#define CAL_MC_API_DEFAULT 0
+#endif
+
+bool cal_mc_api = CAL_MC_API_DEFAULT;
+module_param_named(mc_api, cal_mc_api, bool, 0444);
+MODULE_PARM_DESC(mc_api, "activates the MC API");
+
+/* ------------------------------------------------------------------
+ * Format Handling
+ * ------------------------------------------------------------------
+ */
+
+const struct cal_format_info cal_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 10,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .bpp = 12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .bpp = 12,
+ },
+};
+
+const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats);
+
+const struct cal_format_info *cal_format_by_fourcc(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
+ if (cal_formats[i].fourcc == fourcc)
+ return &cal_formats[i];
+ }
+
+ return NULL;
+}
+
+const struct cal_format_info *cal_format_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
+ if (cal_formats[i].code == code)
+ return &cal_formats[i];
+ }
+
+ return NULL;
+}
+
/* ------------------------------------------------------------------
* Platform Data
* ------------------------------------------------------------------
@@ -136,12 +273,9 @@ void cal_quickdump_regs(struct cal_dev *cal)
(__force const void *)cal->base,
resource_size(cal->res), false);
- for (i = 0; i < ARRAY_SIZE(cal->phy); ++i) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
struct cal_camerarx *phy = cal->phy[i];
- if (!phy)
- continue;
-
cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i,
&phy->res->start);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
@@ -156,11 +290,42 @@ void cal_quickdump_regs(struct cal_dev *cal)
* ------------------------------------------------------------------
*/
-void cal_ctx_csi2_config(struct cal_ctx *ctx)
+#define CAL_MAX_PIX_PROC 4
+
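+/*
+ * Reserve one of the pixel processing contexts by searching the reservation
+ * bitmap for the first clear bit. Returns the context number on success, or
+ * -ENOSPC if all contexts are already in use.
+ */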
+static int cal_reserve_pix_proc(struct cal_dev *cal)
+{
+ unsigned long ret;
+
+ spin_lock(&cal->v4l2_dev.lock);
+
+ ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC);
+
+ if (ret == CAL_MAX_PIX_PROC) {
+ spin_unlock(&cal->v4l2_dev.lock);
+ return -ENOSPC;
+ }
+
+ cal->reserved_pix_proc_mask |= BIT(ret);
+
+ spin_unlock(&cal->v4l2_dev.lock);
+
+ return ret;
+}
+
+static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num)
+{
+ spin_lock(&cal->v4l2_dev.lock);
+
+ cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num);
+
+ spin_unlock(&cal->v4l2_dev.lock);
+}
+
+static void cal_ctx_csi2_config(struct cal_ctx *ctx)
{
u32 val;
- val = cal_read(ctx->cal, CAL_CSI2_CTX0(ctx->index));
+ val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx));
cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK);
/*
* DT type: MIPI CSI-2 Specs
@@ -170,22 +335,23 @@ void cal_ctx_csi2_config(struct cal_ctx *ctx)
* 0x2A: RAW8 1 pixel = 1 byte
* 0x1E: YUV422 2 pixels = 4 bytes
*/
- cal_set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
- cal_set_field(&val, 0, CAL_CSI2_CTX_VC_MASK);
+ cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK);
+ cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK);
cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK);
cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
CAL_CSI2_CTX_PACK_MODE_MASK);
- cal_write(ctx->cal, CAL_CSI2_CTX0(ctx->index), val);
- ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->index,
- cal_read(ctx->cal, CAL_CSI2_CTX0(ctx->index)));
+ cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n",
+ ctx->phy->instance, ctx->csi2_ctx,
+ cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)));
}
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
+static void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
{
u32 val, extract, pack;
- switch (ctx->fmt->bpp) {
+ switch (ctx->fmtinfo->bpp) {
case 8:
extract = CAL_PIX_PROC_EXTRACT_B8;
pack = CAL_PIX_PROC_PACK_B8;
@@ -214,82 +380,242 @@ void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
*/
dev_warn_once(ctx->cal->dev,
"%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
- __FILE__, __LINE__, __func__, ctx->fmt->bpp);
+ __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp);
extract = CAL_PIX_PROC_EXTRACT_B8;
pack = CAL_PIX_PROC_PACK_B8;
break;
}
- val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->index));
+ val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc));
cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK);
cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK);
cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK);
- cal_write(ctx->cal, CAL_PIX_PROC(ctx->index), val);
- ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->index,
- cal_read(ctx->cal, CAL_PIX_PROC(ctx->index)));
+ cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val);
+ ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc,
+ cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)));
}
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
- unsigned int height)
+static void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
{
+ unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline;
u32 val;
- val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index));
+ val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK);
- cal_set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
+ cal_set_field(&val, ctx->v_fmt.fmt.pix.height,
+ CAL_WR_DMA_CTRL_YSIZE_MASK);
cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
CAL_WR_DMA_CTRL_DTAG_MASK);
- cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
- CAL_WR_DMA_CTRL_MODE_MASK);
cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
CAL_WR_DMA_CTRL_PATTERN_MASK);
cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK);
- cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->index), val);
- ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->index,
- cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index)));
+ cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx,
+ cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)));
- /*
- * width/16 not sure but giving it a whirl.
- * zero does not work right
- */
- cal_write_field(ctx->cal,
- CAL_WR_DMA_OFST(ctx->index),
- (width / 16),
- CAL_WR_DMA_OFST_MASK);
- ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->index,
- cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->index)));
-
- val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->index));
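+ /* The line offset (OFST) register is expressed in 16-byte units. */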
+ cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx),
+ stride / 16, CAL_WR_DMA_OFST_MASK);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx,
+ cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx)));
+
+ val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx));
/* 64 bit word means no skipping */
cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
/*
- * (width*8)/64 this should be size of an entire line
- * in 64bit word but 0 means all data until the end
- * is detected automagically
+ * The XSIZE field is expressed in 64-bit units and prevents overflows
+ * in case of synchronization issues by limiting the number of bytes
+ * written per line.
*/
- cal_set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
- cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->index), val);
- ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->index,
- cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->index)));
+ cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK);
+ cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx,
+ cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)));
+}
- val = cal_read(ctx->cal, CAL_CTRL);
- cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128,
- CAL_CTRL_BURSTSIZE_MASK);
- cal_set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
- cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
- CAL_CTRL_POSTED_WRITES_MASK);
- cal_set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
- cal_set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
- cal_write(ctx->cal, CAL_CTRL, val);
- ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", cal_read(ctx->cal, CAL_CTRL));
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
+{
+ cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr);
+}
+
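+/*
+ * The CAL_WR_DMA_CTRL_j.MODE field is shadowed: a mode change programmed by
+ * the enable and disable helpers below takes effect at a frame boundary.
+ */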
+static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx)
+{
+ u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
+
+ cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+ CAL_WR_DMA_CTRL_MODE_MASK);
+ cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
+}
+
+static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
+{
+ u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
+
+ cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS,
+ CAL_WR_DMA_CTRL_MODE_MASK);
+ cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
+}
+
+static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
+{
+ bool stopped;
+
+ spin_lock_irq(&ctx->dma.lock);
+ stopped = ctx->dma.state == CAL_DMA_STOPPED;
+ spin_unlock_irq(&ctx->dma.lock);
+
+ return stopped;
}
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+static int
+cal_get_remote_frame_desc_entry(struct cal_camerarx *phy, u32 stream,
+ struct v4l2_mbus_frame_desc_entry *entry)
{
- cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->index), dmaaddr);
+ struct v4l2_mbus_frame_desc fd;
+ unsigned int i;
+ int ret;
+
+ ret = cal_camerarx_get_remote_frame_desc(phy, &fd);
+ if (ret) {
+ if (ret != -ENOIOCTLCMD)
+ dev_err(phy->cal->dev,
+ "Failed to get remote frame desc: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < fd.num_entries; i++) {
+ if (stream == fd.entry[i].stream) {
+ *entry = fd.entry[i];
+ return 0;
+ }
+ }
+
+ dev_err(phy->cal->dev,
+ "Failed to find stream %u from remote frame descriptor\n",
+ stream);
+
+ return -ENODEV;
+}
+
+int cal_ctx_prepare(struct cal_ctx *ctx)
+{
+ struct v4l2_mbus_frame_desc_entry entry;
+ int ret;
+
+ ret = cal_get_remote_frame_desc_entry(ctx->phy, ctx->stream, &entry);
+
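+ /*
+ * If the source does not implement frame descriptors, fall back to
+ * virtual channel 0 and accept any data type.
+ */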
+ if (ret == -ENOIOCTLCMD) {
+ ctx->vc = 0;
+ ctx->datatype = CAL_CSI2_CTX_DT_ANY;
+ } else if (!ret) {
+ ctx_dbg(2, ctx, "Framedesc: stream %u, len %u, vc %u, dt %#x\n",
+ entry.stream, entry.length, entry.bus.csi2.vc,
+ entry.bus.csi2.dt);
+
+ ctx->vc = entry.bus.csi2.vc;
+ ctx->datatype = entry.bus.csi2.dt;
+ } else {
+ return ret;
+ }
+
+ ctx->use_pix_proc = !ctx->fmtinfo->meta;
+
+ if (ctx->use_pix_proc) {
+ ret = cal_reserve_pix_proc(ctx->cal);
+ if (ret < 0) {
+ ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret);
+ return ret;
+ }
+
+ ctx->pix_proc = ret;
+ }
+
+ return 0;
+}
+
+void cal_ctx_unprepare(struct cal_ctx *ctx)
+{
+ if (ctx->use_pix_proc)
+ cal_release_pix_proc(ctx->cal, ctx->pix_proc);
+}
+
+void cal_ctx_start(struct cal_ctx *ctx)
+{
+ struct cal_camerarx *phy = ctx->phy;
+
+ /*
+ * Reset the frame number & sequence number, but only if the
+ * virtual channel is not already in use.
+ */
+
+ spin_lock(&phy->vc_lock);
+
+ if (phy->vc_enable_count[ctx->vc]++ == 0) {
+ phy->vc_frame_number[ctx->vc] = 0;
+ phy->vc_sequence[ctx->vc] = 0;
+ }
+
+ spin_unlock(&phy->vc_lock);
+
+ ctx->dma.state = CAL_DMA_RUNNING;
+
+ /* Configure the CSI-2, pixel processing and write DMA contexts. */
+ cal_ctx_csi2_config(ctx);
+ if (ctx->use_pix_proc)
+ cal_ctx_pix_proc_config(ctx);
+ cal_ctx_wr_dma_config(ctx);
+
+ /* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
+ CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
+ CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));
+
+ cal_ctx_wr_dma_enable(ctx);
+}
+
+void cal_ctx_stop(struct cal_ctx *ctx)
+{
+ struct cal_camerarx *phy = ctx->phy;
+ long timeout;
+
+ WARN_ON(phy->vc_enable_count[ctx->vc] == 0);
+
+ spin_lock(&phy->vc_lock);
+ phy->vc_enable_count[ctx->vc]--;
+ spin_unlock(&phy->vc_lock);
+
+ /*
+ * Request DMA stop and wait until it completes. If completion times
+ * out, forcefully disable the DMA.
+ */
+ spin_lock_irq(&ctx->dma.lock);
+ ctx->dma.state = CAL_DMA_STOP_REQUESTED;
+ spin_unlock_irq(&ctx->dma.lock);
+
+ timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
+ msecs_to_jiffies(500));
+ if (!timeout) {
+ ctx_err(ctx, "failed to disable dma cleanly\n");
+ cal_ctx_wr_dma_disable(ctx);
+ }
+
+ /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1),
+ CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
+ cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2),
+ CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));
+
+ ctx->dma.state = CAL_DMA_STOPPED;
+
+ /* Disable CSI2 context */
+ cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0);
+
+ /* Disable pix proc */
+ if (ctx->use_pix_proc)
+ cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0);
}
/* ------------------------------------------------------------------
@@ -297,48 +623,146 @@ void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
* ------------------------------------------------------------------
*/
-static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+/*
+ * Track a sequence number for each virtual channel, which is shared by
+ * all contexts using the same virtual channel. This is done using the
+ * CSI-2 frame number as a base.
+ */
+static void cal_update_seq_number(struct cal_ctx *ctx)
{
- struct cal_dmaqueue *dma_q = &ctx->vidq;
- struct cal_buffer *buf;
- unsigned long addr;
+ struct cal_dev *cal = ctx->cal;
+ struct cal_camerarx *phy = ctx->phy;
+ u32 prev_frame_num, frame_num;
+ u8 vc = ctx->vc;
+
+ frame_num = cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx));
+ frame_num &= 0xffff;
- buf = list_entry(dma_q->active.next, struct cal_buffer, list);
- ctx->next_frm = buf;
- list_del(&buf->list);
+ if (phy->vc_frame_number[vc] != frame_num) {
+ prev_frame_num = phy->vc_frame_number[vc];
- addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
- cal_ctx_wr_dma_addr(ctx, addr);
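+ /* The 16-bit frame number wrapped; restart the delta from zero. */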
+ if (prev_frame_num > frame_num)
+ prev_frame_num = 0;
+
+ phy->vc_sequence[vc] += frame_num - prev_frame_num;
+ phy->vc_frame_number[vc] = frame_num;
+ }
}
-static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
- ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
- ctx->cur_frm->vb.field = ctx->m_fmt.field;
- ctx->cur_frm->vb.sequence = ctx->sequence++;
+ spin_lock(&ctx->dma.lock);
- vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
- ctx->cur_frm = ctx->next_frm;
+ if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) {
+ /*
+ * If a stop is requested, disable the write DMA context
+ * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed,
+ * the current frame will complete and the DMA will then stop.
+ */
+ cal_ctx_wr_dma_disable(ctx);
+ ctx->dma.state = CAL_DMA_STOP_PENDING;
+ } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
+ /*
+ * Otherwise, if a new buffer is available, queue it to the
+ * hardware.
+ */
+ struct cal_buffer *buf;
+ dma_addr_t addr;
+
+ buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
+ list);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ cal_ctx_set_dma_addr(ctx, addr);
+
+ ctx->dma.pending = buf;
+ list_del(&buf->list);
+ }
+
+ spin_unlock(&ctx->dma.lock);
+
+ cal_update_seq_number(ctx);
+}
+
+static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
+{
+ struct cal_buffer *buf = NULL;
+
+ spin_lock(&ctx->dma.lock);
+
+ /* If the DMA context was stopping, it is now stopped. */
+ if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
+ ctx->dma.state = CAL_DMA_STOPPED;
+ wake_up(&ctx->dma.wait);
+ }
+
+ /* If a new buffer was queued, complete the current buffer. */
+ if (ctx->dma.pending) {
+ buf = ctx->dma.active;
+ ctx->dma.active = ctx->dma.pending;
+ ctx->dma.pending = NULL;
+ }
+
+ spin_unlock(&ctx->dma.lock);
+
+ if (buf) {
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.field = ctx->v_fmt.fmt.pix.field;
+ buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc];
+
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+}
+
+static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end)
+{
+ /*
+ * CAL HW interrupts are inherently racy. If we get both start and end
+ * interrupts, we don't know what has happened: did the DMA for a single
+ * frame start and end, or did one frame end and a new frame start?
+ *
+ * Usually for normal pixel frames we get the interrupts separately. If
+ * we do get both, we have to guess. The assumption in the code below is
+ * that the active vertical area is larger than the blanking vertical
+ * area, and thus it is more likely that we get the end of the old frame
+ * and the start of a new frame.
+ *
+ * However, for embedded data, which is only a few lines high, we always
+ * get both interrupts. Here the assumption is that we get both for the
+ * same frame.
+ */
+ if (ctx->v_fmt.fmt.pix.height < 10) {
+ if (start)
+ cal_irq_wdma_start(ctx);
+
+ if (end)
+ cal_irq_wdma_end(ctx);
+ } else {
+ if (end)
+ cal_irq_wdma_end(ctx);
+
+ if (start)
+ cal_irq_wdma_start(ctx);
+ }
}
static irqreturn_t cal_irq(int irq_cal, void *data)
{
struct cal_dev *cal = data;
- struct cal_ctx *ctx;
- struct cal_dmaqueue *dma_q;
- u32 status;
-
- status = cal_read(cal, CAL_HL_IRQSTATUS(0));
- if (status) {
- unsigned int i;
+ u32 status[3];
+ unsigned int i;
- cal_write(cal, CAL_HL_IRQSTATUS(0), status);
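+ /* Read and clear all three IRQ status banks before handling them. */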
+ for (i = 0; i < 3; ++i) {
+ status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i));
+ if (status[i])
+ cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]);
+ }
- if (status & CAL_HL_IRQ_OCPO_ERR_MASK)
+ if (status[0]) {
+ if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK)
dev_err_ratelimited(cal->dev, "OCPO ERROR\n");
- for (i = 0; i < CAL_NUM_CSI2_PORTS; ++i) {
- if (status & CAL_HL_IRQ_CIO_MASK(i)) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
+ if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) {
u32 cio_stat = cal_read(cal,
CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
@@ -348,53 +772,25 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i),
cio_stat);
}
- }
- }
-
- /* Check which DMA just finished */
- status = cal_read(cal, CAL_HL_IRQSTATUS(1));
- if (status) {
- unsigned int i;
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(1), status);
+ if (status[0] & CAL_HL_IRQ_VC_MASK(i)) {
+ u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i));
- for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
- if (status & CAL_HL_IRQ_MASK(i)) {
- ctx = cal->ctx[i];
-
- spin_lock(&ctx->slock);
- ctx->dma_act = false;
-
- if (ctx->cur_frm != ctx->next_frm)
- cal_process_buffer_complete(ctx);
+ dev_err_ratelimited(cal->dev,
+ "CIO%u VC error: %#08x\n",
+ i, vc_stat);
- spin_unlock(&ctx->slock);
+ cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat);
}
}
}
- /* Check which DMA just started */
- status = cal_read(cal, CAL_HL_IRQSTATUS(2));
- if (status) {
- unsigned int i;
+ for (i = 0; i < cal->num_contexts; ++i) {
+ bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i));
+ bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i));
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(2), status);
-
- for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
- if (status & CAL_HL_IRQ_MASK(i)) {
- ctx = cal->ctx[i];
- dma_q = &ctx->vidq;
-
- spin_lock(&ctx->slock);
- ctx->dma_act = true;
- if (!list_empty(&dma_q->active) &&
- ctx->cur_frm == ctx->next_frm)
- cal_schedule_next_buffer(ctx);
- spin_unlock(&ctx->slock);
- }
- }
+ if (start || end)
+ cal_irq_handle_wdma(cal->ctx[i], start, end);
}
return IRQ_HANDLED;
@@ -421,15 +817,36 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct cal_camerarx *phy = to_cal_asd(asd)->phy;
+ int pad;
+ int ret;
- if (phy->sensor) {
+ if (phy->source) {
phy_info(phy, "Rejecting subdev %s (Already set!!)",
subdev->name);
return 0;
}
- phy->sensor = subdev;
- phy_dbg(1, phy, "Using sensor %s for capture\n", subdev->name);
+ phy->source = subdev;
+ phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity,
+ of_fwnode_handle(phy->source_ep_node),
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ phy_err(phy, "Source %s has no connected source pad\n",
+ subdev->name);
+ return pad;
+ }
+
+ ret = media_create_pad_link(&subdev->entity, pad,
+ &phy->subdev.entity, CAL_CAMERARX_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ phy_err(phy, "Failed to create media link for source %s\n",
+ subdev->name);
+ return ret;
+ }
return 0;
}
@@ -438,13 +855,32 @@ static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier);
unsigned int i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
- if (cal->ctx[i])
- cal_ctx_v4l2_register(cal->ctx[i]);
+ for (i = 0; i < cal->num_contexts; ++i) {
+ ret = cal_ctx_v4l2_register(cal->ctx[i]);
+ if (ret)
+ goto err_ctx_unreg;
}
+ if (!cal_mc_api)
+ return 0;
+
+ ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev);
+ if (ret)
+ goto err_ctx_unreg;
+
return 0;
+
+err_ctx_unreg:
+ for (; i > 0; --i) {
+ if (!cal->ctx[i - 1])
+ continue;
+
+ cal_ctx_v4l2_unregister(cal->ctx[i - 1]);
+ }
+
+ return ret;
}
static const struct v4l2_async_notifier_operations cal_async_notifier_ops = {
@@ -460,16 +896,16 @@ static int cal_async_notifier_register(struct cal_dev *cal)
v4l2_async_notifier_init(&cal->notifier);
cal->notifier.ops = &cal_async_notifier_ops;
- for (i = 0; i < ARRAY_SIZE(cal->phy); ++i) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
struct cal_camerarx *phy = cal->phy[i];
struct cal_v4l2_async_subdev *casd;
struct v4l2_async_subdev *asd;
struct fwnode_handle *fwnode;
- if (!phy || !phy->sensor_node)
+ if (!phy->source_node)
continue;
- fwnode = of_fwnode_handle(phy->sensor_node);
+ fwnode = of_fwnode_handle(phy->source_node);
asd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
fwnode,
sizeof(*casd));
@@ -543,10 +979,8 @@ static void cal_media_unregister(struct cal_dev *cal)
unsigned int i;
/* Unregister all the V4L2 video devices. */
- for (i = 0; i < ARRAY_SIZE(cal->ctx); i++) {
- if (cal->ctx[i])
- cal_ctx_v4l2_unregister(cal->ctx[i]);
- }
+ for (i = 0; i < cal->num_contexts; i++)
+ cal_ctx_v4l2_unregister(cal->ctx[i]);
cal_async_notifier_unregister(cal);
media_device_unregister(&cal->mdev);
@@ -591,13 +1025,6 @@ static int cal_media_init(struct cal_dev *cal)
*/
static void cal_media_cleanup(struct cal_dev *cal)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(cal->ctx); i++) {
- if (cal->ctx[i])
- cal_ctx_v4l2_cleanup(cal->ctx[i]);
- }
-
v4l2_device_unregister(&cal->v4l2_dev);
media_device_cleanup(&cal->mdev);
@@ -614,14 +1041,15 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
struct cal_ctx *ctx;
int ret;
- ctx = devm_kzalloc(cal->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
ctx->cal = cal;
- ctx->phy = cal->phy[inst];
- ctx->index = inst;
+ ctx->dma_ctx = inst;
+ ctx->csi2_ctx = inst;
ctx->cport = inst;
+ ctx->stream = 0;
ret = cal_ctx_v4l2_init(ctx);
if (ret)
@@ -630,6 +1058,13 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
return ctx;
}
+static void cal_ctx_destroy(struct cal_ctx *ctx)
+{
+ cal_ctx_v4l2_cleanup(ctx);
+
+ kfree(ctx);
+}
+
static const struct of_device_id cal_of_match[] = {
{
.compatible = "ti,dra72-cal",
@@ -742,7 +1177,6 @@ static int cal_init_camerarx_regmap(struct cal_dev *cal)
static int cal_probe(struct platform_device *pdev)
{
struct cal_dev *cal;
- struct cal_ctx *ctx;
bool connected = false;
unsigned int i;
int ret;
@@ -790,13 +1224,18 @@ static int cal_probe(struct platform_device *pdev)
/* Read the revision and hardware info to verify hardware access. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
goto error_pm_runtime;
cal_get_hwinfo(cal);
pm_runtime_put_sync(&pdev->dev);
+ /* Initialize the media device. */
+ ret = cal_media_init(cal);
+ if (ret < 0)
+ goto error_pm_runtime;
+
/* Create CAMERARX PHYs. */
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
cal->phy[i] = cal_camerarx_create(cal, i);
@@ -806,7 +1245,7 @@ static int cal_probe(struct platform_device *pdev)
goto error_camerarx;
}
- if (cal->phy[i]->sensor_node)
+ if (cal->phy[i]->source_node)
connected = true;
}
@@ -816,21 +1255,33 @@ static int cal_probe(struct platform_device *pdev)
goto error_camerarx;
}
- /* Initialize the media device. */
- ret = cal_media_init(cal);
- if (ret < 0)
- goto error_camerarx;
-
/* Create contexts. */
- for (i = 0; i < cal->data->num_csi2_phy; ++i) {
- if (!cal->phy[i]->sensor_node)
- continue;
+ if (!cal_mc_api) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
+ if (!cal->phy[i]->source_node)
+ continue;
+
+ cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i);
+ if (!cal->ctx[cal->num_contexts]) {
+ cal_err(cal, "Failed to create context %u\n", cal->num_contexts);
+ ret = -ENODEV;
+ goto error_context;
+ }
- cal->ctx[i] = cal_ctx_create(cal, i);
- if (!cal->ctx[i]) {
- cal_err(cal, "Failed to create context %u\n", i);
- ret = -ENODEV;
- goto error_context;
+ cal->ctx[cal->num_contexts]->phy = cal->phy[i];
+
+ cal->num_contexts++;
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
+ cal->ctx[i] = cal_ctx_create(cal, i);
+ if (!cal->ctx[i]) {
+ cal_err(cal, "Failed to create context %u\n", i);
+ ret = -ENODEV;
+ goto error_context;
+ }
+
+ cal->num_contexts++;
}
}
@@ -842,18 +1293,15 @@ static int cal_probe(struct platform_device *pdev)
return 0;
error_context:
- for (i = 0; i < ARRAY_SIZE(cal->ctx); i++) {
- ctx = cal->ctx[i];
- if (ctx)
- cal_ctx_v4l2_cleanup(ctx);
- }
-
- cal_media_cleanup(cal);
+ for (i = 0; i < cal->num_contexts; i++)
+ cal_ctx_destroy(cal->ctx[i]);
error_camerarx:
- for (i = 0; i < ARRAY_SIZE(cal->phy); i++)
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_destroy(cal->phy[i]);
+ cal_media_cleanup(cal);
+
error_pm_runtime:
pm_runtime_disable(&pdev->dev);
@@ -864,24 +1312,27 @@ static int cal_remove(struct platform_device *pdev)
{
struct cal_dev *cal = platform_get_drvdata(pdev);
unsigned int i;
+ int ret;
cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME);
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
cal_media_unregister(cal);
- for (i = 0; i < ARRAY_SIZE(cal->phy); i++) {
- if (cal->phy[i])
- cal_camerarx_disable(cal->phy[i]);
- }
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
+ cal_camerarx_disable(cal->phy[i]);
- cal_media_cleanup(cal);
+ for (i = 0; i < cal->num_contexts; i++)
+ cal_ctx_destroy(cal->ctx[i]);
- for (i = 0; i < ARRAY_SIZE(cal->phy); i++)
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_destroy(cal->phy[i]);
- pm_runtime_put_sync(&pdev->dev);
+ cal_media_cleanup(cal);
+
+ if (ret >= 0)
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -890,16 +1341,35 @@ static int cal_remove(struct platform_device *pdev)
static int cal_runtime_resume(struct device *dev)
{
struct cal_dev *cal = dev_get_drvdata(dev);
+ unsigned int i;
+ u32 val;
if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
/*
* Apply errata on both port everytime we (re-)enable
* the clock
*/
- cal_camerarx_i913_errata(cal->phy[0]);
- cal_camerarx_i913_errata(cal->phy[1]);
+ for (i = 0; i < cal->data->num_csi2_phy; i++)
+ cal_camerarx_i913_errata(cal->phy[i]);
}
+ /*
+ * Enable global interrupts that are not related to a particular
+ * CAMERARAX or context.
+ */
+ cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
+
+ val = cal_read(cal, CAL_CTRL);
+ cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128,
+ CAL_CTRL_BURSTSIZE_MASK);
+ cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK);
+ cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+ CAL_CTRL_POSTED_WRITES_MASK);
+ cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK);
+ cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK);
+ cal_write(cal, CAL_CTRL, val);
+ cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL));
+
return 0;
}
diff --git a/drivers/media/platform/ti-vpe/cal.h b/drivers/media/platform/ti/cal/cal.h
index 4123405ee0cf..444f0de591ac 100644
--- a/drivers/media/platform/ti-vpe/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/media-device.h>
#include <media/v4l2-async.h>
@@ -24,21 +25,47 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#define CAL_MODULE_NAME "cal"
-#define CAL_NUM_CONTEXT 2
+#define CAL_MAX_NUM_CONTEXT 8
#define CAL_NUM_CSI2_PORTS 2
-#define MAX_WIDTH_BYTES (8192 * 8)
-#define MAX_HEIGHT_LINES 16383
+/*
+ * The width is limited by the size of the CAL_WR_DMA_XSIZE_j.XSIZE field,
+ * expressed in multiples of 64 bits. The height is limited by the size of the
+ * CAL_CSI2_CTXi_j.CTXi_LINES and CAL_WR_DMA_CTRL_j.YSIZE fields, expressed in
+ * lines.
+ */
+#define CAL_MIN_WIDTH_BYTES 16
+#define CAL_MAX_WIDTH_BYTES (8192 * 8)
+#define CAL_MIN_HEIGHT_LINES 1
+#define CAL_MAX_HEIGHT_LINES 16383
+
+#define CAL_CAMERARX_PAD_SINK 0
+#define CAL_CAMERARX_PAD_FIRST_SOURCE 1
+#define CAL_CAMERARX_NUM_SOURCE_PADS 8
+#define CAL_CAMERARX_NUM_PADS (1 + CAL_CAMERARX_NUM_SOURCE_PADS)
+
+static inline bool cal_rx_pad_is_sink(u32 pad)
+{
+ /* Camera RX has 1 sink pad, and N source pads */
+ return pad == 0;
+}
+
+static inline bool cal_rx_pad_is_source(u32 pad)
+{
+ /* Camera RX has 1 sink pad, and N source pads */
+ return pad >= CAL_CAMERARX_PAD_FIRST_SOURCE &&
+ pad <= CAL_CAMERARX_NUM_SOURCE_PADS;
+}
struct device;
struct device_node;
struct resource;
struct regmap;
struct regmap_field;
-struct v4l2_subdev;
/* CTRL_CORE_CAMERRX_CONTROL register field id */
enum cal_camerarx_field {
@@ -49,11 +76,19 @@ enum cal_camerarx_field {
F_MAX_FIELDS,
};
-struct cal_fmt {
+enum cal_dma_state {
+ CAL_DMA_RUNNING,
+ CAL_DMA_STOP_REQUESTED,
+ CAL_DMA_STOP_PENDING,
+ CAL_DMA_STOPPED,
+};
+
+struct cal_format_info {
u32 fourcc;
u32 code;
/* Bits per pixel */
u8 bpp;
+ bool meta;
};
/* buffer for one video frame */
@@ -63,8 +98,37 @@ struct cal_buffer {
struct list_head list;
};
+/**
+ * struct cal_dmaqueue - Queue of DMA buffers
+ */
struct cal_dmaqueue {
- struct list_head active;
+ /**
+ * @lock: Protects all fields in the cal_dmaqueue.
+ */
+ spinlock_t lock;
+
+ /**
+ * @queue: Buffers queued to the driver and waiting for DMA processing.
+ * Buffers are added to the list by the vb2 .buffer_queue() operation,
+ * and move to @pending when they are scheduled for the next frame.
+ */
+ struct list_head queue;
+ /**
+ * @pending: Buffer provided to the hardware to DMA the next frame.
+ * Will move to @active at the end of the current frame.
+ */
+ struct cal_buffer *pending;
+ /**
+ * @active: Buffer being DMA'ed to for the current frame. Will be
+ * retired and given back to vb2 at the end of the current frame if
+ * a @pending buffer has been scheduled to replace it.
+ */
+ struct cal_buffer *active;
+
+ /** @state: State of the DMA engine. */
+ enum cal_dma_state state;
+ /** @wait: Wait queue to signal a @state transition to CAL_DMA_STOPPED. */
+ struct wait_queue_head wait;
};
struct cal_camerarx_data {
@@ -101,15 +165,35 @@ struct cal_data {
struct cal_camerarx {
void __iomem *base;
struct resource *res;
- struct device *dev;
struct regmap_field *fields[F_MAX_FIELDS];
struct cal_dev *cal;
unsigned int instance;
struct v4l2_fwnode_endpoint endpoint;
- struct device_node *sensor_node;
- struct v4l2_subdev *sensor;
+ struct device_node *source_ep_node;
+ struct device_node *source_node;
+ struct v4l2_subdev *source;
+ struct media_pipeline pipe;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CAL_CAMERARX_NUM_PADS];
+
+ /* protects the vc_* fields below */
+ spinlock_t vc_lock;
+ u8 vc_enable_count[4];
+ u8 vc_frame_number[4];
+ u32 vc_sequence[4];
+
+ /*
+ * Lock for camerarx ops. Protects:
+ * - routing
+ * - stream_configs
+ * - enable_count
+ */
+ struct mutex mutex;
+
+ unsigned int enable_count;
};
struct cal_dev {
@@ -129,11 +213,14 @@ struct cal_dev {
/* Camera Core Module handle */
struct cal_camerarx *phy[CAL_NUM_CSI2_PORTS];
- struct cal_ctx *ctx[CAL_NUM_CONTEXT];
+ u32 num_contexts;
+ struct cal_ctx *ctx[CAL_MAX_NUM_CONTEXT];
struct media_device mdev;
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
+
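+ /* Bitmap of reserved pixel processing contexts. */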
+ unsigned long reserved_pix_proc_mask;
};
/*
@@ -149,37 +236,33 @@ struct cal_ctx {
/* v4l2_ioctl mutex */
struct mutex mutex;
- /* v4l2 buffers lock */
- spinlock_t slock;
- struct cal_dmaqueue vidq;
+ struct cal_dmaqueue dma;
/* video capture */
- const struct cal_fmt *fmt;
+ const struct cal_format_info *fmtinfo;
/* Used to store current pixel format */
- struct v4l2_format v_fmt;
- /* Used to store current mbus frame format */
- struct v4l2_mbus_framefmt m_fmt;
+ struct v4l2_format v_fmt;
- /* Current subdev enumerated format */
- const struct cal_fmt **active_fmt;
+ /* Current subdev enumerated format (legacy) */
+ const struct cal_format_info **active_fmt;
unsigned int num_active_fmt;
- unsigned int sequence;
struct vb2_queue vb_vidq;
- unsigned int index;
- unsigned int cport;
-
- /* Pointer pointing to current v4l2_buffer */
- struct cal_buffer *cur_frm;
- /* Pointer pointing to next v4l2_buffer */
- struct cal_buffer *next_frm;
-
- bool dma_act;
+ u8 dma_ctx;
+ u8 cport;
+ u8 csi2_ctx;
+ u8 pix_proc;
+ u8 vc;
+ u8 datatype;
+ u32 stream;
+
+ bool use_pix_proc;
};
extern unsigned int cal_debug;
extern int cal_video_nr;
+extern bool cal_mc_api;
#define cal_dbg(level, cal, fmt, arg...) \
do { \
@@ -192,11 +275,11 @@ extern int cal_video_nr;
dev_err((cal)->dev, fmt, ##arg)
#define ctx_dbg(level, ctx, fmt, arg...) \
- cal_dbg(level, (ctx)->cal, "ctx%u: " fmt, (ctx)->index, ##arg)
+ cal_dbg(level, (ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg)
#define ctx_info(ctx, fmt, arg...) \
- cal_info((ctx)->cal, "ctx%u: " fmt, (ctx)->index, ##arg)
+ cal_info((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg)
#define ctx_err(ctx, fmt, arg...) \
- cal_err((ctx)->cal, "ctx%u: " fmt, (ctx)->index, ##arg)
+ cal_err((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg)
#define phy_dbg(level, phy, fmt, arg...) \
cal_dbg(level, (phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg)
@@ -215,7 +298,7 @@ static inline void cal_write(struct cal_dev *cal, u32 offset, u32 val)
iowrite32(val, cal->base + offset);
}
-static inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask)
+static __always_inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask)
{
return FIELD_GET(mask, cal_read(cal, offset));
}
@@ -239,25 +322,27 @@ static inline void cal_set_field(u32 *valp, u32 field, u32 mask)
*valp = val;
}
+extern const struct cal_format_info cal_formats[];
+extern const unsigned int cal_num_formats;
+const struct cal_format_info *cal_format_by_fourcc(u32 fourcc);
+const struct cal_format_info *cal_format_by_code(u32 code);
+
void cal_quickdump_regs(struct cal_dev *cal);
+int cal_camerarx_get_remote_frame_desc(struct cal_camerarx *phy,
+ struct v4l2_mbus_frame_desc *desc);
+struct cal_camerarx *cal_camerarx_get_phy_from_entity(struct media_entity *entity);
void cal_camerarx_disable(struct cal_camerarx *phy);
-int cal_camerarx_start(struct cal_camerarx *phy, const struct cal_fmt *fmt);
-void cal_camerarx_stop(struct cal_camerarx *phy);
-void cal_camerarx_enable_irqs(struct cal_camerarx *phy);
-void cal_camerarx_disable_irqs(struct cal_camerarx *phy);
-void cal_camerarx_ppi_enable(struct cal_camerarx *phy);
-void cal_camerarx_ppi_disable(struct cal_camerarx *phy);
void cal_camerarx_i913_errata(struct cal_camerarx *phy);
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
unsigned int instance);
void cal_camerarx_destroy(struct cal_camerarx *phy);
-void cal_ctx_csi2_config(struct cal_ctx *ctx);
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx);
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx, unsigned int width,
- unsigned int height);
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr);
+int cal_ctx_prepare(struct cal_ctx *ctx);
+void cal_ctx_unprepare(struct cal_ctx *ctx);
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr);
+void cal_ctx_start(struct cal_ctx *ctx);
+void cal_ctx_stop(struct cal_ctx *ctx);
int cal_ctx_v4l2_register(struct cal_ctx *ctx);
void cal_ctx_v4l2_unregister(struct cal_ctx *ctx);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti/cal/cal_regs.h
index f752096dcf7f..40e4f972fcb7 100644
--- a/drivers/media/platform/ti-vpe/cal_regs.h
+++ b/drivers/media/platform/ti/cal/cal_regs.h
@@ -72,22 +72,8 @@
#define CAL_CSI2_TIMING(m) (0x314U + (m) * 0x80U)
#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + (m) * 0x80U)
#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + (m) * 0x80U)
-#define CAL_CSI2_CTX0(m) (0x330U + (m) * 0x80U)
-#define CAL_CSI2_CTX1(m) (0x334U + (m) * 0x80U)
-#define CAL_CSI2_CTX2(m) (0x338U + (m) * 0x80U)
-#define CAL_CSI2_CTX3(m) (0x33cU + (m) * 0x80U)
-#define CAL_CSI2_CTX4(m) (0x340U + (m) * 0x80U)
-#define CAL_CSI2_CTX5(m) (0x344U + (m) * 0x80U)
-#define CAL_CSI2_CTX6(m) (0x348U + (m) * 0x80U)
-#define CAL_CSI2_CTX7(m) (0x34cU + (m) * 0x80U)
-#define CAL_CSI2_STATUS0(m) (0x350U + (m) * 0x80U)
-#define CAL_CSI2_STATUS1(m) (0x354U + (m) * 0x80U)
-#define CAL_CSI2_STATUS2(m) (0x358U + (m) * 0x80U)
-#define CAL_CSI2_STATUS3(m) (0x35cU + (m) * 0x80U)
-#define CAL_CSI2_STATUS4(m) (0x360U + (m) * 0x80U)
-#define CAL_CSI2_STATUS5(m) (0x364U + (m) * 0x80U)
-#define CAL_CSI2_STATUS6(m) (0x368U + (m) * 0x80U)
-#define CAL_CSI2_STATUS7(m) (0x36cU + (m) * 0x80U)
+#define CAL_CSI2_CTX(phy, csi2_ctx) (0x330U + (phy) * 0x80U + (csi2_ctx) * 4)
+#define CAL_CSI2_STATUS(phy, csi2_ctx) (0x350U + (phy) * 0x80U + (csi2_ctx) * 4)
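+/* e.g. CAL_CSI2_CTX(1, 2) = 0x330 + 1 * 0x80 + 2 * 4 = 0x3b8 */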
/* CAL CSI2 PHY register offsets */
#define CAL_CSI2_PHY_REG0 0x000
@@ -139,7 +125,8 @@
#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0
#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0
-#define CAL_HL_IRQ_MASK(m) BIT(m)
+#define CAL_HL_IRQ_WDMA_END_MASK(m) BIT(m)
+#define CAL_HL_IRQ_WDMA_START_MASK(m) BIT(m)
#define CAL_HL_IRQ_OCPO_ERR_MASK BIT(6)
@@ -419,32 +406,16 @@
#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT(14)
#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT(15)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT(0)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT(1)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT(2)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT(3)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT(4)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT(5)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT(8)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT(9)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT(10)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT(11)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK BIT(12)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT(13)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT(16)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT(17)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT(18)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT(19)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT(20)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT(21)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT(24)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT(25)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT(26)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT(27)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT(28)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT(29)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_MASK(n) BIT(0 + ((n) * 8))
+#define CAL_CSI2_VC_IRQ_FE_IRQ_MASK(n) BIT(1 + ((n) * 8))
+#define CAL_CSI2_VC_IRQ_LS_IRQ_MASK(n) BIT(2 + ((n) * 8))
+#define CAL_CSI2_VC_IRQ_LE_IRQ_MASK(n) BIT(3 + ((n) * 8))
+#define CAL_CSI2_VC_IRQ_CS_IRQ_MASK(n) BIT(4 + ((n) * 8))
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(n) BIT(5 + ((n) * 8))
#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0)
+#define CAL_CSI2_CTX_DT_DISABLED 0
+#define CAL_CSI2_CTX_DT_ANY 1
#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6)
#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8)
#define CAL_CSI2_CTX_ATT_MASK BIT(13)
diff --git a/drivers/media/platform/ti/j721e-csi2rx/Makefile b/drivers/media/platform/ti/j721e-csi2rx/Makefile
new file mode 100644
index 000000000000..377afc1d6280
--- /dev/null
+++ b/drivers/media/platform/ti/j721e-csi2rx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_J721E_CSI2RX) += j721e-csi2rx.o
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
new file mode 100644
index 000000000000..092805df1e3f
--- /dev/null
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI CSI2 RX driver.
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author: Pratyush Yadav <p.yadav@ti.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define TI_CSI2RX_MODULE_NAME "j721e-csi2rx"
+
+#define SHIM_CNTL 0x10
+#define SHIM_CNTL_PIX_RST BIT(0)
+
+#define SHIM_DMACNTX(i) (0x20 + ((i) * 0x20))
+#define SHIM_DMACNTX_EN BIT(31)
+#define SHIM_DMACNTX_YUV422 GENMASK(27, 26)
+#define SHIM_DMACNTX_SIZE GENMASK(21, 20)
+#define SHIM_DMACNTX_VC GENMASK(9, 6)
+#define SHIM_DMACNTX_FMT GENMASK(5, 0)
+#define SHIM_DMACNTX_UYVY 0
+#define SHIM_DMACNTX_VYUY 1
+#define SHIM_DMACNTX_YUYV 2
+#define SHIM_DMACNTX_YVYU 3
+#define SHIM_DMACNTX_SIZE_8 0
+#define SHIM_DMACNTX_SIZE_16 1
+#define SHIM_DMACNTX_SIZE_32 2
+
+#define SHIM_PSI_CFG0(i) (0x24 + ((i) * 0x20))
+#define SHIM_PSI_CFG0_SRC_TAG GENMASK(15, 0)
+#define SHIM_PSI_CFG0_DST_TAG GENMASK(31, 16)
+
+#define CSI_DF_YUV420 0x18
+#define CSI_DF_YUV422 0x1e
+#define CSI_DF_RGB444 0x20
+#define CSI_DF_RGB888 0x24
+#define CSI_DF_RAW8 0x2a
+#define CSI_DF_RAW10 0x2b
+#define CSI_DF_RAW12 0x2c
+
+#define PSIL_WORD_SIZE_BYTES 16
+#define TI_CSI2RX_MAX_CTX 32
+
+/*
+ * There are no hard limits on the width or height. The DMA engine can handle
+ * all sizes. The max width and height are arbitrary numbers for this driver.
+ * Use 16M * 16M as the arbitrary limit. It is large enough that it is unlikely
+ * the limit will be hit in practice.
+ */
+#define MAX_WIDTH_BYTES SZ_16M
+#define MAX_HEIGHT_BYTES SZ_16M
+
+#define TI_CSI2RX_PAD_SINK 0
+#define TI_CSI2RX_PAD_FIRST_SOURCE 1
+#define TI_CSI2RX_MAX_SOURCE_PADS TI_CSI2RX_MAX_CTX
+#define TI_CSI2RX_MAX_PADS (1 + TI_CSI2RX_MAX_SOURCE_PADS)
+
+#define DRAIN_TIMEOUT_MS 50
+
+struct ti_csi2rx_fmt {
+ u32 fourcc; /* Four character code. */
+ u32 code; /* Mbus code. */
+ enum v4l2_colorspace colorspace;
+ u32 csi_df; /* CSI Data format. */
+ u8 bpp; /* Bits per pixel. */
+ u8 size; /* Data size shift when unpacking. */
+};
+
+struct ti_csi2rx_buffer {
+ /* Common v4l2 buffer. Must be first. */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ struct ti_csi2rx_ctx *ctx;
+};
+
+enum ti_csi2rx_dma_state {
+ TI_CSI2RX_DMA_STOPPED, /* Streaming not started yet. */
+ TI_CSI2RX_DMA_IDLE, /* Streaming but no pending DMA operation. */
+ TI_CSI2RX_DMA_ACTIVE, /* Streaming and pending DMA operation. */
+};
+
+struct ti_csi2rx_dma {
+ /* Protects all fields in this struct. */
+ spinlock_t lock;
+ struct dma_chan *chan;
+ /* Buffers queued to the driver, waiting to be processed by DMA. */
+ struct list_head queue;
+ enum ti_csi2rx_dma_state state;
+ /*
+ * Current buffer being processed by DMA. NULL if no buffer is being
+ * processed.
+ */
+ struct ti_csi2rx_buffer *curr;
+};
+
+struct ti_csi2rx_dev;
+
+struct ti_csi2rx_ctx {
+ struct ti_csi2rx_dev *csi;
+ struct video_device vdev;
+ struct vb2_queue vidq;
+ struct mutex mutex; /* To serialize ioctls. */
+ struct v4l2_format v_fmt;
+ struct ti_csi2rx_dma dma;
+ struct media_pad pad;
+ u32 sequence;
+ u32 idx;
+ u32 vc;
+ u32 stream;
+};
+
+struct ti_csi2rx_dev {
+ struct device *dev;
+ void __iomem *shim;
+ /* To serialize core subdev ioctls. */
+ struct mutex mutex;
+ unsigned int enable_count;
+ unsigned int num_ctx;
+ struct v4l2_async_notifier notifier;
+ struct media_device mdev;
+ struct media_pipeline pipe;
+ struct media_pad pads[TI_CSI2RX_MAX_PADS];
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *source;
+ struct v4l2_subdev subdev;
+ struct ti_csi2rx_ctx ctx[TI_CSI2RX_MAX_CTX];
+};
+
+static const struct ti_csi2rx_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_YUV422,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_YUV422,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_YUV422,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_YUV422,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW8,
+ .bpp = 8,
+ .size = SHIM_DMACNTX_SIZE_8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW12,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW12,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW12,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW12,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGI10,
+ .code = MEDIA_BUS_FMT_SRGGI10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRIG10,
+ .code = MEDIA_BUS_FMT_SGRIG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGI10,
+ .code = MEDIA_BUS_FMT_SBGGI10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBIG10,
+ .code = MEDIA_BUS_FMT_SGBIG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGIRG10,
+ .code = MEDIA_BUS_FMT_SGIRG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SIGGR10,
+ .code = MEDIA_BUS_FMT_SIGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGIBG10,
+ .code = MEDIA_BUS_FMT_SGIBG10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SIGGB10,
+ .code = MEDIA_BUS_FMT_SIGGB10_1X10,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .csi_df = CSI_DF_RAW10,
+ .bpp = 16,
+ .size = SHIM_DMACNTX_SIZE_16,
+ },
+
+ /* More formats can be supported but they are not listed for now. */
+};
+
+static const unsigned int num_formats = ARRAY_SIZE(formats);
+
+/* Forward declaration needed by ti_csi2rx_dma_callback. */
+static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
+ struct ti_csi2rx_buffer *buf);
+
+static const struct ti_csi2rx_fmt *find_format_by_pix(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].fourcc == pixelformat)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
+ struct v4l2_format *v4l2_fmt)
+{
+ struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
+ u32 bpl;
+
+ v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pix->pixelformat = csi_fmt->fourcc;
+ pix->colorspace = csi_fmt->colorspace;
+ bpl = (pix->width * ALIGN(csi_fmt->bpp, 8)) >> 3;
+ pix->bytesperline = ALIGN(bpl, 16);
+ pix->sizeimage = pix->bytesperline * pix->height;
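+ /* e.g. 321 pixels at 16 bpp: bpl = 642 bytes, padded to 656; sizeimage = 656 * height. */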
+}
+
+static int ti_csi2rx_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct ti_csi2rx_ctx *ctx = video_drvdata(file);
+
+ strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(ctx->csi->dev));
+
+ return 0;
+}
+
+static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= num_formats)
+ return -EINVAL;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->pixelformat = formats[f->index].fourcc;
+
+ return 0;
+}
+
+static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ti_csi2rx_ctx *ctx = video_drvdata(file);
+
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ const struct ti_csi2rx_fmt *fmt;
+
+ /*
+ * Default to the first format if the requested pixel format code isn't
+ * supported.
+ */
+ fmt = find_format_by_pix(f->fmt.pix.pixelformat);
+ if (!fmt)
+ fmt = &formats[0];
+
+ if (f->fmt.pix.field == V4L2_FIELD_ANY)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (f->fmt.pix.field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ ti_csi2rx_fill_fmt(fmt, f);
+
+ return 0;
+}
+
+static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ti_csi2rx_ctx *ctx = video_drvdata(file);
+ struct vb2_queue *q = &ctx->vidq;
+ int ret;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ ctx->v_fmt = *f;
+
+ return 0;
+}
+
+static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct ti_csi2rx_fmt *fmt;
+ unsigned int pixels_in_word;
+ u8 bpp;
+
+ fmt = find_format_by_pix(fsize->pixel_format);
+ if (!fmt || fsize->index != 0)
+ return -EINVAL;
+
+ bpp = ALIGN(fmt->bpp, 8);
+
+ /*
+ * Number of pixels in one PSI-L word. The transfer happens in multiples
+ * of PSI-L word sizes.
+ */
+ pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / bpp;
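+ /* e.g. at 16 bpp: 16 bytes * 8 / 16 bits = 8 pixels per PSI-L word. */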
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = pixels_in_word;
+ fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES, pixels_in_word);
+ fsize->stepwise.step_width = pixels_in_word;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = MAX_HEIGHT_BYTES;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops csi_ioctl_ops = {
+ .vidioc_querycap = ti_csi2rx_querycap,
+ .vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations csi_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int ti_csi2rx_video_register(struct ti_csi2rx_ctx *ctx)
+{
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ struct video_device *vdev = &ctx->vdev;
+ int ret;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ ret = media_create_pad_link(&csi->subdev.entity,
+ TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
+ &vdev->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ video_unregister_device(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+
+ /* Should register only one source. */
+ WARN_ON(csi->source);
+
+ csi->source = subdev;
+
+ return 0;
+}
+
+static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+ int ret, i, src_pad;
+
+ src_pad = media_entity_get_fwnode_pad(&csi->source->entity,
+ csi->source->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (src_pad < 0) {
+ dev_err(csi->dev, "Couldn't find source pad for subdev\n");
+ return src_pad;
+ }
+
+ ret = media_create_pad_link(&csi->source->entity, src_pad,
+ &csi->subdev.entity, TI_CSI2RX_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < csi->num_ctx; i++) {
+ ret = ti_csi2rx_video_register(&csi->ctx[i]);
+ if (ret)
+ return ret;
+ }
+
+ return v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
+ .bound = csi_async_notifier_bound,
+ .complete = csi_async_notifier_complete,
+};
+
+static int ti_csi2rx_init_subdev(struct ti_csi2rx_dev *csi)
+{
+ struct fwnode_handle *fwnode;
+ struct v4l2_async_subdev *asd;
+ struct device_node *node;
+ int ret;
+
+ node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
+ if (!node)
+ return -EINVAL;
+
+ fwnode = of_fwnode_handle(node);
+ if (!fwnode) {
+ of_node_put(node);
+ return -EINVAL;
+ }
+
+ v4l2_async_notifier_init(&csi->notifier);
+ csi->notifier.ops = &csi_async_notifier_ops;
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(&csi->notifier, fwnode,
+ sizeof(struct v4l2_async_subdev));
+ of_node_put(node);
+ if (IS_ERR(asd)) {
+ v4l2_async_notifier_cleanup(&csi->notifier);
+ return PTR_ERR(asd);
+ }
+
+ ret = v4l2_async_notifier_register(&csi->v4l2_dev, &csi->notifier);
+ if (ret) {
+ v4l2_async_notifier_cleanup(&csi->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
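+/*
+ * Program the SHIM layer between the CSI-2 bridge and the PSI-L DMA endpoint:
+ * the data format, YUV component order, data size, virtual channel and PSI-L
+ * tags for this context.
+ */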
+static void ti_csi2rx_setup_shim(struct ti_csi2rx_ctx *ctx)
+{
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ const struct ti_csi2rx_fmt *fmt;
+ unsigned int reg;
+
+ fmt = find_format_by_pix(ctx->v_fmt.fmt.pix.pixelformat);
+ if (!fmt) {
+ dev_err(csi->dev, "Unknown format\n");
+ return;
+ }
+
+ reg = SHIM_DMACNTX_EN;
+ reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_df);
+
+ /*
+ * Using the values from the documentation gives incorrect ordering for
+ * the luma and chroma components. In practice, the "reverse" format
+ * gives the correct image. So for example, if the image is in UYVY, the
+ * reverse would be YVYU.
+ */
+ switch (fmt->fourcc) {
+ case V4L2_PIX_FMT_UYVY:
+ reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+ SHIM_DMACNTX_YVYU);
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+ SHIM_DMACNTX_YUYV);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+ SHIM_DMACNTX_VYUY);
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+ SHIM_DMACNTX_UYVY);
+ break;
+ default:
+ /* Ignore if not YUV 4:2:2 */
+ break;
+ }
+
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
+ reg |= FIELD_PREP(SHIM_DMACNTX_VC, ctx->vc);
+
+ writel(reg, csi->shim + SHIM_DMACNTX(ctx->idx));
+
+ reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
+ FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
+ writel(reg, csi->shim + SHIM_PSI_CFG0(ctx->idx));
+}
+
+static void ti_csi2rx_drain_callback(void *param)
+{
+ struct completion *drain_complete = param;
+
+ complete(drain_complete);
+}
+
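+/*
+ * Drain any data left in the DMA pipeline into a scratch buffer, so that it
+ * cannot corrupt the next queued frame. The dummy transfer is bounded by
+ * DRAIN_TIMEOUT_MS in case nothing is pending.
+ */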
+static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *csi)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct device *dev = csi->dma.chan->device->dev;
+ struct completion drain_complete;
+ void *buf;
+ size_t len = csi->v_fmt.fmt.pix.sizeimage;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ int ret;
+
+ init_completion(&drain_complete);
+
+ buf = dma_alloc_coherent(dev, len, &addr, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto out;
+ }
+
+ desc->callback = ti_csi2rx_drain_callback;
+ desc->callback_param = &drain_complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto out;
+
+ dma_async_issue_pending(csi->dma.chan);
+
+ if (!wait_for_completion_timeout(&drain_complete,
+ msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
+ dmaengine_terminate_sync(csi->dma.chan);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+out:
+ dma_free_coherent(dev, len, buf, addr);
+ return ret;
+}
+
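+/* Per-frame DMA completion handler: return the finished buffer to vb2 and start the next queued transfer, if any. */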
+static void ti_csi2rx_dma_callback(void *param)
+{
+ struct ti_csi2rx_buffer *buf = param;
+ struct ti_csi2rx_ctx *ctx = buf->ctx;
+ struct ti_csi2rx_dma *dma = &ctx->dma;
+ unsigned long flags = 0;
+
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = ctx->sequence++;
+
+ spin_lock_irqsave(&dma->lock, flags);
+
+ WARN_ON(dma->curr != buf);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ /* If there are more buffers to process then start their transfer. */
+ dma->curr = NULL;
+ while (!list_empty(&dma->queue)) {
+ buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+ list_del(&buf->list);
+
+ if (ti_csi2rx_start_dma(ctx, buf)) {
+ dev_err(ctx->csi->dev,
+ "Failed to queue the next buffer for DMA\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ dma->curr = buf;
+ break;
+ }
+ }
+
+ if (!dma->curr)
+ dma->state = TI_CSI2RX_DMA_IDLE;
+
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
+ struct ti_csi2rx_buffer *buf)
+{
+ dma_addr_t addr;
+ struct dma_async_tx_descriptor *desc;
+ size_t len = ctx->v_fmt.fmt.pix.sizeimage;
+ dma_cookie_t cookie;
+ int ret = 0;
+
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ desc = dmaengine_prep_slave_single(ctx->dma.chan, addr, len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EIO;
+
+ desc->callback = ti_csi2rx_dma_callback;
+ desc->callback_param = buf;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(ctx->dma.chan);
+
+ return 0;
+}
+
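+/*
+ * Drain whatever is left in the pipeline, then re-arm the DMA with the given
+ * buffer. Used when the DMA had stalled for lack of buffers, or on resume.
+ */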
+static int ti_csi2rx_restart_dma(struct ti_csi2rx_ctx *ctx,
+ struct ti_csi2rx_buffer *buf)
+{
+ struct ti_csi2rx_dma *dma = &ctx->dma;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ ret = ti_csi2rx_drain_dma(ctx);
+ if (ret)
+ dev_warn(ctx->csi->dev,
+ "Failed to drain DMA. Next frame might be bogus\n");
+
+ ret = ti_csi2rx_start_dma(ctx, buf);
+ if (ret) {
+ dev_err(ctx->csi->dev, "Failed to start DMA: %d\n", ret);
+ spin_lock_irqsave(&dma->lock, flags);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dma->curr = NULL;
+ dma->state = TI_CSI2RX_DMA_IDLE;
+ spin_unlock_irqrestore(&dma->lock, flags);
+ }
+
+ return ret;
+}
+
+static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(q);
+ unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = ctx->v_fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(ctx->csi->dev, "Data will not fit into plane\n");
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
+{
+ struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct ti_csi2rx_buffer *buf;
+ struct ti_csi2rx_dma *dma = &ctx->dma;
+ bool restart_dma = false;
+ unsigned long flags = 0;
+
+ buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
+ buf->ctx = ctx;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ /*
+ * Usually the DMA callback takes care of queueing the pending buffers.
+ * But if DMA has stalled due to lack of buffers, restart it now.
+ */
+ if (dma->state == TI_CSI2RX_DMA_IDLE) {
+ /*
+ * Do not restart DMA with the lock held because
+ * ti_csi2rx_drain_dma() might block when allocating a buffer.
+ * There won't be a race on queueing DMA anyway since the
+ * callback is not being fired.
+ */
+ restart_dma = true;
+ dma->curr = buf;
+ dma->state = TI_CSI2RX_DMA_ACTIVE;
+ } else {
+ list_add_tail(&buf->list, &dma->queue);
+ }
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ if (restart_dma) {
+ /*
+ * Once frames start dropping, some data gets stuck in the DMA
+ * pipeline somewhere. So the first DMA transfer after frame
+ * drops gives a partial frame. This is obviously not useful to
+ * the application and will only confuse it. Issue a DMA
+ * transaction to drain that data out.
+ */
+ ti_csi2rx_restart_dma(ctx, buf);
+ }
+}
+
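+/*
+ * Query the source subdev's frame descriptor to find the CSI-2 virtual
+ * channel that carries this context's stream.
+ */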
+static int ti_csi2rx_get_vc(struct ti_csi2rx_ctx *ctx)
+{
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ struct v4l2_mbus_frame_desc fd;
+ struct media_pad *pad;
+ int ret, i;
+
+ pad = media_entity_remote_pad(&csi->pads[TI_CSI2RX_PAD_SINK]);
+ if (!pad)
+ return -ENODEV;
+
+ ret = v4l2_subdev_call(csi->source, pad, get_frame_desc, pad->index,
+ &fd);
+ if (ret)
+ return ret;
+
+ if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+ return -EINVAL;
+
+ for (i = 0; i < fd.num_entries; i++) {
+ if (ctx->stream == fd.entry[i].stream)
+ return fd.entry[i].bus.csi2.vc;
+ }
+
+ return -ENODEV;
+}
+
+static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ struct ti_csi2rx_dma *dma = &ctx->dma;
+ struct ti_csi2rx_buffer *buf, *tmp;
+ struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev_route *route = NULL;
+ struct media_pad *remote_pad;
+ unsigned long flags = 0;
+ int ret = 0, i;
+ struct v4l2_subdev_state *state;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ if (list_empty(&dma->queue))
+ ret = -EIO;
+ spin_unlock_irqrestore(&dma->lock, flags);
+ if (ret)
+ return ret;
+
+ ret = media_pipeline_start(ctx->vdev.entity.pads, &csi->pipe);
+ if (ret)
+ goto err;
+
+ remote_pad = media_entity_remote_pad(&ctx->pad);
+ if (!remote_pad) {
+ ret = -ENODEV;
+ goto err_pipeline;
+ }
+
+ state = v4l2_subdev_lock_active_state(&csi->subdev);
+
+ routing = &state->routing;
+
+ /* Find the stream to process. */
+ for (i = 0; i < routing->num_routes; i++) {
+ struct v4l2_subdev_route *r = &routing->routes[i];
+
+ if (!(r->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (r->source_pad != remote_pad->index)
+ continue;
+
+ route = r;
+ break;
+ }
+
+ if (!route) {
+ ret = -ENODEV;
+ v4l2_subdev_unlock_state(state);
+ goto err_pipeline;
+ }
+
+ ctx->stream = route->sink_stream;
+
+ v4l2_subdev_unlock_state(state);
+
+ ret = ti_csi2rx_get_vc(ctx);
+ if (ret == -ENOIOCTLCMD)
+ ctx->vc = 0;
+ else if (ret < 0)
+ goto err_pipeline;
+ else
+ ctx->vc = ret;
+
+ ti_csi2rx_setup_shim(ctx);
+
+ ret = v4l2_subdev_call(&csi->subdev, video, s_stream, 1);
+ if (ret)
+ goto err_pipeline;
+
+ ctx->sequence = 0;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+ list_del(&buf->list);
+ dma->state = TI_CSI2RX_DMA_ACTIVE;
+
+ ret = ti_csi2rx_start_dma(ctx, buf);
+ if (ret) {
+ dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&dma->lock, flags);
+ goto err_stream;
+ }
+
+ dma->curr = buf;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ return 0;
+
+err_stream:
+ v4l2_subdev_call(&csi->subdev, video, s_stream, 0);
+err_pipeline:
+ media_pipeline_stop(ctx->vdev.entity.pads);
+err:
+ spin_lock_irqsave(&dma->lock, flags);
+ list_for_each_entry_safe(buf, tmp, &dma->queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ ctx->dma.state = TI_CSI2RX_DMA_STOPPED;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ return ret;
+}
+
+static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
+{
+ struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ struct ti_csi2rx_buffer *buf = NULL, *tmp;
+ struct ti_csi2rx_dma *dma = &ctx->dma;
+ unsigned long flags = 0;
+ enum ti_csi2rx_dma_state state;
+ int ret;
+
+ media_pipeline_stop(ctx->vdev.entity.pads);
+
+ ret = v4l2_subdev_call(&csi->subdev, video, s_stream, 0);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop subdev stream\n");
+
+ ret = dmaengine_terminate_sync(ctx->dma.chan);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop DMA\n");
+
+ writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
+
+ spin_lock_irqsave(&dma->lock, flags);
+ list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (dma->curr)
+ vb2_buffer_done(&dma->curr->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+
+ state = dma->state;
+
+ dma->curr = NULL;
+ dma->state = TI_CSI2RX_DMA_STOPPED;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ /*
+ * TODO: For some reason the first frame is wrong if we don't toggle
+ * the pixel reset. But at the same time, drain does not work either.
+ * Figure this one out.
+ */
+ if (state != TI_CSI2RX_DMA_STOPPED) {
+ ret = ti_csi2rx_drain_dma(ctx);
+ if (ret)
+ dev_dbg(csi->dev,
+ "Failed to drain DMA. Next frame might be bogus\n");
+ }
+}
+
+static const struct vb2_ops csi_vb2_qops = {
+ .queue_setup = ti_csi2rx_queue_setup,
+ .buf_prepare = ti_csi2rx_buffer_prepare,
+ .buf_queue = ti_csi2rx_buffer_queue,
+ .start_streaming = ti_csi2rx_start_streaming,
+ .stop_streaming = ti_csi2rx_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static inline struct ti_csi2rx_dev *to_csi2rx_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ti_csi2rx_dev, subdev);
+}
+
+static int ti_csi2rx_sd_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ /* No transcoding, don't allow setting source fmt */
+ if (format->pad >= TI_CSI2RX_PAD_FIRST_SOURCE)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ if (!find_format_by_code(format->format.code))
+ format->format.code = formats[0].code;
+
+ v4l2_subdev_lock_state(state);
+
+ fmt = v4l2_state_get_stream_format(state, format->pad, format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *fmt = format->format;
+
+ fmt = v4l2_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *fmt = format->format;
+
+out:
+ v4l2_subdev_unlock_state(state);
+ return ret;
+}
+
+static int _ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ int ret;
+
+ const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+
+ v4l2_subdev_lock_state(state);
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ return _ti_csi2rx_sd_set_routing(sd, state, routing);
+}
+
+static int ti_csi2rx_sd_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = { {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = TI_CSI2RX_PAD_FIRST_SOURCE,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ } };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = 1,
+ .routes = routes,
+ };
+
+ /* Initialize routing to a single route to the first source pad */
+ return _ti_csi2rx_sd_set_routing(sd, state, &routing);
+}
+
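+/*
+ * All capture contexts share the single source subdev, so streaming on it is
+ * reference-counted: the source is started on the first enable and stopped
+ * on the last disable.
+ */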
+static int ti_csi2rx_sd_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&csi->mutex);
+
+ if (enable) {
+ if (csi->enable_count > 0) {
+ csi->enable_count++;
+ goto out;
+ }
+
+ ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
+ if (ret)
+ goto out;
+
+ csi->enable_count++;
+ } else {
+ if (csi->enable_count == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (--csi->enable_count > 0)
+ goto out;
+
+ ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
+ }
+
+out:
+ mutex_unlock(&csi->mutex);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops ti_csi2rx_subdev_video_ops = {
+ .s_stream = ti_csi2rx_sd_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ti_csi2rx_subdev_pad_ops = {
+ .init_cfg = ti_csi2rx_sd_init_cfg,
+ .set_routing = ti_csi2rx_sd_set_routing,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ti_csi2rx_sd_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ti_csi2rx_subdev_ops = {
+ .video = &ti_csi2rx_subdev_video_ops,
+ .pad = &ti_csi2rx_subdev_pad_ops,
+};
+
+static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_ctx *ctx)
+{
+ dma_release_channel(ctx->dma.chan);
+}
+
+static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
+{
+ media_device_unregister(&csi->mdev);
+ v4l2_device_unregister(&csi->v4l2_dev);
+ media_device_cleanup(&csi->mdev);
+}
+
+static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
+{
+ v4l2_async_notifier_unregister(&csi->notifier);
+ v4l2_async_notifier_cleanup(&csi->notifier);
+}
+
+static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_ctx *ctx)
+{
+ vb2_queue_release(&ctx->vidq);
+}
+
+static void ti_csi2rx_cleanup_ctx(struct ti_csi2rx_ctx *ctx)
+{
+ ti_csi2rx_cleanup_dma(ctx);
+ ti_csi2rx_cleanup_vb2q(ctx);
+
+ video_unregister_device(&ctx->vdev);
+
+ mutex_destroy(&ctx->mutex);
+}
+
+static int ti_csi2rx_init_vb2q(struct ti_csi2rx_ctx *ctx)
+{
+ struct vb2_queue *q = &ctx->vidq;
+ int ret;
+
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = ctx;
+ q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
+ q->ops = &csi_vb2_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = dmaengine_get_dma_device(ctx->dma.chan);
+ q->lock = &ctx->mutex;
+ q->min_buffers_needed = 1;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ ctx->vdev.queue = q;
+
+ return 0;
+}
+
+static int ti_csi2rx_init_dma(struct ti_csi2rx_ctx *ctx)
+{
+ struct dma_slave_config cfg;
+ char name[32];
+ int ret;
+
+ INIT_LIST_HEAD(&ctx->dma.queue);
+ spin_lock_init(&ctx->dma.lock);
+
+ ctx->dma.state = TI_CSI2RX_DMA_STOPPED;
+
+ snprintf(name, sizeof(name), "rx%u", ctx->idx);
+ ctx->dma.chan = dma_request_chan(ctx->csi->dev, name);
+ if (IS_ERR(ctx->dma.chan))
+ return PTR_ERR(ctx->dma.chan);
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+
+ ret = dmaengine_slave_config(ctx->dma.chan, &cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
+{
+ struct media_device *mdev = &csi->mdev;
+ struct v4l2_subdev *sd = &csi->subdev;
+ int ret, i;
+
+ mdev->dev = csi->dev;
+ mdev->hw_revision = 1;
+ strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));
+ snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
+ dev_name(mdev->dev));
+
+ media_device_init(mdev);
+
+ csi->v4l2_dev.mdev = mdev;
+
+ ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+ if (ret)
+ goto cleanup_media;
+
+ ret = media_device_register(mdev);
+ if (ret)
+ goto unregister_v4l2;
+
+ v4l2_subdev_init(sd, &ti_csi2rx_subdev_ops);
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_MULTIPLEXED;
+ strscpy(sd->name, dev_name(csi->dev), sizeof(sd->name));
+ sd->dev = csi->dev;
+
+ csi->pads[TI_CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ for (i = TI_CSI2RX_PAD_FIRST_SOURCE;
+ i < TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx; i++)
+ csi->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&sd->entity,
+ TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx,
+ csi->pads);
+ if (ret)
+ goto unregister_media;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto unregister_media;
+
+ ret = v4l2_device_register_subdev(&csi->v4l2_dev, sd);
+ if (ret)
+ goto cleanup_subdev;
+
+ return 0;
+
+cleanup_subdev:
+ v4l2_subdev_cleanup(sd);
+unregister_media:
+ media_device_unregister(mdev);
+unregister_v4l2:
+ v4l2_device_unregister(&csi->v4l2_dev);
+cleanup_media:
+ media_device_cleanup(mdev);
+
+ return ret;
+}
+
+static int ti_csi2rx_init_ctx(struct ti_csi2rx_ctx *ctx)
+{
+ struct ti_csi2rx_dev *csi = ctx->csi;
+ struct video_device *vdev = &ctx->vdev;
+ const struct ti_csi2rx_fmt *fmt;
+ struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;
+ int ret;
+
+ mutex_init(&ctx->mutex);
+
+ fmt = find_format_by_pix(V4L2_PIX_FMT_UYVY);
+ if (!fmt)
+ return -EINVAL;
+
+ pix_fmt->width = 640;
+ pix_fmt->height = 480;
+
+ ti_csi2rx_fill_fmt(fmt, &ctx->v_fmt);
+
+ ctx->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&ctx->vdev.entity, 1, &ctx->pad);
+ if (ret)
+ return ret;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s context %u",
+ dev_name(csi->dev), ctx->idx);
+ vdev->v4l2_dev = &csi->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->fops = &csi_fops;
+ vdev->ioctl_ops = &csi_ioctl_ops;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
+ vdev->lock = &ctx->mutex;
+
+ video_set_drvdata(vdev, ctx);
+
+ ret = ti_csi2rx_init_dma(ctx);
+ if (ret)
+ return ret;
+
+ ret = ti_csi2rx_init_vb2q(ctx);
+ if (ret)
+ goto cleanup_dma;
+
+ return 0;
+
+cleanup_dma:
+ ti_csi2rx_cleanup_dma(ctx);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int ti_csi2rx_suspend(struct device *dev)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
+ struct ti_csi2rx_ctx *ctx;
+ struct ti_csi2rx_dma *dma;
+ unsigned long flags = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < csi->num_ctx; i++) {
+ ctx = &csi->ctx[i];
+ dma = &ctx->dma;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ if (dma->state != TI_CSI2RX_DMA_STOPPED) {
+ spin_unlock_irqrestore(&dma->lock, flags);
+ ret = v4l2_subdev_call(&csi->subdev, video, s_stream, 0);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop subdev stream\n");
+ /* Terminate DMA */
+ ret = dmaengine_terminate_sync(ctx->dma.chan);
+ if (ret)
+ dev_err(csi->dev, "Failed to stop DMA\n");
+ } else {
+ spin_unlock_irqrestore(&dma->lock, flags);
+ }
+
+ /* Stop any on-going streams */
+ writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
+ }
+
+ /* Assert the pixel reset. */
+ writel(0, csi->shim + SHIM_CNTL);
+
+ return ret;
+}
+
+static int ti_csi2rx_resume(struct device *dev)
+{
+ struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
+ struct ti_csi2rx_ctx *ctx;
+ struct ti_csi2rx_dma *dma;
+ struct ti_csi2rx_buffer *buf;
+ unsigned long flags = 0;
+ unsigned int reg;
+ int i, ret = 0;
+
+ reg = SHIM_CNTL_PIX_RST;
+ writel(reg, csi->shim + SHIM_CNTL);
+
+ for (i = 0; i < csi->num_ctx; i++) {
+ ctx = &csi->ctx[i];
+ dma = &ctx->dma;
+ spin_lock_irqsave(&dma->lock, flags);
+ if (dma->state != TI_CSI2RX_DMA_STOPPED) {
+ buf = dma->curr;
+ spin_unlock_irqrestore(&dma->lock, flags);
+
+ /* Restore stream config */
+ ti_csi2rx_setup_shim(ctx);
+
+ ret = v4l2_subdev_call(&csi->subdev, video, s_stream, 1);
+ if (ret)
+ dev_err(ctx->csi->dev, "Failed to start subdev\n");
+
+ /* Restart DMA */
+ if (buf)
+ ti_csi2rx_restart_dma(ctx, buf);
+ } else {
+ spin_unlock_irqrestore(&dma->lock, flags);
+ }
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops ti_csi2rx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ti_csi2rx_suspend, ti_csi2rx_resume)
+};
+#endif /* CONFIG_PM */
+
+static int ti_csi2rx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ti_csi2rx_dev *csi;
+ struct resource *res;
+ int ret, i, count;
+ unsigned int reg;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, csi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi->shim = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csi->shim))
+ return PTR_ERR(csi->shim);
+
+ /* Only use as many contexts as the number of DMA channels allocated. */
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0) {
+ dev_err(csi->dev, "Failed to get DMA channel count: %d\n",
+ count);
+ return count;
+ }
+
+ csi->num_ctx = count;
+ if (csi->num_ctx > TI_CSI2RX_MAX_CTX) {
+ dev_warn(csi->dev,
+ "%u DMA channels passed. Maximum is %u. Ignoring the rest.\n",
+ csi->num_ctx, TI_CSI2RX_MAX_CTX);
+ csi->num_ctx = TI_CSI2RX_MAX_CTX;
+ }
+
+ mutex_init(&csi->mutex);
+
+ ret = ti_csi2rx_v4l2_init(csi);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < csi->num_ctx; i++) {
+ csi->ctx[i].idx = i;
+ csi->ctx[i].csi = csi;
+ ret = ti_csi2rx_init_ctx(&csi->ctx[i]);
+ if (ret)
+ goto cleanup_ctx;
+ }
+
+ ret = ti_csi2rx_init_subdev(csi);
+ if (ret)
+ goto cleanup_ctx;
+
+ ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
+ if (ret) {
+ dev_err(csi->dev, "Failed to create children: %d\n", ret);
+ goto cleanup_subdev;
+ }
+
+ /* De-assert the pixel interface reset. */
+ reg = SHIM_CNTL_PIX_RST;
+ writel(reg, csi->shim + SHIM_CNTL);
+
+ return 0;
+
+cleanup_subdev:
+ ti_csi2rx_cleanup_subdev(csi);
+cleanup_ctx:
+ for (i--; i >= 0; i--)
+ ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
+
+ ti_csi2rx_cleanup_v4l2(csi);
+ return ret;
+}
+
+static int ti_csi2rx_remove(struct platform_device *pdev)
+{
+ struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < csi->num_ctx; i++) {
+ if (vb2_is_busy(&csi->ctx[i].vidq))
+ return -EBUSY;
+ }
+
+ for (i = 0; i < csi->num_ctx; i++)
+ ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
+
+ ti_csi2rx_cleanup_subdev(csi);
+ ti_csi2rx_cleanup_v4l2(csi);
+
+ /* Assert the pixel reset. */
+ writel(0, csi->shim + SHIM_CNTL);
+
+ mutex_destroy(&csi->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id ti_csi2rx_of_match[] = {
+ { .compatible = "ti,j721e-csi2rx", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
+
+static struct platform_driver ti_csi2rx_pdrv = {
+ .probe = ti_csi2rx_probe,
+ .remove = ti_csi2rx_remove,
+ .driver = {
+ .name = TI_CSI2RX_MODULE_NAME,
+ .of_match_table = ti_csi2rx_of_match,
+#ifdef CONFIG_PM
+ .pm = &ti_csi2rx_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(ti_csi2rx_pdrv);
+
+MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti/vpe/Makefile
index ad624056e039..fbb0dec5a30e 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti/vpe/Makefile
@@ -3,14 +3,12 @@ obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
+obj-$(CONFIG_VIDEO_TI_VIP) += ti-vip.o
ti-vpe-y := vpe.o
ti-vpdma-y := vpdma.o
ti-sc-y := sc.o
ti-csc-y := csc.o
+ti-vip-y := vip.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
-
-ti-cal-y := cal.o cal-camerarx.o cal-video.o
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti/vpe/csc.c
index f4e0cf72d1cf..f4e0cf72d1cf 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti/vpe/csc.c
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti/vpe/csc.h
index af2e86bccf57..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti/vpe/csc.h
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti/vpe/sc.c
index 98f95082a6fd..98f95082a6fd 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti/vpe/sc.c
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti/vpe/sc.h
index d55de44d5257..d55de44d5257 100644
--- a/drivers/media/platform/ti-vpe/sc.h
+++ b/drivers/media/platform/ti/vpe/sc.h
diff --git a/drivers/media/platform/ti-vpe/sc_coeff.h b/drivers/media/platform/ti/vpe/sc_coeff.h
index c525d1764099..c525d1764099 100644
--- a/drivers/media/platform/ti-vpe/sc_coeff.h
+++ b/drivers/media/platform/ti/vpe/sc_coeff.h
diff --git a/drivers/media/platform/ti/vpe/vip.c b/drivers/media/platform/ti/vpe/vip.c
new file mode 100644
index 000000000000..c21f8fe06532
--- /dev/null
+++ b/drivers/media/platform/ti/vpe/vip.c
@@ -0,0 +1,3995 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI VIP capture driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Nikhil Devshatwar, <nikhil.nd@ti.com>
+ * Benoit Parrot, <bparrot@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+
+#include "vip.h"
+
+#define VIP_MODULE_NAME "vip"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-8)");
+
+/*
+ * Minimum and maximum frame sizes
+ */
+#define MIN_W 128
+#define MIN_H 128
+#define MAX_W 2048
+#define MAX_H 1536
+
+/*
+ * Required alignments
+ */
+#define S_ALIGN 0 /* multiple of 1 */
+#define H_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+#define L_ALIGN 7 /* line stride: multiple of 128 bits, i.e. 16 bytes */
+
+/*
+ * Need a descriptor entry for each of up to 15 outputs,
+ * and up to 2 control transfers.
+ */
+#define VIP_DESC_LIST_SIZE (17 * sizeof(struct vpdma_dtd))
+
+#define vip_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, debug, dev, fmt, ##arg)
+#define vip_err(dev, fmt, arg...) \
+ v4l2_err(dev, fmt, ##arg)
+#define vip_info(dev, fmt, arg...) \
+ v4l2_info(dev, fmt, ##arg)
+
+#define CTRL_CORE_SMA_SW_1 0x534
+/*
+ * The srce_info structure contains per-srce data.
+ */
+struct vip_srce_info {
+ u8 base_channel; /* the VPDMA channel number */
+ u8 vb_index; /* input frame f, f-1, f-2 index */
+ u8 vb_part; /* identifies section of co-planar formats */
+};
+
+#define VIP_VPDMA_FIFO_SIZE 2
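+/* Depth of the drop queue, used to keep capture running when userspace has no free buffers queued */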
+#define VIP_DROPQ_SIZE 3
+
+/*
+ * Define indices into the srce_info tables
+ */
+
+#define VIP_SRCE_MULT_PORT 0
+#define VIP_SRCE_MULT_ANC 1
+#define VIP_SRCE_LUMA 2
+#define VIP_SRCE_CHROMA 3
+#define VIP_SRCE_RGB 4
+
+static struct vip_srce_info srce_info[5] = {
+ [VIP_SRCE_MULT_PORT] = {
+ .base_channel = VIP1_CHAN_NUM_MULT_PORT_A_SRC0,
+ .vb_index = 0,
+ .vb_part = VIP_CHROMA,
+ },
+ [VIP_SRCE_MULT_ANC] = {
+ .base_channel = VIP1_CHAN_NUM_MULT_ANC_A_SRC0,
+ .vb_index = 0,
+ .vb_part = VIP_LUMA,
+ },
+ [VIP_SRCE_LUMA] = {
+ .base_channel = VIP1_CHAN_NUM_PORT_A_LUMA,
+ .vb_index = 1,
+ .vb_part = VIP_LUMA,
+ },
+ [VIP_SRCE_CHROMA] = {
+ .base_channel = VIP1_CHAN_NUM_PORT_A_CHROMA,
+ .vb_index = 1,
+ .vb_part = VIP_CHROMA,
+ },
+ [VIP_SRCE_RGB] = {
+ .base_channel = VIP1_CHAN_NUM_PORT_A_RGB,
+ .vb_part = VIP_LUMA,
+ },
+};
+
+static struct vip_fmt vip_formats[VIP_MAX_ACTIVE_FMT] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CRY422],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCR422],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
+ },
+ },
+ {
+ /* V4L2 currently only defines one 16 bit variant */
+ .fourcc = V4L2_PIX_FMT_SBGGR16,
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .coplanar = 0,
+ .vpdma_fmt = { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW16],
+ },
+ },
+};
+
+/* initialize v4l2_format_info member in vip_formats array */
+static void vip_init_format_info(struct device *dev)
+{
+ struct vip_fmt *fmt;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vip_formats); i++) {
+ fmt = &vip_formats[i];
+ fmt->finfo = v4l2_format_info(fmt->fourcc);
+ }
+}
+
+/* Print Four-character-code (FOURCC) */
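+/* Note: returns a pointer to a static buffer, so this helper is not re-entrant. */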
+static char *fourcc_to_str(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
+/*
+ * Find our format description corresponding to the passed v4l2_format
+ */
+
+static struct vip_fmt *find_port_format_by_pix(struct vip_port *port,
+ u32 pixelformat)
+{
+ struct vip_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < port->num_active_fmt; k++) {
+ fmt = port->active_fmt[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct vip_fmt *find_port_format_by_code(struct vip_port *port,
+ u32 code)
+{
+ struct vip_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < port->num_active_fmt; k++) {
+ fmt = port->active_fmt[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static inline struct vip_port *notifier_to_vip_port(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct vip_port, notifier);
+}
+
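+/*
+ * Media bus codes are grouped by their high byte: 0x10xx for RGB, 0x20xx
+ * for YUV and 0x30xx for raw Bayer, which is what the checks below rely on.
+ */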
+static bool vip_is_mbuscode_yuv(u32 code)
+{
+ return ((code & 0xFF00) == 0x2000);
+}
+
+static bool vip_is_mbuscode_rgb(u32 code)
+{
+ return ((code & 0xFF00) == 0x1000);
+}
+
+static bool vip_is_mbuscode_raw(u32 code)
+{
+ return ((code & 0xFF00) == 0x3000);
+}
+
+/*
+ * This is not an accurate conversion but it is only used to
+ * assess if color conversion is needed.
+ */
+static u32 vip_mbus_code_to_fourcc(u32 code)
+{
+ if (vip_is_mbuscode_rgb(code))
+ return V4L2_PIX_FMT_RGB24;
+
+ if (vip_is_mbuscode_yuv(code))
+ return V4L2_PIX_FMT_UYVY;
+
+ return V4L2_PIX_FMT_SBGGR8;
+}
+
+static enum vip_csc_state
+vip_csc_direction(u32 src_code, const struct v4l2_format_info *dfinfo)
+{
+ if (vip_is_mbuscode_yuv(src_code) && v4l2_is_format_rgb(dfinfo))
+ return VIP_CSC_Y2R;
+ else if (vip_is_mbuscode_rgb(src_code) && v4l2_is_format_yuv(dfinfo))
+ return VIP_CSC_R2Y;
+ else
+ return VIP_CSC_NA;
+}
+
+/*
+ * port flag bits
+ */
+#define FLAG_FRAME_1D BIT(0)
+#define FLAG_EVEN_LINE_SKIP BIT(1)
+#define FLAG_ODD_LINE_SKIP BIT(2)
+#define FLAG_MODE_TILED BIT(3)
+#define FLAG_INTERLACED BIT(4)
+#define FLAG_MULTIPLEXED BIT(5)
+#define FLAG_MULT_PORT BIT(6)
+#define FLAG_MULT_ANC BIT(7)
+
+/*
+ * Function prototype declarations
+ */
+static int alloc_port(struct vip_dev *, int);
+static void free_port(struct vip_port *);
+static int vip_setup_parser(struct vip_port *port);
+static int vip_setup_scaler(struct vip_stream *stream);
+static void vip_enable_parser(struct vip_port *port, bool on);
+static void vip_reset_parser(struct vip_port *port, bool on);
+static void vip_parser_stop_imm(struct vip_port *port, bool on);
+static void stop_dma(struct vip_stream *stream, bool clear_list);
+static int vip_load_vpdma_list_fifo(struct vip_stream *stream);
+static inline bool is_scaler_available(struct vip_port *port);
+static inline bool allocate_scaler(struct vip_port *port);
+static inline void free_scaler(struct vip_port *port);
+static bool is_csc_available(struct vip_port *port);
+static bool allocate_csc(struct vip_port *port,
+ enum vip_csc_state csc_direction);
+static void free_csc(struct vip_port *port);
+
+#define reg_read(dev, offset) ioread32((dev)->base + (offset))
+#define reg_write(dev, offset, val) iowrite32((val), (dev)->base + (offset))
+
+/*
+ * Insert a masked field into a 32-bit field
+ */
+static void insert_field(u32 *valp, u32 field, u32 mask, int shift)
+{
+ u32 val = *valp;
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+ *valp = val;
+}
+
+/*
+ * DMA address/data block for the shadow registers
+ */
+struct vip_mmr_adb {
+ struct vpdma_adb_hdr sc_hdr0;
+ u32 sc_regs0[7];
+ u32 sc_pad0[1];
+ struct vpdma_adb_hdr sc_hdr8;
+ u32 sc_regs8[6];
+ u32 sc_pad8[2];
+ struct vpdma_adb_hdr sc_hdr17;
+ u32 sc_regs17[9];
+ u32 sc_pad17[3];
+ struct vpdma_adb_hdr csc_hdr;
+ u32 csc_regs[6];
+ u32 csc_pad[2];
+};
+
+#define GET_OFFSET_TOP(port, obj, reg) \
+ ((obj)->res->start - (port)->dev->res->start + (reg))
+
+#define VIP_SET_MMR_ADB_HDR(port, hdr, regs, offset_a) \
+ VPDMA_SET_MMR_ADB_HDR((port)->mmr_adb, vip_mmr_adb, hdr, regs, offset_a)
+
+/*
+ * Set the headers for all of the address/data block structures.
+ */
+static void init_adb_hdrs(struct vip_port *port)
+{
+ VIP_SET_MMR_ADB_HDR(port, sc_hdr0, sc_regs0,
+ GET_OFFSET_TOP(port, port->dev->sc, CFG_SC0));
+ VIP_SET_MMR_ADB_HDR(port, sc_hdr8, sc_regs8,
+ GET_OFFSET_TOP(port, port->dev->sc, CFG_SC8));
+ VIP_SET_MMR_ADB_HDR(port, sc_hdr17, sc_regs17,
+ GET_OFFSET_TOP(port, port->dev->sc, CFG_SC17));
+ VIP_SET_MMR_ADB_HDR(port, csc_hdr, csc_regs,
+ GET_OFFSET_TOP(port, port->dev->csc, CSC_CSC00));
+}
+
+/*
+ * These are the module reset bits for slice 1.
+ * For slice 2 the same bits are simply shifted left by 1.
+ */
+#define VIP_DP_RST BIT(16)
+#define VIP_PARSER_RST BIT(18)
+#define VIP_CSC_RST BIT(20)
+#define VIP_SC_RST BIT(22)
+#define VIP_DS0_RST BIT(25)
+#define VIP_DS1_RST BIT(27)
+
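+/* Assert (on = true) or release (on = false) the reset of one VIP sub-module. */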
+static void vip_module_reset(struct vip_dev *dev, uint32_t module, bool on)
+{
+ u32 val = 0;
+
+ val = reg_read(dev, VIP_CLK_RESET);
+
+ if (dev->slice_id == VIP_SLICE2)
+ module <<= 1;
+
+ if (on)
+ val |= module;
+ else
+ val &= ~module;
+
+ reg_write(dev, VIP_CLK_RESET, val);
+}
+
+/*
+ * Enable or disable the VIP clocks
+ */
+static void vip_set_clock_enable(struct vip_dev *dev, bool on)
+{
+ u32 val = 0;
+
+ val = reg_read(dev, VIP_CLK_ENABLE);
+ if (on) {
+ val |= VIP_VPDMA_CLK_ENABLE;
+ if (dev->slice_id == VIP_SLICE1)
+ val |= VIP_VIP1_DATA_PATH_CLK_ENABLE;
+ else
+ val |= VIP_VIP2_DATA_PATH_CLK_ENABLE;
+ } else {
+ if (dev->slice_id == VIP_SLICE1)
+ val &= ~VIP_VIP1_DATA_PATH_CLK_ENABLE;
+ else
+ val &= ~VIP_VIP2_DATA_PATH_CLK_ENABLE;
+
+ /* If both VIP slices are disabled, shut down the VPDMA as well */
+ if (!(val & (VIP_VIP1_DATA_PATH_CLK_ENABLE |
+ VIP_VIP2_DATA_PATH_CLK_ENABLE)))
+ val = 0;
+ }
+
+ reg_write(dev, VIP_CLK_ENABLE, val);
+}
+
+/*
+ * Helper to enable the clocks early on, so that VPDMA firmware loading can
+ * happen before the slice devices are created.
+ */
+static void vip_shared_set_clock_enable(struct vip_shared *shared, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = VIP_VIP1_DATA_PATH_CLK_ENABLE | VIP_VPDMA_CLK_ENABLE;
+
+ reg_write(shared, VIP_CLK_ENABLE, val);
+}
+
+static void vip_top_reset(struct vip_dev *dev)
+{
+ u32 val = 0;
+
+ val = reg_read(dev, VIP_CLK_RESET);
+
+ if (dev->slice_id == VIP_SLICE1)
+ insert_field(&val, 1, VIP_DATA_PATH_CLK_RESET_MASK,
+ VIP_VIP1_DATA_PATH_RESET_SHIFT);
+ else
+ insert_field(&val, 1, VIP_DATA_PATH_CLK_RESET_MASK,
+ VIP_VIP2_DATA_PATH_RESET_SHIFT);
+
+ reg_write(dev, VIP_CLK_RESET, val);
+
+ usleep_range(200, 250);
+
+ val = reg_read(dev, VIP_CLK_RESET);
+
+ if (dev->slice_id == VIP_SLICE1)
+ insert_field(&val, 0, VIP_DATA_PATH_CLK_RESET_MASK,
+ VIP_VIP1_DATA_PATH_RESET_SHIFT);
+ else
+ insert_field(&val, 0, VIP_DATA_PATH_CLK_RESET_MASK,
+ VIP_VIP2_DATA_PATH_RESET_SHIFT);
+ reg_write(dev, VIP_CLK_RESET, val);
+}
+
+static void vip_top_vpdma_reset(struct vip_shared *shared)
+{
+ u32 val;
+
+ val = reg_read(shared, VIP_CLK_RESET);
+ insert_field(&val, 1, VIP_VPDMA_CLK_RESET_MASK,
+ VIP_VPDMA_CLK_RESET_SHIFT);
+ reg_write(shared, VIP_CLK_RESET, val);
+
+ usleep_range(200, 250);
+
+ val = reg_read(shared, VIP_CLK_RESET);
+ insert_field(&val, 0, VIP_VPDMA_CLK_RESET_MASK,
+ VIP_VPDMA_CLK_RESET_SHIFT);
+ reg_write(shared, VIP_CLK_RESET, val);
+}
+
+static void vip_set_pclk_invert(struct vip_port *port)
+{
+ u32 offset;
+ /*
+ * When the VIP parser is configured so that the pixel clock is
+ * sampled on the falling edge, the pixel clock needs to be inverted
+ * before it is fed to the VIP module. This is done by setting a bit
+ * in the CTRL_CORE_SMA_SW_1 register.
+ */
+
+ if (port->dev->instance_id == VIP_INSTANCE1) {
+ offset = 0 + 2 * port->port_id + port->dev->slice_id;
+ } else if (port->dev->instance_id == VIP_INSTANCE2) {
+ offset = 4 + 2 * port->port_id + port->dev->slice_id;
+ } else if (port->dev->instance_id == VIP_INSTANCE3) {
+ offset = 10 - port->dev->slice_id;
+ } else {
+ vip_err(port, "%s: VIP instance id out of range...\n",
+ __func__);
+ return;
+ }
+
+ if (port->dev->syscon_pol)
+ regmap_update_bits(port->dev->syscon_pol,
+ port->dev->syscon_pol_offset,
+ 1 << offset, 1 << offset);
+}
+
+#define VIP_PARSER_PORT(p) (VIP_PARSER_PORTA_0 + ((p) * 0x8U))
+#define VIP_PARSER_EXTRA_PORT(p) (VIP_PARSER_PORTA_1 + ((p) * 0x8U))
+#define VIP_PARSER_CROP_H_PORT(p) \
+ (VIP_PARSER_PORTA_EXTRA4 + ((p) * 0x10U))
+#define VIP_PARSER_CROP_V_PORT(p) \
+ (VIP_PARSER_PORTA_EXTRA5 + ((p) * 0x10U))
+#define VIP_PARSER_STOP_IMM_PORT(p) (VIP_PARSER_PORTA_EXTRA6 + ((p) * 0x4U))
+
+static void vip_set_data_interface(struct vip_port *port,
+ enum data_interface_modes mode)
+{
+ u32 val = 0;
+
+ insert_field(&val, mode, VIP_DATA_INTERFACE_MODE_MASK,
+ VIP_DATA_INTERFACE_MODE_SHFT);
+
+ reg_write(port->dev->parser, VIP_PARSER_MAIN_CFG, val);
+}
+
+static void vip_set_slice_path(struct vip_dev *dev,
+ enum data_path_select data_path, u32 path_val)
+{
+ u32 val = 0;
+ int data_path_reg;
+
+ vip_dbg(3, dev, "%s:\n", __func__);
+
+ data_path_reg = VIP_VIP1_DATA_PATH_SELECT + 4 * dev->slice_id;
+
+ switch (data_path) {
+ case ALL_FIELDS_DATA_SELECT:
+ val |= path_val;
+ break;
+ case VIP_CSC_SRC_DATA_SELECT:
+ insert_field(&val, path_val, VIP_CSC_SRC_SELECT_MASK,
+ VIP_CSC_SRC_SELECT_SHFT);
+ break;
+ case VIP_SC_SRC_DATA_SELECT:
+ insert_field(&val, path_val, VIP_SC_SRC_SELECT_MASK,
+ VIP_SC_SRC_SELECT_SHFT);
+ break;
+ case VIP_RGB_SRC_DATA_SELECT:
+ val |= (path_val) ? VIP_RGB_SRC_SELECT : 0;
+ break;
+ case VIP_RGB_OUT_LO_DATA_SELECT:
+ val |= (path_val) ? VIP_RGB_OUT_LO_SRC_SELECT : 0;
+ break;
+ case VIP_RGB_OUT_HI_DATA_SELECT:
+ val |= (path_val) ? VIP_RGB_OUT_HI_SRC_SELECT : 0;
+ break;
+ case VIP_CHR_DS_1_SRC_DATA_SELECT:
+ insert_field(&val, path_val, VIP_DS1_SRC_SELECT_MASK,
+ VIP_DS1_SRC_SELECT_SHFT);
+ break;
+ case VIP_CHR_DS_2_SRC_DATA_SELECT:
+ insert_field(&val, path_val, VIP_DS2_SRC_SELECT_MASK,
+ VIP_DS2_SRC_SELECT_SHFT);
+ break;
+ case VIP_MULTI_CHANNEL_DATA_SELECT:
+ val |= (path_val) ? VIP_MULTI_CHANNEL_SELECT : 0;
+ break;
+ case VIP_CHR_DS_1_DATA_BYPASS:
+ val |= (path_val) ? VIP_DS1_BYPASS : 0;
+ break;
+ case VIP_CHR_DS_2_DATA_BYPASS:
+ val |= (path_val) ? VIP_DS2_BYPASS : 0;
+ break;
+ default:
+ vip_err(dev, "%s: data_path 0x%x is not valid\n",
+ __func__, data_path);
+ return;
+ }
+ insert_field(&val, data_path, VIP_DATAPATH_SELECT_MASK,
+ VIP_DATAPATH_SELECT_SHFT);
+ reg_write(dev, data_path_reg, val);
+ vip_dbg(3, dev, "%s: DATA_PATH_SELECT(%08X): %08X\n", __func__,
+ data_path_reg, reg_read(dev, data_path_reg));
+}
+
+/*
+ * Return the vip_stream structure for a given struct file
+ */
+static inline struct vip_stream *file2stream(struct file *file)
+{
+ return video_drvdata(file);
+}
+
+/*
+ * Append a destination descriptor to the current descriptor list,
+ * setting up dma to the given srce.
+ */
+static int add_out_dtd(struct vip_stream *stream, int srce_type)
+{
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ struct vip_srce_info *sinfo = &srce_info[srce_type];
+ struct v4l2_rect *c_rect = &port->c_rect;
+ struct vip_fmt *fmt = port->fmt;
+ int channel, plane = 0;
+ int max_width, max_height;
+ dma_addr_t dma_addr;
+ u32 flags;
+ u32 width = stream->width;
+
+ channel = sinfo->base_channel;
+
+ switch (srce_type) {
+ case VIP_SRCE_MULT_PORT:
+ case VIP_SRCE_MULT_ANC:
+ if (port->port_id == VIP_PORTB)
+ channel += VIP_CHAN_MULT_PORTB_OFFSET;
+ channel += stream->stream_id;
+ flags = 0;
+ break;
+ case VIP_SRCE_CHROMA:
+ plane = 1;
+ fallthrough;
+ case VIP_SRCE_LUMA:
+ if (port->port_id == VIP_PORTB) {
+ if (port->scaler && !port->fmt->coplanar)
+ /*
+ * In this case Port A Chroma channel
+ * is used to carry Port B scaled YUV422
+ */
+ channel += 1;
+ else
+ channel += VIP_CHAN_YUV_PORTB_OFFSET;
+ }
+ flags = port->flags;
+ break;
+ case VIP_SRCE_RGB:
+ if (port->port_id == VIP_PORTB ||
+ (port->port_id == VIP_PORTA &&
+ port->csc == VIP_CSC_NA &&
+ v4l2_is_format_rgb(port->fmt->finfo)))
+ /*
+ * An RGB sensor only connects to the Y_LO
+ * channel, i.e. the port B channel.
+ */
+ channel += VIP_CHAN_RGB_PORTB_OFFSET;
+ flags = port->flags;
+ break;
+ default:
+ vip_err(stream, "%s: srce_type 0x%x is not valid\n",
+ __func__, srce_type);
+ return -1;
+ }
+
+ if (dev->slice_id == VIP_SLICE2)
+ channel += VIP_CHAN_VIP2_OFFSET;
+
+ /* This is just for initialization purposes.
+ * The actual dma_addr will be configured in vpdma_update_dma_addr
+ */
+ dma_addr = 0;
+
+ if (port->fmt->vpdma_fmt[0] == &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8]) {
+ /*
+ * Special case: we fake a 16-bit YUV422 format so that the VPDMA
+ * performs the needed byte swap. The pixel width has to be halved
+ * accordingly, otherwise the parser would try to collect more pixels
+ * than are available and the VPDMA transfer would overrun the
+ * allocated frame buffer.
+ */
+ width >>= 1;
+ vip_dbg(1, stream, "%s: 8 bit raw detected, adjusting width to %d\n",
+ __func__, width);
+ }
+
+ /*
+ * Use VPDMA_MAX_SIZE1 or VPDMA_MAX_SIZE2 register for slice0/1
+ */
+
+ if (dev->slice_id == VIP_SLICE1) {
+ vpdma_set_max_size(dev->shared->vpdma, VPDMA_MAX_SIZE1,
+ width, stream->height);
+
+ max_width = MAX_OUT_WIDTH_REG1;
+ max_height = MAX_OUT_HEIGHT_REG1;
+ } else {
+ vpdma_set_max_size(dev->shared->vpdma, VPDMA_MAX_SIZE2,
+ width, stream->height);
+
+ max_width = MAX_OUT_WIDTH_REG2;
+ max_height = MAX_OUT_HEIGHT_REG2;
+ }
+
+ /*
+ * Mark this channel to be cleared while cleaning up resources. This
+ * ensures an abort descriptor is submitted to the VPDMA for the
+ * channel, aborting any ongoing transaction and cleaning up the
+ * VPDMA FSM for it.
+ */
+ stream->vpdma_channels[channel] = 1;
+
+ vpdma_rawchan_add_out_dtd(&stream->desc_list, c_rect->width,
+ stream->bytesperline, c_rect,
+ fmt->vpdma_fmt[plane], dma_addr,
+ max_width, max_height, channel, flags);
+
+ return 0;
+}
+
+/*
+ * add_stream_dtds - add the output data transfer descriptors for this stream's source type
+ */
+static void add_stream_dtds(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ int srce_type;
+
+ if (port->flags & FLAG_MULT_PORT)
+ srce_type = VIP_SRCE_MULT_PORT;
+ else if (port->flags & FLAG_MULT_ANC)
+ srce_type = VIP_SRCE_MULT_ANC;
+ else if (v4l2_is_format_rgb(port->fmt->finfo))
+ srce_type = VIP_SRCE_RGB;
+ else
+ srce_type = VIP_SRCE_LUMA;
+
+ add_out_dtd(stream, srce_type);
+
+ if (srce_type == VIP_SRCE_LUMA && port->fmt->coplanar)
+ add_out_dtd(stream, VIP_SRCE_CHROMA);
+}
+
+#define PARSER_IRQ_MASK (VIP_PORTA_OUTPUT_FIFO_YUV | \
+ VIP_PORTB_OUTPUT_FIFO_YUV)
+
+static void enable_irqs(struct vip_dev *dev, int irq_num, int list_num)
+{
+ struct vip_parser_data *parser = dev->parser;
+ u32 reg_addr = VIP_INT0_ENABLE0_SET +
+ VIP_INTC_INTX_OFFSET * irq_num;
+ u32 irq_val = (1 << (list_num * 2)) |
+ (VIP_VIP1_PARSER_INT << (irq_num * 1));
+
+ /* Enable Parser Interrupt */
+ reg_write(parser, VIP_PARSER_FIQ_MASK, ~PARSER_IRQ_MASK);
+
+ reg_write(dev->shared, reg_addr, irq_val);
+
+ vpdma_enable_list_complete_irq(dev->shared->vpdma,
+ irq_num, list_num, true);
+}
+
+static void disable_irqs(struct vip_dev *dev, int irq_num, int list_num)
+{
+ struct vip_parser_data *parser = dev->parser;
+ u32 reg_addr = VIP_INT0_ENABLE0_CLR +
+ VIP_INTC_INTX_OFFSET * irq_num;
+ u32 irq_val = (1 << (list_num * 2)) |
+ (VIP_VIP1_PARSER_INT << (irq_num * 1));
+
+ /* Disable all Parser Interrupt */
+ reg_write(parser, VIP_PARSER_FIQ_MASK, 0xffffffff);
+
+ reg_write(dev->shared, reg_addr, irq_val);
+
+ vpdma_enable_list_complete_irq(dev->shared->vpdma,
+ irq_num, list_num, false);
+}
+
+static void clear_irqs(struct vip_dev *dev, int irq_num, int list_num)
+{
+ struct vip_parser_data *parser = dev->parser;
+ u32 reg_addr = VIP_INT0_STATUS0_CLR +
+ VIP_INTC_INTX_OFFSET * irq_num;
+ u32 irq_val = (1 << (list_num * 2)) |
+ (VIP_VIP1_PARSER_INT << (irq_num * 1));
+
+ /* Clear all Parser Interrupt */
+ reg_write(parser, VIP_PARSER_FIQ_CLR, 0xffffffff);
+ reg_write(parser, VIP_PARSER_FIQ_CLR, 0x0);
+
+ reg_write(dev->shared, reg_addr, irq_val);
+
+ vpdma_clear_list_stat(dev->shared->vpdma, irq_num, dev->slice_id);
+}
+
+static void populate_desc_list(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ unsigned int list_length;
+
+ stream->desc_next = stream->desc_list.buf.addr;
+ add_stream_dtds(stream);
+
+ list_length = stream->desc_next - stream->desc_list.buf.addr;
+ vpdma_map_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+}
+
+/*
+ * start_dma - adds descriptors to the dma list and submits them.
+ * Should be called after a new vb is queued and on a vpdma list
+ * completion interrupt.
+ */
+static void start_dma(struct vip_stream *stream, struct vip_buffer *buf)
+{
+ struct vip_dev *dev = stream->port->dev;
+ struct vpdma_data *vpdma = dev->shared->vpdma;
+ int list_num = stream->list_num;
+ dma_addr_t dma_addr;
+ int drop_data;
+
+ if (vpdma_list_busy(vpdma, list_num)) {
+ vip_err(stream, "vpdma list busy, cannot post\n");
+ return; /* nothing to do */
+ }
+
+ if (buf) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ drop_data = 0;
+ vip_dbg(4, stream, "%s: vb2 buf idx:%d, dma_addr:%pad\n",
+ __func__, buf->vb.vb2_buf.index, &dma_addr);
+ } else {
+ dma_addr = 0;
+ drop_data = 1;
+ vip_dbg(4, stream, "%s: dropped\n", __func__);
+ }
+
+ vpdma_update_dma_addr(dev->shared->vpdma, &stream->desc_list,
+ dma_addr, stream->write_desc, drop_data, 0);
+
+ if (stream->port->fmt->coplanar) {
+ dma_addr += stream->bytesperline * stream->height;
+ vpdma_update_dma_addr(dev->shared->vpdma, &stream->desc_list,
+ dma_addr, stream->write_desc + 1,
+ drop_data, 1);
+ }
+
+ vpdma_submit_descs(dev->shared->vpdma,
+ &stream->desc_list, stream->list_num);
+}
+
+static void vip_schedule_next_buffer(struct vip_stream *stream)
+{
+ struct vip_dev *dev = stream->port->dev;
+ struct vip_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ if (list_empty(&stream->vidq)) {
+ vip_dbg(4, stream, "Dropping frame\n");
+ if (list_empty(&stream->dropq)) {
+ vip_err(stream, "No dropq buffer left!");
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return;
+ }
+ buf = list_entry(stream->dropq.next,
+ struct vip_buffer, list);
+
+ buf->drop = true;
+ list_move_tail(&buf->list, &stream->post_bufs);
+ buf = NULL;
+ } else {
+ buf = list_entry(stream->vidq.next,
+ struct vip_buffer, list);
+ buf->drop = false;
+ list_move_tail(&buf->list, &stream->post_bufs);
+ vip_dbg(4, stream, "added next buffer\n");
+ }
+
+ spin_unlock_irqrestore(&dev->slock, flags);
+ start_dma(stream, buf);
+}
+
+static void vip_process_buffer_complete(struct vip_stream *stream)
+{
+ struct vip_dev *dev = stream->port->dev;
+ struct vip_buffer *buf;
+ struct vb2_v4l2_buffer *vb = NULL;
+ unsigned long flags, fld;
+
+ buf = list_first_entry(&stream->post_bufs, struct vip_buffer, list);
+
+ if (stream->port->flags & FLAG_INTERLACED) {
+ vpdma_unmap_desc_buf(dev->shared->vpdma,
+ &stream->desc_list.buf);
+
+ fld = dtd_get_field(stream->write_desc);
+ stream->field = fld ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+
+ vpdma_map_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+ }
+
+ if (buf) {
+ vip_dbg(4, stream, "vip buffer complete %p, drop: %d\n",
+ buf, buf->drop);
+
+ vb = &buf->vb;
+ vb->field = stream->field;
+ vb->sequence = stream->sequence;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+
+ if (buf->drop) {
+ spin_lock_irqsave(&dev->slock, flags);
+ list_move_tail(&buf->list, &stream->dropq);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ } else {
+ spin_lock_irqsave(&dev->slock, flags);
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&dev->slock, flags);
+ vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+ } else {
+ vip_err(stream, "%s: buf is null!!!\n", __func__);
+ return;
+ }
+
+ stream->sequence++;
+}
+
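+/*
+ * Stop the VPDMA list and requeue all posted buffers, so that streaming can
+ * be restarted cleanly after an overflow recovery.
+ */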
+static int vip_reset_vpdma(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ struct vip_buffer *buf;
+ unsigned long flags;
+
+ stop_dma(stream, false);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ /* requeue all active buffers in the opposite order */
+ while (!list_empty(&stream->post_bufs)) {
+ buf = list_last_entry(&stream->post_bufs,
+ struct vip_buffer, list);
+ list_del(&buf->list);
+ if (buf->drop) {
+ list_add_tail(&buf->list, &stream->dropq);
+ vip_dbg(4, stream, "requeueing drop buffer on dropq\n");
+ } else {
+ list_add(&buf->list, &stream->vidq);
+ vip_dbg(4, stream, "requeueing vb2 buf idx:%d on vidq\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ /* Make sure the desc_list is unmapped */
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+
+ return 0;
+}
+
+static void vip_overflow_recovery_work(struct work_struct *work)
+{
+ struct vip_stream *stream = container_of(work, struct vip_stream,
+ recovery_work);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+
+ vip_err(stream, "%s: Port %c\n", __func__,
+ port->port_id == VIP_PORTA ? 'A' : 'B');
+
+ disable_irqs(dev, dev->slice_id, stream->list_num);
+ clear_irqs(dev, dev->slice_id, stream->list_num);
+
+ /* 1. Set VIP_XTRA6_PORT_A[31:16] YUV_SRCNUM_STOP_IMMEDIATELY */
+ /* 2. Set VIP_XTRA6_PORT_A[15:0] ANC_SRCNUM_STOP_IMMEDIATELY */
+ vip_parser_stop_imm(port, 1);
+
+ /* 3. Clear VIP_PORT_A[8] ENABLE */
+ /*
+ * 4. Set VIP_PORT_A[7] CLR_ASYNC_FIFO_RD
+ * Set VIP_PORT_A[6] CLR_ASYNC_FIFO_WR
+ */
+ vip_enable_parser(port, false);
+
+ /* 5. Set VIP_PORT_A[23] SW_RESET */
+ vip_reset_parser(port, 1);
+
+ /*
+ * 6. Reset other VIP modules
+ * For each module used downstream of VIP_PARSER, write 1 to the
+ * bit location of the VIP_CLKC_RST register which is connected
+ * to VIP_PARSER
+ */
+ vip_module_reset(dev, VIP_DP_RST, true);
+
+ usleep_range(200, 250);
+
+ /*
+ * 7. Abort VPDMA channels
+ * Write to list attribute to stop list 0
+ * Write to list address register location of abort list
+ * Write to list attribute register list 0 and size of abort list
+ */
+ vip_reset_vpdma(stream);
+
+ /* 8. Clear VIP_PORT_A[23] SW_RESET */
+ vip_reset_parser(port, 0);
+
+ /*
+ * 9. Un-reset other VIP modules
+ * For each module used downstream of VIP_PARSER, write 0 to
+ * the bit location of the VIP_CLKC_RST register which is
+ * connected to VIP_PARSER
+ */
+ vip_module_reset(dev, VIP_DP_RST, false);
+
+ /* 10. (Delay) */
+ /* 11. SC coeff downloaded (if VIP_SCALER is being used) */
+ vip_setup_scaler(stream);
+
+ /* 12. (Delay) */
+ /* the delays above are not needed here yet */
+
+ populate_desc_list(stream);
+ stream->num_recovery++;
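+ /* Bound the number of recovery attempts to avoid looping forever. */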
+ if (stream->num_recovery < 5) {
+ /* Reload the vpdma */
+ vip_load_vpdma_list_fifo(stream);
+
+ enable_irqs(dev, dev->slice_id, stream->list_num);
+ vip_schedule_next_buffer(stream);
+
+ /* 13. Clear VIP_XTRA6_PORT_A[31:16] YUV_SRCNUM_STOP_IMM */
+ /* 14. Clear VIP_XTRA6_PORT_A[15:0] ANC_SRCNUM_STOP_IMM */
+
+ vip_parser_stop_imm(port, 0);
+
+ /* 15. Set VIP_PORT_A[8] ENABLE */
+ /*
+ * 16. Clear VIP_PORT_A[7] CLR_ASYNC_FIFO_RD
+ * Clear VIP_PORT_A[6] CLR_ASYNC_FIFO_WR
+ */
+ vip_enable_parser(port, true);
+ } else {
+ vip_err(stream, "%s: num_recovery limit exceeded leaving disabled\n",
+ __func__);
+ }
+}
+
+static void handle_parser_irqs(struct vip_dev *dev)
+{
+ struct vip_parser_data *parser = dev->parser;
+ struct vip_port *porta = dev->ports[VIP_PORTA];
+ struct vip_port *portb = dev->ports[VIP_PORTB];
+ struct vip_stream *stream = NULL;
+ u32 irq_stat = reg_read(parser, VIP_PARSER_FIQ_STATUS);
+ int i;
+
+ vip_dbg(3, dev, "%s: FIQ_STATUS: 0x%08x\n", __func__, irq_stat);
+
+ /* Clear all Parser Interrupts */
+ reg_write(parser, VIP_PARSER_FIQ_CLR, irq_stat);
+ reg_write(parser, VIP_PARSER_FIQ_CLR, 0x0);
+
+ if (irq_stat & VIP_PORTA_VDET)
+ vip_dbg(3, dev, "VIP_PORTA_VDET\n");
+ if (irq_stat & VIP_PORTB_VDET)
+ vip_dbg(3, dev, "VIP_PORTB_VDET\n");
+ if (irq_stat & VIP_PORTA_ASYNC_FIFO_OF)
+ vip_err(dev, "VIP_PORTA_ASYNC_FIFO_OF\n");
+ if (irq_stat & VIP_PORTB_ASYNC_FIFO_OF)
+ vip_err(dev, "VIP_PORTB_ASYNC_FIFO_OF\n");
+ if (irq_stat & VIP_PORTA_OUTPUT_FIFO_YUV)
+ vip_err(dev, "VIP_PORTA_OUTPUT_FIFO_YUV\n");
+ if (irq_stat & VIP_PORTA_OUTPUT_FIFO_ANC)
+ vip_err(dev, "VIP_PORTA_OUTPUT_FIFO_ANC\n");
+ if (irq_stat & VIP_PORTB_OUTPUT_FIFO_YUV)
+ vip_err(dev, "VIP_PORTB_OUTPUT_FIFO_YUV\n");
+ if (irq_stat & VIP_PORTB_OUTPUT_FIFO_ANC)
+ vip_err(dev, "VIP_PORTB_OUTPUT_FIFO_ANC\n");
+ if (irq_stat & VIP_PORTA_CONN)
+ vip_dbg(3, dev, "VIP_PORTA_CONN\n");
+ if (irq_stat & VIP_PORTA_DISCONN)
+ vip_dbg(3, dev, "VIP_PORTA_DISCONN\n");
+ if (irq_stat & VIP_PORTB_CONN)
+ vip_dbg(3, dev, "VIP_PORTB_CONN\n");
+ if (irq_stat & VIP_PORTB_DISCONN)
+ vip_dbg(3, dev, "VIP_PORTB_DISCONN\n");
+ if (irq_stat & VIP_PORTA_SRC0_SIZE)
+ vip_dbg(3, dev, "VIP_PORTA_SRC0_SIZE\n");
+ if (irq_stat & VIP_PORTB_SRC0_SIZE)
+ vip_dbg(3, dev, "VIP_PORTB_SRC0_SIZE\n");
+ if (irq_stat & VIP_PORTA_YUV_PROTO_VIOLATION)
+ vip_dbg(3, dev, "VIP_PORTA_YUV_PROTO_VIOLATION\n");
+ if (irq_stat & VIP_PORTA_ANC_PROTO_VIOLATION)
+ vip_dbg(3, dev, "VIP_PORTA_ANC_PROTO_VIOLATION\n");
+ if (irq_stat & VIP_PORTB_YUV_PROTO_VIOLATION)
+ vip_dbg(3, dev, "VIP_PORTB_YUV_PROTO_VIOLATION\n");
+ if (irq_stat & VIP_PORTB_ANC_PROTO_VIOLATION)
+ vip_dbg(3, dev, "VIP_PORTB_ANC_PROTO_VIOLATION\n");
+ if (irq_stat & VIP_PORTA_CFG_DISABLE_COMPLETE)
+ vip_dbg(3, dev, "VIP_PORTA_CFG_DISABLE_COMPLETE\n");
+ if (irq_stat & VIP_PORTB_CFG_DISABLE_COMPLETE)
+ vip_dbg(3, dev, "VIP_PORTB_CFG_DISABLE_COMPLETE\n");
+
+ if (irq_stat & (VIP_PORTA_ASYNC_FIFO_OF |
+ VIP_PORTA_OUTPUT_FIFO_YUV |
+ VIP_PORTA_OUTPUT_FIFO_ANC)) {
+ for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++) {
+ if (porta->cap_streams[i] &&
+ porta->cap_streams[i]->port->port_id ==
+ porta->port_id) {
+ stream = porta->cap_streams[i];
+ break;
+ }
+ }
+ if (stream) {
+ disable_irqs(dev, dev->slice_id,
+ stream->list_num);
+ schedule_work(&stream->recovery_work);
+ return;
+ }
+ }
+ if (irq_stat & (VIP_PORTB_ASYNC_FIFO_OF |
+ VIP_PORTB_OUTPUT_FIFO_YUV |
+ VIP_PORTB_OUTPUT_FIFO_ANC)) {
+ for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++) {
+ if (portb->cap_streams[i] &&
+ portb->cap_streams[i]->port->port_id ==
+ portb->port_id) {
+ stream = portb->cap_streams[i];
+ break;
+ }
+ }
+ if (stream) {
+ disable_irqs(dev, dev->slice_id,
+ stream->list_num);
+ schedule_work(&stream->recovery_work);
+ return;
+ }
+ }
+}
+
+static irqreturn_t vip_irq(int irq_vip, void *data)
+{
+ struct vip_dev *dev = (struct vip_dev *)data;
+ struct vpdma_data *vpdma;
+ struct vip_stream *stream;
+ int list_num;
+ int irq_num = dev->slice_id;
+ u32 irqst, irqst_saved, reg_addr;
+
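+ /* Nothing to do if the shared resources are not set up yet. */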
+ if (!dev->shared)
+ return IRQ_HANDLED;
+
+ vpdma = dev->shared->vpdma;
+ reg_addr = VIP_INT0_STATUS0 +
+ VIP_INTC_INTX_OFFSET * irq_num;
+ irqst_saved = reg_read(dev->shared, reg_addr);
+ irqst = irqst_saved;
+
+ vip_dbg(8, dev, "IRQ %d VIP_INT%d_STATUS0 0x%x\n",
+ irq_vip, irq_num, irqst);
+ if (irqst) {
+ if (irqst & (VIP_VIP1_PARSER_INT << irq_num)) {
+ irqst &= ~(VIP_VIP1_PARSER_INT << irq_num);
+ handle_parser_irqs(dev);
+ }
+
+ for (list_num = 0; irqst && (list_num < 8); list_num++) {
+ /* Check for LIST_COMPLETE IRQ */
+ if (!(irqst & (1 << (list_num * 2))))
+ continue;
+
+ vip_dbg(8, dev, "IRQ %d: handling LIST%d_COMPLETE\n",
+ irq_num, list_num);
+
+ stream = vpdma_hwlist_get_priv(vpdma, list_num);
+ if (!stream || stream->list_num != list_num) {
+ vip_err(dev, "IRQ occurred for unused list");
+ continue;
+ }
+
+ vpdma_clear_list_stat(vpdma, irq_num, list_num);
+
+ vip_process_buffer_complete(stream);
+
+ vip_schedule_next_buffer(stream);
+
+ irqst &= ~(1 << (list_num * 2));
+ }
+ }
+
+ /* Acknowledge that we are done with all interrupts */
+ reg_write(dev->shared, VIP_INTC_E0I, 1 << irq_num);
+
+ /* Clear handled events from status register */
+ reg_addr = VIP_INT0_STATUS0_CLR +
+ VIP_INTC_INTX_OFFSET * irq_num;
+ reg_write(dev->shared, reg_addr, irqst_saved);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int vip_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
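+ /* Each VIP instance exposes two slices (VINs), numbered from 1. */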
+ u32 vin_id = 1 + ((dev->instance_id - 1) * 2) + dev->slice_id;
+
+ strscpy(cap->driver, VIP_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VIP_MODULE_NAME, sizeof(cap->card));
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:vip%1d:vin%1d%c:stream%1d", dev->instance_id, vin_id,
+ port->port_id == VIP_PORTA ? 'a' : 'b', stream->stream_id);
+ return 0;
+}
+
+static int vip_enuminput(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vip_stream *stream = file2stream(file);
+
+ if (inp->index)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = stream->vfd->tvnorms;
+ snprintf(inp->name, sizeof(inp->name), "camera %u",
+ stream->vfd->num);
+
+ return 0;
+}
+
+static int vip_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vip_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int vip_querystd(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+
+ *std = stream->vfd->tvnorms;
+ v4l2_subdev_call(port->subdev, video, querystd, std);
+ vip_dbg(1, stream, "querystd: 0x%lx\n", (unsigned long)*std);
+ return 0;
+}
+
+static int vip_g_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+
+ *std = stream->vfd->tvnorms;
+ v4l2_subdev_call(port->subdev, video, g_std, std);
+ vip_dbg(1, stream, "g_std: 0x%lx\n", (unsigned long)*std);
+
+ return 0;
+}
+
+static int vip_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+
+ vip_dbg(1, stream, "s_std: 0x%lx\n", (unsigned long)std);
+
+ if (!(std & stream->vfd->tvnorms)) {
+ vip_dbg(1, stream, "s_std after check: 0x%lx\n",
+ (unsigned long)std);
+ return -EINVAL;
+ }
+
+ v4l2_subdev_call(port->subdev, video, s_std, std);
+ return 0;
+}
+
+static int vip_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct vip_fmt *fmt;
+
+ vip_dbg(3, stream, "enum_fmt index:%d\n", f->index);
+ if (f->index >= port->num_active_fmt)
+ return -EINVAL;
+
+ fmt = port->active_fmt[f->index];
+
+ f->pixelformat = fmt->fourcc;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vip_dbg(3, stream, "enum_fmt fourcc:%s\n",
+ fourcc_to_str(f->pixelformat));
+
+ return 0;
+}
+
+static int vip_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct vip_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret;
+
+ fmt = find_port_format_by_pix(port, f->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fse.index = f->index;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(port->subdev, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return -EINVAL;
+
+ vip_dbg(1, stream, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ f->discrete.width = fse.max_width;
+ f->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int vip_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct vip_fmt *fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = f->index,
+ .width = f->width,
+ .height = f->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmt = find_port_format_by_pix(port, f->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fie.code = fmt->code;
+ ret = v4l2_subdev_call(port->subdev, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
+ f->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ f->discrete = fie.interval;
+
+ return 0;
+}
+
+static int vip_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.numerator = 1;
+ parm->parm.capture.timeperframe.denominator = 30;
+ parm->parm.capture.readbuffers = 4;
+ return 0;
+}
+
+static int vip_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
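+ /* The frame rate is fixed at 30 fps for now; requested values are ignored. */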
+ parm->parm.capture.timeperframe.numerator = 1;
+ parm->parm.capture.timeperframe.denominator = 30;
+ parm->parm.capture.readbuffers = 4;
+
+ return 0;
+}
+
+static int vip_calc_format_size(struct vip_port *port,
+ struct vip_fmt *fmt,
+ struct v4l2_format *f)
+{
+ enum v4l2_field *field;
+ unsigned int stride;
+
+ if (!fmt) {
+ vip_dbg(2, port,
+ "no vip_fmt format provided!\n");
+ return -EINVAL;
+ }
+
+ field = &f->fmt.pix.field;
+ if (*field == V4L2_FIELD_ANY)
+ *field = V4L2_FIELD_NONE;
+ else if (*field != V4L2_FIELD_NONE && *field != V4L2_FIELD_ALTERNATE)
+ return -EINVAL;
+
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+
+ stride = f->fmt.pix.width * (fmt->vpdma_fmt[0]->depth >> 3);
+ if (stride > f->fmt.pix.bytesperline)
+ f->fmt.pix.bytesperline = stride;
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ VPDMA_STRIDE_ALIGN);
+
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
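+ /*
+ * vpdma_fmt depth is in bits per pixel and can be sub-byte for
+ * the chroma plane (e.g. 4 bpp for YUV420 chroma), so the
+ * bits-to-bytes shift below is applied to the full product.
+ */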
+ if (fmt->coplanar) {
+ f->fmt.pix.sizeimage += (f->fmt.pix.height *
+ f->fmt.pix.bytesperline *
+ fmt->vpdma_fmt[VIP_CHROMA]->depth) >> 3;
+ }
+
+ f->fmt.pix.colorspace = fmt->colorspace;
+ f->fmt.pix.priv = 0;
+
+ vip_dbg(3, port, "calc_format_size: fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ return 0;
+}
+
+static inline bool vip_is_size_dma_aligned(u32 bpp, u32 width)
+{
+ return IS_ALIGNED(width * bpp, VPDMA_STRIDE_ALIGN);
+}
+
+static int vip_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct v4l2_subdev_frame_size_enum fse;
+ struct vip_fmt *fmt;
+ u32 best_width, best_height, largest_width, largest_height;
+ int ret, found;
+ enum vip_csc_state csc_direction;
+
+ vip_dbg(3, stream, "try_fmt fourcc:%s size: %dx%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height);
+
+ fmt = find_port_format_by_pix(port, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ vip_dbg(2, stream,
+ "Fourcc format (0x%08x) not found.\n",
+ f->fmt.pix.pixelformat);
+
+ /* Just get the first one enumerated */
+ fmt = port->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ csc_direction = vip_csc_direction(fmt->code, fmt->finfo);
+ if (csc_direction != VIP_CSC_NA) {
+ if (!is_csc_available(port)) {
+ vip_dbg(2, stream,
+ "CSC not available for Fourcc format (0x%08x).\n",
+ f->fmt.pix.pixelformat);
+
+ /* Just get the first one enumerated */
+ fmt = port->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ /* re-evaluate the csc_direction here */
+ csc_direction = vip_csc_direction(fmt->code,
+ fmt->finfo);
+ } else {
+ vip_dbg(3, stream, "CSC active on Port %c: going %s\n",
+ port->port_id == VIP_PORTA ? 'A' : 'B',
+ (csc_direction == VIP_CSC_Y2R) ? "Y2R" : "R2Y");
+ }
+ }
+
+ /*
+ * Given that sensors might support multiple mbus code we need
+ * to use the one that matches the requested pixel format
+ */
+ port->try_mbus_framefmt = port->mbus_framefmt;
+ port->try_mbus_framefmt.code = fmt->code;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ best_width = 0;
+ best_height = 0;
+ largest_width = 0;
+ largest_height = 0;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ u32 bpp = fmt->vpdma_fmt[0]->depth >> 3;
+
+ ret = v4l2_subdev_call(port->subdev, pad,
+ enum_frame_size, NULL, &fse);
+ if (ret)
+ break;
+
+ vip_dbg(3, stream, "try_fmt loop:%d fourcc:%s size: %dx%d\n",
+ fse.index, fourcc_to_str(f->fmt.pix.pixelformat),
+ fse.max_width, fse.max_height);
+
+ if (!vip_is_size_dma_aligned(bpp, fse.max_width))
+ continue;
+
+ if (fse.max_width >= largest_width &&
+ fse.max_height >= largest_height) {
+ vip_dbg(3, stream, "try_fmt loop:%d found new larger: %dx%d\n",
+ fse.index, fse.max_width, fse.max_height);
+ largest_width = fse.max_width;
+ largest_height = fse.max_height;
+ }
+
+ if (fse.max_width >= f->fmt.pix.width &&
+ fse.max_height >= f->fmt.pix.height) {
+ vip_dbg(3, stream, "try_fmt loop:%d found at least larger: %dx%d\n",
+ fse.index, fse.max_width, fse.max_height);
+
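+ /*
+ * Both the candidate and the current best are at least
+ * as large as the requested size here, so the smaller
+ * of the two is the closer match.
+ */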
+ if (!best_width ||
+ (fse.max_width <= best_width &&
+ fse.max_height <= best_height)) {
+ best_width = fse.max_width;
+ best_height = fse.max_height;
+ vip_dbg(3, stream, "try_fmt loop:%d found new best: %dx%d\n",
+ fse.index, fse.max_width,
+ fse.max_height);
+ }
+ }
+
+ if (f->fmt.pix.width == fse.max_width &&
+ f->fmt.pix.height == fse.max_height) {
+ found = true;
+ vip_dbg(3, stream, "try_fmt loop:%d found direct match: %dx%d\n",
+ fse.index, fse.max_width,
+ fse.max_height);
+ break;
+ }
+
+ if (f->fmt.pix.width >= fse.min_width &&
+ f->fmt.pix.width <= fse.max_width &&
+ f->fmt.pix.height >= fse.min_height &&
+ f->fmt.pix.height <= fse.max_height) {
+ found = true;
+ vip_dbg(3, stream, "try_fmt loop:%d found direct range match: %dx%d\n",
+ fse.index, fse.max_width,
+ fse.max_height);
+ break;
+ }
+ }
+
+ if (found) {
+ port->try_mbus_framefmt.width = f->fmt.pix.width;
+ port->try_mbus_framefmt.height = f->fmt.pix.height;
+ /* No need to check for scaling */
+ goto calc_size;
+ } else if (f->fmt.pix.width > largest_width) {
+ port->try_mbus_framefmt.width = largest_width;
+ port->try_mbus_framefmt.height = largest_height;
+ } else if (best_width) {
+ port->try_mbus_framefmt.width = best_width;
+ port->try_mbus_framefmt.height = best_height;
+ } else {
+ /* use existing values as default */
+ }
+
+ vip_dbg(3, stream, "try_fmt best subdev size: %dx%d\n",
+ port->try_mbus_framefmt.width,
+ port->try_mbus_framefmt.height);
+
+ if (is_scaler_available(port) &&
+ csc_direction != VIP_CSC_Y2R &&
+ !vip_is_mbuscode_raw(fmt->code) &&
+ f->fmt.pix.height <= port->try_mbus_framefmt.height &&
+ port->try_mbus_framefmt.height <= SC_MAX_PIXEL_HEIGHT &&
+ port->try_mbus_framefmt.width <= SC_MAX_PIXEL_WIDTH) {
+ /*
+ * The scaler is only usable when the destination format
+ * is YUV, since the scaler input must itself be YUV.
+ *
+ * Scaling up is allowed only horizontally.
+ */
+ unsigned int hratio, vratio, width_align, height_align;
+ u32 bpp = fmt->vpdma_fmt[0]->depth >> 3;
+
+ vip_dbg(3, stream, "Scaler active on Port %c: requesting %dx%d\n",
+ port->port_id == VIP_PORTA ? 'A' : 'B',
+ f->fmt.pix.width, f->fmt.pix.height);
+
+ /* Just make sure everything is properly aligned */
+ width_align = ALIGN(f->fmt.pix.width * bpp, VPDMA_STRIDE_ALIGN);
+ width_align /= bpp;
+ height_align = ALIGN(f->fmt.pix.height, 2);
+
+ f->fmt.pix.width = width_align;
+ f->fmt.pix.height = height_align;
+
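+ /*
+ * Scaling ratios below are in 1/1000 units: 125 corresponds
+ * to the 1/8 minimum horizontal downscale, and below 188 the
+ * output is clamped to a 1/4 vertical downscale, which appear
+ * to be the limits the scaler supports.
+ */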
+ hratio = f->fmt.pix.width * 1000 /
+ port->try_mbus_framefmt.width;
+ vratio = f->fmt.pix.height * 1000 /
+ port->try_mbus_framefmt.height;
+ if (hratio < 125) {
+ f->fmt.pix.width = port->try_mbus_framefmt.width / 8;
+ vip_dbg(3, stream, "Horizontal scaling ratio out of range adjusting -> %d\n",
+ f->fmt.pix.width);
+ }
+
+ if (vratio < 188) {
+ f->fmt.pix.height = port->try_mbus_framefmt.height / 4;
+ vip_dbg(3, stream, "Vertical scaling ratio out of range adjusting -> %d\n",
+ f->fmt.pix.height);
+ }
+ vip_dbg(3, stream, "Scaler: got %dx%d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
+ } else {
+ /* use existing values as default */
+ f->fmt.pix.width = port->try_mbus_framefmt.width;
+ f->fmt.pix.height = port->try_mbus_framefmt.height;
+ }
+
+calc_size:
+ /* Now that we have a fmt, calculate the sizeimage and bytesperline */
+ return vip_calc_format_size(port, fmt, f);
+}
+
+static int vip_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct vip_fmt *fmt = port->fmt;
+
+ /* Use last known values or defaults */
+ f->fmt.pix.width = stream->width;
+ f->fmt.pix.height = stream->height;
+ f->fmt.pix.pixelformat = port->fmt->fourcc;
+ f->fmt.pix.field = stream->sup_field;
+ f->fmt.pix.colorspace = port->fmt->colorspace;
+ f->fmt.pix.bytesperline = stream->bytesperline;
+ f->fmt.pix.sizeimage = stream->sizeimage;
+
+ vip_dbg(3, stream,
+ "g_fmt fourcc:%s code: %04x size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ fmt->code,
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+ vip_dbg(3, stream, "g_fmt vpdma data type: 0x%02X\n",
+ port->fmt->vpdma_fmt[0]->data_type);
+
+ return 0;
+}
+
+static int vip_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct vip_port *port = stream->port;
+ struct v4l2_subdev_format sfmt;
+ struct v4l2_mbus_framefmt *mf;
+ enum vip_csc_state csc_direction;
+ int ret;
+
+ vip_dbg(3, stream, "s_fmt input fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ ret = vip_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vip_dbg(3, stream, "s_fmt try_fmt fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ if (vb2_is_busy(&stream->vb_vidq)) {
+ vip_err(stream, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Check if we need the scaler or not.
+ *
+ * The scaler might have been allocated on a previous S_FMT
+ * call; if it is not needed for this instance, attempt to
+ * free it just in case.
+ *
+ * free_scaler() is harmless unless the current port
+ * allocated it.
+ */
+ if (f->fmt.pix.width == port->try_mbus_framefmt.width &&
+ f->fmt.pix.height == port->try_mbus_framefmt.height)
+ free_scaler(port);
+ else
+ allocate_scaler(port);
+
+ port->fmt = find_port_format_by_pix(port,
+ f->fmt.pix.pixelformat);
+ stream->width = f->fmt.pix.width;
+ stream->height = f->fmt.pix.height;
+ stream->bytesperline = f->fmt.pix.bytesperline;
+ stream->sizeimage = f->fmt.pix.sizeimage;
+ stream->sup_field = f->fmt.pix.field;
+
+ port->c_rect.left = 0;
+ port->c_rect.top = 0;
+ port->c_rect.width = stream->width;
+ port->c_rect.height = stream->height;
+
+ /*
+ * Check if we need the csc unit or not.
+ *
+ * The csc might have been allocated on a previous S_FMT
+ * call; if it is not needed for this instance, attempt to
+ * free it just in case.
+ *
+ * free_csc() is harmless unless the current port
+ * allocated it.
+ */
+ csc_direction = vip_csc_direction(port->fmt->code, port->fmt->finfo);
+ if (csc_direction == VIP_CSC_NA)
+ free_csc(port);
+ else
+ allocate_csc(port, csc_direction);
+
+ if (stream->sup_field == V4L2_FIELD_ALTERNATE)
+ port->flags |= FLAG_INTERLACED;
+ else
+ port->flags &= ~FLAG_INTERLACED;
+
+ vip_dbg(3, stream, "s_fmt fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ mf = &sfmt.format;
+ v4l2_fill_mbus_format(mf, &f->fmt.pix, port->fmt->code);
+ /* Make sure to use the subdev size found in the try_fmt */
+ mf->width = port->try_mbus_framefmt.width;
+ mf->height = port->try_mbus_framefmt.height;
+
+ vip_dbg(3, stream, "s_fmt pix_to_mbus mbus_code: %04X size: %dx%d\n",
+ mf->code,
+ mf->width, mf->height);
+
+ sfmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sfmt.pad = 0;
+ ret = v4l2_subdev_call(port->subdev, pad, set_fmt, NULL, &sfmt);
+ if (ret) {
+ vip_dbg(1, stream, "set_fmt failed in subdev\n");
+ return ret;
+ }
+
+ /* Save it */
+ port->mbus_framefmt = *mf;
+
+ vip_dbg(3, stream, "s_fmt subdev fmt mbus_code: %04X size: %dx%d\n",
+ port->mbus_framefmt.code,
+ port->mbus_framefmt.width, port->mbus_framefmt.height);
+ vip_dbg(3, stream, "s_fmt vpdma data type: 0x%02X\n",
+ port->fmt->vpdma_fmt[0]->data_type);
+
+ return 0;
+}
+
+/*
+ * Does the exact opposite of set_fmt_params:
+ * it makes sure the DataPath register is sane after teardown.
+ */
+static void unset_fmt_params(struct vip_stream *stream)
+{
+ struct vip_dev *dev = stream->port->dev;
+ struct vip_port *port = stream->port;
+
+ stream->sequence = 0;
+ stream->field = V4L2_FIELD_TOP;
+
+ if (port->csc == VIP_CSC_Y2R) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_SRC_DATA_SELECT, 0);
+ } else {
+ vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+ /* We are done */
+ return;
+ } else if (port->csc == VIP_CSC_R2Y) {
+ if (port->scaler && port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ }
+ } else if (port->scaler) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ }
+ } else if (port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ }
+ } else {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ }
+ }
+ /* We are done */
+ return;
+ } else if (v4l2_is_format_rgb(port->fmt->finfo)) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+ /* We are done */
+ return;
+ }
+
+ if (port->scaler && port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ }
+ } else if (port->scaler) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ }
+ } else if (port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+ } else {
+ /*
+ * Undo all data path settings, except in the multi-stream
+ * case: we cannot disrupt an on-going capture, since when
+ * only one stream is terminated the other might still be
+ * running.
+ */
+ vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+}
+
+/*
+ * Set the registers that are modified when the video format changes.
+ */
+static void set_fmt_params(struct vip_stream *stream)
+{
+ struct vip_dev *dev = stream->port->dev;
+ struct vip_port *port = stream->port;
+
+ stream->sequence = 0;
+ stream->field = V4L2_FIELD_TOP;
+
+ if (port->csc == VIP_CSC_Y2R) {
+ port->flags &= ~FLAG_MULT_PORT;
+ /* Set alpha component in background color */
+ vpdma_set_bg_color(dev->shared->vpdma,
+ (struct vpdma_data_format *)
+ port->fmt->vpdma_fmt[0],
+ 0xff);
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: YUV422
+ * Output: Y_UP/UV_UP: RGB
+ * CSC_SRC_SELECT = 1
+ * RGB_OUT_HI_SELECT = 1
+ * RGB_SRC_SELECT = 1
+ * MULTI_CHANNEL_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_RGB_SRC_DATA_SELECT, 1);
+ } else {
+ /*
+ * Input B: YUV422
+ * Output: Y_UP/UV_UP: RGB
+ * CSC_SRC_SELECT = 2
+ * RGB_OUT_LO_SELECT = 1
+ * MULTI_CHANNEL_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 2);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 1);
+ }
+ /* We are done */
+ return;
+ } else if (port->csc == VIP_CSC_R2Y) {
+ port->flags &= ~FLAG_MULT_PORT;
+ if (port->scaler && port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: RGB
+ * Output: Y_UP/UV_UP: Scaled YUV420
+ * CSC_SRC_SELECT = 4
+ * SC_SRC_SELECT = 1
+ * CHR_DS_1_SRC_SELECT = 1
+ * CHR_DS_1_BYPASS = 0
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 4);
+ vip_set_slice_path(dev,
+ VIP_SC_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 1);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ } else {
+ vip_err(stream, "RGB sensor can only be on Port A\n");
+ }
+ } else if (port->scaler) {
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: RGB
+ * Output: Y_UP: Scaled YUV422
+ * CSC_SRC_SELECT = 4
+ * SC_SRC_SELECT = 1
+ * CHR_DS_1_SRC_SELECT = 1
+ * CHR_DS_1_BYPASS = 1
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 4);
+ vip_set_slice_path(dev,
+ VIP_SC_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 1);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 1);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ } else {
+ vip_err(stream, "RGB sensor can only be on Port A\n");
+ }
+ } else if (port->fmt->coplanar) {
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: RGB
+ * Output: Y_UP/UV_UP: YUV420
+ * CSC_SRC_SELECT = 4
+ * CHR_DS_1_SRC_SELECT = 2
+ * CHR_DS_1_BYPASS = 0
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 4);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 2);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ } else {
+ vip_err(stream, "RGB sensor can only be on Port A\n");
+ }
+ } else {
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: RGB
+ * Output: Y_UP/UV_UP: YUV420
+ * CSC_SRC_SELECT = 4
+ * CHR_DS_1_SRC_SELECT = 2
+ * CHR_DS_1_BYPASS = 1
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CSC_SRC_DATA_SELECT, 4);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ 2);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_DATA_BYPASS, 1);
+ vip_set_slice_path(dev,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ 0);
+ } else {
+ vip_err(stream, "RGB sensor can only be on Port A\n");
+ }
+ }
+ /* We are done */
+ return;
+ } else if (v4l2_is_format_rgb(port->fmt->finfo)) {
+ port->flags &= ~FLAG_MULT_PORT;
+ /* Set alpha component in background color */
+ vpdma_set_bg_color(dev->shared->vpdma,
+ (struct vpdma_data_format *)
+ port->fmt->vpdma_fmt[0],
+ 0xff);
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: RGB
+ * Output: Y_LO/UV_LO: RGB
+ * RGB_OUT_LO_SELECT = 1
+ * MULTI_CHANNEL_SELECT = 1
+ */
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 1);
+ } else {
+ vip_err(stream, "RGB sensor can only be on Port A\n");
+ }
+ /* We are done */
+ return;
+ }
+
+ if (port->scaler && port->fmt->coplanar) {
+ port->flags &= ~FLAG_MULT_PORT;
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: YUV422
+ * Output: Y_UP/UV_UP: Scaled YUV420
+ * SC_SRC_SELECT = 2
+ * CHR_DS_1_SRC_SELECT = 1
+ * CHR_DS_1_BYPASS = 0
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 2);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ /*
+ * Input B: YUV422
+ * Output: Y_LO/UV_LO: Scaled YUV420
+ * SC_SRC_SELECT = 3
+ * CHR_DS_2_SRC_SELECT = 1
+ * RGB_OUT_LO_SELECT = 0
+ * MULTI_CHANNEL_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 3);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ }
+ } else if (port->scaler) {
+ port->flags &= ~FLAG_MULT_PORT;
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: YUV422
+ * Output: Y_UP: Scaled YUV422
+ * SC_SRC_SELECT = 2
+ * CHR_DS_1_SRC_SELECT = 1
+ * CHR_DS_1_BYPASS = 1
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 2);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ /*
+ * Input B: YUV422
+ * Output: UV_UP: Scaled YUV422
+ * SC_SRC_SELECT = 3
+ * CHR_DS_2_SRC_SELECT = 1
+ * CHR_DS_1_BYPASS = 1
+ * CHR_DS_2_BYPASS = 1
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 3);
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
+ vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 1);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ }
+ } else if (port->fmt->coplanar) {
+ port->flags &= ~FLAG_MULT_PORT;
+ if (port->port_id == VIP_PORTA) {
+ /*
+ * Input A: YUV422
+ * Output: Y_UP/UV_UP: YUV420
+ * CHR_DS_1_SRC_SELECT = 3
+ * CHR_DS_1_BYPASS = 0
+ * RGB_OUT_HI_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_1_SRC_DATA_SELECT, 3);
+ vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
+ } else {
+ /*
+ * Input B: YUV422
+ * Output: Y_LO/UV_LO: YUV420
+ * CHR_DS_2_SRC_SELECT = 4
+ * CHR_DS_2_BYPASS = 0
+ * RGB_OUT_LO_SELECT = 0
+ * MULTI_CHANNEL_SELECT = 0
+ */
+ vip_set_slice_path(dev,
+ VIP_CHR_DS_2_SRC_DATA_SELECT, 4);
+ vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
+ vip_set_slice_path(dev,
+ VIP_MULTI_CHANNEL_DATA_SELECT, 0);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+ } else {
+ port->flags |= FLAG_MULT_PORT;
+ /*
+ * Input A/B: YUV422
+ * Output: Y_LO: YUV422 - UV_LO: YUV422
+ * MULTI_CHANNEL_SELECT = 1
+ * RGB_OUT_LO_SELECT = 0
+ */
+ vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
+ vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
+ }
+}
+
+static int vip_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct vip_stream *stream = file2stream(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = stream->width;
+ s->r.height = stream->height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ s->r = stream->port->c_rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static bool enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return false;
+ if (a->left + a->width > b->left + b->width)
+ return false;
+ if (a->top + a->height > b->top + b->height)
+ return false;
+
+ return true;
+}
+
+static int vip_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct vip_stream *stream = file2stream(file);
+ struct v4l2_rect r = s->r;
+
+ v4l_bound_align_image(&r.width, 0, stream->width, 0,
+ &r.height, 0, stream->height, 0, 0);
+
+ r.left = clamp_t(unsigned int, r.left, 0, stream->width - r.width);
+ r.top = clamp_t(unsigned int, r.top, 0, stream->height - r.height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
+ return -ERANGE;
+
+ s->r = r;
+ stream->port->c_rect = r;
+
+ vip_dbg(1, stream, "cropped (%d,%d)/%dx%d of %dx%d\n",
+ r.left, r.top, r.width, r.height,
+ stream->width, stream->height);
+
+ return 0;
+}
+
+static long vip_ioctl_default(struct file *file, void *fh, bool valid_prio,
+ unsigned int cmd, void *arg)
+{
+ struct vip_stream *stream = file2stream(file);
+
+ if (!valid_prio) {
+ vip_err(stream, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ switch (cmd) {
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct v4l2_ioctl_ops vip_ioctl_ops = {
+ .vidioc_querycap = vip_querycap,
+ .vidioc_enum_input = vip_enuminput,
+ .vidioc_g_input = vip_g_input,
+ .vidioc_s_input = vip_s_input,
+
+ .vidioc_querystd = vip_querystd,
+ .vidioc_g_std = vip_g_std,
+ .vidioc_s_std = vip_s_std,
+
+ .vidioc_enum_fmt_vid_cap = vip_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vip_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vip_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vip_s_fmt_vid_cap,
+
+ .vidioc_enum_frameintervals = vip_enum_frameintervals,
+ .vidioc_enum_framesizes = vip_enum_framesizes,
+ .vidioc_s_parm = vip_s_parm,
+ .vidioc_g_parm = vip_g_parm,
+ .vidioc_g_selection = vip_g_selection,
+ .vidioc_s_selection = vip_s_selection,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_default = vip_ioctl_default,
+};
+
+/*
+ * Videobuf operations
+ */
+static int vip_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct vip_stream *stream = vb2_get_drv_priv(vq);
+ unsigned int size = stream->sizeimage;
+
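+ /*
+ * Require at least 3 buffers in total; this should keep the
+ * VPDMA list FIFO primed while a completed buffer is being
+ * returned to userspace.
+ */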
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ vip_dbg(1, stream, "get %d buffer(s) of size %d each.\n",
+ *nbuffers, sizes[0]);
+
+ return 0;
+}
+
+static int vip_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vip_stream *stream = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb2_plane_size(vb, 0) < stream->sizeimage) {
+ vip_dbg(1, stream,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (unsigned long)stream->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, stream->sizeimage);
+
+ return 0;
+}
+
+static void vip_buf_queue(struct vb2_buffer *vb)
+{
+ struct vip_stream *stream = vb2_get_drv_priv(vb->vb2_queue);
+ struct vip_dev *dev = stream->port->dev;
+ struct vip_buffer *buf = container_of(vb, struct vip_buffer,
+ vb.vb2_buf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &stream->vidq);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static int vip_setup_scaler(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ struct sc_data *sc = dev->sc;
+ struct csc_data *csc = dev->csc;
+ struct vpdma_data *vpdma = dev->shared->vpdma;
+ struct vip_mmr_adb *mmr_adb = port->mmr_adb.addr;
+ int list_num = stream->list_num;
+ int timeout = 500;
+ struct v4l2_format dst_f;
+ struct v4l2_format src_f;
+
+ memset(&src_f, 0, sizeof(src_f));
+ src_f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ v4l2_fill_pix_format(&src_f.fmt.pix, &port->mbus_framefmt);
+ src_f.fmt.pix.pixelformat = vip_mbus_code_to_fourcc(port->fmt->code);
+
+ dst_f = src_f;
+ dst_f.fmt.pix.pixelformat = port->fmt->fourcc;
+ dst_f.fmt.pix.width = stream->width;
+ dst_f.fmt.pix.height = stream->height;
+
+ /* if scaler not associated with this port then skip */
+ if (port->scaler) {
+ sc_set_hs_coeffs(sc, port->sc_coeff_h.addr,
+ port->mbus_framefmt.width,
+ port->c_rect.width);
+ sc_set_vs_coeffs(sc, port->sc_coeff_v.addr,
+ port->mbus_framefmt.height,
+ port->c_rect.height);
+ sc_config_scaler(sc, &mmr_adb->sc_regs0[0],
+ &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
+ port->mbus_framefmt.width,
+ port->mbus_framefmt.height,
+ port->c_rect.width,
+ port->c_rect.height);
+ port->load_mmrs = true;
+ }
+
+ /* if csc not associated with this port then skip */
+ if (port->csc) {
+ csc_set_coeff(csc, &mmr_adb->csc_regs[0],
+ &src_f, &dst_f);
+
+ port->load_mmrs = true;
+ }
+
+ /* If the coefficients are already loaded then skip */
+ if (!sc->load_coeff_v && !sc->load_coeff_h && !port->load_mmrs)
+ return 0;
+
+ if (vpdma_list_busy(vpdma, list_num)) {
+ vip_dbg(3, stream, "%s: List %d is busy\n",
+ __func__, list_num);
+ }
+
+ /* Make sure we start with a clean list */
+ vpdma_reset_desc_list(&stream->desc_list);
+
+ /* config descriptors */
+ if (port->load_mmrs) {
+ vpdma_map_desc_buf(vpdma, &port->mmr_adb);
+ vpdma_add_cfd_adb(&stream->desc_list, CFD_MMR_CLIENT,
+ &port->mmr_adb);
+
+ port->load_mmrs = false;
+ vip_dbg(3, stream, "Added mmr_adb config desc\n");
+ }
+
+ if (sc->loaded_coeff_h != port->sc_coeff_h.dma_addr ||
+ sc->load_coeff_h) {
+ vpdma_map_desc_buf(vpdma, &port->sc_coeff_h);
+ vpdma_add_cfd_block(&stream->desc_list,
+ VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id,
+ &port->sc_coeff_h, 0);
+
+ sc->loaded_coeff_h = port->sc_coeff_h.dma_addr;
+ sc->load_coeff_h = false;
+ vip_dbg(3, stream, "Added sc_coeff_h config desc\n");
+ }
+
+ if (sc->loaded_coeff_v != port->sc_coeff_v.dma_addr ||
+ sc->load_coeff_v) {
+ vpdma_map_desc_buf(vpdma, &port->sc_coeff_v);
+ vpdma_add_cfd_block(&stream->desc_list,
+ VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id,
+ &port->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
+
+ sc->loaded_coeff_v = port->sc_coeff_v.dma_addr;
+ sc->load_coeff_v = false;
+ vip_dbg(3, stream, "Added sc_coeff_v config desc\n");
+ }
+ vip_dbg(3, stream, "CFD_SC_CLIENT %d slice_id: %d\n",
+ VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id, dev->slice_id);
+
+ vpdma_map_desc_buf(vpdma, &stream->desc_list.buf);
+ vip_dbg(3, stream, "Submitting desc on list# %d\n", list_num);
+ vpdma_submit_descs(vpdma, &stream->desc_list, list_num);
+
+ while (vpdma_list_busy(vpdma, list_num) && timeout--)
+ usleep_range(1000, 1100);
+
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &port->mmr_adb);
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &port->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &port->sc_coeff_v);
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+
+ vpdma_reset_desc_list(&stream->desc_list);
+
+ if (timeout <= 0) {
+ vip_err(stream, "Timed out setting up scaler through VPDMA list\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vip_load_vpdma_list_fifo(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ struct vpdma_data *vpdma = dev->shared->vpdma;
+ int list_num = stream->list_num;
+ struct vip_buffer *buf;
+ unsigned long flags;
+ int timeout, i;
+
+ if (vpdma_list_busy(vpdma, list_num))
+ return -EBUSY;
+
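+ /*
+ * Prime the VPDMA list FIFO: post one buffer per FIFO slot,
+ * waiting for each list submission to be consumed before
+ * posting the next.
+ */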
+ for (i = 0; i < VIP_VPDMA_FIFO_SIZE; i++) {
+ spin_lock_irqsave(&dev->slock, flags);
+ if (list_empty(&stream->vidq)) {
+ vip_err(stream, "No buffer left!");
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return -EINVAL;
+ }
+
+ buf = list_entry(stream->vidq.next,
+ struct vip_buffer, list);
+ buf->drop = false;
+
+ list_move_tail(&buf->list, &stream->post_bufs);
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ vip_dbg(2, stream, "%s: start_dma vb2 buf idx:%d\n",
+ __func__, buf->vb.vb2_buf.index);
+ start_dma(stream, buf);
+
+ timeout = 500;
+ while (vpdma_list_busy(vpdma, list_num) && timeout--)
+ usleep_range(1000, 1100);
+
+ if (timeout <= 0) {
+ vip_err(stream, "Timed out loading VPDMA list fifo\n");
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+static int vip_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vip_stream *stream = vb2_get_drv_priv(vq);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ int ret;
+
+ ret = vip_setup_scaler(stream);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure the scaler is configured before the datapath is
+ * enabled. The scaler can only load the coefficient
+ * parameters when it is idle. If the scaler path is enabled
+ * and video data is being received then the VPDMA transfer will
+ * stall indefinitely.
+ */
+ set_fmt_params(stream);
+ vip_setup_parser(port);
+
+ if (port->subdev) {
+ ret = v4l2_subdev_call(port->subdev, video, s_stream, 1);
+ if (ret) {
+ vip_dbg(1, stream, "stream on failed in subdev\n");
+ return ret;
+ }
+ }
+
+ stream->sequence = 0;
+ stream->field = V4L2_FIELD_TOP;
+ populate_desc_list(stream);
+
+ ret = vip_load_vpdma_list_fifo(stream);
+ if (ret)
+ return ret;
+
+ stream->num_recovery = 0;
+
+ clear_irqs(dev, dev->slice_id, stream->list_num);
+ enable_irqs(dev, dev->slice_id, stream->list_num);
+ vip_schedule_next_buffer(stream);
+ vip_parser_stop_imm(port, false);
+ vip_enable_parser(port, true);
+
+ return 0;
+}
+
+/*
+ * Abort streaming and wait for last buffer
+ */
+static void vip_stop_streaming(struct vb2_queue *vq)
+{
+ struct vip_stream *stream = vb2_get_drv_priv(vq);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ struct vip_buffer *buf;
+ int ret;
+
+ vip_dbg(2, stream, "%s:\n", __func__);
+
+ vip_parser_stop_imm(port, true);
+ vip_enable_parser(port, false);
+ unset_fmt_params(stream);
+
+ disable_irqs(dev, dev->slice_id, stream->list_num);
+ clear_irqs(dev, dev->slice_id, stream->list_num);
+
+ if (port->subdev) {
+ ret = v4l2_subdev_call(port->subdev, video, s_stream, 0);
+ if (ret)
+ vip_dbg(1, stream, "stream on failed in subdev\n");
+ }
+
+ stop_dma(stream, true);
+
+ /* release all active buffers */
+ while (!list_empty(&stream->post_bufs)) {
+ buf = list_entry(stream->post_bufs.next,
+ struct vip_buffer, list);
+ list_del(&buf->list);
+ if (buf->drop == 1)
+ list_add_tail(&buf->list, &stream->dropq);
+ else
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ while (!list_empty(&stream->vidq)) {
+ buf = list_entry(stream->vidq.next, struct vip_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+ vpdma_reset_desc_list(&stream->desc_list);
+}
+
+static const struct vb2_ops vip_video_qops = {
+ .queue_setup = vip_queue_setup,
+ .buf_prepare = vip_buf_prepare,
+ .buf_queue = vip_buf_queue,
+ .start_streaming = vip_start_streaming,
+ .stop_streaming = vip_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * File operations
+ */
+
+static int vip_init_dev(struct vip_dev *dev)
+{
+ if (dev->num_ports != 0)
+ goto done;
+
+ vip_set_clock_enable(dev, 1);
+ vip_module_reset(dev, VIP_SC_RST, false);
+ vip_module_reset(dev, VIP_CSC_RST, false);
+done:
+ dev->num_ports++;
+
+ return 0;
+}
+
+static inline bool is_scaler_available(struct vip_port *port)
+{
+ if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL)
+ if (port->dev->sc_assigned == VIP_NOT_ASSIGNED ||
+ port->dev->sc_assigned == port->port_id)
+ return true;
+ return false;
+}
+
+static inline bool allocate_scaler(struct vip_port *port)
+{
+ if (is_scaler_available(port)) {
+ if (port->dev->sc_assigned == VIP_NOT_ASSIGNED ||
+ port->dev->sc_assigned == port->port_id) {
+ port->dev->sc_assigned = port->port_id;
+ port->scaler = true;
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline void free_scaler(struct vip_port *port)
+{
+ if (port->dev->sc_assigned == port->port_id) {
+ port->dev->sc_assigned = VIP_NOT_ASSIGNED;
+ port->scaler = false;
+ }
+}
+
+static bool is_csc_available(struct vip_port *port)
+{
+ if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL)
+ if (port->dev->csc_assigned == VIP_NOT_ASSIGNED ||
+ port->dev->csc_assigned == port->port_id)
+ return true;
+ return false;
+}
+
+static bool allocate_csc(struct vip_port *port,
+ enum vip_csc_state csc_direction)
+{
+ /* Is CSC needed? */
+ if (csc_direction != VIP_CSC_NA) {
+ if (is_csc_available(port)) {
+ port->dev->csc_assigned = port->port_id;
+ port->csc = csc_direction;
+ vip_dbg(1, port, "%s: csc allocated: dir: %d\n",
+ __func__, csc_direction);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void free_csc(struct vip_port *port)
+{
+ if (port->dev->csc_assigned == port->port_id) {
+ port->dev->csc_assigned = VIP_NOT_ASSIGNED;
+ port->csc = VIP_CSC_NA;
+ vip_dbg(1, port, "%s: csc freed\n",
+ __func__);
+ }
+}
+
+static int vip_init_port(struct vip_port *port)
+{
+ int ret;
+ struct vip_fmt *fmt;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+
+ if (port->num_streams != 0)
+ goto done;
+
+ ret = vip_init_dev(port->dev);
+ if (ret)
+ goto done;
+
+ /* Get subdevice current frame format */
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ ret = v4l2_subdev_call(port->subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ vip_dbg(1, port, "init_port get_fmt failed in subdev: (%d)\n",
+ ret);
+
+ /* try to find one that matches */
+ fmt = find_port_format_by_code(port, mbus_fmt->code);
+ if (!fmt) {
+ vip_dbg(1, port, "subdev default mbus_fmt %04x is not matched.\n",
+ mbus_fmt->code);
+ /* if all else fails just pick the first one */
+ fmt = port->active_fmt[0];
+
+ mbus_fmt->code = fmt->code;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ ret = v4l2_subdev_call(port->subdev, pad, set_fmt,
+ NULL, &sd_fmt);
+ if (ret)
+ vip_dbg(1, port, "init_port set_fmt failed in subdev: (%d)\n",
+ ret);
+ }
+
+ /* Assign current format */
+ port->fmt = fmt;
+ port->mbus_framefmt = *mbus_fmt;
+
+ vip_dbg(3, port, "%s: g_mbus_fmt subdev mbus_code: %04X fourcc:%s size: %dx%d\n",
+ __func__, fmt->code,
+ fourcc_to_str(fmt->fourcc),
+ mbus_fmt->width, mbus_fmt->height);
+
+ if (mbus_fmt->field == V4L2_FIELD_ALTERNATE)
+ port->flags |= FLAG_INTERLACED;
+ else
+ port->flags &= ~FLAG_INTERLACED;
+
+ port->c_rect.left = 0;
+ port->c_rect.top = 0;
+ port->c_rect.width = mbus_fmt->width;
+ port->c_rect.height = mbus_fmt->height;
+
+ ret = vpdma_alloc_desc_buf(&port->sc_coeff_h, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ return ret;
+
+ ret = vpdma_alloc_desc_buf(&port->sc_coeff_v, SC_COEF_SRAM_SIZE);
+ if (ret != 0)
+ goto free_sc_h;
+
+ ret = vpdma_alloc_desc_buf(&port->mmr_adb, sizeof(struct vip_mmr_adb));
+ if (ret != 0)
+ goto free_sc_v;
+
+ init_adb_hdrs(port);
+
+ vip_enable_parser(port, false);
+done:
+ port->num_streams++;
+ return 0;
+
+free_sc_v:
+ vpdma_free_desc_buf(&port->sc_coeff_v);
+free_sc_h:
+ vpdma_free_desc_buf(&port->sc_coeff_h);
+ return ret;
+}
+
+static int vip_init_stream(struct vip_stream *stream)
+{
+ struct vip_port *port = stream->port;
+ struct vip_fmt *fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt;
+ struct v4l2_format f;
+ int ret;
+
+ ret = vip_init_port(port);
+ if (ret != 0)
+ return ret;
+
+ fmt = port->fmt;
+ mbus_fmt = &port->mbus_framefmt;
+
+ memset(&f, 0, sizeof(f));
+
+ /* Properly calculate the sizeimage and bytesperline values. */
+ v4l2_fill_pix_format(&f.fmt.pix, mbus_fmt);
+ f.fmt.pix.pixelformat = fmt->fourcc;
+ ret = vip_calc_format_size(port, fmt, &f);
+ if (ret)
+ return ret;
+
+ stream->width = f.fmt.pix.width;
+ stream->height = f.fmt.pix.height;
+ stream->sup_field = f.fmt.pix.field;
+ stream->bytesperline = f.fmt.pix.bytesperline;
+ stream->sizeimage = f.fmt.pix.sizeimage;
+
+ vip_dbg(3, stream, "init_stream fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
+ fourcc_to_str(f.fmt.pix.pixelformat),
+ f.fmt.pix.width, f.fmt.pix.height,
+ f.fmt.pix.bytesperline, f.fmt.pix.sizeimage);
+ vip_dbg(3, stream, "init_stream vpdma data type: 0x%02X\n",
+ port->fmt->vpdma_fmt[0]->data_type);
+
+ ret = vpdma_create_desc_list(&stream->desc_list, VIP_DESC_LIST_SIZE,
+ VPDMA_LIST_TYPE_NORMAL);
+
+ if (ret != 0)
+ return ret;
+
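+ /*
+ * The data write descriptor appears to live in the last slot
+ * (index 15) of the descriptor list.
+ */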
+ stream->write_desc = (struct vpdma_dtd *)stream->desc_list.buf.addr
+ + 15;
+
+ vip_dbg(1, stream, "%s: stream instance %pa\n",
+ __func__, &stream);
+
+ return 0;
+}
+
+static void vip_release_dev(struct vip_dev *dev)
+{
+ /*
+ * On last close, disable clocks to conserve power
+ */
+
+ if (--dev->num_ports == 0) {
+ /* reset the scaler module */
+ vip_module_reset(dev, VIP_SC_RST, true);
+ vip_module_reset(dev, VIP_CSC_RST, true);
+ vip_set_clock_enable(dev, 0);
+ }
+}
+
+static int vip_set_crop_parser(struct vip_port *port)
+{
+ struct vip_dev *dev = port->dev;
+ struct vip_parser_data *parser = dev->parser;
+ u32 hcrop = 0, vcrop = 0;
+ u32 width = port->mbus_framefmt.width;
+
+ if (port->fmt->vpdma_fmt[0] == &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8]) {
+ /*
+ * Special case: since we are faking a YUV422 16-bit format
+ * to have the vpdma perform the needed byte swap, we need
+ * to adjust the pixel width accordingly. Otherwise the
+ * parser will attempt to collect more pixels than are
+ * available and the vpdma transfer will exceed the
+ * allocated frame buffer.
+ */
+ width >>= 1;
+ vip_dbg(1, port, "%s: 8 bit raw detected, adjusting width to %d\n",
+ __func__, width);
+ }
+
+ /*
+ * Set Parser Crop parameters to source size otherwise
+ * scaler and colorspace converter will yield garbage.
+ */
+ hcrop = VIP_ACT_BYPASS;
+ insert_field(&hcrop, 0, VIP_ACT_SKIP_NUMPIX_MASK,
+ VIP_ACT_SKIP_NUMPIX_SHFT);
+ insert_field(&hcrop, width,
+ VIP_ACT_USE_NUMPIX_MASK, VIP_ACT_USE_NUMPIX_SHFT);
+ reg_write(parser, VIP_PARSER_CROP_H_PORT(port->port_id), hcrop);
+
+ insert_field(&vcrop, 0, VIP_ACT_SKIP_NUMLINES_MASK,
+ VIP_ACT_SKIP_NUMLINES_SHFT);
+ insert_field(&vcrop, port->mbus_framefmt.height,
+ VIP_ACT_USE_NUMLINES_MASK, VIP_ACT_USE_NUMLINES_SHFT);
+ reg_write(parser, VIP_PARSER_CROP_V_PORT(port->port_id), vcrop);
+
+ return 0;
+}
+
+static int vip_setup_parser(struct vip_port *port)
+{
+ struct vip_dev *dev = port->dev;
+ struct vip_parser_data *parser = dev->parser;
+ struct v4l2_fwnode_endpoint *endpoint = &port->endpoint;
+ struct vip_bt656_bus *bt656_ep = &port->bt656_endpoint;
+ int iface, sync_type;
+ u32 flags = 0, config0;
+
+ /* Reset the port */
+ vip_reset_parser(port, true);
+ usleep_range(200, 250);
+ vip_reset_parser(port, false);
+
+ config0 = reg_read(parser, VIP_PARSER_PORT(port->port_id));
+
+ if (endpoint->bus_type == V4L2_MBUS_BT656) {
+ flags = endpoint->bus.parallel.flags;
+ iface = DUAL_8B_INTERFACE;
+
+ /*
+ * Ideally, this should come from subdev
+ * port->fmt can be anything once CSC is enabled
+ */
+ if (vip_is_mbuscode_rgb(port->fmt->code)) {
+ sync_type = EMBEDDED_SYNC_SINGLE_RGB_OR_YUV444;
+ } else {
+ switch (bt656_ep->num_channels) {
+ case 4:
+ sync_type = EMBEDDED_SYNC_4X_MULTIPLEXED_YUV422;
+ break;
+ case 2:
+ sync_type = EMBEDDED_SYNC_2X_MULTIPLEXED_YUV422;
+ break;
+ case 1:
+ sync_type = EMBEDDED_SYNC_SINGLE_YUV422;
+ break;
+ default:
+ sync_type =
+ EMBEDDED_SYNC_LINE_MULTIPLEXED_YUV422;
+ }
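+ /*
+ * pixmux == 0 apparently selects line multiplexing over
+ * pixel multiplexing, overriding the channel-count based
+ * sync type.
+ */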
+ if (bt656_ep->pixmux == 0)
+ sync_type =
+ EMBEDDED_SYNC_LINE_MULTIPLEXED_YUV422;
+ }
+
+ } else if (endpoint->bus_type == V4L2_MBUS_PARALLEL) {
+ flags = endpoint->bus.parallel.flags;
+
+ switch (endpoint->bus.parallel.bus_width) {
+ case 24:
+ iface = SINGLE_24B_INTERFACE;
+ break;
+ case 16:
+ iface = SINGLE_16B_INTERFACE;
+ break;
+ case 8:
+ default:
+ iface = DUAL_8B_INTERFACE;
+ }
+
+ if (vip_is_mbuscode_rgb(port->fmt->code))
+ sync_type = DISCRETE_SYNC_SINGLE_RGB_24B;
+ else
+ sync_type = DISCRETE_SYNC_SINGLE_YUV422;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ config0 |= VIP_HSYNC_POLARITY;
+ else if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ config0 &= ~VIP_HSYNC_POLARITY;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ config0 |= VIP_VSYNC_POLARITY;
+ else if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ config0 &= ~VIP_VSYNC_POLARITY;
+
+ config0 &= ~VIP_USE_ACTVID_HSYNC_ONLY;
+ config0 |= VIP_ACTVID_POLARITY;
+ config0 |= VIP_DISCRETE_BASIC_MODE;
+
+ } else {
+ vip_err(port, "Device doesn't support CSI2");
+ return -EINVAL;
+ }
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) {
+ vip_set_pclk_invert(port);
+ config0 |= VIP_PIXCLK_EDGE_POLARITY;
+ } else {
+ config0 &= ~VIP_PIXCLK_EDGE_POLARITY;
+ }
+
+ config0 |= ((sync_type & VIP_SYNC_TYPE_MASK) << VIP_SYNC_TYPE_SHFT);
+
+ reg_write(parser, VIP_PARSER_PORT(port->port_id), config0);
+
+ vip_set_data_interface(port, iface);
+ vip_set_crop_parser(port);
+
+ return 0;
+}
+
+static void vip_enable_parser(struct vip_port *port, bool on)
+{
+ u32 config0;
+ struct vip_dev *dev = port->dev;
+ struct vip_parser_data *parser = dev->parser;
+
+ config0 = reg_read(parser, VIP_PARSER_PORT(port->port_id));
+
+ if (on) {
+ config0 |= VIP_PORT_ENABLE;
+ config0 &= ~(VIP_ASYNC_FIFO_RD | VIP_ASYNC_FIFO_WR);
+ } else {
+ config0 &= ~VIP_PORT_ENABLE;
+ config0 |= (VIP_ASYNC_FIFO_RD | VIP_ASYNC_FIFO_WR);
+ }
+ reg_write(parser, VIP_PARSER_PORT(port->port_id), config0);
+}
+
+static void vip_reset_parser(struct vip_port *port, bool on)
+{
+ u32 config0;
+ struct vip_dev *dev = port->dev;
+ struct vip_parser_data *parser = dev->parser;
+
+ config0 = reg_read(parser, VIP_PARSER_PORT(port->port_id));
+
+ if (on)
+ config0 |= VIP_SW_RESET;
+ else
+ config0 &= ~VIP_SW_RESET;
+
+ reg_write(parser, VIP_PARSER_PORT(port->port_id), config0);
+}
+
+static void vip_parser_stop_imm(struct vip_port *port, bool on)
+{
+ u32 config0;
+ struct vip_dev *dev = port->dev;
+ struct vip_parser_data *parser = dev->parser;
+
+ if (on)
+ config0 = 0xffffffff;
+ else
+ config0 = 0;
+
+ reg_write(parser, VIP_PARSER_STOP_IMM_PORT(port->port_id), config0);
+}
+
+static void vip_release_stream(struct vip_stream *stream)
+{
+ struct vip_dev *dev = stream->port->dev;
+
+ vip_dbg(1, stream, "%s: stream instance %pa\n",
+ __func__, &stream);
+
+ vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
+ vpdma_free_desc_buf(&stream->desc_list.buf);
+ vpdma_free_desc_list(&stream->desc_list);
+}
+
+static void vip_release_port(struct vip_port *port)
+{
+ vip_dbg(1, port, "%s: port instance %p\n",
+ __func__, port);
+
+ vpdma_free_desc_buf(&port->mmr_adb);
+ vpdma_free_desc_buf(&port->sc_coeff_h);
+ vpdma_free_desc_buf(&port->sc_coeff_v);
+}
+
+static void stop_dma(struct vip_stream *stream, bool clear_list)
+{
+ struct vip_dev *dev = stream->port->dev;
+ int ch, size = 0;
+
+ /* Create a list of channels to be cleared */
+ for (ch = 0; ch < VPDMA_MAX_CHANNELS; ch++) {
+ if (stream->vpdma_channels[ch] == 1) {
+ stream->vpdma_channels_to_abort[size++] = ch;
+ vip_dbg(2, stream, "Clear channel no: %d\n", ch);
+ }
+ }
+
+ /* Clear all the used channels for the list */
+ vpdma_list_cleanup(dev->shared->vpdma, stream->list_num,
+ stream->vpdma_channels_to_abort, size);
+
+ if (clear_list)
+ for (ch = 0; ch < VPDMA_MAX_CHANNELS; ch++)
+ stream->vpdma_channels[ch] = 0;
+}
+
+static int vip_open(struct file *file)
+{
+ struct vip_stream *stream = video_drvdata(file);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ int ret = 0;
+
+ vip_dbg(2, stream, "%s\n", __func__);
+
+ mutex_lock(&dev->mutex);
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ vip_err(stream, "v4l2_fh_open failed\n");
+ goto unlock;
+ }
+
+ /*
+ * If this is the first open file,
+ * initialize the hardware module.
+ */
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (vip_init_stream(stream))
+ ret = -ENODEV;
+unlock:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static int vip_release(struct file *file)
+{
+ struct vip_stream *stream = video_drvdata(file);
+ struct vip_port *port = stream->port;
+ struct vip_dev *dev = port->dev;
+ bool fh_singular;
+ int ret;
+
+ vip_dbg(2, stream, "%s\n", __func__);
+
+ mutex_lock(&dev->mutex);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ /* the release helper will cleanup any on-going streaming */
+ ret = _vb2_fop_release(file, NULL);
+
+ free_csc(port);
+ free_scaler(port);
+
+ /*
+ * If this is the last open file,
+ * de-initialize the hardware module.
+ */
+ if (fh_singular) {
+ vip_release_stream(stream);
+
+ if (--port->num_streams == 0) {
+ vip_release_port(port);
+ vip_release_dev(port->dev);
+ }
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations vip_fops = {
+ .owner = THIS_MODULE,
+ .open = vip_open,
+ .release = vip_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static struct video_device vip_videodev = {
+ .name = VIP_MODULE_NAME,
+ .fops = &vip_fops,
+ .ioctl_ops = &vip_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE,
+};
+
+static int alloc_stream(struct vip_port *port, int stream_id, int vfl_type)
+{
+ struct vip_stream *stream;
+ struct vip_dev *dev = port->dev;
+ struct vb2_queue *q;
+ struct video_device *vfd;
+ struct vip_buffer *buf;
+ struct list_head *pos, *tmp;
+ int ret, i;
+ u32 vin_id;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->port = port;
+ stream->stream_id = stream_id;
+ stream->vfl_type = vfl_type;
+ port->cap_streams[stream_id] = stream;
+
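+ /*
+ * The VIN naming mirrors the SoC integration: e.g. VIP instance 2,
+ * slice VIP_SLICE2 yields "vin4a-0" for port A, stream 0
+ * (illustrative; derived from the formula below).
+ */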
+ vin_id = 1 + ((dev->instance_id - 1) * 2) + dev->slice_id;
+ snprintf(stream->name, sizeof(stream->name), "vin%d%c-%d",
+ vin_id, (port->port_id == VIP_PORTA) ? 'a' : 'b', stream_id);
+
+ stream->list_num = vpdma_hwlist_alloc(dev->shared->vpdma, stream);
+ if (stream->list_num < 0) {
+ vip_err(stream, "Could not get VPDMA hwlist");
+ ret = -ENODEV;
+ goto do_free_stream;
+ }
+
+ INIT_LIST_HEAD(&stream->post_bufs);
+
+ /*
+ * Initialize queue
+ */
+ q = &stream->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = stream;
+ q->buf_struct_size = sizeof(struct vip_buffer);
+ q->ops = &vip_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->mutex;
+ q->min_buffers_needed = 3;
+ q->dev = dev->v4l2_dev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto do_free_hwlist;
+
+ INIT_WORK(&stream->recovery_work, vip_overflow_recovery_work);
+
+ INIT_LIST_HEAD(&stream->vidq);
+
+ /* Allocate/populate Drop queue entries */
+ INIT_LIST_HEAD(&stream->dropq);
+ for (i = 0; i < VIP_DROPQ_SIZE; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto do_free_dropq;
+ }
+ buf->drop = true;
+ list_add(&buf->list, &stream->dropq);
+ }
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto do_free_dropq;
+ }
+ *vfd = vip_videodev;
+ vfd->v4l2_dev = dev->v4l2_dev;
+ vfd->queue = q;
+
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, stream);
+
+ ret = video_register_device(vfd, vfl_type, -1);
+ if (ret) {
+ vip_err(stream, "Failed to register video device\n");
+ goto do_free_vfd;
+ }
+
+ stream->vfd = vfd;
+
+ vip_info(stream, "device registered as %s\n",
+ video_device_node_name(vfd));
+ return 0;
+
+do_free_vfd:
+ video_device_release(vfd);
+do_free_dropq:
+ list_for_each_safe(pos, tmp, &stream->dropq) {
+ buf = list_entry(pos,
+ struct vip_buffer, list);
+ vip_dbg(1, dev, "dropq buffer\n");
+ list_del(pos);
+ kfree(buf);
+ }
+do_free_hwlist:
+ vpdma_hwlist_release(dev->shared->vpdma, stream->list_num);
+do_free_stream:
+ kfree(stream);
+ return ret;
+}
+
+static void free_stream(struct vip_stream *stream)
+{
+ struct vip_dev *dev;
+ struct vip_buffer *buf;
+ struct list_head *pos, *q;
+
+ if (!stream)
+ return;
+
+ dev = stream->port->dev;
+ /* Free up the Drop queue */
+ list_for_each_safe(pos, q, &stream->dropq) {
+ buf = list_entry(pos,
+ struct vip_buffer, list);
+ vip_dbg(1, stream, "dropq buffer\n");
+ list_del(pos);
+ kfree(buf);
+ }
+
+ video_unregister_device(stream->vfd);
+ vpdma_hwlist_release(dev->shared->vpdma, stream->list_num);
+ stream->port->cap_streams[stream->stream_id] = NULL;
+ kfree(stream);
+}
+
+static int get_subdev_active_format(struct vip_port *port,
+ struct v4l2_subdev *subdev)
+{
+ struct vip_fmt *fmt;
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ int ret = 0;
+ unsigned int k, i, j;
+ enum vip_csc_state csc;
+
+ /* Enumerate sub device formats and enable all matching local formats */
+ port->num_active_fmt = 0;
+ for (k = 0, i = 0; (ret != -EINVAL); k++) {
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = k;
+ mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ continue;
+
+ vip_dbg(2, port,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, k);
+
+ for (j = 0; j < ARRAY_SIZE(vip_formats); j++) {
+ fmt = &vip_formats[j];
+ if (mbus_code.code != fmt->code)
+ continue;
+
+ /*
+ * When the port is configured for BT656,
+ * none of the downstream units can be used,
+ * so we need to skip all formats requiring
+ * either CSC or CHR_DS.
+ */
+ csc = vip_csc_direction(fmt->code, fmt->finfo);
+ if (port->endpoint.bus_type == V4L2_MBUS_BT656 &&
+ (csc != VIP_CSC_NA || fmt->coplanar))
+ continue;
+
+ port->active_fmt[i] = fmt;
+ vip_dbg(2, port,
+ "matched fourcc: %s: code: %04x idx: %d\n",
+ fourcc_to_str(fmt->fourcc), fmt->code, i);
+ port->num_active_fmt = ++i;
+ }
+ }
+
+ if (i == 0) {
+ vip_err(port, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int alloc_port(struct vip_dev *dev, int id)
+{
+ struct vip_port *port;
+ u32 vin_id;
+
+ if (dev->ports[id])
+ return -EINVAL;
+
+ port = devm_kzalloc(&dev->pdev->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ dev->ports[id] = port;
+ port->dev = dev;
+ port->port_id = id;
+ vin_id = 1 + ((dev->instance_id - 1) * 2) + dev->slice_id;
+ snprintf(port->name, sizeof(port->name),
+ "vin%d%c", vin_id, (id == VIP_PORTA) ? 'a' : 'b');
+ port->num_streams = 0;
+ return 0;
+}
+
+static void free_port(struct vip_port *port)
+{
+ if (!port)
+ return;
+
+ v4l2_async_notifier_unregister(&port->notifier);
+ v4l2_async_notifier_cleanup(&port->notifier);
+ free_stream(port->cap_streams[0]);
+}
+
+static int get_field(u32 value, u32 mask, int shift)
+{
+ return (value & (mask << shift)) >> shift;
+}
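+
+/*
+ * Usage sketch (illustrative): get_field() mirrors insert_field() for
+ * register field access, e.g. decoding the PID function field as done
+ * in vip_probe():
+ *   pid = reg_read(shared, VIP_PID);
+ *   func = get_field(pid, VIP_PID_FUNC_MASK, VIP_PID_FUNC_SHIFT);
+ */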
+
+static int vip_probe_complete(struct platform_device *pdev);
+static void vip_vpdma_fw_cb(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "VPDMA firmware loaded\n");
+
+ if (pdev->dev.of_node)
+ vip_probe_complete(pdev);
+}
+
+static int vip_create_streams(struct vip_port *port,
+ struct v4l2_subdev *subdev)
+{
+ struct v4l2_fwnode_bus_parallel *bus;
+ struct vip_bt656_bus *bt656_ep;
+ int i;
+
+ for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++)
+ free_stream(port->cap_streams[i]);
+
+ if (get_subdev_active_format(port, subdev))
+ return -ENODEV;
+
+ port->subdev = subdev;
+
+ if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL) {
+ port->flags |= FLAG_MULT_PORT;
+ port->num_streams_configured = 1;
+ alloc_stream(port, 0, VFL_TYPE_VIDEO);
+ } else if (port->endpoint.bus_type == V4L2_MBUS_BT656) {
+ port->flags |= FLAG_MULT_PORT;
+ bus = &port->endpoint.bus.parallel;
+ bt656_ep = &port->bt656_endpoint;
+ port->num_streams_configured = bt656_ep->num_channels;
+ for (i = 0; i < bt656_ep->num_channels; i++) {
+ if (bt656_ep->channels[i] >= 16)
+ continue;
+ alloc_stream(port, bt656_ep->channels[i], VFL_TYPE_VIDEO);
+ }
+ }
+ return 0;
+}
+
+static int vip_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct vip_port *port = notifier_to_vip_port(notifier);
+ int ret;
+
+ vip_dbg(1, port, "%s\n", __func__);
+
+ if (port->subdev) {
+ vip_info(port, "Rejecting subdev %s (Already set!!)",
+ subdev->name);
+ return 0;
+ }
+
+ vip_info(port, "Port %c: Using subdev %s for capture\n",
+ port->port_id == VIP_PORTA ? 'A' : 'B', subdev->name);
+
+ ret = vip_create_streams(port, subdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int vip_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct vip_port *port = notifier_to_vip_port(notifier);
+
+ vip_dbg(1, port, "%s\n", __func__);
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations vip_async_ops = {
+ .bound = vip_async_bound,
+ .complete = vip_async_complete,
+};
+
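+/*
+ * Local helper: resolve an endpoint fwnode by its port/endpoint reg
+ * values via the OF graph (this driver is OF-only).
+ */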
+static struct fwnode_handle *
+fwnode_graph_get_next_endpoint_by_regs(const struct fwnode_handle *fwnode,
+ int port_reg, int reg)
+{
+ return of_fwnode_handle(of_graph_get_endpoint_by_regs(to_of_node(fwnode),
+ port_reg, reg));
+}
+
+static int vip_register_subdev_notif(struct vip_port *port,
+ struct fwnode_handle *ep)
+{
+ struct v4l2_async_notifier *notifier = &port->notifier;
+ struct vip_dev *dev = port->dev;
+ struct fwnode_handle *subdev;
+ struct v4l2_fwnode_endpoint *vep;
+ struct vip_bt656_bus *bt656_vep;
+ struct v4l2_async_subdev *asd;
+ int ret, rval;
+
+ vep = &port->endpoint;
+ bt656_vep = &port->bt656_endpoint;
+
+ subdev = fwnode_graph_get_remote_port_parent(ep);
+ if (!subdev) {
+ vip_dbg(3, port, "can't get remote parent\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, vep);
+ if (ret) {
+ vip_dbg(3, port, "Failed to parse endpoint:\n");
+ fwnode_handle_put(subdev);
+ return -EINVAL;
+ }
+
+ if (vep->bus_type == V4L2_MBUS_BT656) {
+ if (fwnode_property_present(ep, "ti,vip-pixel-mux"))
+ bt656_vep->pixmux = 1;
+ else
+ bt656_vep->pixmux = 0;
+ vip_dbg(3, port, "ti,vip-pixel-mux %u\n", bt656_vep->pixmux);
+
+ bt656_vep->num_channels = 0;
+ rval = fwnode_property_read_u8_array(ep, "ti,vip-channels",
+ NULL, 0);
+ if (rval > 0) {
+ bt656_vep->num_channels =
+ min_t(int, ARRAY_SIZE(bt656_vep->channels),
+ rval);
+
+ fwnode_property_read_u8_array(ep, "ti,vip-channels",
+ bt656_vep->channels,
+ bt656_vep->num_channels);
+ }
+ vip_dbg(3, port, "ti,vip-channels %u\n", bt656_vep->num_channels);
+ }
+
+ v4l2_async_notifier_init(notifier);
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, subdev,
+ sizeof(struct v4l2_async_subdev));
+ if (IS_ERR(asd)) {
+ vip_dbg(1, port, "Error adding asd\n");
+ fwnode_handle_put(subdev);
+ v4l2_async_notifier_cleanup(notifier);
+ return -EINVAL;
+ }
+
+ notifier->ops = &vip_async_ops;
+ ret = v4l2_async_notifier_register(dev->v4l2_dev, notifier);
+ if (ret) {
+ vip_dbg(1, port, "Error registering async notifier\n");
+ v4l2_async_notifier_cleanup(notifier);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vip_endpoint_scan(struct platform_device *pdev)
+{
+ struct device_node *parent = pdev->dev.of_node;
+ struct device_node *ep = NULL;
+ int count = 0, p;
+
+ for (p = 0; p < (VIP_NUM_PORTS * VIP_NUM_SLICES); p++) {
+ ep = of_graph_get_endpoint_by_regs(parent, p, 0);
+ if (!ep)
+ continue;
+
+ count++;
+ of_node_put(ep);
+ }
+
+ return count;
+}
+
+static int vip_probe_complete(struct platform_device *pdev)
+{
+ struct vip_shared *shared = platform_get_drvdata(pdev);
+ struct regmap *syscon_pol = NULL;
+ u32 syscon_pol_offset = 0;
+ struct vip_port *port;
+ struct vip_dev *dev;
+ struct device_node *parent = pdev->dev.of_node;
+ struct fwnode_handle *ep = NULL;
+ int ret, slice_id, port_id, p;
+
+ if (parent && of_property_read_bool(parent, "ti,vip-clk-polarity")) {
+ syscon_pol = syscon_regmap_lookup_by_phandle(parent,
+ "ti,vip-clk-polarity");
+ if (IS_ERR(syscon_pol)) {
+ dev_err(&pdev->dev, "failed to get ti,vip-clk-polarity regmap\n");
+ return PTR_ERR(syscon_pol);
+ }
+
+ if (of_property_read_u32_index(parent, "ti,vip-clk-polarity",
+ 1, &syscon_pol_offset)) {
+ dev_err(&pdev->dev, "failed to get ti,vip-clk-polarity offset\n");
+ return -EINVAL;
+ }
+ }
+
+ for (p = 0; p < (VIP_NUM_PORTS * VIP_NUM_SLICES); p++) {
+ ep = fwnode_graph_get_next_endpoint_by_regs(of_fwnode_handle(parent),
+ p, 0);
+ if (!ep)
+ continue;
+
+ switch (p) {
+ case 0:
+ slice_id = VIP_SLICE1; port_id = VIP_PORTA;
+ break;
+ case 1:
+ slice_id = VIP_SLICE2; port_id = VIP_PORTA;
+ break;
+ case 2:
+ slice_id = VIP_SLICE1; port_id = VIP_PORTB;
+ break;
+ case 3:
+ slice_id = VIP_SLICE2; port_id = VIP_PORTB;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown port reg=<%d>\n", p);
+ continue;
+ }
+
+ ret = alloc_port(shared->devs[slice_id], port_id);
+ if (ret < 0)
+ continue;
+
+ dev = shared->devs[slice_id];
+ dev->syscon_pol = syscon_pol;
+ dev->syscon_pol_offset = syscon_pol_offset;
+ port = dev->ports[port_id];
+
+ vip_register_subdev_notif(port, ep);
+ fwnode_handle_put(ep);
+ }
+ return 0;
+}
+
+static int vip_probe_slice(struct platform_device *pdev, int slice, int instance_id)
+{
+ struct vip_shared *shared = platform_get_drvdata(pdev);
+ struct vip_dev *dev;
+ struct vip_parser_data *parser;
+ u32 vin_id;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->instance_id = instance_id;
+ vin_id = 1 + ((dev->instance_id - 1) * 2) + slice;
+ snprintf(dev->name, sizeof(dev->name), "vin%d", vin_id);
+
+ dev->irq = platform_get_irq(pdev, slice);
+ if (dev->irq < 0)
+ return dev->irq;
+
+ ret = devm_request_irq(&pdev->dev, dev->irq, vip_irq,
+ 0, dev->name, dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
+ dev->slice_id = slice;
+ dev->pdev = pdev;
+ dev->res = shared->res;
+ dev->base = shared->base;
+ dev->v4l2_dev = &shared->v4l2_dev;
+
+ dev->shared = shared;
+ shared->devs[slice] = dev;
+
+ vip_top_reset(dev);
+ vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
+
+ parser = devm_kzalloc(&pdev->dev, sizeof(*dev->parser), GFP_KERNEL);
+ if (!parser)
+ return -ENOMEM;
+
+ parser->res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ (slice == 0) ?
+ "parser0" :
+ "parser1");
+ parser->base = devm_ioremap_resource(&pdev->dev, parser->res);
+ if (IS_ERR(parser->base))
+ return PTR_ERR(parser->base);
+
+ parser->pdev = pdev;
+ dev->parser = parser;
+
+ dev->sc_assigned = VIP_NOT_ASSIGNED;
+ dev->sc = sc_create(pdev, (slice == 0) ? "sc0" : "sc1");
+ if (IS_ERR(dev->sc))
+ return PTR_ERR(dev->sc);
+
+ dev->csc_assigned = VIP_NOT_ASSIGNED;
+ dev->csc = csc_create(pdev, (slice == 0) ? "csc0" : "csc1");
+ if (IS_ERR(dev->csc))
+ return PTR_ERR(dev->csc);
+
+ return 0;
+}
+
+static int vip_probe(struct platform_device *pdev)
+{
+ struct vip_shared *shared;
+ struct pinctrl *pinctrl;
+ int ret, slice = VIP_SLICE1;
+ int instance_id;
+ u32 tmp, pid;
+
+ instance_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!instance_id) {
+ dev_err(&pdev->dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+
+ /* If there are no endpoint defined there is nothing to do */
+ if (!vip_endpoint_scan(pdev))
+ return -ENODEV;
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return ret;
+ }
+
+ shared = devm_kzalloc(&pdev->dev, sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ return -ENOMEM;
+
+ shared->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vip");
+ shared->base = devm_ioremap_resource(&pdev->dev, shared->res);
+ if (IS_ERR(shared->base))
+ return PTR_ERR(shared->base);
+
+ vip_init_format_info(&pdev->dev);
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ goto err_runtime_disable;
+
+ /* Make sure H/W module has the right functionality */
+ pid = reg_read(shared, VIP_PID);
+ tmp = get_field(pid, VIP_PID_FUNC_MASK, VIP_PID_FUNC_SHIFT);
+
+ if (tmp != VIP_PID_FUNC) {
+ dev_info(&pdev->dev, "vip: unexpected PID function: 0x%x\n",
+ tmp);
+ ret = -ENODEV;
+ goto err_runtime_put;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &shared->v4l2_dev);
+ if (ret)
+ goto err_runtime_put;
+
+ /* enable clocks, so the firmware will load properly */
+ vip_shared_set_clock_enable(shared, 1);
+ vip_top_vpdma_reset(shared);
+
+ platform_set_drvdata(pdev, shared);
+
+ v4l2_ctrl_handler_init(&shared->ctrl_handler, 11);
+ shared->v4l2_dev.ctrl_handler = &shared->ctrl_handler;
+
+ for (slice = VIP_SLICE1; slice < VIP_NUM_SLICES; slice++) {
+ ret = vip_probe_slice(pdev, slice, instance_id);
+ if (ret) {
+ dev_err(&pdev->dev, "Creating slice failed");
+ goto err_dev_unreg;
+ }
+ }
+
+ shared->vpdma = &shared->vpdma_data;
+ ret = vpdma_create(pdev, shared->vpdma, vip_vpdma_fw_cb);
+ if (ret) {
+ dev_err(&pdev->dev, "Creating VPDMA failed");
+ goto err_dev_unreg;
+ }
+
+ return 0;
+
+err_dev_unreg:
+ v4l2_ctrl_handler_free(&shared->ctrl_handler);
+ v4l2_device_unregister(&shared->v4l2_dev);
+err_runtime_put:
+ pm_runtime_put_sync(&pdev->dev);
+err_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int vip_remove(struct platform_device *pdev)
+{
+ struct vip_shared *shared = platform_get_drvdata(pdev);
+ struct vip_dev *dev;
+ int slice;
+
+ for (slice = 0; slice < VIP_NUM_SLICES; slice++) {
+ dev = shared->devs[slice];
+ if (!dev)
+ continue;
+
+ free_port(dev->ports[VIP_PORTA]);
+ free_port(dev->ports[VIP_PORTB]);
+ }
+
+ v4l2_ctrl_handler_free(&shared->ctrl_handler);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id vip_of_match[] = {
+ {
+ .compatible = "ti,dra7-vip1",
+ .data = (void *)VIP_INSTANCE1,
+ },
+ {
+ .compatible = "ti,dra7-vip2",
+ .data = (void *)VIP_INSTANCE2,
+ },
+ {
+ .compatible = "ti,dra7-vip3",
+ .data = (void *)VIP_INSTANCE3,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, vip_of_match);
+#endif
+
+static struct platform_driver vip_pdrv = {
+ .probe = vip_probe,
+ .remove = vip_remove,
+ .driver = {
+ .name = VIP_MODULE_NAME,
+ .of_match_table = of_match_ptr(vip_of_match),
+ },
+};
+
+module_platform_driver(vip_pdrv);
+
+MODULE_DESCRIPTION("TI VIP driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti/vpe/vip.h b/drivers/media/platform/ti/vpe/vip.h
new file mode 100644
index 000000000000..f0225fe2a2a3
--- /dev/null
+++ b/drivers/media/platform/ti/vpe/vip.h
@@ -0,0 +1,719 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI VIP capture driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * David Griego, <dagriego@biglakesoftware.com>
+ * Dale Farnsworth, <dale@farnsworth.org>
+ * Nikhil Devshatwar, <nikhil.nd@ti.com>
+ * Benoit Parrot, <bparrot@ti.com>
+ */
+
+#ifndef __TI_VIP_H
+#define __TI_VIP_H
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-memops.h>
+#include <media/v4l2-fwnode.h>
+
+#include "vpdma.h"
+#include "vpdma_priv.h"
+#include "sc.h"
+#include "csc.h"
+
+#define VIP_INSTANCE1 1
+#define VIP_INSTANCE2 2
+#define VIP_INSTANCE3 3
+
+#define VIP_SLICE1 0
+#define VIP_SLICE2 1
+#define VIP_NUM_SLICES 2
+
+/*
+ * Additional client identifiers used for VPDMA configuration descriptors
+ */
+#define VIP_SLICE1_CFD_SC_CLIENT 7
+#define VIP_SLICE2_CFD_SC_CLIENT 8
+
+#define VIP_PORTA 0
+#define VIP_PORTB 1
+#define VIP_NUM_PORTS 2
+
+#define VIP_MAX_PLANES 2
+#define VIP_LUMA 0
+#define VIP_CHROMA 1
+
+#define VIP_CAP_STREAMS_PER_PORT 16
+#define VIP_VBI_STREAMS_PER_PORT 16
+
+#define VIP_MAX_SUBDEV 5
+/*
+ * This value needs to be at least as large as the number of entry in
+ * vip_formats[].
+ * When vip_formats[] is modified make sure to adjust this value also.
+ */
+#define VIP_MAX_ACTIVE_FMT 16
+/*
+ * Colorspace conversion unit can be in one of 3 modes:
+ * NA - Not Available on this port
+ * Y2R - Needed for YUV to RGB on this port
+ * R2Y - Needed for RGB to YUV on this port
+ */
+enum vip_csc_state {
+ VIP_CSC_NA = 0,
+ VIP_CSC_Y2R,
+ VIP_CSC_R2Y,
+};
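+/*
+ * For example (illustrative): delivering an RGB fourcc from a YUV
+ * sensor requires VIP_CSC_Y2R on the port, while a YUV fourcc from an
+ * RGB sensor requires VIP_CSC_R2Y; vip_csc_direction() makes this
+ * decision per format.
+ */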
+
+/* buffer for one video frame */
+struct vip_buffer {
+ /* common v4l buffer stuff */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ bool drop;
+};
+
+/*
+ * struct vip_fmt - VIP media bus format information
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @code: V4L2 media bus format code
+ * @colorspace: V4L2 colorspace identifier
+ * @coplanar: 1 if unpacked Luma and Chroma, 0 otherwise (packed/interleaved)
+ * @vpdma_fmt: VPDMA data format per plane.
+ * @finfo: Cached v4l2_format_info for the associated fourcc
+ */
+struct vip_fmt {
+ u32 fourcc;
+ u32 code;
+ u32 colorspace;
+ u8 coplanar;
+ const struct vpdma_data_format *vpdma_fmt[VIP_MAX_PLANES];
+ const struct v4l2_format_info *finfo;
+};
+
+/*
+ * The vip_parser_data structure contains the memory-mapped
+ * info to access the parser registers.
+ */
+struct vip_parser_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+/*
+ * The vip_shared structure contains data that is shared by both
+ * the VIP1 and VIP2 slices.
+ */
+struct vip_shared {
+ struct list_head list;
+ struct resource *res;
+ void __iomem *base;
+ struct vpdma_data vpdma_data;
+ struct vpdma_data *vpdma;
+ struct v4l2_device v4l2_dev;
+ struct vip_dev *devs[VIP_NUM_SLICES];
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/*
+ * The vip_bt656_bus structure contains VIP-specific BT.656 bus data.
+ */
+struct vip_bt656_bus {
+ unsigned char num_channels;
+ unsigned char pixmux;
+ unsigned char channels[16];
+};
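+/*
+ * Populated from the endpoint firmware node, e.g. (an illustrative
+ * DT snippet, not part of this patch):
+ *   ti,vip-pixel-mux;
+ *   ti,vip-channels = /bits/ 8 <0 1 2 3>;
+ * parses to pixmux = 1, num_channels = 4, channels = {0, 1, 2, 3}.
+ */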
+
+/*
+ * There are two vip_dev structures, one for each vip slice: VIP1 & VIP2.
+ */
+struct vip_dev {
+ struct v4l2_device *v4l2_dev;
+ struct platform_device *pdev;
+ struct vip_shared *shared;
+ struct resource *res;
+ struct regmap *syscon_pol;
+ u32 syscon_pol_offset;
+ int instance_id;
+ int slice_id;
+ int num_ports; /* count of open ports */
+ struct mutex mutex;
+ spinlock_t slock;
+
+ int irq;
+ void __iomem *base;
+
+ struct vip_port *ports[VIP_NUM_PORTS];
+
+ char name[16];
+ /* parser data handle */
+ struct vip_parser_data *parser;
+ /* scaler data handle */
+ struct sc_data *sc;
+ /* scaler port assignment */
+ int sc_assigned;
+ /* csc data handle */
+ struct csc_data *csc;
+ /* csc port assignment */
+ int csc_assigned;
+};
+
+/*
+ * There are two vip_port structures for each vip_dev, one for port A
+ * and one for port B.
+ */
+struct vip_port {
+ struct vip_dev *dev;
+ int port_id;
+
+ unsigned int flags;
+ struct v4l2_rect c_rect; /* crop rectangle */
+ struct v4l2_mbus_framefmt mbus_framefmt;
+ struct v4l2_mbus_framefmt try_mbus_framefmt;
+
+ char name[16];
+ struct vip_fmt *fmt; /* current format info */
+ /* Number of channels/streams configured */
+ int num_streams_configured;
+ int num_streams; /* count of open streams */
+ struct vip_stream *cap_streams[VIP_CAP_STREAMS_PER_PORT];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *subdev;
+ struct v4l2_fwnode_endpoint endpoint;
+ struct vip_bt656_bus bt656_endpoint;
+ struct vip_fmt *active_fmt[VIP_MAX_ACTIVE_FMT];
+ int num_active_fmt;
+ /* have new shadow reg values */
+ bool load_mmrs;
+ /* shadow reg addr/data block */
+ struct vpdma_buf mmr_adb;
+ /* h coeff buffer */
+ struct vpdma_buf sc_coeff_h;
+ /* v coeff buffer */
+ struct vpdma_buf sc_coeff_v;
+ /* Shows whether the scaler resource is available on this port */
+ bool scaler;
+ /* Shows the csc resource state on this port */
+ enum vip_csc_state csc;
+};
+
+/*
+ * When handling multiplexed video, there can be multiple streams for each
+ * port. The vip_stream structure holds per-stream data.
+ */
+struct vip_stream {
+ struct video_device *vfd;
+ struct vip_port *port;
+ int stream_id;
+ int list_num;
+ int vfl_type;
+ char name[16];
+ struct work_struct recovery_work;
+ int num_recovery;
+ enum v4l2_field field; /* current field */
+ unsigned int sequence; /* current frame/field seq */
+ enum v4l2_field sup_field; /* supported field value */
+ unsigned int width; /* frame width */
+ unsigned int height; /* frame height */
+ unsigned int bytesperline; /* bytes per line in memory */
+ unsigned int sizeimage; /* image size in memory */
+ struct list_head vidq; /* incoming vip_bufs queue */
+ struct list_head dropq; /* drop vip_bufs queue */
+ struct list_head post_bufs; /* vip_bufs to be DMAed */
+ /* Maintain a list of used channels - Needed for VPDMA cleanup */
+ int vpdma_channels[VPDMA_MAX_CHANNELS];
+ int vpdma_channels_to_abort[VPDMA_MAX_CHANNELS];
+ struct vpdma_desc_list desc_list; /* DMA descriptor list */
+ struct vpdma_dtd *write_desc;
+ /* next unused desc_list addr */
+ void *desc_next;
+ struct vb2_queue vb_vidq;
+};
+
+/*
+ * VIP Enumerations
+ */
+enum data_path_select {
+ ALL_FIELDS_DATA_SELECT = 0,
+ VIP_CSC_SRC_DATA_SELECT,
+ VIP_SC_SRC_DATA_SELECT,
+ VIP_RGB_SRC_DATA_SELECT,
+ VIP_RGB_OUT_LO_DATA_SELECT,
+ VIP_RGB_OUT_HI_DATA_SELECT,
+ VIP_CHR_DS_1_SRC_DATA_SELECT,
+ VIP_CHR_DS_2_SRC_DATA_SELECT,
+ VIP_MULTI_CHANNEL_DATA_SELECT,
+ VIP_CHR_DS_1_DATA_BYPASS,
+ VIP_CHR_DS_2_DATA_BYPASS,
+};
+
+enum data_interface_modes {
+ SINGLE_24B_INTERFACE = 0,
+ SINGLE_16B_INTERFACE = 1,
+ DUAL_8B_INTERFACE = 2,
+};
+
+enum sync_types {
+ EMBEDDED_SYNC_SINGLE_YUV422 = 0,
+ EMBEDDED_SYNC_2X_MULTIPLEXED_YUV422 = 1,
+ EMBEDDED_SYNC_4X_MULTIPLEXED_YUV422 = 2,
+ EMBEDDED_SYNC_LINE_MULTIPLEXED_YUV422 = 3,
+ DISCRETE_SYNC_SINGLE_YUV422 = 4,
+ EMBEDDED_SYNC_SINGLE_RGB_OR_YUV444 = 5,
+ DISCRETE_SYNC_SINGLE_RGB_24B = 10,
+};
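+/*
+ * vip_setup_parser() selects among these from the bus configuration,
+ * e.g. a BT.656 endpoint reporting two multiplexed channels picks
+ * EMBEDDED_SYNC_2X_MULTIPLEXED_YUV422 (illustrative).
+ */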
+
+#define VIP_NOT_ASSIGNED -1
+
+/*
+ * Register offsets and field selectors
+ */
+#define VIP_PID_FUNC 0xf02
+
+#define VIP_PID 0x0000
+#define VIP_PID_MINOR_MASK 0x3f
+#define VIP_PID_MINOR_SHIFT 0
+#define VIP_PID_CUSTOM_MASK 0x03
+#define VIP_PID_CUSTOM_SHIFT 6
+#define VIP_PID_MAJOR_MASK 0x07
+#define VIP_PID_MAJOR_SHIFT 8
+#define VIP_PID_RTL_MASK 0x1f
+#define VIP_PID_RTL_SHIFT 11
+#define VIP_PID_FUNC_MASK 0xfff
+#define VIP_PID_FUNC_SHIFT 16
+#define VIP_PID_SCHEME_MASK 0x03
+#define VIP_PID_SCHEME_SHIFT 30
+
+#define VIP_SYSCONFIG 0x0010
+#define VIP_SYSCONFIG_IDLE_MASK 0x03
+#define VIP_SYSCONFIG_IDLE_SHIFT 2
+#define VIP_SYSCONFIG_STANDBY_MASK 0x03
+#define VIP_SYSCONFIG_STANDBY_SHIFT 4
+#define VIP_FORCE_IDLE_MODE 0
+#define VIP_NO_IDLE_MODE 1
+#define VIP_SMART_IDLE_MODE 2
+#define VIP_SMART_IDLE_WAKEUP_MODE 3
+#define VIP_FORCE_STANDBY_MODE 0
+#define VIP_NO_STANDBY_MODE 1
+#define VIP_SMART_STANDBY_MODE 2
+#define VIP_SMART_STANDBY_WAKEUP_MODE 3
+
+#define VIP_INTC_INTX_OFFSET 0x0020
+
+#define VIP_INT0_STATUS0_RAW_SET 0x0020
+#define VIP_INT0_STATUS0_RAW VIP_INT0_STATUS0_RAW_SET
+#define VIP_INT0_STATUS0_CLR 0x0028
+#define VIP_INT0_STATUS0 VIP_INT0_STATUS0_CLR
+#define VIP_INT0_ENABLE0_SET 0x0030
+#define VIP_INT0_ENABLE0 VIP_INT0_ENABLE0_SET
+#define VIP_INT0_ENABLE0_CLR 0x0038
+#define VIP_INT0_LIST0_COMPLETE BIT(0)
+#define VIP_INT0_LIST0_NOTIFY BIT(1)
+#define VIP_INT0_LIST1_COMPLETE BIT(2)
+#define VIP_INT0_LIST1_NOTIFY BIT(3)
+#define VIP_INT0_LIST2_COMPLETE BIT(4)
+#define VIP_INT0_LIST2_NOTIFY BIT(5)
+#define VIP_INT0_LIST3_COMPLETE BIT(6)
+#define VIP_INT0_LIST3_NOTIFY BIT(7)
+#define VIP_INT0_LIST4_COMPLETE BIT(8)
+#define VIP_INT0_LIST4_NOTIFY BIT(9)
+#define VIP_INT0_LIST5_COMPLETE BIT(10)
+#define VIP_INT0_LIST5_NOTIFY BIT(11)
+#define VIP_INT0_LIST6_COMPLETE BIT(12)
+#define VIP_INT0_LIST6_NOTIFY BIT(13)
+#define VIP_INT0_LIST7_COMPLETE BIT(14)
+#define VIP_INT0_LIST7_NOTIFY BIT(15)
+#define VIP_INT0_DESCRIPTOR BIT(16)
+#define VIP_VIP1_PARSER_INT BIT(20)
+#define VIP_VIP2_PARSER_INT BIT(21)
+
+#define VIP_INT0_STATUS1_RAW_SET 0x0024
+#define VIP_INT0_STATUS1_RAW VIP_INT0_STATUS1_RAW_SET
+#define VIP_INT0_STATUS1_CLR 0x002c
+#define VIP_INT0_STATUS1 VIP_INT0_STATUS1_CLR
+#define VIP_INT0_ENABLE1_SET 0x0034
+#define VIP_INT0_ENABLE1 VIP_INT0_ENABLE1_SET
+#define VIP_INT0_ENABLE1_CLR 0x003c
+#define VIP_INT0_ENABLE1_STAT 0x004c
+#define VIP_INT0_CHANNEL_GROUP0 BIT(0)
+#define VIP_INT0_CHANNEL_GROUP1 BIT(1)
+#define VIP_INT0_CHANNEL_GROUP2 BIT(2)
+#define VIP_INT0_CHANNEL_GROUP3 BIT(3)
+#define VIP_INT0_CHANNEL_GROUP4 BIT(4)
+#define VIP_INT0_CHANNEL_GROUP5 BIT(5)
+#define VIP_INT0_CLIENT BIT(7)
+#define VIP_VIP1_DS1_UV_ERROR_INT BIT(22)
+#define VIP_VIP1_DS2_UV_ERROR_INT BIT(23)
+#define VIP_VIP2_DS1_UV_ERROR_INT BIT(24)
+#define VIP_VIP2_DS2_UV_ERROR_INT BIT(25)
+
+#define VIP_INTC_E0I 0x00a0
+
+#define VIP_CLK_ENABLE 0x0100
+#define VIP_VPDMA_CLK_ENABLE BIT(0)
+#define VIP_VIP1_DATA_PATH_CLK_ENABLE BIT(16)
+#define VIP_VIP2_DATA_PATH_CLK_ENABLE BIT(17)
+
+#define VIP_CLK_RESET 0x0104
+#define VIP_VPDMA_RESET BIT(0)
+#define VIP_VPDMA_CLK_RESET_MASK 0x1
+#define VIP_VPDMA_CLK_RESET_SHIFT 0
+#define VIP_DATA_PATH_CLK_RESET_MASK 0x1
+#define VIP_VIP1_DATA_PATH_RESET_SHIFT 16
+#define VIP_VIP2_DATA_PATH_RESET_SHIFT 17
+#define VIP_VIP1_DATA_PATH_RESET BIT(16)
+#define VIP_VIP2_DATA_PATH_RESET BIT(17)
+#define VIP_VIP1_PARSER_RESET BIT(18)
+#define VIP_VIP2_PARSER_RESET BIT(19)
+#define VIP_VIP1_CSC_RESET BIT(20)
+#define VIP_VIP2_CSC_RESET BIT(21)
+#define VIP_VIP1_SC_RESET BIT(22)
+#define VIP_VIP2_SC_RESET BIT(23)
+#define VIP_VIP1_DS1_RESET BIT(25)
+#define VIP_VIP2_DS1_RESET BIT(26)
+#define VIP_VIP1_DS2_RESET BIT(27)
+#define VIP_VIP2_DS2_RESET BIT(28)
+#define VIP_MAIN_RESET BIT(31)
+
+#define VIP_VIP1_DATA_PATH_SELECT 0x010c
+#define VIP_VIP2_DATA_PATH_SELECT 0x0110
+#define VIP_CSC_SRC_SELECT_MASK 0x07
+#define VIP_CSC_SRC_SELECT_SHFT 0
+#define VIP_SC_SRC_SELECT_MASK 0x07
+#define VIP_SC_SRC_SELECT_SHFT 3
+#define VIP_RGB_SRC_SELECT BIT(6)
+#define VIP_RGB_OUT_LO_SRC_SELECT BIT(7)
+#define VIP_RGB_OUT_HI_SRC_SELECT BIT(8)
+#define VIP_DS1_SRC_SELECT_MASK 0x07
+#define VIP_DS1_SRC_SELECT_SHFT 9
+#define VIP_DS2_SRC_SELECT_MASK 0x07
+#define VIP_DS2_SRC_SELECT_SHFT 12
+#define VIP_MULTI_CHANNEL_SELECT BIT(15)
+#define VIP_DS1_BYPASS BIT(16)
+#define VIP_DS2_BYPASS BIT(17)
+#define VIP_TESTPORT_B_SELECT BIT(26)
+#define VIP_TESTPORT_A_SELECT BIT(27)
+#define VIP_DATAPATH_SELECT_MASK 0x0f
+#define VIP_DATAPATH_SELECT_SHFT 28
+
+#define VIP1_PARSER_REG_OFFSET 0x5500
+#define VIP2_PARSER_REG_OFFSET 0x5a00
+
+#define VIP_PARSER_MAIN_CFG 0x0000
+#define VIP_DATA_INTERFACE_MODE_MASK 0x03
+#define VIP_DATA_INTERFACE_MODE_SHFT 0
+#define VIP_CLIP_BLANK BIT(4)
+#define VIP_CLIP_ACTIVE BIT(5)
+
+#define VIP_PARSER_PORTA_0 0x0004
+#define VIP_PARSER_PORTB_0 0x000c
+#define VIP_SYNC_TYPE_MASK 0x0f
+#define VIP_SYNC_TYPE_SHFT 0
+#define VIP_CTRL_CHANNEL_SEL_MASK 0x03
+#define VIP_CTRL_CHANNEL_SEL_SHFT 4
+#define VIP_ASYNC_FIFO_WR BIT(6)
+#define VIP_ASYNC_FIFO_RD BIT(7)
+#define VIP_PORT_ENABLE BIT(8)
+#define VIP_FID_POLARITY BIT(9)
+#define VIP_PIXCLK_EDGE_POLARITY BIT(10)
+#define VIP_HSYNC_POLARITY BIT(11)
+#define VIP_VSYNC_POLARITY BIT(12)
+#define VIP_ACTVID_POLARITY BIT(13)
+#define VIP_FID_DETECT_MODE BIT(14)
+#define VIP_USE_ACTVID_HSYNC_ONLY BIT(15)
+#define VIP_FID_SKEW_PRECOUNT_MASK 0x3f
+#define VIP_FID_SKEW_PRECOUNT_SHFT 16
+#define VIP_DISCRETE_BASIC_MODE BIT(22)
+#define VIP_SW_RESET BIT(23)
+#define VIP_FID_SKEW_POSTCOUNT_MASK 0x3f
+#define VIP_FID_SKEW_POSTCOUNT_SHFT 24
+#define VIP_ANALYZER_2X4X_SRCNUM_POS BIT(30)
+#define VIP_ANALYZER_FVH_ERR_COR_EN BIT(31)
+
+#define VIP_PARSER_PORTA_1 0x0008
+#define VIP_PARSER_PORTB_1 0x0010
+#define VIP_SRC0_NUMLINES_MASK 0x0fff
+#define VIP_SRC0_NUMLINES_SHFT 0
+#define VIP_ANC_CHAN_SEL_8B_MASK 0x03
+#define VIP_ANC_CHAN_SEL_8B_SHFT 13
+#define VIP_SRC0_NUMPIX_MASK 0x0fff
+#define VIP_SRC0_NUMPIX_SHFT 16
+#define VIP_REPACK_SEL_MASK 0x07
+#define VIP_REPACK_SEL_SHFT 28
+
+#define VIP_PARSER_FIQ_MASK 0x0014
+#define VIP_PARSER_FIQ_CLR 0x0018
+#define VIP_PARSER_FIQ_STATUS 0x001c
+#define VIP_PORTA_VDET BIT(0)
+#define VIP_PORTB_VDET BIT(1)
+#define VIP_PORTA_ASYNC_FIFO_OF BIT(2)
+#define VIP_PORTB_ASYNC_FIFO_OF BIT(3)
+#define VIP_PORTA_OUTPUT_FIFO_YUV BIT(4)
+#define VIP_PORTA_OUTPUT_FIFO_ANC BIT(6)
+#define VIP_PORTB_OUTPUT_FIFO_YUV BIT(7)
+#define VIP_PORTB_OUTPUT_FIFO_ANC BIT(9)
+#define VIP_PORTA_CONN BIT(10)
+#define VIP_PORTA_DISCONN BIT(11)
+#define VIP_PORTB_CONN BIT(12)
+#define VIP_PORTB_DISCONN BIT(13)
+#define VIP_PORTA_SRC0_SIZE BIT(14)
+#define VIP_PORTB_SRC0_SIZE BIT(15)
+#define VIP_PORTA_YUV_PROTO_VIOLATION BIT(16)
+#define VIP_PORTA_ANC_PROTO_VIOLATION BIT(17)
+#define VIP_PORTB_YUV_PROTO_VIOLATION BIT(18)
+#define VIP_PORTB_ANC_PROTO_VIOLATION BIT(19)
+#define VIP_PORTA_CFG_DISABLE_COMPLETE BIT(20)
+#define VIP_PORTB_CFG_DISABLE_COMPLETE BIT(21)
+
+#define VIP_PARSER_PORTA_SOURCE_FID 0x0020
+#define VIP_PARSER_PORTA_ENCODER_FID 0x0024
+#define VIP_PARSER_PORTB_SOURCE_FID 0x0028
+#define VIP_PARSER_PORTB_ENCODER_FID 0x002c
+
+#define VIP_PARSER_PORTA_SRC0_SIZE 0x0030
+#define VIP_PARSER_PORTB_SRC0_SIZE 0x0070
+#define VIP_SOURCE_HEIGHT_MASK 0x0fff
+#define VIP_SOURCE_HEIGHT_SHFT 0
+#define VIP_SOURCE_WIDTH_MASK 0x0fff
+#define VIP_SOURCE_WIDTH_SHFT 16
+
+#define VIP_PARSER_PORTA_VDET_VEC 0x00b0
+#define VIP_PARSER_PORTB_VDET_VEC 0x00b4
+
+#define VIP_PARSER_PORTA_EXTRA2 0x00b8
+#define VIP_PARSER_PORTB_EXTRA2 0x00c8
+#define VIP_ANC_SKIP_NUMPIX_MASK 0x0fff
+#define VIP_ANC_SKIP_NUMPIX_SHFT 0
+#define VIP_ANC_BYPASS BIT(15)
+#define VIP_ANC_USE_NUMPIX_MASK 0x0fff
+#define VIP_ANC_USE_NUMPIX_SHFT 16
+#define VIP_ANC_TARGET_SRCNUM_MASK 0x0f
+#define VIP_ANC_TARGET_SRCNUM_SHFT 28
+
+#define VIP_PARSER_PORTA_EXTRA3 0x00bc
+#define VIP_PARSER_PORTB_EXTRA3 0x00cc
+#define VIP_ANC_SKIP_NUMLINES_MASK 0x0fff
+#define VIP_ANC_SKIP_NUMLINES_SHFT 0
+#define VIP_ANC_USE_NUMLINES_MASK 0x0fff
+#define VIP_ANC_USE_NUMLINES_SHFT 16
+
+#define VIP_PARSER_PORTA_EXTRA4 0x00c0
+#define VIP_PARSER_PORTB_EXTRA4 0x00d0
+#define VIP_ACT_SKIP_NUMPIX_MASK 0x0fff
+#define VIP_ACT_SKIP_NUMPIX_SHFT 0
+#define VIP_ACT_BYPASS BIT(15)
+#define VIP_ACT_USE_NUMPIX_MASK 0x0fff
+#define VIP_ACT_USE_NUMPIX_SHFT 16
+#define VIP_ACT_TARGET_SRCNUM_MASK 0x0f
+#define VIP_ACT_TARGET_SRCNUM_SHFT 28
+
+#define VIP_PARSER_PORTA_EXTRA5 0x00c4
+#define VIP_PARSER_PORTB_EXTRA5 0x00d4
+#define VIP_ACT_SKIP_NUMLINES_MASK 0x0fff
+#define VIP_ACT_SKIP_NUMLINES_SHFT 0
+#define VIP_ACT_USE_NUMLINES_MASK 0x0fff
+#define VIP_ACT_USE_NUMLINES_SHFT 16
+
+#define VIP_PARSER_PORTA_EXTRA6 0x00d8
+#define VIP_PARSER_PORTB_EXTRA6 0x00dc
+#define VIP_ANC_SRCNUM_STOP_IMM_SHFT 0
+#define VIP_YUV_SRCNUM_STOP_IMM_SHFT 16
+
+#define VIP_CSC_CSC00 0x0200
+#define VIP_CSC_A0_MASK 0x1fff
+#define VIP_CSC_A0_SHFT 0
+#define VIP_CSC_B0_MASK 0x1fff
+#define VIP_CSC_B0_SHFT 16
+
+#define VIP_CSC_CSC01 0x0204
+#define VIP_CSC_C0_MASK 0x1fff
+#define VIP_CSC_C0_SHFT 0
+#define VIP_CSC_A1_MASK 0x1fff
+#define VIP_CSC_A1_SHFT 16
+
+#define VIP_CSC_CSC02 0x0208
+#define VIP_CSC_B1_MASK 0x1fff
+#define VIP_CSC_B1_SHFT 0
+#define VIP_CSC_C1_MASK 0x1fff
+#define VIP_CSC_C1_SHFT 16
+
+#define VIP_CSC_CSC03 0x020c
+#define VIP_CSC_A2_MASK 0x1fff
+#define VIP_CSC_A2_SHFT 0
+#define VIP_CSC_B2_MASK 0x1fff
+#define VIP_CSC_B2_SHFT 16
+
+#define VIP_CSC_CSC04 0x0210
+#define VIP_CSC_C2_MASK 0x1fff
+#define VIP_CSC_C2_SHFT 0
+#define VIP_CSC_D0_MASK 0x0fff
+#define VIP_CSC_D0_SHFT 16
+
+#define VIP_CSC_CSC05 0x0214
+#define VIP_CSC_D1_MASK 0x0fff
+#define VIP_CSC_D1_SHFT 0
+#define VIP_CSC_D2_MASK 0x0fff
+#define VIP_CSC_D2_SHFT 16
+#define VIP_CSC_BYPASS BIT(28)
+
+#define VIP_SC_MP_SC0 0x0300
+#define VIP_INTERLACE_O BIT(0)
+#define VIP_LINEAR BIT(1)
+#define VIP_SC_BYPASS BIT(2)
+#define VIP_INVT_FID BIT(3)
+#define VIP_USE_RAV BIT(4)
+#define VIP_ENABLE_EV BIT(5)
+#define VIP_AUTH_HS BIT(6)
+#define VIP_DCM_2X BIT(7)
+#define VIP_DCM_4X BIT(8)
+#define VIP_HP_BYPASS BIT(9)
+#define VIP_INTERLACE_I BIT(10)
+#define VIP_ENABLE_SIN2_VER_INTP BIT(11)
+#define VIP_Y_PK_EN BIT(14)
+#define VIP_TRIM BIT(15)
+#define VIP_SELFGEN_FID BIT(16)
+
+#define VIP_SC_MP_SC1 0x0304
+#define VIP_ROW_ACC_INC_MASK 0x07ffffff
+#define VIP_ROW_ACC_INC_SHFT 0
+
+#define VIP_SC_MP_SC2 0x0308
+#define VIP_ROW_ACC_OFFSET_MASK 0x0fffffff
+#define VIP_ROW_ACC_OFFSET_SHFT 0
+
+#define VIP_SC_MP_SC3 0x030c
+#define VIP_ROW_ACC_OFFSET_B_MASK 0x0fffffff
+#define VIP_ROW_ACC_OFFSET_B_SHFT 0
+
+#define VIP_SC_MP_SC4 0x0310
+#define VIP_TAR_H_MASK 0x07ff
+#define VIP_TAR_H_SHFT 0
+#define VIP_TAR_W_MASK 0x07ff
+#define VIP_TAR_W_SHFT 12
+#define VIP_LIN_ACC_INC_U_MASK 0x07
+#define VIP_LIN_ACC_INC_U_SHFT 24
+#define VIP_NLIN_ACC_INIT_U_MASK 0x07
+#define VIP_NLIN_ACC_INIT_U_SHFT 28
+
+#define VIP_SC_MP_SC5 0x0314
+#define VIP_SRC_H_MASK 0x03ff
+#define VIP_SRC_H_SHFT 0
+#define VIP_SRC_W_MASK 0x07ff
+#define VIP_SRC_W_SHFT 12
+#define VIP_NLIN_ACC_INC_U_MASK 0x07
+#define VIP_NLIN_ACC_INC_U_SHFT 24
+
+#define VIP_SC_MP_SC6 0x0318
+#define VIP_ROW_ACC_INIT_RAV_MASK 0x03ff
+#define VIP_ROW_ACC_INIT_RAV_SHFT 0
+#define VIP_ROW_ACC_INIT_RAV_B_MASK 0x03ff
+#define VIP_ROW_ACC_INIT_RAV_B_SHFT 10
+
+#define VIP_SC_MP_SC8 0x0320
+#define VIP_NLIN_LEFT_MASK 0x07ff
+#define VIP_NLIN_LEFT_SHFT 0
+#define VIP_NLIN_RIGHT_MASK 0x07ff
+#define VIP_NLIN_RIGHT_SHFT 12
+
+#define VIP_SC_MP_SC9 0x0324
+#define VIP_LIN_ACC_INC VIP_SC_MP_SC9
+
+#define VIP_SC_MP_SC10 0x0328
+#define VIP_NLIN_ACC_INIT VIP_SC_MP_SC10
+
+#define VIP_SC_MP_SC11 0x032c
+#define VIP_NLIN_ACC_INC VIP_SC_MP_SC11
+
+#define VIP_SC_MP_SC12 0x0330
+#define VIP_COL_ACC_OFFSET_MASK 0x01ffffff
+#define VIP_COL_ACC_OFFSET_SHFT 0
+
+#define VIP_SC_MP_SC13 0x0334
+#define VIP_SC_FACTOR_RAV_MASK 0x03ff
+#define VIP_SC_FACTOR_RAV_SHFT 0
+#define VIP_CHROMA_INTP_THR_MASK 0x03ff
+#define VIP_CHROMA_INTP_THR_SHFT 12
+#define VIP_DELTA_CHROMA_THR_MASK 0x0f
+#define VIP_DELTA_CHROMA_THR_SHFT 24
+
+#define VIP_SC_MP_SC17 0x0344
+#define VIP_EV_THR_MASK 0x03ff
+#define VIP_EV_THR_SHFT 12
+#define VIP_DELTA_LUMA_THR_MASK 0x0f
+#define VIP_DELTA_LUMA_THR_SHFT 24
+#define VIP_DELTA_EV_THR_MASK 0x0f
+#define VIP_DELTA_EV_THR_SHFT 28
+
+#define VIP_SC_MP_SC18 0x0348
+#define VIP_HS_FACTOR_MASK 0x03ff
+#define VIP_HS_FACTOR_SHFT 0
+#define VIP_CONF_DEFAULT_MASK 0x01ff
+#define VIP_CONF_DEFAULT_SHFT 16
+
+#define VIP_SC_MP_SC19 0x034c
+#define VIP_HPF_COEFF0_MASK 0xff
+#define VIP_HPF_COEFF0_SHFT 0
+#define VIP_HPF_COEFF1_MASK 0xff
+#define VIP_HPF_COEFF1_SHFT 8
+#define VIP_HPF_COEFF2_MASK 0xff
+#define VIP_HPF_COEFF2_SHFT 16
+#define VIP_HPF_COEFF3_MASK 0xff
+#define VIP_HPF_COEFF3_SHFT 24
+
+#define VIP_SC_MP_SC20 0x0350
+#define VIP_HPF_COEFF4_MASK 0xff
+#define VIP_HPF_COEFF4_SHFT 0
+#define VIP_HPF_COEFF5_MASK 0xff
+#define VIP_HPF_COEFF5_SHFT 8
+#define VIP_HPF_NORM_SHFT_MASK 0x07
+#define VIP_HPF_NORM_SHFT_SHFT 16
+#define VIP_NL_LIMIT_MASK 0x1ff
+#define VIP_NL_LIMIT_SHFT 20
+
+#define VIP_SC_MP_SC21 0x0354
+#define VIP_NL_LO_THR_MASK 0x01ff
+#define VIP_NL_LO_THR_SHFT 0
+#define VIP_NL_LO_SLOPE_MASK 0xff
+#define VIP_NL_LO_SLOPE_SHFT 16
+
+#define VIP_SC_MP_SC22 0x0358
+#define VIP_NL_HI_THR_MASK 0x01ff
+#define VIP_NL_HI_THR_SHFT 0
+#define VIP_NL_HI_SLOPE_SH_MASK 0x07
+#define VIP_NL_HI_SLOPE_SH_SHFT 16
+
+#define VIP_SC_MP_SC23 0x035c
+#define VIP_GRADIENT_THR_MASK 0x07ff
+#define VIP_GRADIENT_THR_SHFT 0
+#define VIP_GRADIENT_THR_RANGE_MASK 0x0f
+#define VIP_GRADIENT_THR_RANGE_SHFT 12
+#define VIP_MIN_GY_THR_MASK 0xff
+#define VIP_MIN_GY_THR_SHFT 16
+#define VIP_MIN_GY_THR_RANGE_MASK 0x0f
+#define VIP_MIN_GY_THR_RANGE_SHFT 28
+
+#define VIP_SC_MP_SC24 0x0360
+#define VIP_ORG_H_MASK 0x07ff
+#define VIP_ORG_H_SHFT 0
+#define VIP_ORG_W_MASK 0x07ff
+#define VIP_ORG_W_SHFT 16
+
+#define VIP_SC_MP_SC25 0x0364
+#define VIP_OFF_H_MASK 0x07ff
+#define VIP_OFF_H_SHFT 0
+#define VIP_OFF_W_MASK 0x07ff
+#define VIP_OFF_W_SHFT 16
+
+#define VIP_VPDMA_REG_OFFSET 0xd000
+
+#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti/vpe/vpdma.c
index 2e5148ae7a0f..2e5148ae7a0f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti/vpe/vpdma.c
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti/vpe/vpdma.h
index 393fcbb3cb40..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti/vpe/vpdma.h
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti/vpe/vpdma_priv.h
index 0bbee45338bd..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti/vpe/vpdma_priv.h
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti/vpe/vpe.c
index 779dd74b82d0..e9a99a9b7366 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti/vpe/vpe.c
@@ -2473,11 +2473,9 @@ static int vpe_runtime_get(struct platform_device *pdev)
dev_dbg(&pdev->dev, "vpe_runtime_get\n");
- r = pm_runtime_get_sync(&pdev->dev);
+ r = pm_runtime_resume_and_get(&pdev->dev);
WARN_ON(r < 0);
- if (r)
- pm_runtime_put_noidle(&pdev->dev);
- return r < 0 ? r : 0;
+ return r;
}
static void vpe_runtime_put(struct platform_device *pdev)
@@ -2582,7 +2580,7 @@ static int vpe_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = vpe_runtime_get(pdev);
- if (ret)
+ if (ret < 0)
goto rel_m2m;
/* Perform clk enable followed by reset */
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti/vpe/vpe_regs.h
index 1a1ad5ae1228..1a1ad5ae1228 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti/vpe/vpe_regs.h
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index ed0ad68c5c48..3655573e8581 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -844,6 +844,9 @@ static int viacam_do_try_fmt(struct via_camera *cam,
{
int ret;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -852,7 +855,7 @@ static int viacam_do_try_fmt(struct via_camera *cam,
upix->pixelformat = f->pixelformat;
viacam_fmt_pre(upix, spix);
v4l2_fill_mbus_format(&format.format, spix, f->mbus_code);
- ret = sensor_call(cam, pad, set_fmt, &pad_cfg, &format);
+ ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(spix, &format.format);
viacam_fmt_post(upix, spix);
return ret;
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 640ce76fe0d9..e39bef853bed 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -140,14 +140,14 @@ static const struct v4l2_subdev_video_ops video_mux_subdev_video_ops = {
static struct v4l2_mbus_framefmt *
__video_mux_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &vmux->format_mbus[pad];
default:
@@ -156,14 +156,15 @@ __video_mux_get_pad_format(struct v4l2_subdev *sd,
}
static int video_mux_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
mutex_lock(&vmux->lock);
- sdformat->format = *__video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->format = *__video_mux_get_pad_format(sd, sd_state,
+ sdformat->pad,
sdformat->which);
mutex_unlock(&vmux->lock);
@@ -172,7 +173,7 @@ static int video_mux_get_format(struct v4l2_subdev *sd,
}
static int video_mux_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
@@ -180,12 +181,13 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
struct media_pad *pad = &vmux->pads[sdformat->pad];
u16 source_pad = sd->entity.num_pads - 1;
- mbusformat = __video_mux_get_pad_format(sd, cfg, sdformat->pad,
- sdformat->which);
+ mbusformat = __video_mux_get_pad_format(sd, sd_state, sdformat->pad,
+ sdformat->which);
if (!mbusformat)
return -EINVAL;
- source_mbusformat = __video_mux_get_pad_format(sd, cfg, source_pad,
+ source_mbusformat = __video_mux_get_pad_format(sd, sd_state,
+ source_pad,
sdformat->which);
if (!source_mbusformat)
return -EINVAL;
@@ -310,7 +312,7 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
}
static int video_mux_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
struct v4l2_mbus_framefmt *mbusformat;
@@ -319,7 +321,7 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&vmux->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
- mbusformat = v4l2_subdev_get_try_format(sd, cfg, i);
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
*mbusformat = video_mux_format_mbus_default;
}
diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c
index 2d86c718a5cf..89385b4cabe5 100644
--- a/drivers/media/platform/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/vsp1/vsp1_brx.c
@@ -65,7 +65,7 @@ static const struct v4l2_ctrl_ops brx_ctrl_ops = {
*/
static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -73,12 +73,12 @@ static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int brx_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
@@ -97,14 +97,14 @@ static int brx_enum_frame_size(struct v4l2_subdev *subdev,
}
static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_compose(&brx->entity.subdev, cfg, pad);
+ return v4l2_subdev_get_try_compose(&brx->entity.subdev, sd_state, pad);
}
static void brx_try_format(struct vsp1_brx *brx,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -119,7 +119,7 @@ static void brx_try_format(struct vsp1_brx *brx,
default:
/* The BRx can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&brx->entity, config,
+ format = vsp1_entity_get_pad_format(&brx->entity, sd_state,
BRX_PAD_SINK(0));
fmt->code = format->code;
break;
@@ -132,17 +132,18 @@ static void brx_try_format(struct vsp1_brx *brx,
}
static int brx_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -181,11 +182,11 @@ done:
}
static int brx_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
if (sel->pad == brx->entity.source_pad)
return -EINVAL;
@@ -199,7 +200,7 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
return 0;
case V4L2_SEL_TGT_COMPOSE:
- config = vsp1_entity_get_pad_config(&brx->entity, cfg,
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
sel->which);
if (!config)
return -EINVAL;
@@ -215,11 +216,11 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
}
static int brx_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
int ret = 0;
@@ -232,7 +233,8 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
index a47b23bf5abf..c5217fee24f1 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -123,27 +123,28 @@ static const unsigned int clu_codes[] = {
};
static int clu_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, clu_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, clu_codes,
ARRAY_SIZE(clu_codes));
}
static int clu_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, CLU_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ CLU_MIN_SIZE,
CLU_MIN_SIZE, CLU_MAX_SIZE,
CLU_MAX_SIZE);
}
static int clu_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, clu_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, clu_codes,
ARRAY_SIZE(clu_codes),
CLU_MIN_SIZE, CLU_MIN_SIZE,
CLU_MAX_SIZE, CLU_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index aa9d2286056e..dded3ff57830 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -114,9 +114,9 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
* and simply returned when requested. The ACTIVE configuration comes from the
* entity structure.
*/
-struct v4l2_subdev_pad_config *
+struct v4l2_subdev_state *
vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
switch (which) {
@@ -124,7 +124,7 @@ vsp1_entity_get_pad_config(struct vsp1_entity *entity,
return entity->config;
case V4L2_SUBDEV_FORMAT_TRY:
default:
- return cfg;
+ return sd_state;
}
}
@@ -139,10 +139,10 @@ vsp1_entity_get_pad_config(struct vsp1_entity *entity,
*/
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&entity->subdev, sd_state, pad);
}
/**
@@ -158,14 +158,16 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
*/
struct v4l2_rect *
vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, unsigned int target)
{
switch (target) {
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_compose(&entity->subdev, sd_state,
+ pad);
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(&entity->subdev, cfg, pad);
+ return v4l2_subdev_get_try_crop(&entity->subdev, sd_state,
+ pad);
default:
return NULL;
}
@@ -180,7 +182,7 @@ vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
* function can be used as a handler for the subdev pad::init_cfg operation.
*/
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format format;
unsigned int pad;
@@ -189,10 +191,10 @@ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
memset(&format, 0, sizeof(format));
format.pad = pad;
- format.which = cfg ? V4L2_SUBDEV_FORMAT_TRY
+ format.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
: V4L2_SUBDEV_FORMAT_ACTIVE;
- v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
+ v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
}
return 0;
@@ -208,13 +210,13 @@ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
* a direct drop-in for the operation handler.
*/
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
- config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
if (!config)
return -EINVAL;
@@ -239,7 +241,7 @@ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
* the sink pad.
*/
int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code,
const unsigned int *codes, unsigned int ncodes)
{
@@ -251,7 +253,7 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
code->code = codes[code->index];
} else {
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
/*
@@ -261,7 +263,8 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- config = vsp1_entity_get_pad_config(entity, cfg, code->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state,
+ code->which);
if (!config)
return -EINVAL;
@@ -290,17 +293,17 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
* source pad size identical to the sink pad.
*/
int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fse->which);
if (!config)
return -EINVAL;
@@ -353,14 +356,14 @@ done:
* source pad.
*/
int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const unsigned int *codes, unsigned int ncodes,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
unsigned int i;
@@ -368,7 +371,7 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
mutex_lock(&entity->lock);
- config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -610,6 +613,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
const char *name, unsigned int num_pads,
const struct v4l2_subdev_ops *ops, u32 function)
{
+ static struct lock_class_key key;
struct v4l2_subdev *subdev;
unsigned int i;
int ret;
@@ -672,10 +676,11 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
* Allocate the pad configuration to store formats and selection
* rectangles.
*/
- entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
- if (entity->config == NULL) {
+ entity->config = __v4l2_subdev_state_alloc(&entity->subdev,
+ "vsp1:config->lock", &key);
+ if (IS_ERR(entity->config)) {
media_entity_cleanup(&entity->subdev.entity);
- return -ENOMEM;
+ return PTR_ERR(entity->config);
}
return 0;
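A note on the new lockdep key, as a minimal sketch of the alloc/free pairing this hunk introduces (the wrapper below is hypothetical): declaring the key static gives every state allocated at this call site a single lockdep class, and each state from __v4l2_subdev_state_alloc() must later be released with __v4l2_subdev_state_free():

    static int example_alloc_state(struct v4l2_subdev *sd,
                                   struct v4l2_subdev_state **statep)
    {
            static struct lock_class_key key;  /* one lockdep class per call site */
            struct v4l2_subdev_state *state;

            state = __v4l2_subdev_state_alloc(sd, "example:state->lock", &key);
            if (IS_ERR(state))
                    return PTR_ERR(state);

            *statep = state;  /* release with __v4l2_subdev_state_free() */
            return 0;
    }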
@@ -687,6 +692,6 @@ void vsp1_entity_destroy(struct vsp1_entity *entity)
entity->ops->destroy(entity);
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
- v4l2_subdev_free_pad_config(entity->config);
+ __v4l2_subdev_state_free(entity->config);
media_entity_cleanup(&entity->subdev.entity);
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index a1ceb37bb837..f22724439cdc 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -115,7 +115,7 @@ struct vsp1_entity {
unsigned int sink_pad;
struct v4l2_subdev subdev;
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct mutex lock; /* Protects the pad config */
};
@@ -136,20 +136,20 @@ int vsp1_entity_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
-struct v4l2_subdev_pad_config *
+struct v4l2_subdev_state *
vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which);
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad);
struct v4l2_rect *
vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, unsigned int target);
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *sd_state);
void vsp1_entity_route_setup(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
@@ -173,20 +173,20 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt);
int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt,
const unsigned int *codes, unsigned int ncodes,
unsigned int min_width, unsigned int min_height,
unsigned int max_width, unsigned int max_height);
int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code,
const unsigned int *codes, unsigned int ncodes);
int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse,
unsigned int min_w, unsigned int min_h,
unsigned int max_w, unsigned int max_h);
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
index a91e142bcb94..5e5013d2cd2a 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -170,7 +170,7 @@ static const struct vb2_ops histo_video_queue_qops = {
*/
static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
@@ -180,28 +180,30 @@ static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
return 0;
}
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, histo->formats,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code,
+ histo->formats,
histo->num_formats);
}
static int histo_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->pad != HISTO_PAD_SINK)
return -EINVAL;
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HISTO_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HISTO_MIN_SIZE,
HISTO_MIN_SIZE, HISTO_MAX_SIZE,
HISTO_MAX_SIZE);
}
static int histo_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -211,7 +213,8 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -256,15 +259,15 @@ done:
}
static int histo_set_crop(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
- struct v4l2_subdev_selection *sel)
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
/* The crop rectangle must be inside the input frame. */
- format = vsp1_entity_get_pad_format(&histo->entity, config,
+ format = vsp1_entity_get_pad_format(&histo->entity, sd_state,
HISTO_PAD_SINK);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -274,11 +277,11 @@ static int histo_set_crop(struct v4l2_subdev *subdev,
format->height - sel->r.top);
/* Set the crop rectangle and reset the compose rectangle. */
- selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad, V4L2_SEL_TGT_CROP);
*selection = sel->r;
- selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad,
V4L2_SEL_TGT_COMPOSE);
*selection = sel->r;
@@ -287,7 +290,7 @@ static int histo_set_crop(struct v4l2_subdev *subdev,
}
static int histo_set_compose(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
@@ -303,7 +306,8 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
sel->r.left = 0;
sel->r.top = 0;
- crop = vsp1_entity_get_pad_selection(&histo->entity, config, sel->pad,
+ crop = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad,
V4L2_SEL_TGT_CROP);
/*
@@ -329,7 +333,7 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
ratio = 1 << (crop->height * 2 / sel->r.height / 3);
sel->r.height = crop->height / ratio;
- compose = vsp1_entity_get_pad_selection(&histo->entity, config,
+ compose = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
sel->pad,
V4L2_SEL_TGT_COMPOSE);
*compose = sel->r;
@@ -338,11 +342,11 @@ static int histo_set_compose(struct v4l2_subdev *subdev,
}
static int histo_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
int ret;
if (sel->pad != HISTO_PAD_SINK)
@@ -350,7 +354,8 @@ static int histo_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -369,7 +374,7 @@ done:
}
static int histo_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
if (fmt->pad == HISTO_PAD_SOURCE) {
@@ -381,19 +386,19 @@ static int histo_get_format(struct v4l2_subdev *subdev,
return 0;
}
- return vsp1_subdev_get_pad_format(subdev, cfg, fmt);
+ return vsp1_subdev_get_pad_format(subdev, sd_state, fmt);
}
static int histo_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
if (fmt->pad != HISTO_PAD_SINK)
- return histo_get_format(subdev, cfg, fmt);
+ return histo_get_format(subdev, sd_state, fmt);
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt,
histo->formats, histo->num_formats,
HISTO_MIN_SIZE, HISTO_MIN_SIZE,
HISTO_MAX_SIZE, HISTO_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index d5ebd9d08c8a..361a870380c2 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -34,7 +34,7 @@ static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
*/
static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
@@ -52,26 +52,28 @@ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HSIT_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HSIT_MIN_SIZE,
HSIT_MIN_SIZE, HSIT_MAX_SIZE,
HSIT_MAX_SIZE);
}
static int hsit_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&hsit->entity.lock);
- config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&hsit->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 14ed5d7bd061..6a6857ac9327 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -40,27 +40,28 @@ static const unsigned int lif_codes[] = {
};
static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lif_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lif_codes,
ARRAY_SIZE(lif_codes));
}
static int lif_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LIF_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LIF_MIN_SIZE,
LIF_MIN_SIZE, LIF_MAX_SIZE,
LIF_MAX_SIZE);
}
static int lif_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lif_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lif_codes,
ARRAY_SIZE(lif_codes),
LIF_MIN_SIZE, LIF_MIN_SIZE,
LIF_MAX_SIZE, LIF_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 9f88842d7048..ac6802a325f5 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -99,27 +99,28 @@ static const unsigned int lut_codes[] = {
};
static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lut_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lut_codes,
ARRAY_SIZE(lut_codes));
}
static int lut_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LUT_MIN_SIZE,
LUT_MIN_SIZE, LUT_MAX_SIZE,
LUT_MAX_SIZE);
}
static int lut_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lut_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lut_codes,
ARRAY_SIZE(lut_codes),
LUT_MIN_SIZE, LUT_MIN_SIZE,
LUT_MAX_SIZE, LUT_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 049bdd958e56..22a82d218152 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -17,9 +17,9 @@
#define RWPF_MIN_HEIGHT 1
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
- struct v4l2_subdev_pad_config *config)
+ struct v4l2_subdev_state *sd_state)
{
- return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, config,
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, sd_state,
RWPF_PAD_SINK);
}
@@ -28,7 +28,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
*/
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -46,28 +46,30 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
}
static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, RWPF_MIN_WIDTH,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ RWPF_MIN_WIDTH,
RWPF_MIN_HEIGHT, rwpf->max_width,
rwpf->max_height);
}
static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -128,11 +130,11 @@ done:
}
static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -145,7 +147,8 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -176,11 +179,11 @@ done:
}
static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -197,7 +200,8 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 2f3582590618..eac5c04c2239 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -84,6 +84,6 @@ int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
- struct v4l2_subdev_pad_config *config);
+ struct v4l2_subdev_state *sd_state);
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 2b65457ee12f..b614a2aea461 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -106,7 +106,7 @@ static const struct v4l2_ctrl_config sru_intensity_control = {
*/
static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -114,20 +114,21 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int sru_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fse->which);
if (!config)
return -EINVAL;
@@ -164,7 +165,7 @@ done:
}
static void sru_try_format(struct vsp1_sru *sru,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -184,7 +185,7 @@ static void sru_try_format(struct vsp1_sru *sru,
case SRU_PAD_SOURCE:
/* The SRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&sru->entity, config,
+ format = vsp1_entity_get_pad_format(&sru->entity, sd_state,
SRU_PAD_SINK);
fmt->code = format->code;
@@ -216,17 +217,18 @@ static void sru_try_format(struct vsp1_sru *sru,
}
static int sru_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&sru->entity.lock);
- config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 5fc04c082d1a..1c290cda005a 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -111,7 +111,7 @@ static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
*/
static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
@@ -119,20 +119,21 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
ARRAY_SIZE(codes));
}
static int uds_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fse->which);
if (!config)
return -EINVAL;
@@ -164,7 +165,7 @@ done:
}
static void uds_try_format(struct vsp1_uds *uds,
- struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -184,7 +185,7 @@ static void uds_try_format(struct vsp1_uds *uds,
case UDS_PAD_SOURCE:
/* The UDS scales but can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&uds->entity, config,
+ format = vsp1_entity_get_pad_format(&uds->entity, sd_state,
UDS_PAD_SINK);
fmt->code = format->code;
@@ -200,17 +201,18 @@ static void uds_try_format(struct vsp1_uds *uds,
}
static int uds_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&uds->entity.lock);
- config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fmt->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_uif.c b/drivers/media/platform/vsp1/vsp1_uif.c
index 467d1072577b..83d7f17df80e 100644
--- a/drivers/media/platform/vsp1/vsp1_uif.c
+++ b/drivers/media/platform/vsp1/vsp1_uif.c
@@ -54,38 +54,39 @@ static const unsigned int uif_codes[] = {
};
static int uif_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- return vsp1_subdev_enum_mbus_code(subdev, cfg, code, uif_codes,
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, uif_codes,
ARRAY_SIZE(uif_codes));
}
static int uif_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- return vsp1_subdev_enum_frame_size(subdev, cfg, fse, UIF_MIN_SIZE,
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ UIF_MIN_SIZE,
UIF_MIN_SIZE, UIF_MAX_SIZE,
UIF_MAX_SIZE);
}
static int uif_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return vsp1_subdev_set_pad_format(subdev, cfg, fmt, uif_codes,
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, uif_codes,
ARRAY_SIZE(uif_codes),
UIF_MIN_SIZE, UIF_MIN_SIZE,
UIF_MAX_SIZE, UIF_MAX_SIZE);
}
static int uif_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -94,7 +95,8 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
@@ -127,11 +129,11 @@ done:
}
static int uif_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_pad_config *config;
+ struct v4l2_subdev_state *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
int ret = 0;
@@ -142,7 +144,8 @@ static int uif_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, cfg, sel->which);
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
if (!config) {
ret = -EINVAL;
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 044eb5778820..978f820b0f34 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -559,8 +559,8 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
struct vsp1_video *video)
{
struct media_graph graph;
- struct media_entity *entity = &video->video.entity;
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_pad *pad = video->video.entity.pads;
+ struct media_device *mdev = video->video.entity.graph_obj.mdev;
unsigned int i;
int ret;
@@ -569,17 +569,17 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
if (ret)
return ret;
- media_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, pad);
- while ((entity = media_graph_walk_next(&graph))) {
+ while ((pad = media_graph_walk_next(&graph))) {
struct v4l2_subdev *subdev;
struct vsp1_rwpf *rwpf;
struct vsp1_entity *e;
- if (!is_media_entity_v4l2_subdev(entity))
+ if (!is_media_entity_v4l2_subdev(pad->entity))
continue;
- subdev = media_entity_to_v4l2_subdev(entity);
+ subdev = media_entity_to_v4l2_subdev(pad->entity);
e = to_vsp1_entity(subdev);
list_add_tail(&e->list_pipe, &pipe->entities);
e->pipe = pipe;
@@ -927,7 +927,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
}
mutex_unlock(&pipe->lock);
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
vsp1_video_release_buffers(video);
vsp1_video_pipeline_put(pipe);
}
@@ -1048,7 +1048,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return PTR_ERR(pipe);
}
- ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
+ ret = __media_pipeline_start(video->video.entity.pads, &pipe->pipe);
if (ret < 0) {
mutex_unlock(&mdev->graph_mutex);
goto err_pipe;
@@ -1072,7 +1072,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return 0;
err_stop:
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
err_pipe:
vsp1_video_pipeline_put(pipe);
return ret;
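The vsp1_video.c hunks above follow this tree's pad-based media graph API, where walks and pipelines start from a media_pad rather than a media_entity (mainline walks entities). A minimal sketch of the walk pattern, with a hypothetical caller:

    static void example_walk_pipeline(struct media_graph *graph,
                                      struct video_device *vdev)
    {
            struct media_pad *pad;

            media_graph_walk_start(graph, vdev->entity.pads);

            while ((pad = media_graph_walk_next(graph))) {
                    if (!is_media_entity_v4l2_subdev(pad->entity))
                            continue;
                    /* pad->entity is a subdev that belongs to the pipeline */
            }
    }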
diff --git a/drivers/media/platform/vxe-vxd/Makefile b/drivers/media/platform/vxe-vxd/Makefile
new file mode 100644
index 000000000000..8f159aa0f56a
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/Makefile
@@ -0,0 +1,163 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Optional Video feature configuration control
+
+# (1)
+# This config enables or disables HEVC/H265 video decoding
+# with the IMG VXD video decoder. If you do not want HEVC
+# decode capability, select N.
+# If unsure, select Y.
+HAS_HEVC ?=y
+
+# (2)
+# This config enables error concealment with gray pattern.
+# Disable if you do not want error concealment capability.
+# If unsure, say Y
+ERROR_CONCEALMENT ?=y
+
+# (3)
+# This config, if enabled, configures H264 video decoder to
+# output frames in the decode order with no buffering and
+# picture reordering inside codec.
+# If unsure, say N
+REDUCED_DPB_NO_PIC_REORDERING ?=n
+
+# (4)
+# This config, if enabled, turns on all the debug traces in
+# the decoder driver. Enable it only for debugging, and keep
+# it disabled in release builds.
+DEBUG_DECODER_DRIVER ?=n
+
+# (5)
+# This config enables or disables MJPEG video decoding
+# with the IMG VXD video decoder. If you do not want MJPEG
+# decode capability, select N.
+# If unsure, select Y.
+HAS_JPEG ?=y
+
+# (6)
+# This config allows simulation of error recovery.
+# It is only for testing; never enable it for a release build.
+ERROR_RECOVERY_SIMULATION ?=n
+
+# (7)
+# This config enables allocation of capture buffers from
+# dma contiguous memory.
+# If unsure, say Y
+CAPTURE_CONTIG_ALLOC ?=y
+
+vxd-dec-y += common/img_mem_man.o \
+ common/img_mem_unified.o \
+ common/imgmmu.o \
+ common/pool_api.o \
+ common/idgen_api.o \
+ common/talmmu_api.o \
+ common/pool.o \
+ common/hash.o \
+ common/ra.o \
+ common/addr_alloc.o \
+ common/work_queue.o \
+ common/lst.o \
+ common/dq.o \
+ common/resource.o \
+ common/rman_api.o \
+
+vxd-dec-y += decoder/vxd_core.o \
+ decoder/vxd_pvdec.o \
+ decoder/dec_resources.o \
+ decoder/pixel_api.o \
+ decoder/vdecdd_utils_buf.o \
+ decoder/vdecdd_utils.o \
+ decoder/vdec_mmu_wrapper.o \
+ decoder/hw_control.o \
+ decoder/vxd_int.o \
+ decoder/translation_api.o \
+ decoder/decoder.o \
+ decoder/core.o \
+ decoder/swsr.o \
+ decoder/h264_secure_parser.o \
+ decoder/bspp.o \
+ decoder/vxd_dec.o \
+ decoder/vxd_v4l2.o \
+
+
+ifeq ($(HAS_HEVC),y)
+ccflags-y += -DHAS_HEVC
+vxd-dec-y += decoder/hevc_secure_parser.o
+endif
+
+ifeq ($(HAS_JPEG),y)
+ccflags-y += -DHAS_JPEG
+vxd-dec-y += decoder/jpeg_secure_parser.o
+endif
+
+ifeq ($(DEBUG_DECODER_DRIVER), y)
+ccflags-y += -DDEBUG_DECODER_DRIVER
+ccflags-y += -DDEBUG
+endif
+
+ifeq ($(ERROR_CONCEALMENT),y)
+ccflags-y += -DERROR_CONCEALMENT
+endif
+
+ifeq ($(REDUCED_DPB_NO_PIC_REORDERING),y)
+ccflags-y += -DREDUCED_DPB_NO_PIC_REORDERING
+endif
+
+ifeq ($(ERROR_RECOVERY_SIMULATION),y)
+ccflags-y += -DERROR_RECOVERY_SIMULATION
+endif
+
+ifeq ($(CAPTURE_CONTIG_ALLOC),y)
+ccflags-y += -DCAPTURE_CONTIG_ALLOC
+endif
+
+obj-$(CONFIG_VIDEO_IMG_VXD_DEC) += vxd-dec.o
+
+# (1)
+# This config, if enabled, turns on all the debug traces in
+# the encoder driver. Enable it only for debugging, and keep
+# it disabled in release builds.
+DEBUG_ENCODER_DRIVER ?=n
+
+# (2)
+# This config enables encoder performance profiling.
+# Keep it disabled; enable it only for profiling in development
+# environments.
+ENABLE_PROFILING ?=n
+
+vxe-enc-y += common/img_mem_man.o \
+ common/img_mem_unified.o \
+ common/talmmu_api.o \
+ common/addr_alloc.o \
+ common/lst.o \
+ common/hash.o \
+ common/ra.o \
+ common/pool.o \
+ common/rman_api.o \
+ common/dq.o \
+ common/idgen_api.o \
+ common/imgmmu.o \
+ common/work_queue.o \
+
+vxe-enc-y += encoder/vxe_v4l2.o \
+ encoder/vxe_enc.o \
+ encoder/topaz_device.o \
+ encoder/topazmmu.o \
+ encoder/topaz_api.o \
+ encoder/topaz_api_utils.o \
+ encoder/header_gen.o \
+ encoder/mtx_fwif.o \
+
+obj-$(CONFIG_VIDEO_IMG_VXE_ENC) += vxe-enc.o
+
+ifeq ($(DEBUG_ENCODER_DRIVER), y)
+ccflags-y += -DDEBUG_ENCODER_DRIVER
+ccflags-y += -DDEBUG
+endif
+
+ifeq ($(ENABLE_PROFILING),y)
+ccflags-y += -DENABLE_PROFILING
+endif
+
+ccflags-y += -I$(srctree)/drivers/media/platform/vxe-vxd/common/
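Since the options above use ?=, they can be overridden on the make command line (for example, make HAS_HEVC=n). Each enabled option is passed to the compiler as a -D define; a minimal illustrative sketch of how sources are expected to gate features on those defines (the function below is hypothetical, not part of the driver):

    #include <linux/printk.h>

    /* Illustrative only: the -D defines set above gate features at compile time. */
    static void example_report_features(void)
    {
    #ifdef HAS_HEVC
            pr_info("vxd: HEVC/H265 decode enabled\n");
    #endif
    #ifdef HAS_JPEG
            pr_info("vxd: MJPEG decode enabled\n");
    #endif
    #ifdef ERROR_CONCEALMENT
            pr_info("vxd: gray-pattern error concealment enabled\n");
    #endif
    }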
diff --git a/drivers/media/platform/vxe-vxd/common/addr_alloc.c b/drivers/media/platform/vxe-vxd/common/addr_alloc.c
new file mode 100644
index 000000000000..393d309b2c0c
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/addr_alloc.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Address allocation APIs - used to manage address allocation
+ * with a number of predefined regions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "addr_alloc.h"
+#include "hash.h"
+#include "img_errors.h"
+
+/* Global context. */
+static struct addr_context global_ctx = {0};
+/* Sub-system initialized. */
+static int global_initialized;
+/* Count of contexts. */
+static unsigned int num_ctx;
+/* Global mutex */
+static struct mutex *global_lock;
+
+/*
+ * addr_initialise - Initialise the address allocation sub-system.
+ */
+
+int addr_initialise(void)
+{
+ unsigned int result = IMG_ERROR_ALREADY_INITIALISED;
+
+ /* If we are not initialized */
+ if (!global_initialized)
+ result = addr_cx_initialise(&global_ctx);
+ return result;
+}
+
+int addr_cx_initialise(struct addr_context * const context)
+{
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!context)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!global_initialized) {
+ /* Initialise context */
+ memset(context, 0x00, sizeof(struct addr_context));
+
+ /* If no mutex associated with this resource */
+ if (!global_lock) {
+ /* Create one */
+
+ global_lock = kzalloc(sizeof(*global_lock), GFP_KERNEL);
+ if (!global_lock)
+ return -ENOMEM;
+
+ mutex_init(global_lock);
+ }
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ /* Initialise the hash functions. */
+ result = vid_hash_initialise();
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ /* Initialise the arena functions */
+ result = vid_ra_initialise();
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ result = vid_hash_finalise();
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ /* We are now initialized */
+ global_initialized = TRUE;
+ result = IMG_SUCCESS;
+ } else {
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+ }
+
+ num_ctx++;
+ mutex_unlock(global_lock);
+
+ return result;
+}
+
+int addr_deinitialise(void)
+{
+ return addr_cx_deinitialise(&global_ctx);
+}
+
+int addr_cx_deinitialise(struct addr_context * const context)
+{
+ struct addr_region *tmp_region = NULL;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!context)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (global_initialized) {
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ tmp_region = context->regions;
+
+ /* Delete all arena structure */
+ if (context->default_region)
+ result = vid_ra_delete(context->default_region->arena);
+
+ while (tmp_region) {
+ result = vid_ra_delete(tmp_region->arena);
+ tmp_region = tmp_region->nxt_region;
+ }
+
+ if (num_ctx != 0)
+ num_ctx--;
+
+ result = IMG_SUCCESS;
+ if (num_ctx == 0) {
+ /* Free off resources */
+ result = vid_hash_finalise();
+ result = vid_ra_deinit();
+ global_initialized = FALSE;
+
+ mutex_unlock(global_lock);
+ mutex_destroy(global_lock);
+ kfree(global_lock);
+ global_lock = NULL;
+ } else {
+ mutex_unlock(global_lock);
+ }
+ }
+
+ return result;
+}
+
+int addr_define_mem_region(struct addr_region * const region)
+{
+ return addr_cx_define_mem_region(&global_ctx, region);
+}
+
+int addr_cx_define_mem_region(struct addr_context * const context,
+ struct addr_region * const region)
+{
+ struct addr_region *tmp_region = NULL;
+ unsigned int result = IMG_SUCCESS;
+
+ if (!context || !region)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ tmp_region = context->regions;
+
+ /* Ensure the link to the next is NULL */
+ region->nxt_region = NULL;
+
+ /* If this is the default memory region */
+ if (!region->name) {
+ /* Should not previously have been defined */
+ if (context->default_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ context->default_region = region;
+ context->no_regions++;
+
+ /*
+ * Create an arena for memory allocation
+ * name of resource arena for debug
+ * start of resource
+ * size of resource
+ * allocation quantum
+ * import allocator
+ * import deallocator
+ * import handle
+ */
+ result = vid_ra_create("memory",
+ region->base_addr,
+ region->size,
+ 1,
+ NULL,
+ NULL,
+ NULL,
+ &region->arena);
+
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ } else {
+ /*
+ * Run down the list of existing named regions
+ * to check if there is a region with this name
+ */
+ while (tmp_region &&
+ (strcmp(region->name, tmp_region->name) != 0) &&
+ tmp_region->nxt_region) {
+ tmp_region = tmp_region->nxt_region;
+ }
+
+ /* If we have items in the list */
+ if (tmp_region) {
+ /*
+ * Check we didn't stop because the name
+ * clashes with one already defined.
+ */
+
+ if (strcmp(region->name, tmp_region->name) == 0 ||
+ tmp_region->nxt_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ /* Add to end of list */
+ tmp_region->nxt_region = region;
+ } else {
+ /* Add to head of list */
+ context->regions = region;
+ }
+
+ context->no_regions++;
+
+ /*
+ * Create an arena for memory allocation
+ * name of resource arena for debug
+ * start of resource
+ * size of resource
+ * allocation quantum
+ * import allocator
+ * import deallocator
+ * import handle
+ */
+ result = vid_ra_create(region->name,
+ region->base_addr,
+ region->size,
+ 1,
+ NULL,
+ NULL,
+ NULL,
+ &region->arena);
+
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ }
+
+ mutex_unlock(global_lock);
+
+ /* Check the arena was created OK */
+ if (!region->arena)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ return result;
+}
+
+int addr_malloc(const unsigned char * const name,
+ unsigned long long size,
+ unsigned long long * const base_adr)
+{
+ return addr_cx_malloc(&global_ctx, name, size, base_adr);
+}
+
+int addr_cx_malloc(struct addr_context * const context,
+ const unsigned char * const name,
+ unsigned long long size,
+ unsigned long long * const base_adr)
+{
+ unsigned int result = IMG_ERROR_FATAL;
+ struct addr_region *tmp_region = NULL;
+
+ if (!context || !base_adr || !name)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ *(base_adr) = (unsigned long long)-1LL;
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ tmp_region = context->regions;
+
+ /*
+ * Run down the list of existing named
+ * regions to locate this
+ */
+ while (tmp_region && (strcmp(name, tmp_region->name) != 0) && (tmp_region->nxt_region))
+ tmp_region = tmp_region->nxt_region;
+
+ /* If there was no match. */
+ if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+ /* Use the default */
+ if (!context->default_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ tmp_region = context->default_region;
+ }
+
+ if (!tmp_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ /* Allocate size + guard band */
+ result = vid_ra_alloc(tmp_region->arena,
+ size + tmp_region->guard_band,
+ NULL,
+ NULL,
+ SEQUENTIAL_ALLOCATION,
+ 1,
+ base_adr);
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ mutex_unlock(global_lock);
+
+ return result;
+}
+
+int addr_cx_malloc_res(struct addr_context * const context,
+ const unsigned char * const name,
+ unsigned long long size,
+ unsigned long long * const base_adr)
+{
+ unsigned int result = IMG_ERROR_FATAL;
+ struct addr_region *tmp_region = NULL;
+
+ if (!context || !base_adr || !name)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ tmp_region = context->regions;
+ /*
+ * Run down the list of existing named
+ * regions to locate this
+ */
+ while (tmp_region && (strcmp(name, tmp_region->name) != 0) && (tmp_region->nxt_region))
+ tmp_region = tmp_region->nxt_region;
+
+ /* If there was no match. */
+ if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+ /* Use the default */
+ if (!context->default_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ tmp_region = context->default_region;
+ }
+ if (!tmp_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ /* Allocate size + guard band */
+ result = vid_ra_alloc(tmp_region->arena, size + tmp_region->guard_band,
+ NULL, NULL, SEQUENTIAL_ALLOCATION, 1, base_adr);
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ mutex_unlock(global_lock);
+
+ return result;
+}
+
+int addr_cx_malloc_align_res(struct addr_context * const context,
+ const unsigned char * const name,
+ unsigned long long size,
+ unsigned long long alignment,
+ unsigned long long * const base_adr)
+{
+ unsigned int result;
+ struct addr_region *tmp_region = NULL;
+
+ if (!context || !base_adr || !name)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ tmp_region = context->regions;
+
+ /*
+ * Run down the list of existing named
+ * regions to locate this
+ */
+ while (tmp_region &&
+ (strcmp(name, tmp_region->name) != 0) &&
+ (tmp_region->nxt_region)) {
+ tmp_region = tmp_region->nxt_region;
+ }
+ /* If there was no match. */
+ if (!tmp_region ||
+ (strcmp(name, tmp_region->name) != 0)) {
+ /* Use the default */
+ if (!context->default_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ tmp_region = context->default_region;
+ }
+
+ if (!tmp_region) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ /* Allocate size + guard band */
+ result = vid_ra_alloc(tmp_region->arena,
+ size + tmp_region->guard_band,
+ NULL,
+ NULL,
+ SEQUENTIAL_ALLOCATION,
+ alignment,
+ base_adr);
+ if (result != IMG_SUCCESS) {
+ mutex_unlock(global_lock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ mutex_unlock(global_lock);
+
+ return result;
+}
+
+int addr_free(const unsigned char * const name, unsigned long long addr)
+{
+ return addr_cx_free(&global_ctx, name, addr);
+}
+
+int addr_cx_free(struct addr_context * const context,
+ const unsigned char * const name,
+ unsigned long long addr)
+{
+ struct addr_region *tmp_region;
+ unsigned int result;
+
+ if (!context)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ tmp_region = context->regions;
+
+ mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+ /* If the allocation is for the default region */
+ if (!name) {
+ if (!context->default_region) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+ tmp_region = context->default_region;
+ } else {
+ /*
+ * Run down the list of existing named
+ * regions to locate this
+ */
+ while (tmp_region &&
+ (strcmp(name, tmp_region->name) != 0) &&
+ tmp_region->nxt_region) {
+ tmp_region = tmp_region->nxt_region;
+ }
+
+ /* If there was no match */
+ if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+ /* Use the default */
+ if (!context->default_region) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+ tmp_region = context->default_region;
+ }
+ }
+
+ /* Free the address */
+ result = vid_ra_free(tmp_region->arena, addr);
+
+error:
+ mutex_unlock(global_lock);
+ return result;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/addr_alloc.h b/drivers/media/platform/vxe-vxd/common/addr_alloc.h
new file mode 100644
index 000000000000..387418b124e4
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/addr_alloc.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address allocation management API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __ADDR_ALLOC_H__
+#define __ADDR_ALLOC_H__
+
+#include <linux/types.h>
+#include "ra.h"
+
+/* Defines whether sequential or random allocation is used */
+enum {
+ SEQUENTIAL_ALLOCATION,
+ RANDOM_ALLOCATION,
+ RANDOM_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/**
+ * struct addr_region - Memory region structure
+ *@name: A pointer to a string containing the name of the region.
+ * NULL for the default memory region.
+ *@base_addr: The base address of the memory region.
+ *@size: The size of the memory region.
+ *@guard_band: The size of any guard band to be used.
+ * Guard bands can be useful in separating block allocations
+ * and allow the caller to detect erroneous accesses
+ * into these areas.
+ *@nxt_region: Used internally by the ADDR API. A pointer to the
+ * next memory region.
+ *@arena: Used internally by the ADDR API. A pointer to a structure
+ * used to maintain and perform address allocation.
+ *
+ * This structure contains information about the memory region.
+ */
+struct addr_region {
+ unsigned char *name;
+ unsigned long long base_addr;
+ unsigned long long size;
+ unsigned int guard_band;
+ struct addr_region *nxt_region;
+ void *arena;
+};
+
+/*
+ * This structure contains the context for allocation.
+ *@regions: Pointer to the first region in the list.
+ *@default_region: Pointer to the default region.
+ *@no_regions: Number of regions currently available (including default)
+ */
+struct addr_context {
+ struct addr_region *regions;
+ struct addr_region *default_region;
+ unsigned int no_regions;
+};
+
+/*
+ * @Function ADDR_Initialise
+ * @Description
+ * This function is used to initialise the address allocation sub-system.
+ * NOTE: This function may be called multiple times. The initialisation only
+ * happens the first time it is called.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_initialise(void);
+
+/*
+ * @Function addr_deinitialise
+ * @Description
+ * This function is used to de-initialise the address allocation sub-system.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_deinitialise(void);
+
+/*
+ * @Function addr_define_mem_region
+ * @Description
+ * This function is used to define a memory region.
+ * NOTE: The region structure MUST be defined in static memory as this
+ * is retained and used by the ADDR sub-system.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input region: A pointer to a region structure.
+ * @Return IMG_RESULT : IMG_SUCCESS or an error code.
+ */
+int addr_define_mem_region(struct addr_region * const region);
+
+/*
+ * @Function addr_malloc
+ * @Description
+ * This function is used to allocate space within a memory region.
+ * NOTE: Allocation failures or invalid parameters are trapped by asserts.
+ * @Input name: Is a pointer to the name of the memory region.
+ * NULL can be used to allocate space from the
+ * default memory region.
+ * @Input size: The size (in bytes) of the allocation.
+ * @Output base_adr : The address of the allocated space.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_malloc(const unsigned char *const name,
+ unsigned long long size,
+ unsigned long long *const base_adr);
+
+/*
+ * @Function addr_free
+ * @Description
+ * This function is used to free previously allocated space within
+ * a memory region.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input name: Is a pointer to the name of the memory region.
+ * NULL is used to free space from the default memory region.
+ *@Input addr: The address allocated.
+ *@Return IMG_SUCCESS or an error code.
+ */
+int addr_free(const unsigned char * const name, unsigned long long addr);
+
+/*
+ * @Function addr_cx_initialise
+ * @Description
+ * This function is used to initialise the address allocation sub-system with
+ * an external context structure.
+ * NOTE: This function should be called only once for the context.
+ * @Input context : Pointer to context structure.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_initialise(struct addr_context * const context);
+
+/*
+ * @Function addr_cx_deinitialise
+ * @Description
+ * This function is used to de-initialise the address allocation
+ * sub-system with an external context structure.
+ * @Input context : Pointer to context structure.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_deinitialise(struct addr_context * const context);
+
+/*
+ * @Function addr_cx_define_mem_region
+ * @Description
+ * This function is used to define a memory region with an external
+ * context structure.
+ * NOTE: The region structure MUST be defined in static memory as this
+ * is retained and used by the ADDR sub-system.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input context : Pointer to context structure.
+ * @Input region : A pointer to a region structure.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_define_mem_region(struct addr_context *const context,
+ struct addr_region *const region);
+
+/*
+ * @Function addr_cx_malloc
+ * @Description
+ * This function is used to allocate space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures or invalid parameters are trapped by asserts.
+ * @Input context : Pointer to context structure.
+ * @Input name : Is a pointer to the name of the memory region.
+ * NULL can be used to allocate space from the
+ * default memory region.
+ * @Input size : The size (in bytes) of the allocation.
+ * @Output base_adr : The address of the allocated space.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc(struct addr_context * const context,
+ const unsigned char *const name,
+ unsigned long long size,
+ unsigned long long *const base_adr);
+
+/*
+ * @Function addr_cx_malloc_res
+ * @Description
+ * This function is used to allocate space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures are returned in IMG_RESULT; however, invalid
+ * parameters are trapped by asserts.
+ * @Input context : Pointer to context structure.
+ * @Input name : Is a pointer to the name of the memory region.
+ * NULL can be used to allocate space from the
+ * default memory region.
+ * @Input size : The size (in bytes) of the allocation.
+ * @Input base_adr : Pointer to the address of the allocated space.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc_res(struct addr_context *const context,
+ const unsigned char *const name,
+ unsigned long long size,
+ unsigned long long * const base_adr);
+
+/*
+ * @Function addr_cx_malloc_align_res
+ * @Description
+ * This function is used to allocate space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures are returned in IMG_RESULT; however, invalid
+ * parameters are trapped by asserts.
+ * @Input context : Pointer to context structure.
+ * @Input name : Is a pointer to the name of the memory region.
+ * NULL can be used to allocate space from the
+ * default memory region.
+ * @Input size : The size (in bytes) of the allocation.
+ * @Input alignment : The required byte alignment (1, 2, 4, 8, 16 etc).
+ * @Input base_adr : Pointer to the address of the allocated space.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc_align_res(struct addr_context *const context,
+ const unsigned char *const name,
+ unsigned long long size,
+ unsigned long long alignment,
+ unsigned long long *const base_adr);
+
+/*
+ * @Function addr_cx_free
+ * @Description
+ * This function is used to free previously allocated space within a memory region
+ * with an external context structure.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input context : Pointer to context structure.
+ * @Input name : Is a pointer to the name of the memory region.
+ * NULL is used to free space from the
+ * default memory region.
+ * @Input addr : The address allocated.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int addr_cx_free(struct addr_context *const context,
+ const unsigned char *const name,
+ unsigned long long addr);
+
+#endif /* __ADDR_ALLOC_H__ */
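A minimal usage sketch of this API with a caller-owned context (region values are illustrative; as noted above, the region structure must stay valid because the ADDR sub-system retains the pointer):

    static struct addr_context ctx;
    static struct addr_region fw_region = {
            .name       = (unsigned char *)"firmware",  /* named region */
            .base_addr  = 0x40000000ULL,                /* illustrative values */
            .size       = 0x00100000ULL,
            .guard_band = 0x1000,
    };

    static int example_addr_usage(void)
    {
            unsigned long long addr;
            int ret;

            ret = addr_cx_initialise(&ctx);
            if (ret != IMG_SUCCESS)
                    return ret;

            ret = addr_cx_define_mem_region(&ctx, &fw_region);
            if (ret != IMG_SUCCESS)
                    goto out;

            ret = addr_cx_malloc(&ctx, fw_region.name, 0x2000, &addr);
            if (ret == IMG_SUCCESS)
                    ret = addr_cx_free(&ctx, fw_region.name, addr);

    out:
            addr_cx_deinitialise(&ctx);
            return ret;
    }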
diff --git a/drivers/media/platform/vxe-vxd/common/dq.c b/drivers/media/platform/vxe-vxd/common/dq.c
new file mode 100644
index 000000000000..890be5ed00e7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/dq.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Utility module for doubly linked queues.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dq.h"
+#include "img_errors.h"
+
+void dq_init(struct dq_linkage_t *queue)
+{
+ queue->fwd = (struct dq_linkage_t *)queue;
+ queue->back = (struct dq_linkage_t *)queue;
+}
+
+void dq_addhead(struct dq_linkage_t *queue, void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return;
+
+ ((struct dq_linkage_t *)item)->back = (struct dq_linkage_t *)queue;
+ ((struct dq_linkage_t *)item)->fwd =
+ ((struct dq_linkage_t *)queue)->fwd;
+ ((struct dq_linkage_t *)queue)->fwd->back = (struct dq_linkage_t *)item;
+ ((struct dq_linkage_t *)queue)->fwd = (struct dq_linkage_t *)item;
+}
+
+void dq_addtail(struct dq_linkage_t *queue, void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return;
+
+ ((struct dq_linkage_t *)item)->fwd = (struct dq_linkage_t *)queue;
+ ((struct dq_linkage_t *)item)->back =
+ ((struct dq_linkage_t *)queue)->back;
+ ((struct dq_linkage_t *)queue)->back->fwd = (struct dq_linkage_t *)item;
+ ((struct dq_linkage_t *)queue)->back = (struct dq_linkage_t *)item;
+}
+
+int dq_empty(struct dq_linkage_t *queue)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return 1;
+
+ return ((queue)->fwd == (struct dq_linkage_t *)(queue));
+}
+
+void *dq_first(struct dq_linkage_t *queue)
+{
+ struct dq_linkage_t *temp = queue->fwd;
+
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return NULL;
+
+ return temp == (struct dq_linkage_t *)queue ? NULL : temp;
+}
+
+void *dq_last(struct dq_linkage_t *queue)
+{
+ struct dq_linkage_t *temp = queue->back;
+
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return NULL;
+
+ return temp == (struct dq_linkage_t *)queue ? NULL : temp;
+}
+
+void *dq_next(void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+ if (!((struct dq_linkage_t *)item)->back ||
+ !((struct dq_linkage_t *)item)->fwd)
+ return NULL;
+
+ return ((struct dq_linkage_t *)item)->fwd;
+}
+
+void *dq_previous(void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+ if (!((struct dq_linkage_t *)item)->back ||
+ !((struct dq_linkage_t *)item)->fwd)
+ return NULL;
+
+ return ((struct dq_linkage_t *)item)->back;
+}
+
+void dq_remove(void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+ if (!((struct dq_linkage_t *)item)->back ||
+ !((struct dq_linkage_t *)item)->fwd)
+ return;
+
+ ((struct dq_linkage_t *)item)->fwd->back =
+ ((struct dq_linkage_t *)item)->back;
+ ((struct dq_linkage_t *)item)->back->fwd =
+ ((struct dq_linkage_t *)item)->fwd;
+
+ /* make item linkages safe for "orphan" removes */
+ ((struct dq_linkage_t *)item)->fwd = item;
+ ((struct dq_linkage_t *)item)->back = item;
+}
+
+void *dq_removehead(struct dq_linkage_t *queue)
+{
+ struct dq_linkage_t *temp;
+
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return NULL;
+
+ if ((queue)->fwd == (struct dq_linkage_t *)(queue))
+ return NULL;
+
+ temp = ((struct dq_linkage_t *)queue)->fwd;
+ temp->fwd->back = temp->back;
+ temp->back->fwd = temp->fwd;
+
+ /* make item linkages safe for "orphan" removes */
+ temp->fwd = temp;
+ temp->back = temp;
+ return temp;
+}
+
+void *dq_removetail(struct dq_linkage_t *queue)
+{
+ struct dq_linkage_t *temp;
+
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+ if (!((struct dq_linkage_t *)queue)->back ||
+ !((struct dq_linkage_t *)queue)->fwd)
+ return NULL;
+
+ if ((queue)->fwd == (struct dq_linkage_t *)(queue))
+ return NULL;
+
+ temp = ((struct dq_linkage_t *)queue)->back;
+ temp->fwd->back = temp->back;
+ temp->back->fwd = temp->fwd;
+
+ /* make item linkages safe for "orphan" removes */
+ temp->fwd = temp;
+ temp->back = temp;
+
+ return temp;
+}
+
+void dq_addbefore(void *successor, void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)successor)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)successor)->fwd);
+
+ if (!((struct dq_linkage_t *)successor)->back ||
+ !((struct dq_linkage_t *)successor)->fwd)
+ return;
+
+ ((struct dq_linkage_t *)item)->fwd = (struct dq_linkage_t *)successor;
+ ((struct dq_linkage_t *)item)->back =
+ ((struct dq_linkage_t *)successor)->back;
+ ((struct dq_linkage_t *)item)->back->fwd = (struct dq_linkage_t *)item;
+ ((struct dq_linkage_t *)successor)->back = (struct dq_linkage_t *)item;
+}
+
+void dq_addafter(void *predecessor, void *item)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)predecessor)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)predecessor)->fwd);
+
+ if (!((struct dq_linkage_t *)predecessor)->back ||
+ !((struct dq_linkage_t *)predecessor)->fwd)
+ return;
+
+ ((struct dq_linkage_t *)item)->fwd =
+ ((struct dq_linkage_t *)predecessor)->fwd;
+ ((struct dq_linkage_t *)item)->back =
+ (struct dq_linkage_t *)predecessor;
+ ((struct dq_linkage_t *)item)->fwd->back = (struct dq_linkage_t *)item;
+ ((struct dq_linkage_t *)predecessor)->fwd = (struct dq_linkage_t *)item;
+}
+
+void dq_move(struct dq_linkage_t *from, struct dq_linkage_t *to)
+{
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)from)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)from)->fwd);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)to)->back);
+ IMG_DBG_ASSERT(((struct dq_linkage_t *)to)->fwd);
+
+ if (!((struct dq_linkage_t *)from)->back ||
+ !((struct dq_linkage_t *)from)->fwd ||
+ !((struct dq_linkage_t *)to)->back ||
+ !((struct dq_linkage_t *)to)->fwd)
+ return;
+
+ if ((from)->fwd == (struct dq_linkage_t *)(from)) {
+ dq_init(to);
+ } else {
+ *to = *from;
+ to->fwd->back = (struct dq_linkage_t *)to;
+ to->back->fwd = (struct dq_linkage_t *)to;
+ dq_init(from);
+ }
+}
diff --git a/drivers/media/platform/vxe-vxd/common/dq.h b/drivers/media/platform/vxe-vxd/common/dq.h
new file mode 100644
index 000000000000..4663a92aaf7a
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/dq.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Utility module for doubly linked queues.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+#ifndef DQ_H
+#define DQ_H
+
+/* dq structure */
+struct dq_linkage_t {
+ struct dq_linkage_t *fwd;
+ struct dq_linkage_t *back;
+};
+
+/* Function Prototypes */
+void dq_addafter(void *predecessor, void *item);
+void dq_addbefore(void *successor, void *item);
+void dq_addhead(struct dq_linkage_t *queue, void *item);
+void dq_addtail(struct dq_linkage_t *queue, void *item);
+int dq_empty(struct dq_linkage_t *queue);
+void *dq_first(struct dq_linkage_t *queue);
+void *dq_last(struct dq_linkage_t *queue);
+void dq_init(struct dq_linkage_t *queue);
+void dq_move(struct dq_linkage_t *from, struct dq_linkage_t *to);
+void *dq_next(void *item);
+void *dq_previous(void *item);
+void dq_remove(void *item);
+void *dq_removehead(struct dq_linkage_t *queue);
+void *dq_removetail(struct dq_linkage_t *queue);
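+
+/*
+ * Illustrative usage sketch (not part of the driver): items are queued by
+ * embedding struct dq_linkage_t as their first member, since the queue
+ * routines cast item pointers directly to struct dq_linkage_t *.
+ *
+ * struct my_item { // hypothetical client structure
+ * struct dq_linkage_t link; // must be the first member
+ * int payload;
+ * };
+ *
+ * struct dq_linkage_t queue;
+ * struct my_item item = { .payload = 42 };
+ *
+ * dq_init(&queue);
+ * dq_addtail(&queue, &item);
+ * while (!dq_empty(&queue)) {
+ * struct my_item *it = dq_removehead(&queue);
+ * // process it->payload
+ * }
+ */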
+
+#endif /* DQ_H */
diff --git a/drivers/media/platform/vxe-vxd/common/hash.c b/drivers/media/platform/vxe-vxd/common/hash.c
new file mode 100644
index 000000000000..1a03aecc34ef
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/hash.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Self scaling hash tables.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hash.h"
+#include "img_errors.h"
+#include "pool.h"
+
+/* pool of struct hash objects */
+static struct pool *global_hashpool;
+
+/* pool of struct bucket objects */
+static struct pool *global_bucketpool;
+
+static int global_initialized;
+
+/* Each entry in a hash table is placed into a bucket */
+struct bucket {
+ struct bucket *next;
+ unsigned long long key;
+ unsigned long long value;
+};
+
+struct hash {
+ struct bucket **table;
+ unsigned int size;
+ unsigned int count;
+ unsigned int minimum_size;
+};
+
+/**
+ * hash_func - Hash function intended for hashing addresses.
+ * @value : The key to hash.
+ * @size : The size of the hash table.
+ */
+static unsigned int hash_func(unsigned long long value,
+ unsigned int size)
+{
+ unsigned int hash = (unsigned int)(value);
+
+ hash += (hash << 12);
+ hash ^= (hash >> 22);
+ hash += (hash << 4);
+ hash ^= (hash >> 9);
+ hash += (hash << 10);
+ hash ^= (hash >> 2);
+ hash += (hash << 7);
+ hash ^= (hash >> 12);
+ hash &= (size - 1);
+ return hash;
+}
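+
+/*
+ * Note: the final "hash &= (size - 1)" mask assumes the table size is a
+ * power of two; hash_resize() preserves this property by only doubling or
+ * halving the size, so tables should be created with a power-of-two size.
+ */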
+
+/*
+ * @Function hash_chain_insert
+ * @Description
+ * Insert a bucket into a hash table chain.
+ * @Input bucket : The bucket
+ * @Input table : The hash table
+ * @Input size : The size of the hash table
+ * @Return IMG_SUCCESS or an error code.
+ */
+static int hash_chain_insert(struct bucket *bucket,
+ struct bucket **table,
+ unsigned int size)
+{
+ unsigned int idx;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!bucket || !table || !size) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ idx = hash_func(bucket->key, size);
+
+ if (idx < size) {
+ result = IMG_SUCCESS;
+ bucket->next = table[idx];
+ table[idx] = bucket;
+ }
+
+ return result;
+}
+
+/*
+ * @Function hash_rehash
+ * @Description
+ * Iterate over every entry in an old hash table and rehash into the new table.
+ * @Input old_table : The old hash table
+ * @Input old_size : The size of the old hash table
+ * @Input new_table : The new hash table
+ * @Input new_sz : The size of the new hash table
+ * @Return IMG_SUCCESS or an error code.
+ */
+static int hash_rehash(struct bucket **old_table,
+ unsigned int old_size,
+ struct bucket **new_table,
+ unsigned int new_sz)
+{
+ unsigned int idx;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!old_table || !new_table) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ for (idx = 0; idx < old_size; idx++) {
+ struct bucket *bucket;
+ struct bucket *nex_bucket;
+
+ bucket = old_table[idx];
+ while (bucket) {
+ nex_bucket = bucket->next;
+ result = hash_chain_insert(bucket, new_table, new_sz);
+ if (result != IMG_SUCCESS) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+ bucket = nex_bucket;
+ }
+ }
+ result = IMG_SUCCESS;
+
+ return result;
+}
+
+/*
+ * @Function hash_resize
+ * @Description
+ * Attempt to resize a hash table. Failure to allocate a new, larger hash
+ * table is not considered a hard failure; we simply continue and allow the
+ * table to fill up, the effect being that hash chains become longer.
+ * @Input hash_arg : Pointer to the hash table
+ * @Input new_sz : The size of the new hash table
+ * @Return IMG_SUCCESS or an error code.
+ */
+static int hash_resize(struct hash *hash_arg,
+ unsigned int new_sz)
+{
+ unsigned int malloc_sz = 0;
+ unsigned int result = IMG_ERROR_FATAL;
+ unsigned int idx;
+
+ if (!hash_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ if (new_sz != hash_arg->size) {
+ struct bucket **new_bkt_table;
+
+ malloc_sz = (sizeof(struct bucket *) * new_sz);
+ new_bkt_table = kmalloc(malloc_sz, GFP_KERNEL);
+
+ if (!new_bkt_table) {
+ result = IMG_ERROR_MALLOC_FAILED;
+ return result;
+ }
+
+ for (idx = 0; idx < new_sz; idx++)
+ new_bkt_table[idx] = NULL;
+
+ result = hash_rehash(hash_arg->table,
+ hash_arg->size,
+ new_bkt_table,
+ new_sz);
+
+ if (result != IMG_SUCCESS) {
+ kfree(new_bkt_table);
+ new_bkt_table = NULL;
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ kfree(hash_arg->table);
+ hash_arg->table = new_bkt_table;
+ hash_arg->size = new_sz;
+ }
+ result = IMG_SUCCESS;
+
+ return result;
+}
+
+static unsigned int private_max(unsigned int a, unsigned int b)
+{
+ unsigned int ret = (a > b) ? a : b;
+ return ret;
+}
+
+/*
+ * @Function vid_hash_initialise
+ * @Description
+ * To initialise the hash module.
+ * @Input None
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_initialise(void)
+{
+ unsigned int result = IMG_ERROR_ALREADY_COMPLETE;
+
+ if (!global_initialized) {
+ if (global_hashpool || global_bucketpool) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ result = pool_create("img-hash",
+ sizeof(struct hash),
+ &global_hashpool);
+
+ if (result != IMG_SUCCESS) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ result = pool_create("img-sBucket",
+ sizeof(struct bucket),
+ &global_bucketpool);
+ if (result != IMG_SUCCESS) {
+ if (global_bucketpool) {
+ pool_delete(global_bucketpool);
+ global_bucketpool = NULL;
+ }
+ if (global_hashpool) {
+ pool_delete(global_hashpool);
+ global_hashpool = NULL;
+ }
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+ global_initialized = true;
+ result = IMG_SUCCESS;
+ }
+ return result;
+}
+
+/*
+ * @Function vid_hash_finalise
+ * @Description
+ * To finalise the hash module. All allocated hash tables should
+ * be deleted before calling this function.
+ * @Input None
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_finalise(void)
+{
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (global_initialized) {
+ if (global_hashpool) {
+ result = pool_delete(global_hashpool);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ global_hashpool = NULL;
+ }
+
+ if (global_bucketpool) {
+ result = pool_delete(global_bucketpool);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ global_bucketpool = NULL;
+ }
+ global_initialized = false;
+ result = IMG_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * @Function vid_hash_create
+ * @Description
+ * Create a self scaling hash table.
+ * @Input initial_size : Initial and minimum size of the hash table.
+ * @Output hash_arg : Will contain the hash table handle or NULL.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_create(unsigned int initial_size,
+ struct hash ** const hash_arg)
+{
+ unsigned int idx;
+ unsigned int tbl_sz = 0;
+ unsigned int result = IMG_ERROR_FATAL;
+ struct hash *local_hash = NULL;
+
+ if (!hash_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ if (global_initialized) {
+ pool_alloc(global_hashpool, ((void **)&local_hash));
+ if (!local_hash) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ *hash_arg = NULL;
+ return result;
+ }
+
+ local_hash->count = 0;
+ local_hash->size = initial_size;
+ local_hash->minimum_size = initial_size;
+
+ tbl_sz = (sizeof(struct bucket *) * local_hash->size);
+ local_hash->table = kmalloc(tbl_sz, GFP_KERNEL);
+ if (!local_hash->table) {
+ result = pool_free(global_hashpool, local_hash);
+ if (result != IMG_SUCCESS)
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ result |= IMG_ERROR_MALLOC_FAILED;
+ *hash_arg = NULL;
+ return result;
+ }
+
+ for (idx = 0; idx < local_hash->size; idx++)
+ local_hash->table[idx] = NULL;
+
+ *hash_arg = local_hash;
+ result = IMG_SUCCESS;
+ }
+ return result;
+}
+
+/*
+ * @Function vid_hash_delete
+ * @Description
+ * To delete a hash table, all entries in the table should be
+ * removed before calling this function.
+ * @Input hash_arg : Hash table pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_delete(struct hash * const hash_arg)
+{
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!hash_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ if (global_initialized) {
+ if (hash_arg->count != 0) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ kfree(hash_arg->table);
+ hash_arg->table = NULL;
+
+ result = pool_free(global_hashpool, hash_arg);
+ if (result != IMG_SUCCESS) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+ }
+ return result;
+}
+
+/*
+ * @Function vid_hash_insert
+ * @Description
+ * To insert a key value pair into a hash table.
+ * @Input hash_arg : Hash table pointer
+ * @Input key : Key value
+ * @Input value : The value associated with the key.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_insert(struct hash * const hash_arg,
+ unsigned long long key,
+ unsigned long long value)
+{
+ struct bucket *ps_bucket = NULL;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!hash_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ if (global_initialized) {
+ result = pool_alloc(global_bucketpool, ((void **)&ps_bucket));
+ if (result != IMG_SUCCESS || !ps_bucket) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+ ps_bucket->next = NULL;
+ ps_bucket->key = key;
+ ps_bucket->value = value;
+
+ result = hash_chain_insert(ps_bucket,
+ hash_arg->table,
+ hash_arg->size);
+
+ if (result != IMG_SUCCESS) {
+ pool_free(global_bucketpool, ((void **)&ps_bucket));
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ hash_arg->count++;
+
+ /* check if we need to think about re-balancing */
+ if ((hash_arg->count << 1) > hash_arg->size) {
+ result = hash_resize(hash_arg, (hash_arg->size << 1));
+ if (result != IMG_SUCCESS) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+ }
+ result = IMG_SUCCESS;
+ }
+ return result;
+}
+
+/*
+ * @Function vid_hash_remove
+ * @Description
+ * To remove a key value pair from a hash table
+ * @Input hash_arg : Hash table pointer
+ * @Input key : Key value
+ * @Output ret_result : Updated with the value associated with the key,
+ * if found.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_remove(struct hash * const hash_arg,
+ unsigned long long key,
+ unsigned long * const ret_result)
+{
+ unsigned int idx;
+ unsigned int tmp1 = 0;
+ unsigned int tmp2 = 0;
+ unsigned int result = IMG_ERROR_FATAL;
+ struct bucket **bucket = NULL;
+
+ if (!hash_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ idx = hash_func(key, hash_arg->size);
+
+ for (bucket = &hash_arg->table[idx]; (*bucket) != NULL;
+ bucket = &((*bucket)->next)) {
+ if ((*bucket)->key == key) {
+ struct bucket *ps_bucket = (*bucket);
+
+ unsigned long long value = ps_bucket->value;
+
+ *bucket = ps_bucket->next;
+ result = pool_free(global_bucketpool, ps_bucket);
+
+ hash_arg->count--;
+
+ /* check if we need to think about re-balancing */
+ if (hash_arg->size > (hash_arg->count << 2) &&
+ hash_arg->size > hash_arg->minimum_size) {
+ tmp1 = (hash_arg->size >> 1);
+ tmp2 = hash_arg->minimum_size;
+ result = hash_resize(hash_arg,
+ private_max(tmp1, tmp2));
+ }
+ *ret_result = value;
+ result = IMG_SUCCESS;
+ break;
+ }
+ }
+ return result;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/hash.h b/drivers/media/platform/vxe-vxd/common/hash.h
new file mode 100644
index 000000000000..91034d1ba441
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/hash.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Self scaling hash tables.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _HASH_H_
+#define _HASH_H_
+
+#include <linux/types.h>
+struct hash;
+
+/*
+ * @Function VID_HASH_Initialise
+ * @Description
+ * To initialise the hash module.
+ * @Input None
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_initialise(void);
+
+/*
+ * @Function VID_HASH_Finalise
+ * @Description
+ * To finalise the hash module. All allocated hash tables should
+ * be deleted before calling this function.
+ * @Input None
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_finalise(void);
+
+/*
+ * @Function VID_HASH_Create
+ * @Description
+ * Create a self scaling hash table.
+ * @Input initial_size : Initial and minimum size of the hash table.
+ * @Output hash_hndl : Hash table handle or NULL.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_create(unsigned int initial_size,
+ struct hash ** const hash_hndl);
+
+/*
+ * @Function VID_HASH_Delete
+ * @Description
+ * To delete a hash table, all entries in the table should be
+ * removed before calling this function.
+ * @Input hash : Hash table pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_delete(struct hash * const ps_hash);
+
+/*
+ * @Function VID_HASH_Insert
+ * @Description
+ * To insert a key value pair into a hash table.
+ * @Input ps_hash : Hash table pointer
+ * @Input key : Key value
+ * @Input value : The value associated with the key.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_insert(struct hash * const ps_hash,
+ unsigned long long key,
+ unsigned long long value);
+
+/*
+ * @Function VID_HASH_Remove
+ * @Description
+ * To remove a key value pair from a hash table
+ * @Input ps_hash : Hash table pointer
+ * @Input key : Key value
+ * @Output result : Updated with the value associated with the key,
+ * if found.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int vid_hash_remove(struct hash * const ps_hash,
+ unsigned long long key,
+ unsigned long * const result);
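+
+/*
+ * Illustrative usage sketch (not part of the driver): a minimal
+ * create/insert/remove/delete sequence, with return codes unchecked for
+ * brevity. The initial size should be a power of two.
+ *
+ * struct hash *table;
+ * unsigned long value;
+ *
+ * vid_hash_initialise();
+ * vid_hash_create(16, &table);
+ * vid_hash_insert(table, 0x1000ULL, 0xdeadULL);
+ * vid_hash_remove(table, 0x1000ULL, &value); // value is now 0xdead
+ * vid_hash_delete(table); // table must be empty first
+ * vid_hash_finalise();
+ */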
+
+#endif /* _HASH_H_ */
diff --git a/drivers/media/platform/vxe-vxd/common/idgen_api.c b/drivers/media/platform/vxe-vxd/common/idgen_api.c
new file mode 100644
index 000000000000..abc8660d7a4a
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/idgen_api.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ID generation manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "idgen_api.h"
+#include "lst.h"
+
+/*
+ * This structure contains ID context.
+ */
+struct idgen_context {
+ /* List of handle block structures */
+ struct lst_t hdlblklst;
+ /* Max ID - set by idgen_createcontext(). */
+ unsigned int maxid;
+ /*
+ * The number of handles per block. In case of
+ * incrementing IDs, the size of the hash table.
+ */
+ unsigned int blksize;
+ /* Next free slot. */
+ unsigned int freeslot;
+ /* Max slot+1 for which we have allocated blocks. */
+ unsigned int maxslotplus1;
+ /* Non-zero when the API should return incrementing IDs */
+ int incids;
+ /* Latest ID given back */
+ unsigned int latestincnumb;
+ /* Array of lists to hold struct idgen_id */
+ struct lst_t *incidlist;
+};
+
+/*
+ * This structure represents internal representation of an Incrementing ID.
+ */
+struct idgen_id {
+ void **link; /* to be part of a singly linked list */
+ /* Incrementing ID returned */
+ unsigned int incid;
+ void *hid;
+};
+
+/*
+ * This structure represents a block of handles.
+ */
+struct idgen_hdblk {
+ void **link; /* to be part of a singly linked list */
+ /* Array of handles in this block. */
+ void *ahhandles[1];
+};
+
+/*
+ * A hashing function could go here. Currently this just maps an ID onto a
+ * circular list of the max number of concurrent IDs
+ * (idgen_context->blksize) in the system.
+ */
+static unsigned int idgen_func(struct idgen_context *idcontext, unsigned int id)
+{
+ return ((id - 1) % idcontext->blksize);
+}
+
+int idgen_createcontext(unsigned int maxid, unsigned int blksize,
+ int incid, void **idgenhandle)
+{
+ struct idgen_context *idcontext;
+
+ /* Create context structure */
+ idcontext = kzalloc(sizeof(*idcontext), GFP_KERNEL);
+ if (!idcontext)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Initialise the context */
+ lst_init(&idcontext->hdlblklst);
+ idcontext->maxid = maxid;
+ idcontext->blksize = blksize;
+
+ /* If we need incrementing Ids */
+ idcontext->incids = incid;
+ idcontext->latestincnumb = 0;
+ idcontext->incidlist = NULL;
+ if (idcontext->incids) {
+ unsigned int i = 0;
+ /* Initialise the hash table of lists of length blksize */
+ idcontext->incidlist = kzalloc((sizeof(*idcontext->incidlist) *
+ idcontext->blksize), GFP_KERNEL);
+ if (!idcontext->incidlist) {
+ kfree(idcontext);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Initialise all the lists in the hash table */
+ for (i = 0; i < idcontext->blksize; i++)
+ lst_init(&idcontext->incidlist[i]);
+ }
+
+ /* Return context structure as handle */
+ *idgenhandle = idcontext;
+
+ return IMG_SUCCESS;
+}
+
+int idgen_destroycontext(void *idgenhandle)
+{
+ struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+ struct idgen_hdblk *hdblk;
+
+ if (!idcontext)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* If incrementing Ids, free the List of Incrementing Ids */
+ if (idcontext->incids) {
+ struct idgen_id *id;
+ unsigned int i = 0;
+
+ for (i = 0; i < idcontext->blksize; i++) {
+ id = lst_removehead(&idcontext->incidlist[i]);
+ while (id) {
+ kfree(id);
+ id = lst_removehead(&idcontext->incidlist[i]);
+ }
+ }
+ kfree(idcontext->incidlist);
+ }
+
+ /* Remove and free all handle blocks */
+ hdblk = (struct idgen_hdblk *)lst_removehead(&idcontext->hdlblklst);
+ while (hdblk) {
+ kfree(hdblk);
+ hdblk = (struct idgen_hdblk *)
+ lst_removehead(&idcontext->hdlblklst);
+ }
+
+ /* Free context structure */
+ kfree(idcontext);
+
+ return IMG_SUCCESS;
+}
+
+static int idgen_findnextfreeslot(void *idgenhandle, unsigned int prevfreeslot)
+{
+ struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+ struct idgen_hdblk *hdblk;
+ unsigned int freslotblk;
+ unsigned int freeslot;
+
+ if (!idcontext)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Find the block containing the current free slot */
+ freeslot = prevfreeslot;
+ freslotblk = prevfreeslot;
+ hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+ if (!hdblk)
+ return IMG_ERROR_FATAL;
+
+ while (freslotblk >= idcontext->blksize) {
+ freslotblk -= idcontext->blksize;
+ hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+ }
+
+ /* Locate the next free slot */
+ while (hdblk) {
+ while (freslotblk < idcontext->blksize) {
+ if (!hdblk->ahhandles[freslotblk]) {
+ /* Found */
+ idcontext->freeslot = freeslot;
+ return IMG_SUCCESS;
+ }
+ freeslot++;
+ freslotblk++;
+ }
+ freslotblk = 0;
+ hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+ }
+
+ /* Beyond the last block */
+ idcontext->freeslot = freeslot;
+ return IMG_SUCCESS;
+}
+
+/*
+ * This function returns the ID structure matching the given ID, or NULL.
+ */
+static struct idgen_id *idgen_getid(struct lst_t *idlist, unsigned int id)
+{
+ struct idgen_id *idstruct;
+
+ idstruct = lst_first(idlist);
+ while (idstruct) {
+ if (idstruct->incid == id)
+ break;
+
+ idstruct = lst_next(idstruct);
+ }
+ return idstruct;
+}
+
+/*
+ * This function does IDGEN allocation.
+ */
+int idgen_allocid(void *idgenhandle, void *handle, unsigned int *id)
+{
+ struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+ struct idgen_hdblk *hdblk;
+ unsigned int size = 0;
+ unsigned int freeslot = 0;
+ unsigned int result = 0;
+
+ if (!idcontext || !handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!idcontext->incids) {
+ /* If the free slot is >= to the max id */
+ if (idcontext->freeslot >= idcontext->maxid) {
+ result = IMG_ERROR_INVALID_ID;
+ goto error;
+ }
+
+ /* If all of the allocated Ids have been used */
+ if (idcontext->freeslot >= idcontext->maxslotplus1) {
+ /* Allocate a stream context */
+ size = sizeof(*hdblk) + (sizeof(void *) *
+ (idcontext->blksize - 1));
+ hdblk = kzalloc(size, GFP_KERNEL);
+ if (!hdblk) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ lst_add(&idcontext->hdlblklst, hdblk);
+ idcontext->maxslotplus1 += idcontext->blksize;
+ }
+
+ /* Find the block containing the next free slot */
+ freeslot = idcontext->freeslot;
+ hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+ if (!hdblk) {
+ result = IMG_ERROR_FATAL;
+ goto error;
+ }
+ while (freeslot >= idcontext->blksize) {
+ freeslot -= idcontext->blksize;
+ hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+ if (!hdblk) {
+ result = IMG_ERROR_FATAL;
+ goto error;
+ }
+ }
+
+ /* Put handle in the next free slot */
+ hdblk->ahhandles[freeslot] = handle;
+
+ *id = idcontext->freeslot + 1;
+
+ /* Find a new free slot */
+ result = idgen_findnextfreeslot(idcontext, idcontext->freeslot);
+ if (result != 0)
+ goto error;
+ /*
+ * If incrementing IDs, just add the ID node to the correct hash table
+ * list.
+ */
+ } else {
+ struct idgen_id *psid = NULL;
+ unsigned int currentincnum, funcid;
+ /*
+ * If incrementing IDs, increment the ID to return, and save the
+ * ID node in the list of IDs, indexed by the hash function
+ * (idgen_func). We might want to use a better hashing function.
+ */
+ currentincnum = (idcontext->latestincnumb + 1) %
+ idcontext->maxid;
+
+ /* Increment the ID. Wraps if greater than max ID */
+ if (currentincnum == 0)
+ currentincnum++;
+
+ idcontext->latestincnumb = currentincnum;
+
+ result = IMG_ERROR_INVALID_ID;
+ do {
+ /* Add to list in the correct hash table entry */
+ funcid = idgen_func(idcontext, idcontext->latestincnumb);
+ if (idgen_getid(&idcontext->incidlist[funcid],
+ idcontext->latestincnumb) == NULL) {
+ psid = kmalloc(sizeof(*psid), GFP_KERNEL);
+ if (!psid) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ psid->incid = idcontext->latestincnumb;
+ psid->hid = handle;
+
+ funcid = idgen_func(idcontext,
+ idcontext->latestincnumb);
+ lst_add(&idcontext->incidlist[funcid],
+ psid);
+
+ result = IMG_SUCCESS;
+ } else {
+ idcontext->latestincnumb =
+ (idcontext->latestincnumb + 1) %
+ idcontext->maxid;
+ if (idcontext->latestincnumb == 0) {
+ /* Do not want to have zero as an ID */
+ idcontext->latestincnumb++;
+ }
+ /*
+ * We have wrapped around the allowed IDs (maxid) and
+ * are about to reuse an ID that has still not been
+ * released
+ */
+ if (idcontext->latestincnumb == currentincnum)
+ goto error;
+ }
+ } while (result != IMG_SUCCESS);
+
+ *id = psid->incid;
+ }
+ return IMG_SUCCESS;
+error:
+ return result;
+}
+
+int idgen_freeid(void *idgenhandle, unsigned int id)
+{
+ struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+ struct idgen_hdblk *hdblk;
+ unsigned int origslot;
+ unsigned int slot;
+
+ if (!idcontext)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (idcontext->incids) {
+ /*
+ * Find the slot in the correct hash table entry, and
+ * remove the ID.
+ */
+ struct idgen_id *psid;
+
+ psid = idgen_getid(&idcontext->incidlist
+ [idgen_func(idcontext, id)], id);
+ if (psid) {
+ lst_remove(&idcontext->incidlist
+ [idgen_func(idcontext, id)], psid);
+ kfree(psid);
+ } else {
+ return IMG_ERROR_INVALID_ID;
+ }
+ } else {
+ /* If not incrementing id */
+ slot = id - 1;
+ origslot = slot;
+
+ if (slot >= idcontext->maxslotplus1)
+ return IMG_ERROR_INVALID_ID;
+
+ /* Find the block containing the id */
+ hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+ if (!hdblk)
+ return IMG_ERROR_FATAL;
+
+ while (slot >= idcontext->blksize) {
+ slot -= idcontext->blksize;
+ hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+ if (!hdblk)
+ return IMG_ERROR_FATAL;
+ }
+
+ /* Slot should be occupied */
+ if (!hdblk->ahhandles[slot])
+ return IMG_ERROR_INVALID_ID;
+
+ /* Free slot */
+ hdblk->ahhandles[slot] = NULL;
+
+ /* If this slot is before the previous free slot */
+ if ((origslot) < idcontext->freeslot)
+ idcontext->freeslot = origslot;
+ }
+ return IMG_SUCCESS;
+}
+
+int idgen_gethandle(void *idgenhandle, unsigned int id, void **handle)
+{
+ struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+ struct idgen_hdblk *hdblk;
+ unsigned int slot;
+
+ if (!idcontext)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (idcontext->incids) {
+ /*
+ * Find the slot in the correct hash table entry, and return
+ * the handle.
+ */
+ struct idgen_id *psid;
+
+ psid = idgen_getid(&idcontext->incidlist
+ [idgen_func(idcontext, id)], id);
+ if (psid)
+ *handle = psid->hid;
+
+ else
+ return IMG_ERROR_INVALID_ID;
+ } else {
+ /* If not incrementing IDs */
+ slot = id - 1;
+ if (slot >= idcontext->maxslotplus1)
+ return IMG_ERROR_INVALID_ID;
+
+ /* Find the block containing the id */
+ hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+ if (!hdblk)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ while (slot >= idcontext->blksize) {
+ slot -= idcontext->blksize;
+ hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+ if (!hdblk)
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Slot should be occupied */
+ if (!hdblk->ahhandles[slot])
+ return IMG_ERROR_INVALID_ID;
+
+ /* Return the handle */
+ *handle = hdblk->ahhandles[slot];
+ }
+
+ return IMG_SUCCESS;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/idgen_api.h b/drivers/media/platform/vxe-vxd/common/idgen_api.h
new file mode 100644
index 000000000000..6c894343f1fb
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/idgen_api.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ID generation manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef __IDGENAPI_H__
+#define __IDGENAPI_H__
+
+#include <linux/types.h>
+
+#include "img_errors.h"
+
+/*
+ * This function is used to create Id generation context.
+ * NOTE: Should only be called once to setup the context structure.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherence.
+ */
+int idgen_createcontext(unsigned int maxid, unsigned int blksize,
+ int incid, void **idgenhandle);
+
+/*
+ * This function is used to destroy an Id generation context. This function
+ * discards any handle blocks associated with the context.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherence.
+ */
+int idgen_destroycontext(void *idgenhandle);
+
+/*
+ * This function is used to associate a handle with an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_allocid(void *idgenhandle, void *handle, unsigned int *id);
+
+/*
+ * This function is used to free an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_freeid(void *idgenhandle, unsigned int id);
+
+/*
+ * This function is used to get the handle associated with an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_gethandle(void *idgenhandle, unsigned int id, void **handle);
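+
+/*
+ * Illustrative usage sketch (not part of the driver): allocate an ID for a
+ * handle, look it up, then release it; return codes unchecked for brevity.
+ * "my_object" is a hypothetical client pointer.
+ *
+ * void *idgen, *handle;
+ * unsigned int id;
+ *
+ * idgen_createcontext(64, 16, 0, &idgen); // maxid 64, blksize 16, no incids
+ * idgen_allocid(idgen, my_object, &id);
+ * idgen_gethandle(idgen, id, &handle); // handle == my_object
+ * idgen_freeid(idgen, id);
+ * idgen_destroycontext(idgen);
+ */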
+#endif /* __IDGENAPI_H__ */
diff --git a/drivers/media/platform/vxe-vxd/common/img_errors.h b/drivers/media/platform/vxe-vxd/common/img_errors.h
new file mode 100644
index 000000000000..05335320a5f9
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/img_errors.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Error codes.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ */
+#ifndef __IMG_ERRORS__
+#define __IMG_ERRORS__
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#define IMG_DBG_ASSERT(expected) ({WARN_ON(!(expected)); 0; })
+
+/* @brief Success */
+#define IMG_SUCCESS (0)
+/* @brief Timeout */
+#define IMG_ERROR_TIMEOUT (1)
+/* @brief memory allocation failed */
+#define IMG_ERROR_MALLOC_FAILED (2)
+/* @brief Unspecified fatal error */
+#define IMG_ERROR_FATAL (3)
+/* @brief Memory allocation failed */
+#define IMG_ERROR_OUT_OF_MEMORY (4)
+/* @brief Device is not found */
+#define IMG_ERROR_DEVICE_NOT_FOUND (5)
+/* @brief Device is not available/in use */
+#define IMG_ERROR_DEVICE_UNAVAILABLE (6)
+/* @brief Generic/unspecified failure */
+#define IMG_ERROR_GENERIC_FAILURE (7)
+/* @brief Operation was interrupted - retry */
+#define IMG_ERROR_INTERRUPTED (8)
+/* @brief Invalid id */
+#define IMG_ERROR_INVALID_ID (9)
+/* @brief A signature value was found to be incorrect */
+#define IMG_ERROR_SIGNATURE_INCORRECT (10)
+/* @brief The provided parameters were inconsistent/incorrect */
+#define IMG_ERROR_INVALID_PARAMETERS (11)
+/* @brief A list/pool has run dry */
+#define IMG_ERROR_STORAGE_TYPE_EMPTY (12)
+/* @brief A list is full */
+#define IMG_ERROR_STORAGE_TYPE_FULL (13)
+/* @brief Something has already occurred which the code thinks has not */
+#define IMG_ERROR_ALREADY_COMPLETE (14)
+/* @brief A state machine is in an unexpected/illegal state */
+#define IMG_ERROR_UNEXPECTED_STATE (15)
+/* @brief A required resource could not be created/locked */
+#define IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE (16)
+/*
+ * @brief An attempt to access a structure/resource was
+ * made before it was initialised
+ */
+#define IMG_ERROR_NOT_INITIALISED (17)
+/*
+ * @brief An attempt to initialise a structure/resource
+ * was made when it has already been initialised
+ */
+#define IMG_ERROR_ALREADY_INITIALISED (18)
+/* @brief A provided value exceeded stated bounds */
+#define IMG_ERROR_VALUE_OUT_OF_RANGE (19)
+/* @brief The operation has been cancelled */
+#define IMG_ERROR_CANCELLED (20)
+/* @brief A specified minimum has not been met */
+#define IMG_ERROR_MINIMUM_LIMIT_NOT_MET (21)
+/* @brief The requested feature or mode is not supported */
+#define IMG_ERROR_NOT_SUPPORTED (22)
+/* @brief A device or process was idle */
+#define IMG_ERROR_IDLE (23)
+/* @brief A device or process was busy */
+#define IMG_ERROR_BUSY (24)
+/* @brief The device or resource has been disabled */
+#define IMG_ERROR_DISABLED (25)
+/* @brief The requested operation is not permitted at this time */
+#define IMG_ERROR_OPERATION_PROHIBITED (26)
+/* @brief The entry read from the MMU page directory is invalid */
+#define IMG_ERROR_MMU_PAGE_DIRECTORY_FAULT (27)
+/* @brief The entry read from an MMU page table is invalid */
+#define IMG_ERROR_MMU_PAGE_TABLE_FAULT (28)
+/* @brief The entry read from an MMU page catalogue is invalid */
+#define IMG_ERROR_MMU_PAGE_CATALOGUE_FAULT (29)
+/* @brief Memory cannot be freed as it is still being used */
+#define IMG_ERROR_MEMORY_IN_USE (30)
+/* @brief A mismatch has unexpectedly occurred in data */
+#define IMG_ERROR_TEST_MISMATCH (31)
+
+#define IMG_ERROR_INVALID_CONTEXT (32)
+
+#define IMG_ERROR_RETRY (33)
+#define IMG_ERROR_UNDEFINED (34)
+#define IMG_ERROR_INVALID_SIZE (35)
+#define IMG_ERROR_SURFACE_LOCKED (36)
+
+/* Mutex subclasses */
+#define SUBCLASS_BASE 0
+#define SUBCLASS_VXD_V4L2 1
+#define SUBCLASS_VXE_V4L2 1
+#define SUBCLASS_BSPP 1
+#define SUBCLASS_ADDR_ALLOC 7
+#define SUBCLASS_IMGMEM 6
+#define SUBCLASS_RMAN 1
+#define SUBCLASS_TALMMU 5
+#define SUBCLASS_VXD_CORE 2
+#define SUBCLASS_POOL 3
+#define SUBCLASS_POOL_RES 5
+#define SUBCLASS_TOPAZ_API 2
+#define SUBCLASS_TOPAZDD_TX 4
+#define SUBCLASS_TOPAZDD 3
+
+#endif /* __IMG_ERRORS__ */
diff --git a/drivers/media/platform/vxe-vxd/common/img_mem.h b/drivers/media/platform/vxe-vxd/common/img_mem.h
new file mode 100644
index 000000000000..3328712a24ab
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/img_mem.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Typedefs for memory pool and attributes
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __IMG_MEM__
+#define __IMG_MEM__
+
+/*
+ * This type defines the memory attributes.
+ * @0x00000001: Memory to be allocated as cached
+ * @0x00000002: Memory to be allocated as uncached
+ * @0x00000004: Memory to be allocated as write-combined
+ * (or equivalent buffered/burst writes mechanism)
+ * @0x00001000: Memory can be read only by the core
+ * @0x00002000: Memory can be written only by the core
+ * @0x00010000: Memory should be readable by the cpu
+ * @0x00020000: Memory should be writable by the cpu
+ */
+enum sys_emem_attrib {
+ SYS_MEMATTRIB_CACHED = 0x00000001,
+ SYS_MEMATTRIB_UNCACHED = 0x00000002,
+ SYS_MEMATTRIB_WRITECOMBINE = 0x00000004,
+ SYS_MEMATTRIB_SECURE = 0x00000010,
+ SYS_MEMATTRIB_INPUT = 0x00000100,
+ SYS_MEMATTRIB_OUTPUT = 0x00000200,
+ SYS_MEMATTRIB_INTERNAL = 0x00000400,
+ SYS_MEMATTRIB_CORE_READ_ONLY = 0x00001000,
+ SYS_MEMATTRIB_CORE_WRITE_ONLY = 0x00002000,
+ SYS_MEMATTRIB_CPU_READ = 0x00010000,
+ SYS_MEMATTRIB_CPU_WRITE = 0x00020000,
+ SYS_MEMATTRIB_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif /* __IMG_MEM__ */
diff --git a/drivers/media/platform/vxe-vxd/common/img_mem_man.c b/drivers/media/platform/vxe-vxd/common/img_mem_man.c
new file mode 100644
index 000000000000..683f208c12ed
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/img_mem_man.c
@@ -0,0 +1,1125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC Memory Manager
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "imgmmu.h"
+#include "img_mem_man.h"
+#include "img_errors.h"
+
+#define VXD_MMU_SHIFT 8 /* assume 40-bit MMU */
+/* heaps ids (global) */
+#define MIN_HEAP 1
+#define MAX_HEAP 16
+
+/*
+ * struct mem_man - the device memory manager
+ * @dev: device pointer
+ * @heaps: idr list of heaps for the device memory manager
+ * @mem_ctxs: list of memory contexts (mem_ctx)
+ * @mutex: mutex for this device
+ */
+struct mem_man {
+ void *dev;
+ struct idr *heaps;
+ struct list_head mem_ctxs;
+ struct mutex *mutex; /* mutex for this device */
+};
+
+static struct mem_man mem_man_data = {0};
+
+/**
+ * struct mmu_page - the mmu page information for the buffer
+ * @buffer: buffer pointer for the particular mmu_page
+ * @page_cfg: mmu page configuration of physical and virtual addr
+ * @addr_shift: address shifting information
+ */
+struct mmu_page {
+ struct buffer *buffer;
+ struct mmu_page_cfg page_cfg;
+ unsigned int addr_shift;
+};
+
+static void _img_mem_free(struct buffer *buffer);
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping);
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx);
+
+#if defined(DEBUG_DECODER_DRIVER)
+static const char *get_heap_name(enum heap_type type)
+{
+ switch (type) {
+ case MEM_HEAP_TYPE_UNIFIED:
+ return "unified";
+ default:
+ return "unknown";
+ }
+}
+#endif
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct heap *heap;
+ int (*init_fn)(const struct heap_config *heap_cfg, struct heap *heap);
+ int ret;
+
+ switch (heap_cfg->type) {
+ case MEM_HEAP_TYPE_UNIFIED:
+ init_fn = img_mem_unified_init;
+ break;
+ default:
+ dev_err(mem_man->dev, "%s: heap type %d unknown\n", __func__,
+ heap_cfg->type);
+ return -EINVAL;
+ }
+
+ heap = kmalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ if (ret)
+ goto lock_failed;
+
+ ret = idr_alloc(mem_man->heaps, heap, MIN_HEAP, MAX_HEAP, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(mem_man->dev, "%s: idr_alloc failed\n", __func__);
+ goto alloc_id_failed;
+ }
+
+ heap->id = ret;
+ heap->type = heap_cfg->type;
+ heap->options = heap_cfg->options;
+ heap->to_dev_addr = heap_cfg->to_dev_addr;
+ heap->priv = NULL;
+
+ ret = init_fn(heap_cfg, heap);
+ if (ret) {
+ dev_err(mem_man->dev, "%s: heap init failed\n", __func__);
+ goto heap_init_failed;
+ }
+
+ *heap_id = heap->id;
+ mutex_unlock(mem_man->mutex);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(mem_man->dev, "%s created heap %d type %d (%s)\n",
+ __func__, *heap_id, heap_cfg->type, get_heap_name(heap->type));
+#endif
+ return 0;
+
+heap_init_failed:
+ idr_remove(mem_man->heaps, heap->id);
+alloc_id_failed:
+ mutex_unlock(mem_man->mutex);
+lock_failed:
+ kfree(heap);
+ return ret;
+}
+
+static void _img_mem_del_heap(struct heap *heap)
+{
+ struct mem_man *mem_man = &mem_man_data;
+
+ if (heap->ops->destroy)
+ heap->ops->destroy(heap);
+
+ idr_remove(mem_man->heaps, heap->id);
+}
+
+void img_mem_del_heap(int heap_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct heap *heap;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ heap = idr_find(mem_man->heaps, heap_id);
+ if (!heap) {
+ dev_warn(mem_man->dev, "%s heap %d not found!\n", __func__,
+ heap_id);
+ mutex_unlock(mem_man->mutex);
+ return;
+ }
+
+ _img_mem_del_heap(heap);
+
+ mutex_unlock(mem_man->mutex);
+
+ kfree(heap);
+}
+
+int img_mem_create_ctx(struct mem_ctx **new_ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mem_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->buffers = kzalloc(sizeof(*ctx->buffers), GFP_KERNEL);
+ if (!ctx->buffers) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ idr_init(ctx->buffers);
+
+ INIT_LIST_HEAD(&ctx->mmu_ctxs);
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ list_add(&ctx->mem_man_entry, &mem_man->mem_ctxs);
+ mutex_unlock(mem_man->mutex);
+
+ *new_ctx = ctx;
+ return 0;
+}
+
+static void _img_mem_destroy_ctx(struct mem_ctx *ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+ int buff_id;
+
+ /* free derelict mmu contexts */
+ while (!list_empty(&ctx->mmu_ctxs)) {
+ struct mmu_ctx *mc;
+
+ mc = list_first_entry(&ctx->mmu_ctxs,
+ struct mmu_ctx, mem_ctx_entry);
+ dev_warn(mem_man->dev, "%s: found derelict mmu context %p\n",
+ __func__, mc);
+ _img_mmu_ctx_destroy(mc);
+ kfree(mc);
+ }
+
+ /* free derelict buffers */
+ buff_id = MEM_MAN_MIN_BUFFER;
+ buffer = idr_get_next(ctx->buffers, &buff_id);
+ while (buffer) {
+ dev_warn(mem_man->dev, "%s: found derelict buffer %d\n",
+ __func__, buff_id);
+ if (buffer->heap)
+ _img_mem_free(buffer);
+ else
+ idr_remove(ctx->buffers, buffer->id);
+ kfree(buffer);
+ buff_id = MEM_MAN_MIN_BUFFER;
+ buffer = idr_get_next(ctx->buffers, &buff_id);
+ }
+
+ idr_destroy(ctx->buffers);
+ kfree(ctx->buffers);
+ __list_del_entry(&ctx->mem_man_entry);
+}
+
+void img_mem_destroy_ctx(struct mem_ctx *ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ _img_mem_destroy_ctx(ctx);
+ mutex_unlock(mem_man->mutex);
+
+ kfree(ctx);
+}
+
+static int _img_mem_alloc(void *device, struct mem_ctx *ctx,
+ struct heap *heap, unsigned long size,
+ enum mem_attr attr, struct buffer **buffer_new)
+{
+ struct buffer *buffer;
+ int ret;
+
+ if (size == 0) {
+ dev_err(device, "%s: buffer size is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!heap->ops || !heap->ops->alloc) {
+ dev_err(device, "%s: no alloc function in heap %d!\n",
+ __func__, heap->id);
+ return -EINVAL;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = idr_alloc(ctx->buffers, buffer,
+ MEM_MAN_MIN_BUFFER, MEM_MAN_MAX_BUFFER, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(device, "%s: idr_alloc failed\n", __func__);
+ goto idr_alloc_failed;
+ }
+
+ buffer->id = ret;
+ buffer->request_size = size;
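+ /* round the requested size up to a whole number of pages */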
+ buffer->actual_size = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
+ buffer->device = device;
+ buffer->mem_ctx = ctx;
+ buffer->heap = heap;
+ INIT_LIST_HEAD(&buffer->mappings);
+ buffer->kptr = NULL;
+ buffer->priv = NULL;
+
+ ret = heap->ops->alloc(device, heap, buffer->actual_size, attr,
+ buffer);
+ if (ret) {
+ dev_err(device, "%s: heap %d alloc failed\n", __func__,
+ heap->id);
+ goto heap_alloc_failed;
+ }
+
+ *buffer_new = buffer;
+
+ dev_dbg(device, "%s heap %p ctx %p created buffer %d (%p) actual_size %lu\n",
+ __func__, heap, ctx, buffer->id, buffer, buffer->actual_size);
+ return 0;
+
+heap_alloc_failed:
+ idr_remove(ctx->buffers, buffer->id);
+idr_alloc_failed:
+ kfree(buffer);
+ return ret;
+}
+
+int img_mem_alloc(void *device, struct mem_ctx *ctx, int heap_id,
+ unsigned long size, enum mem_attr attr, int *buf_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct heap *heap;
+ struct buffer *buffer;
+ int ret;
+
+ dev_dbg(device, "%s heap %d ctx %p size %lu\n", __func__, heap_id,
+ ctx, size);
+
+ ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ if (ret)
+ return ret;
+
+ heap = idr_find(mem_man->heaps, heap_id);
+ if (!heap) {
+ dev_err(device, "%s: heap id %d not found\n", __func__,
+ heap_id);
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ ret = _img_mem_alloc(device, ctx, heap, size, attr, &buffer);
+ if (ret) {
+ mutex_unlock(mem_man->mutex);
+ return ret;
+ }
+
+ *buf_id = buffer->id;
+ mutex_unlock(mem_man->mutex);
+
+ dev_dbg(device, "%s heap %d ctx %p created buffer %d (%p) size %lu\n",
+ __func__, heap_id, ctx, *buf_id, buffer, size);
+ return ret;
+}
+
+static int _img_mem_import(void *device, struct mem_ctx *ctx,
+ unsigned long size, enum mem_attr attr, struct buffer **buffer_new)
+{
+ struct buffer *buffer;
+ int ret;
+
+ if (size == 0) {
+ dev_err(device, "%s: buffer size is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = idr_alloc(ctx->buffers, buffer,
+ MEM_MAN_MIN_BUFFER, MEM_MAN_MAX_BUFFER, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(device, "%s: idr_alloc failed\n", __func__);
+ goto idr_alloc_failed;
+ }
+
+ buffer->id = ret;
+ buffer->request_size = size;
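+ /* round the requested size up to a whole number of pages */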
+ buffer->actual_size = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
+ buffer->device = device;
+ buffer->mem_ctx = ctx;
+ buffer->heap = NULL;
+ INIT_LIST_HEAD(&buffer->mappings);
+ buffer->kptr = NULL;
+ buffer->priv = NULL;
+
+ *buffer_new = buffer;
+
+ dev_dbg(device, "%s ctx %p created buffer %d (%p) actual_size %lu\n",
+ __func__, ctx, buffer->id, buffer, buffer->actual_size);
+ return 0;
+
+idr_alloc_failed:
+ kfree(buffer);
+ return ret;
+}
+
+int img_mem_import(void *device, struct mem_ctx *ctx,
+ unsigned long size, enum mem_attr attr, int *buf_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+ int ret;
+
+ dev_dbg(device, "%s ctx %p size %lu\n", __func__, ctx, size);
+
+ ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ if (ret)
+ return ret;
+
+ ret = _img_mem_import(device, ctx, size, attr, &buffer);
+ if (ret) {
+ mutex_unlock(mem_man->mutex);
+ return ret;
+ }
+
+ *buf_id = buffer->id;
+ mutex_unlock(mem_man->mutex);
+
+ dev_dbg(device, "%s ctx %p created buffer %d (%p) size %lu\n",
+ __func__, ctx, *buf_id, buffer, size);
+ return ret;
+}
+
+static void _img_mem_free(struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ struct heap *heap = buffer->heap;
+ struct mem_ctx *ctx = buffer->mem_ctx;
+
+ if (!heap->ops || !heap->ops->free) {
+ dev_err(dev, "%s: no free function in heap %d!\n",
+ __func__, heap->id);
+ return;
+ }
+
+ while (!list_empty(&buffer->mappings)) {
+ struct mmu_ctx_mapping *map;
+
+ map = list_first_entry(&buffer->mappings,
+ struct mmu_ctx_mapping, buffer_entry);
+ dev_warn(dev, "%s: found mapping for buffer %d (size %lu)\n",
+ __func__, map->buffer->id, map->buffer->actual_size);
+
+ _img_mmu_unmap(map);
+
+ kfree(map);
+ }
+
+ heap->ops->free(heap, buffer);
+
+ idr_remove(ctx->buffers, buffer->id);
+}
+
+void img_mem_free(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ mutex_unlock(mem_man->mutex);
+ return;
+ }
+
+ _img_mem_free(buffer);
+
+ mutex_unlock(mem_man->mutex);
+
+ kfree(buffer);
+}
+
+void img_mem_free_bufid(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ mutex_unlock(mem_man->mutex);
+ return;
+ }
+
+ idr_remove(ctx->buffers, buffer->id);
+
+ mutex_unlock(mem_man->mutex);
+
+ kfree(buffer);
+}
+
+static int _img_mem_map_km(struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ struct heap *heap = buffer->heap;
+
+ if (!heap->ops || !heap->ops->map_km) {
+ dev_err(dev, "%s: no map_km in heap %d!\n", __func__, heap->id);
+ return -EINVAL;
+ }
+
+ return heap->ops->map_km(heap, buffer);
+}
+
+int img_mem_map_km(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+ int ret;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ ret = _img_mem_map_km(buffer);
+
+ mutex_unlock(mem_man->mutex);
+
+ return ret;
+}
+
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+ void *kptr;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+ buff_id);
+ mutex_unlock(mem_man->mutex);
+ return NULL;
+ }
+ kptr = buffer->kptr;
+ mutex_unlock(mem_man->mutex);
+ return kptr;
+}
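+
+/*
+ * Illustrative usage sketch (not part of the driver): allocate a buffer
+ * from a heap, map it into the kernel and use it, then free it. Error
+ * handling elided; "dev" and "heap_id" come from earlier device setup and
+ * img_mem_add_heap() calls.
+ *
+ * struct mem_ctx *ctx;
+ * int buf_id;
+ *
+ * img_mem_create_ctx(&ctx);
+ * img_mem_alloc(dev, ctx, heap_id, 4096, (enum mem_attr)0, &buf_id);
+ * img_mem_map_km(ctx, buf_id);
+ * memset(img_mem_get_kptr(ctx, buf_id), 0, 4096);
+ * img_mem_sync_cpu_to_device(ctx, buf_id);
+ * img_mem_free(ctx, buf_id);
+ * img_mem_destroy_ctx(ctx);
+ */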
+
+static void _img_mem_sync_cpu_to_device(struct buffer *buffer)
+{
+ struct heap *heap = buffer->heap;
+
+ if (heap->ops && heap->ops->sync_cpu_to_dev)
+ heap->ops->sync_cpu_to_dev(heap, buffer);
+
+ /* sync to device memory */
+ mb();
+}
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+ buff_id);
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ _img_mem_sync_cpu_to_device(buffer);
+
+ mutex_unlock(mem_man->mutex);
+ return 0;
+}
+
+static void _img_mem_sync_device_to_cpu(struct buffer *buffer)
+{
+ struct heap *heap = buffer->heap;
+
+ if (heap->ops && heap->ops->sync_dev_to_cpu)
+ heap->ops->sync_dev_to_cpu(heap, buffer);
+}
+
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct buffer *buffer;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+ buff_id);
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ _img_mem_sync_device_to_cpu(buffer);
+
+ mutex_unlock(mem_man->mutex);
+ return 0;
+}
+
+static struct mmu_page_cfg *mmu_page_alloc(void *arg)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_ctx *mmu_ctx = arg;
+ struct mmu_page *page;
+ struct buffer *buffer;
+ struct heap *heap;
+ int ret;
+
+ dev_dbg(mmu_ctx->device, "%s:%d arg %p\n", __func__, __LINE__, arg);
+
+ WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
+ if (!page)
+ return NULL;
+
+ ret = _img_mem_alloc(mmu_ctx->device, mmu_ctx->mem_ctx,
+ mmu_ctx->heap, PAGE_SIZE, (enum mem_attr)0, &buffer);
+ if (ret) {
+ dev_err(mmu_ctx->device, "%s: img_mem_alloc failed (%d)\n",
+ __func__, ret);
+ goto free_page;
+ }
+
+ ret = _img_mem_map_km(buffer);
+ if (ret) {
+ dev_err(mmu_ctx->device, "%s: img_mem_map_km failed (%d)\n",
+ __func__, ret);
+ goto free_buffer;
+ }
+
+ page->addr_shift = mmu_ctx->mmu_config_addr_width - 32;
+ page->buffer = buffer;
+ page->page_cfg.cpu_virt_addr = (unsigned long)buffer->kptr;
+
+ heap = buffer->heap;
+ if (heap->ops && heap->ops->get_sg_table) {
+ void *sgt;
+
+ ret = heap->ops->get_sg_table(heap, buffer, &sgt);
+ if (ret) {
+ dev_err(mmu_ctx->device,
+ "%s: heap %d buffer %d no sg_table!\n",
+ __func__, heap->id, buffer->id);
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+ page->page_cfg.phys_addr = sg_phys(img_mmu_get_sgl(sgt));
+ } else {
+ dev_err(mmu_ctx->device, "%s: heap %d buffer %d no get_sg!\n",
+ __func__, heap->id, buffer->id);
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+ dev_dbg(mmu_ctx->device, "%s:%d virt addr %#lx\n", __func__, __LINE__,
+ page->page_cfg.cpu_virt_addr);
+ dev_dbg(mmu_ctx->device, "%s:%d phys addr %#llx\n", __func__, __LINE__,
+ page->page_cfg.phys_addr);
+ return &page->page_cfg;
+
+free_buffer:
+ _img_mem_free(buffer);
+ kfree(buffer);
+free_page:
+ kfree(page);
+ return NULL;
+}
+
+static void mmu_page_free(struct mmu_page_cfg *arg)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_page *page;
+
+ page = container_of(arg, struct mmu_page, page_cfg);
+
+ WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+ _img_mem_free(page->buffer);
+ kfree(page->buffer);
+ kfree(page);
+}
+
+static void mmu_page_write(struct mmu_page_cfg *page_cfg,
+ unsigned int offset, unsigned long long addr,
+ unsigned int flags)
+{
+ unsigned int *mem = (unsigned int *)page_cfg->cpu_virt_addr;
+ struct mmu_page *mmu_page;
+ struct heap *heap;
+
+ mmu_page = container_of(page_cfg, struct mmu_page, page_cfg);
+ heap = mmu_page->buffer->heap;
+
+ /* skip translation when flags are zero, assuming address is invalid */
+ if (flags && heap->to_dev_addr)
+ addr = heap->to_dev_addr(&heap->options, addr);
+ addr >>= mmu_page->addr_shift;
+
+ mem[offset] = addr | flags;
+}
+
+static void mmu_update_page(struct mmu_page_cfg *arg)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_page *page;
+
+ page = container_of(arg, struct mmu_page, page_cfg);
+
+ WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+ _img_mem_sync_cpu_to_device(page->buffer);
+}
+
+int img_mmu_ctx_create(void *device, unsigned int mmu_config_addr_width,
+ struct mem_ctx *mem_ctx, int heap_id,
+ void (*callback_fn)(enum mmu_callback_type type,
+ int buff_id, void *data),
+ void *callback_data, struct mmu_ctx **mmu_ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+
+ static struct mmu_info mmu_functions = {
+ .pfn_page_alloc = mmu_page_alloc,
+ .pfn_page_free = mmu_page_free,
+ .pfn_page_write = mmu_page_write,
+ .pfn_page_update = mmu_update_page,
+ };
+ struct mmu_ctx *ctx;
+ int ret;
+
+ if (mmu_config_addr_width < 32) {
+ dev_err(device,
+ "%s: invalid addr_width (%d) must be >= 32 !\n",
+ __func__, mmu_config_addr_width);
+ return -EINVAL;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->device = device;
+ ctx->mem_ctx = mem_ctx;
+ ctx->mmu_config_addr_width = mmu_config_addr_width;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ ctx->heap = idr_find(mem_man->heaps, heap_id);
+ if (!ctx->heap) {
+ dev_err(device, "%s: invalid heap_id (%d)!\n", __func__,
+ heap_id);
+ mutex_unlock(mem_man->mutex);
+ kfree(ctx);
+ return -EINVAL;
+ }
+
+ mmu_functions.alloc_ctx = ctx;
+ ctx->mmu_dir = mmu_create_directory(&mmu_functions);
+ if (IS_ERR_VALUE((unsigned long)ctx->mmu_dir)) {
+ ret = (long)(ctx->mmu_dir);
+ dev_err(device, "%s: directory create failed (%d)!\n", __func__,
+ ret);
+ ctx->mmu_dir = NULL;
+ mutex_unlock(mem_man->mutex);
+ kfree(ctx);
+ return ret;
+ }
+
+ list_add(&ctx->mem_ctx_entry, &mem_ctx->mmu_ctxs);
+ INIT_LIST_HEAD(&ctx->mappings);
+
+ ctx->callback_fn = callback_fn;
+ ctx->callback_data = callback_data;
+
+ *mmu_ctx = ctx;
+
+ mutex_unlock(mem_man->mutex);
+
+ return 0;
+}
+
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ int ret;
+
+ while (!list_empty(&ctx->mappings)) {
+ struct mmu_ctx_mapping *map;
+
+ map = list_first_entry(&ctx->mappings,
+ struct mmu_ctx_mapping, mmu_ctx_entry);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(ctx->device,
+ "%s: found mapped buffer %d (size %lu)\n",
+ __func__, map->buffer->id, map->buffer->request_size);
+#endif
+
+ _img_mmu_unmap(map);
+
+ kfree(map);
+ }
+
+ ret = mmu_destroy_directory(ctx->mmu_dir);
+ if (ret)
+ dev_err(mem_man->dev, "mmu_destroy_directory failed (%d)!\n",
+ ret);
+ __list_del_entry(&ctx->mem_ctx_entry);
+}
+
+void img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+ struct mem_man *mem_man = &mem_man_data;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ _img_mmu_ctx_destroy(ctx);
+ mutex_unlock(mem_man->mutex);
+
+ kfree(ctx);
+}
+
+int img_mmu_map_sg(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id, void *sgt, unsigned int virt_addr,
+ unsigned int map_flags)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_ctx_mapping *mapping;
+ struct mmu_heap_alloc heap_alloc;
+ struct buffer *buffer;
+ int ret = 0;
+
+ dev_dbg(mmu_ctx->device, "%s sgt %p virt_addr %#x\n", __func__,
+ sgt, virt_addr);
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(mem_ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ ret = -EINVAL;
+ goto error;
+ }
+ dev_dbg(mmu_ctx->device, "%s buffer %d 0x%p size %lu virt_addr %#x\n",
+ __func__, buff_id, buffer, buffer->request_size, virt_addr);
+
+ heap_alloc.virt_addr = virt_addr;
+ heap_alloc.alloc_size = buffer->actual_size;
+
+ mapping->mmu_ctx = mmu_ctx;
+ mapping->buffer = buffer;
+ mapping->virt_addr = virt_addr;
+
+ if (sgt) {
+ struct sg_table *sgt_new = sgt;
+
+ mapping->map = mmu_directory_map_sg(mmu_ctx->mmu_dir, sgt_new->sgl,
+ &heap_alloc, map_flags);
+ if (IS_ERR_VALUE((unsigned long)mapping->map)) {
+ ret = (long)(mapping->map);
+ mapping->map = NULL;
+ }
+ } else {
+ dev_err(mmu_ctx->device, "%s: buffer %d no get_sg!\n",
+ __func__, buffer->id);
+ ret = -EINVAL;
+ goto error;
+ }
+ if (ret) {
+ dev_err(mmu_ctx->device, "mmu_directory_map_sg failed (%d)!\n",
+ ret);
+ goto error;
+ }
+
+ list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
+ list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
+
+ if (mmu_ctx->callback_fn)
+ mmu_ctx->callback_fn(MMU_CALLBACK_MAP, buffer->id,
+ mmu_ctx->callback_data);
+
+ mutex_unlock(mem_man->mutex);
+ return 0;
+
+error:
+ mutex_unlock(mem_man->mutex);
+ kfree(mapping);
+ return ret;
+}
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id, unsigned int virt_addr, unsigned int map_flags)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_ctx_mapping *mapping;
+ struct mmu_heap_alloc heap_alloc;
+ struct buffer *buffer;
+ struct heap *heap;
+ int ret;
+
+ dev_dbg(mmu_ctx->device, "%s buffer %d virt_addr %#x\n", __func__,
+ buff_id, virt_addr);
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+ buffer = idr_find(mem_ctx->buffers, buff_id);
+ if (!buffer) {
+ dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ ret = -EINVAL;
+ goto error;
+ }
+ dev_dbg(mmu_ctx->device, "%s buffer %d 0x%p size %lu virt_addr %#x\n",
+ __func__, buff_id, buffer, buffer->request_size, virt_addr);
+
+ heap_alloc.virt_addr = virt_addr;
+ heap_alloc.alloc_size = buffer->actual_size;
+
+ mapping->mmu_ctx = mmu_ctx;
+ mapping->buffer = buffer;
+ mapping->virt_addr = virt_addr;
+
+ heap = buffer->heap;
+ if (heap->ops && heap->ops->get_sg_table) {
+ void *sgt;
+
+ ret = heap->ops->get_sg_table(heap, buffer, &sgt);
+ if (ret) {
+ dev_err(mmu_ctx->device,
+ "%s: heap %d buffer %d no sg_table!\n",
+ __func__, heap->id, buffer->id);
+ goto error;
+ }
+
+ mapping->map = mmu_directory_map_sg(mmu_ctx->mmu_dir, img_mmu_get_sgl(sgt),
+ &heap_alloc, map_flags);
+ if (IS_ERR_VALUE((unsigned long)mapping->map)) {
+ ret = (long)(mapping->map);
+ mapping->map = NULL;
+ }
+ } else {
+ dev_err(mmu_ctx->device, "%s: heap %d buffer %d no get_sg!\n",
+ __func__, heap->id, buffer->id);
+ ret = -EINVAL;
+ goto error;
+ }
+ if (ret) {
+ dev_err(mmu_ctx->device, "mmu_directory_map failed (%d)!\n",
+ ret);
+ goto error;
+ }
+
+ list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
+ list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
+
+ if (mmu_ctx->callback_fn)
+ mmu_ctx->callback_fn(MMU_CALLBACK_MAP, buffer->id,
+ mmu_ctx->callback_data);
+
+ mutex_unlock(mem_man->mutex);
+ return 0;
+
+error:
+ mutex_unlock(mem_man->mutex);
+ kfree(mapping);
+ return ret;
+}
+
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping)
+{
+ struct mmu_ctx *ctx = mapping->mmu_ctx;
+ int res;
+
+ dev_dbg(ctx->device, "%s:%d mapping %p buffer %d\n", __func__,
+ __LINE__, mapping, mapping->buffer->id);
+
+ res = mmu_directory_unmap(mapping->map);
+ if (res)
+ dev_warn(ctx->device, "mmu_directory_unmap failed (%d)!\n",
+ res);
+
+ __list_del_entry(&mapping->mmu_ctx_entry);
+ __list_del_entry(&mapping->buffer_entry);
+
+ if (ctx->callback_fn)
+ ctx->callback_fn(MMU_CALLBACK_UNMAP, mapping->buffer->id,
+ ctx->callback_data);
+}
+
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_ctx_mapping *mapping;
+ struct list_head *lst;
+
+ dev_dbg(mmu_ctx->device, "%s:%d buffer %d\n", __func__, __LINE__,
+ buff_id);
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ mapping = NULL;
+ list_for_each(lst, &mmu_ctx->mappings) {
+ struct mmu_ctx_mapping *m;
+
+ m = list_entry(lst, struct mmu_ctx_mapping, mmu_ctx_entry);
+ if (m->buffer->id == buff_id) {
+ mapping = m;
+ break;
+ }
+ }
+
+ if (!mapping) {
+ dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+ __func__, buff_id);
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ _img_mmu_unmap(mapping);
+
+ mutex_unlock(mem_man->mutex);
+ kfree(mapping);
+ return 0;
+}
+
+int img_mmu_get_ptd(const struct mmu_ctx *ctx, unsigned int *ptd)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct mmu_page_cfg *page_cfg;
+ unsigned long long addr;
+
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ page_cfg = mmu_directory_get_page(ctx->mmu_dir);
+ if (!page_cfg) {
+ mutex_unlock(mem_man->mutex);
+ return -EINVAL;
+ }
+
+ addr = page_cfg->phys_addr;
+ if (ctx->heap->to_dev_addr)
+ addr = ctx->heap->to_dev_addr(&ctx->heap->options, addr);
+
+ mutex_unlock(mem_man->mutex);
+
+ *ptd = (unsigned int)(addr >> VXD_MMU_SHIFT);
+
+ dev_dbg(ctx->device, "%s: addr %#llx ptd %#x\n", __func__,
+ page_cfg->phys_addr, *ptd);
+ return 0;
+}
+
+int img_mmu_get_pagetable_entry(const struct mmu_ctx *ctx, unsigned long dev_virt_addr)
+{
+ if (!ctx)
+ return 0xFFFFFF;
+
+ return mmu_directory_get_pagetable_entry(ctx->mmu_dir, dev_virt_addr);
+}
+
+/*
+ * Initialisation
+ */
+int img_mem_init(void *dev)
+{
+ struct mem_man *mem_man = &mem_man_data;
+
+ mem_man->dev = dev;
+ mem_man->heaps = kzalloc(sizeof(*mem_man->heaps), GFP_KERNEL);
+ if (!mem_man->heaps)
+ return -ENOMEM;
+ idr_init(mem_man->heaps);
+ INIT_LIST_HEAD(&mem_man->mem_ctxs);
+ mem_man->mutex = kzalloc(sizeof(*mem_man->mutex), GFP_KERNEL);
+ if (!mem_man->mutex) {
+  pr_err("Memory allocation failed for mutex\n");
+  idr_destroy(mem_man->heaps);
+  kfree(mem_man->heaps);
+  return -ENOMEM;
+ }
+ mutex_init(mem_man->mutex);
+
+ return 0;
+}
+
+void img_mem_exit(void)
+{
+ struct mem_man *mem_man = &mem_man_data;
+ struct heap *heap;
+ int heap_id;
+
+ /* keeps mutex checks (WARN_ON) happy, this will never actually wait */
+ mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+ while (!list_empty(&mem_man->mem_ctxs)) {
+ struct mem_ctx *mc;
+
+ mc = list_first_entry(&mem_man->mem_ctxs,
+ struct mem_ctx, mem_man_entry);
+ dev_warn(mem_man->dev, "%s derelict memory context %p!\n",
+ __func__, mc);
+ _img_mem_destroy_ctx(mc);
+ kfree(mc);
+ }
+
+ heap_id = MIN_HEAP;
+ heap = idr_get_next(mem_man->heaps, &heap_id);
+ while (heap) {
+ dev_warn(mem_man->dev, "%s derelict heap %d!\n", __func__,
+ heap_id);
+ _img_mem_del_heap(heap);
+ kfree(heap);
+ heap_id = MIN_HEAP;
+ heap = idr_get_next(mem_man->heaps, &heap_id);
+ }
+ idr_destroy(mem_man->heaps);
+ kfree(mem_man->heaps);
+
+ mutex_unlock(mem_man->mutex);
+
+ mutex_destroy(mem_man->mutex);
+ kfree(mem_man->mutex);
+ mem_man->mutex = NULL;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/img_mem_man.h b/drivers/media/platform/vxe-vxd/common/img_mem_man.h
new file mode 100644
index 000000000000..1a10ad994d6e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/img_mem_man.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC Memory Manager header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_DEC_MEM_MGR_H
+#define _IMG_DEC_MEM_MGR_H
+
+#include <linux/types.h>
+
+/* buffer ids (per memory context) */
+#define MEM_MAN_MIN_BUFFER 1
+#define MEM_MAN_MAX_BUFFER 16384
+
+enum mem_attr {
+ MEM_ATTR_CACHED = 0x00000001,
+ MEM_ATTR_UNCACHED = 0x00000002,
+ MEM_ATTR_WRITECOMBINE = 0x00000004,
+ MEM_ATTR_SECURE = 0x00000010,
+ MEM_ATTR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum mmu_callback_type {
+ MMU_CALLBACK_MAP = 1,
+ MMU_CALLBACK_UNMAP,
+ MMU_CALLBACK_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum heap_type {
+ MEM_HEAP_TYPE_UNIFIED = 1,
+ MEM_HEAP_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+union heap_options {
+ struct {
+ long long gfp_type; /* pool and flags for buffer allocations */
+ } unified;
+};
+
+/**
+ * struct heap_config - contains heap configuration structure
+ * @type: enumeration of heap_type
+ * @options: pool and flags for buffer allocations, e.g. GFP_KERNEL
+ * @to_dev_addr: function pointer for retrieving device addr
+ */
+struct heap_config {
+ enum heap_type type;
+ union heap_options options;
+ unsigned long long (*to_dev_addr)(union heap_options *opts, unsigned long long addr);
+};
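+
+/*
+ * Example (illustrative sketch only, not taken from the driver): a
+ * unified heap whose buffers are allocated with GFP_KERNEL and which
+ * needs no device address translation; example_heap_cfg is a placeholder
+ * name:
+ *
+ *	static struct heap_config example_heap_cfg = {
+ *		.type = MEM_HEAP_TYPE_UNIFIED,
+ *		.options.unified.gfp_type = GFP_KERNEL,
+ *		.to_dev_addr = NULL,
+ *	};
+ *	int heap_id;
+ *
+ *	img_mem_add_heap(&example_heap_cfg, &heap_id);
+ */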
+
+/*
+ * struct mmu_heap - typedef for mmu_heap
+ * @virt_addr_start: start of the device virtual address
+ * @alloc_atom: atom allocation in bytes
+ * @size: total size of the heap in bytes
+ */
+struct mmu_heap {
+ unsigned long virt_addr_start;
+ unsigned long alloc_atom;
+ unsigned long size;
+};
+
+/*
+ * struct mem_ctx - the memory context
+ * @buffers: idr list of buffers
+ * @mmu_ctxs: contains linked lists of struct mmu_ctx
+ * @mem_man_entry: the entry list for dev_mem_main:mem_ctxs linked list
+ */
+struct mem_ctx {
+ struct idr *buffers;
+ struct list_head mmu_ctxs;
+ struct list_head mem_man_entry;
+};
+
+/*
+ * struct mmu_ctx_mapping - the mmu context mapping information
+ * @mmu_ctx: pointer to the mmu_ctx to which this mmu mapping information
+ * belongs
+ * @buffer: pointer to the buffer which this mmu_ctx_mapping is for
+ * @map: pointer to the mmu_map which this mmu_ctx_mapping belongs
+ * @virt_addr: Virtual address
+ * @mmu_ctx_entry: the entry list for mmu_ctx:mapping linked list.
+ * @buffer_entry: the entry list for buffer:mappings linked list.
+ */
+struct mmu_ctx_mapping {
+ struct mmu_ctx *mmu_ctx;
+ struct buffer *buffer;
+ struct mmu_map *map;
+ unsigned int virt_addr;
+ struct list_head mmu_ctx_entry;
+ struct list_head buffer_entry;
+};
+
+/*
+ * struct mmu_ctx - the mmu context information - one per stream
+ * @device: pointer to the device
+ * @mmu_config_addr_width: the address width for the mmu config
+ * @mem_ctx: pointer to mem_ctx where this mmu_ctx belongs to
+ * @heap: pointer to struct heap to where this mem_ctx belongs to
+ * @mmu_dir: pointer to the mmu_directory this mmu_ctx belongs to
+ * @mappings: contains linked list of struct mmu_ctx_mapping
+ * @mem_ctx_entry: the entry list for mem_ctx:mmu_ctxs
+ * @callback_fn: pointer to function callback
+ * @callback_data: pointer to the callback data
+ */
+struct mmu_ctx {
+ void *device;
+ unsigned int mmu_config_addr_width;
+ struct mem_ctx *mem_ctx;
+ struct heap *heap;
+ struct mmu_directory *mmu_dir;
+ struct list_head mappings;
+ struct list_head mem_ctx_entry;
+ void (*callback_fn)(enum mmu_callback_type type, int buff_id,
+ void *data);
+ void *callback_data;
+};
+
+/*
+ * struct buffer - a memory buffer - one per allocation
+ * @id: buffer identification
+ * @request_size: request size for the allocation
+ * @actual_size: size aligned with the PAGE_SIZE allocation
+ * @device: pointer to the device
+ * @mem_ctx: pointer to struct mem_ctx to where this buffer belongs to
+ * @heap: pointer to struct heap to where this buffer belongs to
+ * @mappings: contains linked lists of struct mmu_ctx_mapping
+ * @kptr: pointer to virtual mapping for the buffer object into kernel address
+ * space
+ * @priv: pointer to priv data used for scatterlist table info
+ */
+struct buffer {
+ int id; /* Generated in <mem_ctx:buffers> */
+ unsigned long request_size;
+ unsigned long actual_size;
+ void *device;
+ struct mem_ctx *mem_ctx;
+ struct heap *heap;
+ struct list_head mappings; /* contains <struct mmu_ctx_mapping> */
+ void *kptr;
+ void *priv;
+};
+
+struct heap_ops {
+ int (*alloc)(void *device, struct heap *heap,
+ unsigned long size, enum mem_attr attr,
+ struct buffer *buffer);
+ void (*free)(struct heap *heap, struct buffer *buffer);
+ int (*map_km)(struct heap *heap, struct buffer *buffer);
+ int (*get_sg_table)(struct heap *heap, struct buffer *buffer,
+ void **sg_table);
+ void (*sync_cpu_to_dev)(struct heap *heap, struct buffer *buffer);
+ void (*sync_dev_to_cpu)(struct heap *heap, struct buffer *buffer);
+ void (*destroy)(struct heap *heap);
+};
+
+struct heap {
+ int id; /* Generated in <mem_man:heaps> */
+ enum heap_type type;
+ struct heap_ops *ops;
+ union heap_options options;
+ unsigned long long (*to_dev_addr)(union heap_options *opts, unsigned long long addr);
+ void *priv;
+};
+
+int img_mem_init(void *dev);
+void img_mem_exit(void);
+
+int img_mem_create_ctx(struct mem_ctx **new_ctx);
+void img_mem_destroy_ctx(struct mem_ctx *ctx);
+
+int img_mem_import(void *device, struct mem_ctx *ctx,
+ unsigned long size, enum mem_attr attr, int *buf_id);
+
+int img_mem_alloc(void *device, struct mem_ctx *ctx, int heap_id,
+ unsigned long size, enum mem_attr attributes, int *buf_id);
+void img_mem_free(struct mem_ctx *ctx, int buff_id);
+
+void img_mem_free_bufid(struct mem_ctx *ctx, int buf_id);
+
+int img_mem_map_km(struct mem_ctx *ctx, int buf_id);
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buff_id);
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buf_id);
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buf_id);
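+
+/*
+ * Typical buffer lifecycle (an illustrative sketch; error handling is
+ * omitted, and dev, ctx and heap_id are assumed to come from
+ * img_mem_init(), img_mem_create_ctx() and img_mem_add_heap()):
+ *
+ *	int buf_id;
+ *	void *kptr;
+ *
+ *	img_mem_alloc(dev, ctx, heap_id, size, MEM_ATTR_WRITECOMBINE, &buf_id);
+ *	img_mem_map_km(ctx, buf_id);
+ *	kptr = img_mem_get_kptr(ctx, buf_id);
+ *	... CPU writes through kptr ...
+ *	img_mem_sync_cpu_to_device(ctx, buf_id);
+ *	... device consumes the buffer ...
+ *	img_mem_free(ctx, buf_id);
+ */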
+
+int img_mmu_ctx_create(void *device, unsigned int mmu_config_addr_width,
+ struct mem_ctx *mem_ctx, int heap_id,
+ void (*callback_fn)(enum mmu_callback_type type,
+ int buff_id, void *data),
+ void *callback_data, struct mmu_ctx **mmu_ctx);
+void img_mmu_ctx_destroy(struct mmu_ctx *ctx);
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id, unsigned int virt_addr, unsigned int map_flags);
+int img_mmu_map_sg(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id, void *sgt, unsigned int virt_addr,
+ unsigned int map_flags);
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+ int buff_id);
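+
+/*
+ * Illustrative MMU mapping flow (a sketch, not the exact driver call
+ * sequence; VIRT_ADDR and MAP_FLAGS stand in for device specific values
+ * and error handling is omitted):
+ *
+ *	struct mmu_ctx *mmu;
+ *
+ *	img_mmu_ctx_create(dev, 40, mem_ctx, heap_id, NULL, NULL, &mmu);
+ *	img_mmu_map(mmu, mem_ctx, buf_id, VIRT_ADDR, MAP_FLAGS);
+ *	...
+ *	img_mmu_unmap(mmu, mem_ctx, buf_id);
+ *	img_mmu_ctx_destroy(mmu);
+ */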
+
+int img_mmu_get_ptd(const struct mmu_ctx *ctx, unsigned int *ptd);
+
+int img_mmu_get_pagetable_entry(const struct mmu_ctx *ctx, unsigned long dev_virt_addr);
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id);
+void img_mem_del_heap(int heap_id);
+
+/* Heap operation related function */
+int img_mem_unified_init(const struct heap_config *config,
+ struct heap *heap);
+
+/* page and sg list related functions */
+void img_mmu_get_pages(void **page_args, void *sgt_args);
+unsigned int img_mmu_get_orig_nents(void *sgt_args);
+void img_mmu_set_sgt_nents(void *sgt_args, int ret);
+void img_mmu_set_sg_table(void **sg_table_args, void *buffer);
+unsigned int img_mmu_get_sgl_length(void *sgl_args);
+void *img_mmu_get_sgl(void *sgt_args);
+
+#endif /* _IMG_DEC_MEM_MGR */
diff --git a/drivers/media/platform/vxe-vxd/common/img_mem_unified.c b/drivers/media/platform/vxe-vxd/common/img_mem_unified.c
new file mode 100644
index 000000000000..30108b25d8b0
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/img_mem_unified.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC Memory Manager for unified memory
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_mem_man.h"
+
+void img_mmu_get_pages(void **page_args, void *sgt_args)
+{
+ struct page **pages = (struct page **)page_args;
+ struct sg_table *sgt = sgt_args;
+ struct scatterlist *sgl = sgt->sgl;
+ int i;
+
+ i = 0;
+ while (sgl) {
+ pages[i++] = sg_page(sgl);
+ sgl = sg_next(sgl);
+ }
+}
+
+unsigned int img_mmu_get_orig_nents(void *sgt_args)
+{
+ struct sg_table *sgt = sgt_args;
+
+ return sgt->orig_nents;
+}
+
+void img_mmu_set_sgt_nents(void *sgt_args, int ret)
+{
+ struct sg_table *sgt = sgt_args;
+
+ sgt->nents = ret;
+}
+
+void img_mmu_set_sg_table(void **sg_table_args, void *buffer)
+{
+ struct sg_table **sg_table = (struct sg_table **)sg_table_args;
+
+ *sg_table = buffer;
+}
+
+unsigned int img_mmu_get_sgl_length(void *sgl_args)
+{
+ struct scatterlist *sgl = (struct scatterlist *)sgl_args;
+
+ return sgl->length;
+}
+
+void *img_mmu_get_sgl(void *sgt_args)
+{
+ struct sg_table *sgt = sgt_args;
+
+ return sgt->sgl;
+}
+
+static int unified_alloc(void *device, struct heap *heap,
+ unsigned long size, enum mem_attr attr,
+ struct buffer *buffer)
+{
+ struct sg_table *sgt;
+ void *sgl;
+ int pages;
+ int ret;
+
+ dev_dbg(device, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+ buffer->id, buffer);
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+
+ pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+ if (ret)
+ goto sg_alloc_table_failed;
+
+ sgl = img_mmu_get_sgl(sgt);
+ while (sgl) {
+ void *page;
+ unsigned long long dma_addr;
+
+ page = alloc_page(heap->options.unified.gfp_type);
+ if (!page) {
+ dev_err(device, "%s alloc_page failed!\n", __func__);
+ ret = -ENOMEM;
+ goto alloc_page_failed;
+ }
+
+  /*
+   * dma_map_page() is likely to fail if the alloc flags include
+   * GFP_HIGHMEM, since such a page is not kernel-mapped for the
+   * CPU. In practice this should never happen, because memory of
+   * that sort cannot be used for DMA anyway; the page is
+   * probe-mapped here (and immediately unmapped below) to catch
+   * the problem at allocation time rather than at first use.
+   */
+ dma_addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, dma_addr)) {
+ __free_page(page);
+ dev_err(device, "%s dma_map_page failed!\n", __func__);
+ ret = -EIO;
+ goto alloc_page_failed;
+ }
+ dma_unmap_page(device, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ sg_set_page(sgl, page, PAGE_SIZE, 0);
+
+ sgl = sg_next(sgl);
+ }
+
+ buffer->priv = sgt;
+ return 0;
+
+alloc_page_failed:
+ sgl = img_mmu_get_sgl(sgt);
+ while (sgl) {
+ void *page = sg_page(sgl);
+
+ if (page)
+ __free_page(page);
+
+ sgl = sg_next(sgl);
+ }
+ sg_free_table(sgt);
+sg_alloc_table_failed:
+ kfree(sgt);
+ return ret;
+}
+
+static void unified_free(struct heap *heap, struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ void *sgt = buffer->priv;
+ void *sgl;
+
+ dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+ buffer->id, buffer);
+
+ if (buffer->kptr) {
+ dev_dbg(dev, "%s vunmap 0x%p\n", __func__, buffer->kptr);
+ dma_unmap_sg(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+ DMA_FROM_DEVICE);
+ vunmap(buffer->kptr);
+ }
+
+ sgl = img_mmu_get_sgl(sgt);
+ while (sgl) {
+ __free_page(sg_page(sgl));
+ sgl = sg_next(sgl);
+ }
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static int unified_map_km(struct heap *heap, struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ void *sgt = buffer->priv;
+ void *sgl = img_mmu_get_sgl(sgt);
+ unsigned int num_pages = sg_nents(sgl);
+ unsigned int orig_nents = img_mmu_get_orig_nents(sgt);
+ void **pages;
+ int ret;
+ pgprot_t prot;
+
+ dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__, buffer->id, buffer);
+
+ if (buffer->kptr) {
+ dev_warn(dev, "%s called for already mapped buffer %d\n", __func__, buffer->id);
+ return 0;
+ }
+
+ pages = kmalloc_array(num_pages, sizeof(void *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ img_mmu_get_pages(pages, sgt);
+
+ prot = PAGE_KERNEL;
+ prot = pgprot_writecombine(prot);
+ buffer->kptr = vmap((struct page **)pages, num_pages, VM_MAP, prot);
+ kfree(pages);
+ if (!buffer->kptr) {
+ dev_err(dev, "%s vmap failed!\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = dma_map_sg(dev, sgl, orig_nents, DMA_FROM_DEVICE);
+
+ if (ret <= 0) {
+  dev_err(dev, "%s dma_map_sg failed!\n", __func__);
+  vunmap(buffer->kptr);
+  buffer->kptr = NULL;
+  return -EFAULT;
+ }
+ dev_dbg(dev, "%s:%d buffer %d orig_nents %d nents %d\n", __func__,
+ __LINE__, buffer->id, orig_nents, ret);
+
+ img_mmu_set_sgt_nents(sgt, ret);
+
+ dev_dbg(dev, "%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
+ buffer->id, buffer->kptr);
+
+ return 0;
+}
+
+static int unified_get_sg_table(struct heap *heap, struct buffer *buffer, void **sg_table)
+{
+ img_mmu_set_sg_table(sg_table, buffer->priv);
+ return 0;
+}
+
+static void unified_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ void *sgt = buffer->priv;
+
+ if (!buffer->kptr)
+ return;
+
+ dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__, buffer->id, buffer);
+
+ dma_sync_sg_for_device(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+ DMA_TO_DEVICE);
+}
+
+static void unified_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+ void *dev = buffer->device;
+ void *sgt = buffer->priv;
+
+ if (!buffer->kptr)
+ return;
+
+ dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+ buffer->id, buffer);
+
+ dma_sync_sg_for_cpu(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+ DMA_FROM_DEVICE);
+}
+
+static void unified_heap_destroy(struct heap *heap)
+{
+}
+
+static struct heap_ops unified_heap_ops = {
+ .alloc = unified_alloc,
+ .free = unified_free,
+ .map_km = unified_map_km,
+ .get_sg_table = unified_get_sg_table,
+ .sync_cpu_to_dev = unified_sync_cpu_to_dev,
+ .sync_dev_to_cpu = unified_sync_dev_to_cpu,
+ .destroy = unified_heap_destroy,
+};
+
+int img_mem_unified_init(const struct heap_config *heap_cfg,
+ struct heap *heap)
+{
+ heap->ops = &unified_heap_ops;
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/imgmmu.c b/drivers/media/platform/vxe-vxd/common/imgmmu.c
new file mode 100644
index 000000000000..ce2f41f72485
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/imgmmu.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC MMU function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include "img_mem_man.h"
+#include "imgmmu.h"
+
+/**
+ * struct mmu_directory - the MMU directory information
+ * @dir_page: pointer to the mmu_page_cfg_table (physical table used) which
+ * this mmu_directory belongs to
+ * @dir_page_table: All the page table structures in a static array of pointers
+ * @mmu_info_cfg: Functions to use to manage pages allocation, liberation and
+ * writing
+ * @num_mapping: number of mapping using this directory
+ */
+struct mmu_directory {
+ struct mmu_page_cfg *dir_page;
+ struct mmu_page_cfg_table **dir_page_table;
+ struct mmu_info mmu_info_cfg;
+ unsigned int num_mapping;
+};
+
+/*
+ * struct mmu_map - the MMU mapping information
+ * @mmu_dir: pointer to the mmu_directory which this mmu_map belongs to
+ * @dev_virt_addr: device virtual address root associated with this mapping
+ * @used_flag: flag used when allocating
+ * @n_entries: number of entries mapped
+ */
+struct mmu_map {
+ struct mmu_directory *mmu_dir;
+ struct mmu_heap_alloc dev_virt_addr;
+ unsigned int used_flag;
+ unsigned int n_entries;
+};
+
+/*
+ * struct mmu_page_cfg_table - the MMU page table information.
+ * One page table of the directory.
+ * @mmu_dir: pointer to the mmu_directory which this mmu_page_cfg_table
+ * belongs to
+ * @page: page used to store this mapping in the MMU
+ * @valid_entries: number of valid entries in this page
+ */
+struct mmu_page_cfg_table {
+ struct mmu_directory *mmu_dir;
+ struct mmu_page_cfg *page;
+ unsigned int valid_entries;
+};
+
+/*
+ * mmu_pgt_destroy() - Destruction of a page table (does not follow the
+ * child pointer)
+ * @pgt: pointer to the MMU page table information
+ *
+ * Warning: Does not verify if pages are still valid or not
+ */
+static void mmu_pgt_destroy(struct mmu_page_cfg_table *pgt)
+{
+ if (!pgt->mmu_dir ||
+ !pgt->mmu_dir->mmu_info_cfg.pfn_page_free ||
+ !pgt->page) {
+ return;
+ }
+
+ pr_debug("%s:%d Destroy page table (phys addr %llu)\n",
+ __func__, __LINE__, pgt->page->phys_addr);
+
+ pgt->mmu_dir->mmu_info_cfg.pfn_page_free(pgt->page);
+ pgt->page = NULL;
+
+ kfree(pgt);
+}
+
+/*
+ * mmu_dir_entry() - Extract the directory index from a virtual address
+ * @vaddr: virtual address
+ */
+static inline unsigned int mmu_dir_entry(unsigned long vaddr)
+{
+ return (unsigned int)((vaddr & VIRT_DIR_IDX_MASK) >> MMU_DIR_SHIFT);
+}
+
+/*
+ * mmu_pg_entry() - Extract the page table index from a virtual address
+ * @vaddr: virtual address
+ */
+static inline unsigned int mmu_pg_entry(unsigned long vaddr)
+{
+ return (unsigned int)((vaddr & VIRT_PAGE_TBL_MASK) >> MMU_PAGE_SHIFT);
+}
+
+/*
+ * mmu_pg_wr() - Default function used when a mmu_info structure has an empty
+ * pfn_page_write pointer
+ * @mmu_page: pointer to the mmu_page to update
+ * @offset: offset into the directory
+ * @pa_to_write: physical address value to add to the entry
+ * @mmu_flag: mmu flag(s) to set
+ */
+static void mmu_pg_wr(struct mmu_page_cfg *mmu_page, unsigned int offset,
+ unsigned long long pa_to_write, unsigned int mmu_flag)
+{
+ unsigned int *dir_mem = NULL;
+ unsigned long long cur_pa = pa_to_write;
+
+ if (!mmu_page)
+ return;
+
+ dir_mem = (unsigned int *)mmu_page->cpu_virt_addr;
+ /*
+ * assumes that the MMU HW has the extra-bits enabled (this default
+ * function has no way of knowing)
+ */
+ if ((MMU_PHYS_SIZE - MMU_VIRT_SIZE) > 0)
+ cur_pa >>= (MMU_PHYS_SIZE - MMU_VIRT_SIZE);
+ /*
+  * The MMU_PAGE_SHIFT bottom bits should already be masked out
+  * because page allocations are page aligned; the bottom
+  * MMU_PAGE_SHIFT-(MMU_PHYS_SIZE-MMU_VIRT_SIZE) bits are reused
+  * for flags, so that is fine.
+  */
+ dir_mem[offset] = (unsigned int)cur_pa | (mmu_flag);
+}
+
+/*
+ * mmu_pgt_create() - Create a page table
+ * @mmu_dir: pointer to the mmu_directory in which to create the new page table
+ * structure
+ *
+ * Return: A pointer to the new page table structure in case of success.
+ * An error code cast to (void *) in case of error.
+ */
+static struct mmu_page_cfg_table *mmu_pgt_create(struct mmu_directory *mmu_dir)
+{
+ struct mmu_page_cfg_table *neo = NULL;
+ unsigned int i;
+
+ if (!mmu_dir || !mmu_dir->mmu_info_cfg.pfn_page_alloc ||
+ !mmu_dir->mmu_info_cfg.pfn_page_write)
+ return (void *)(-EINVAL);
+
+ neo = kmalloc(sizeof(*neo), GFP_KERNEL);
+ if (!neo)
+ return (void *)(-ENOMEM);
+
+ neo->mmu_dir = mmu_dir;
+
+ neo->page =
+ mmu_dir->mmu_info_cfg.pfn_page_alloc(mmu_dir->mmu_info_cfg.alloc_ctx);
+ if (!neo->page) {
+ pr_err("%s:%d failed to allocate Page Table physical page\n",
+ __func__, __LINE__);
+ kfree(neo);
+ return (void *)(-ENOMEM);
+ }
+ pr_debug("%s:%d Create page table (phys addr 0x%llx CPU Virt 0x%lx)\n",
+ __func__, __LINE__, neo->page->phys_addr,
+ neo->page->cpu_virt_addr);
+
+ /* invalidate all pages */
+ for (i = 0; i < MMU_N_PAGE; i++) {
+ mmu_dir->mmu_info_cfg.pfn_page_write(neo->page, i, 0,
+ MMU_FLAG_INVALID);
+ }
+
+ /*
+ * When non-UMA need to update the device memory after setting
+ * it to 0
+ */
+ if (mmu_dir->mmu_info_cfg.pfn_page_update)
+ mmu_dir->mmu_info_cfg.pfn_page_update(neo->page);
+
+ return neo;
+}
+
+/*
+ * mmu_create_directory - Create a directory entry based on a given directory
+ * configuration
+ * @mmu_info_ops: contains the functions to use to manage page table memory.
+ * Is copied and not modified.
+ *
+ * @warning Obviously creation of the directory allocates memory - do not call
+ * while interrupts are disabled
+ *
+ * @return The opaque handle to the mmu_directory object on success
+ * @return An error code cast to (void *) in case of an error:
+ * @li -EINVAL if mmu_info configuration is NULL or does not
+ * contain function pointers
+ * @li -ENOMEM if an internal allocation failed
+ * @li -ENOMEM if the given mmu_pfn_page_alloc returned NULL
+ */
+struct mmu_directory *mmu_create_directory(const struct mmu_info *mmu_info_ops)
+{
+ struct mmu_directory *neo = NULL;
+ unsigned int i;
+
+ /*
+ * invalid information in the directory config:
+ * - invalid page allocator and dealloc (page write can be NULL)
+ * - invalid virtual address representation
+ * - invalid page size
+ * - invalid MMU size
+ */
+ if (!mmu_info_ops || !mmu_info_ops->pfn_page_alloc || !mmu_info_ops->pfn_page_free) {
+ pr_err("%s:%d invalid MMU configuration\n", __func__, __LINE__);
+ return (void *)(-EINVAL);
+ }
+
+ neo = kzalloc(sizeof(*neo), GFP_KERNEL);
+ if (!neo)
+ return (void *)(-ENOMEM);
+
+ neo->dir_page_table = kcalloc(MMU_N_TABLE, sizeof(struct mmu_page_cfg_table *),
+ GFP_KERNEL);
+ if (!neo->dir_page_table) {
+ kfree(neo);
+ return (void *)(-ENOMEM);
+ }
+
+ memcpy(&neo->mmu_info_cfg, mmu_info_ops, sizeof(struct mmu_info));
+ if (!mmu_info_ops->pfn_page_write) {
+ pr_debug("%s:%d using default MMU write\n", __func__, __LINE__);
+ /* use internal function */
+ neo->mmu_info_cfg.pfn_page_write = &mmu_pg_wr;
+ }
+
+ neo->dir_page = mmu_info_ops->pfn_page_alloc(mmu_info_ops->alloc_ctx);
+ if (!neo->dir_page) {
+ kfree(neo->dir_page_table);
+ kfree(neo);
+ return (void *)(-ENOMEM);
+ }
+
+ pr_debug("%s:%d (phys page 0x%llx; CPU virt 0x%lx)\n", __func__,
+ __LINE__, neo->dir_page->phys_addr,
+ neo->dir_page->cpu_virt_addr);
+ /* now we have a valid mmu_directory structure */
+
+ /* invalidate all entries */
+ for (i = 0; i < MMU_N_TABLE; i++) {
+ neo->mmu_info_cfg.pfn_page_write(neo->dir_page, i, 0,
+ MMU_FLAG_INVALID);
+ }
+
+ /* when non-UMA need to update the device memory */
+ if (neo->mmu_info_cfg.pfn_page_update)
+ neo->mmu_info_cfg.pfn_page_update(neo->dir_page);
+
+ return neo;
+}
+
+/*
+ * mmu_destroy_directory - Destroy the mmu_directory - assumes that the HW is
+ * not going to access the memory any-more
+ * @mmu_dir: pointer to the mmu directory to destroy
+ *
+ * Does not invalidate any memory because it assumes that everything is not
+ * used any-more
+ */
+int mmu_destroy_directory(struct mmu_directory *mmu_dir)
+{
+ unsigned int i;
+
+ if (!mmu_dir) {
+ /* could be an assert */
+ pr_err("%s:%d mmu_dir is NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (mmu_dir->num_mapping > 0)
+ /* mappings should have been destroyed! */
+ pr_err("%s:%d directory still has %u mapping attached to it\n",
+ __func__, __LINE__, mmu_dir->num_mapping);
+ /*
+ * not exiting because clearing the page table map is more
+ * important than losing a few structures
+ */
+
+ if (!mmu_dir->mmu_info_cfg.pfn_page_free || !mmu_dir->dir_page_table)
+ return -EINVAL;
+
+ pr_debug("%s:%d destroy MMU dir (phys page 0x%llx)\n",
+ __func__, __LINE__, mmu_dir->dir_page->phys_addr);
+
+ /* first we destroy the directory entry */
+ mmu_dir->mmu_info_cfg.pfn_page_free(mmu_dir->dir_page);
+ mmu_dir->dir_page = NULL;
+
+ /* destroy every mapping that still exists */
+ for (i = 0; i < MMU_N_TABLE; i++) {
+ if (mmu_dir->dir_page_table[i]) {
+ mmu_pgt_destroy(mmu_dir->dir_page_table[i]);
+ mmu_dir->dir_page_table[i] = NULL;
+ }
+ }
+
+ kfree(mmu_dir->dir_page_table);
+ kfree(mmu_dir);
+ return 0;
+}
+
+/*
+ * mmu_directory_get_page - Get access to the page table structure used in the
+ * directory (to be able to write it to registers)
+ * @mmu_dir: pointer to the mmu directory. asserts if mmu_dir is NULL
+ *
+ * @return the page table structure used
+ */
+struct mmu_page_cfg *mmu_directory_get_page(struct mmu_directory *mmu_dir)
+{
+ if (!mmu_dir)
+ return NULL;
+
+ return mmu_dir->dir_page;
+}
+
+static struct mmu_map *mmu_directory_map(struct mmu_directory *mmu_dir,
+ const struct mmu_heap_alloc *dev_va,
+ unsigned int ui_map_flags,
+ int (*phys_iter_next)(void *arg,
+ unsigned long long *next),
+ void *phys_iter_arg)
+{
+ unsigned int first_dir = 0;
+ unsigned int first_pg = 0;
+ unsigned int dir_off = 0;
+ unsigned int pg_off = 0;
+ unsigned int n_entries = 0;
+ unsigned int i;
+ unsigned int d;
+ const unsigned int duplicate = PAGE_SIZE / mmu_get_page_size();
+ int res = 0;
+ struct mmu_map *neo = NULL;
+ struct mmu_page_cfg_table **dir_pgtbl = NULL;
+
+ /*
+ * in non UMA updates on pages needs to be done - store index of
+ * directory entry pages to update
+ */
+ unsigned int *to_update;
+ /*
+ * number of pages in to_update (will be at least 1 for the first_pg to
+ * update)
+ */
+ unsigned int n_pgs_to_update = 0;
+ /*
+ * to know if we also need to update the directory page (creation of new
+ * page)
+ */
+ unsigned char dir_modified = FALSE;
+
+ if (!mmu_dir || !dev_va || duplicate < 1)
+ return (void *)(-EINVAL);
+
+ dir_pgtbl = mmu_dir->dir_page_table;
+
+ n_entries = dev_va->alloc_size / PAGE_SIZE;
+ if (dev_va->alloc_size % MMU_PAGE_SIZE != 0 || n_entries == 0) {
+ pr_err("%s:%d invalid allocation size\n", __func__, __LINE__);
+ return (void *)(-EINVAL);
+ }
+
+ if ((ui_map_flags & MMU_FLAG_VALID) != 0) {
+ pr_err("%s:%d valid flag (0x%x) is set in the flags 0x%x\n",
+ __func__, __LINE__, MMU_FLAG_VALID, ui_map_flags);
+ return (void *)(-EINVAL);
+ }
+
+ /*
+ * has to be dynamically allocated because it is bigger than 1k (max
+ * stack in the kernel)
+ * MMU_N_TABLE is 1024 for 4096B pages, that's a 4k allocation (1 page)
+ * - if it gets bigger, IMG_BIGALLOC may need to be used
+ */
+ to_update = kcalloc(MMU_N_TABLE, sizeof(unsigned int), GFP_KERNEL);
+ if (!to_update)
+ return (void *)(-ENOMEM);
+
+ /* manage multiple page table mapping */
+
+ first_dir = mmu_dir_entry(dev_va->virt_addr);
+ first_pg = mmu_pg_entry(dev_va->virt_addr);
+
+ if (first_dir >= MMU_N_TABLE || first_pg >= MMU_N_PAGE) {
+ kfree(to_update);
+ return (void *)(-EINVAL);
+ }
+
+ /* verify that the pages that should be used are available */
+ dir_off = first_dir;
+ pg_off = first_pg;
+
+ /*
+ * loop over the number of entries given by CPU allocator but CPU page
+ * size can be > than MMU page size therefore it may need to "duplicate"
+ * entries by creating a fake physical address
+ */
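+ /* e.g. 16 KiB CPU pages over 4 KiB MMU pages give duplicate = 4 */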
+ for (i = 0; i < n_entries * duplicate; i++) {
+ if (pg_off >= MMU_N_PAGE) {
+ dir_off++; /* move to next directory */
+ if (dir_off >= MMU_N_TABLE) {
+ res = -EINVAL;
+ break;
+ }
+ pg_off = 0; /* using its first page */
+ }
+
+ /*
+ * if dir_pgtbl[dir_off] == NULL not yet
+ * allocated it means all entries are available
+ */
+ if (dir_pgtbl[dir_off]) {
+ /*
+ * inside a pagetable - verify that the required offset
+ * is invalid
+ */
+ struct mmu_page_cfg_table *tbl = dir_pgtbl[dir_off];
+ unsigned int *page_mem = (unsigned int *)tbl->page->cpu_virt_addr;
+
+ if ((page_mem[pg_off] & MMU_FLAG_VALID) != 0) {
+ pr_err("%s:%d one of the required page is currently in use\n",
+ __func__, __LINE__);
+ res = -EPERM;
+ break;
+ }
+ }
+ /* PageTable struct exists */
+ pg_off++;
+ } /* for all needed entries */
+
+ /* it means one entry was not invalid or not enough page were given */
+ if (res != 0) {
+ /*
+ * message already printed
+ * IMG_ERROR_MEMORY_IN_USE when an entry is not invalid
+ * IMG_ERROR_INVALID_PARAMETERS when not enough pages are given
+ * (or too much)
+ */
+ kfree(to_update);
+ return (void *)(unsigned long)(res);
+ }
+
+ neo = kmalloc(sizeof(*neo), GFP_KERNEL);
+ if (!neo) {
+ kfree(to_update);
+ return (void *)(-ENOMEM);
+ }
+ neo->mmu_dir = mmu_dir;
+ neo->dev_virt_addr = *dev_va;
+ neo->used_flag = ui_map_flags;
+
+ /* we now know that all pages are available */
+ dir_off = first_dir;
+ pg_off = first_pg;
+
+ to_update[n_pgs_to_update] = first_dir;
+ n_pgs_to_update++;
+
+ for (i = 0; i < n_entries; i++) {
+ unsigned long long cur_phys_addr;
+
+ if (phys_iter_next(phys_iter_arg, &cur_phys_addr) != 0) {
+ pr_err("%s:%d not enough entries in physical address array\n",
+ __func__, __LINE__);
+ kfree(neo);
+ kfree(to_update);
+ return (void *)(-EBUSY);
+ }
+ for (d = 0; d < duplicate; d++) {
+ if (pg_off >= MMU_N_PAGE) {
+ /* move to next directory */
+ dir_off++;
+ /* using its first page */
+ pg_off = 0;
+
+ to_update[n_pgs_to_update] = dir_off;
+ n_pgs_to_update++;
+ }
+
+ /* this page table object does not exist, create it */
+ if (!dir_pgtbl[dir_off]) {
+ dir_pgtbl[dir_off] = mmu_pgt_create(mmu_dir);
+ if (IS_ERR_VALUE((unsigned long)dir_pgtbl[dir_off])) {
+ dir_pgtbl[dir_off] = NULL;
+ goto cleanup_fail;
+ }
+ /*
+ * make this page table valid
+ * should be dir_off
+ */
+ mmu_dir->mmu_info_cfg.pfn_page_write(mmu_dir->dir_page,
+ dir_off,
+ dir_pgtbl[dir_off]->page->phys_addr,
+ MMU_FLAG_VALID);
+ dir_modified = TRUE;
+ }
+
+ /*
+ * map this particular page in the page table
+ * use d*(MMU page size) to add additional entries from
+ * the given physical address with the correct offset
+ * for the MMU
+ */
+ mmu_dir->mmu_info_cfg.pfn_page_write(dir_pgtbl[dir_off]->page,
+ pg_off,
+ cur_phys_addr + d *
+ mmu_get_page_size(),
+ neo->used_flag |
+ MMU_FLAG_VALID);
+ dir_pgtbl[dir_off]->valid_entries++;
+
+ pg_off++;
+ } /* for duplicate */
+ } /* for entries */
+
+ neo->n_entries = n_entries * duplicate;
+ /* one more mapping is related to this directory */
+ mmu_dir->num_mapping++;
+
+ /* if non UMA we need to update device memory */
+ if (mmu_dir->mmu_info_cfg.pfn_page_update) {
+ while (n_pgs_to_update > 0) {
+ unsigned int idx = to_update[n_pgs_to_update - 1];
+ struct mmu_page_cfg_table *tbl = dir_pgtbl[idx];
+
+ mmu_dir->mmu_info_cfg.pfn_page_update(tbl->page);
+ n_pgs_to_update--;
+ }
+ if (dir_modified)
+ mmu_dir->mmu_info_cfg.pfn_page_update(mmu_dir->dir_page);
+ }
+
+ kfree(to_update);
+ return neo;
+
+cleanup_fail:
+ pr_err("%s:%d failed to create a non-existing page table\n", __func__, __LINE__);
+
+ /*
+ * invalidate all already mapped pages -
+ * do not destroy the created pages
+ */
+ while (i > 1) {
+ if (d == 0) {
+ i--;
+ d = duplicate;
+ }
+ d--;
+
+ if (pg_off == 0) {
+ pg_off = MMU_N_PAGE;
+ if (!dir_off)
+ continue;
+ dir_off--;
+ }
+
+ pg_off--;
+
+ /* it should have been used before */
+ if (!dir_pgtbl[dir_off])
+ continue;
+
+ mmu_dir->mmu_info_cfg.pfn_page_write(dir_pgtbl[dir_off]->page,
+ pg_off, 0,
+ MMU_FLAG_INVALID);
+ dir_pgtbl[dir_off]->valid_entries--;
+ }
+
+ kfree(neo);
+ kfree(to_update);
+ return (void *)(-ENOMEM);
+}
+
+/*
+ * with sg
+ */
+struct sg_phys_iter {
+ void *sgl;
+ unsigned int offset;
+};
+
+static int sg_phys_iter_next(void *arg, unsigned long long *next)
+{
+ struct sg_phys_iter *iter = arg;
+
+ if (!iter->sgl)
+ return -ENOENT;
+
+ *next = sg_phys(iter->sgl) + iter->offset; /* phys_addr to dma_addr? */
+ iter->offset += PAGE_SIZE;
+
+ if (iter->offset == img_mmu_get_sgl_length(iter->sgl)) {
+ iter->sgl = sg_next(iter->sgl);
+ iter->offset = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * mmu_directory_map_sg - Create a page table mapping for a list of physical
+ * pages and device virtual address
+ *
+ * @mmu_dir: directory to use for the mapping
+ * @phys_page_sg: scatterlist of the physical pages to map, walked in
+ * order; it must cover dev_va->alloc_size/MMU_PAGE_SIZE MMU pages
+ * @note This array can potentially be big, the caller may need to use vmalloc
+ * if running the linux kernel (e.g. mapping a 1080p NV12 is 760 entries, 6080
+ * Bytes - 2 CPU pages needed, fine with kmalloc; 4k NV12 is 3038 entries,
+ * 24304 Bytes - 6 CPU pages needed, kmalloc would try to find 8 contiguous
+ * pages which may be problematic if memory is fragmented)
+ * @dev_va: associated device virtual address. Given structure is copied
+ * @map_flag: flags to apply on the page (typically 0x2 for Write Only,
+ * 0x4 for Read Only) - the flag should not set bit 1 as 0x1 is the
+ * valid flag.
+ *
+ * @warning Mapping can cause memory allocation (missing pages) - do not call
+ * while interrupts are disabled
+ *
+ * @return The opaque handle to the mmu_map object on success
+ * @return An error code cast to (void *) in case of an error, with the
+ * following values:
+ * @li -EINVAL if the allocation size is not a multiple of MMU_PAGE_SIZE,
+ * if the given page list is too long or not long enough for the
+ * mapping, or if the given flags set the valid bit
+ * @li -EPERM if the virtual memory is already mapped
+ * @li -ENOMEM if an internal allocation failed
+ * @li -ENOMEM if a page creation failed
+ */
+struct mmu_map *mmu_directory_map_sg(struct mmu_directory *mmu_dir,
+ void *phys_page_sg,
+ const struct mmu_heap_alloc *dev_va,
+ unsigned int map_flag)
+{
+ struct sg_phys_iter arg = { phys_page_sg };
+
+ return mmu_directory_map(mmu_dir, dev_va, map_flag,
+ sg_phys_iter_next, &arg);
+}
+
+/*
+ * mmu_directory_unmap - Un-map the mapped pages (invalidate their entries) and
+ * destroy the mapping object
+ * @map: pointer to the pages to un-map
+ *
+ * This does not destroy the created Page Table (even if they are becoming
+ * un-used) and does not change the Directory valid bits.
+ *
+ * @return 0
+ */
+int mmu_directory_unmap(struct mmu_map *map)
+{
+ unsigned int first_dir = 0;
+ unsigned int first_pg = 0;
+ unsigned int dir_offset = 0;
+ unsigned int pg_offset = 0;
+ unsigned int i;
+ struct mmu_directory *mmu_dir = NULL;
+
+ /*
+ * in non UMA updates on pages needs to be done - store index of
+ * directory entry pages to update
+ */
+ unsigned int *to_update;
+ unsigned int n_pgs_to_update = 0;
+
+ if (!map || map->n_entries <= 0 || !map->mmu_dir)
+ return -EINVAL;
+
+ mmu_dir = map->mmu_dir;
+
+ /*
+ * has to be dynamically allocated because it is bigger than 1k (max
+ * stack in the kernel)
+ */
+ to_update = kcalloc(MMU_N_TABLE, sizeof(unsigned int), GFP_KERNEL);
+ if (!to_update)
+ return -ENOMEM;
+
+ first_dir = mmu_dir_entry(map->dev_virt_addr.virt_addr);
+ first_pg = mmu_pg_entry(map->dev_virt_addr.virt_addr);
+
+ /* verify that the pages that should be used are available */
+ dir_offset = first_dir;
+ pg_offset = first_pg;
+
+ to_update[n_pgs_to_update] = first_dir;
+ n_pgs_to_update++;
+
+ for (i = 0; i < map->n_entries; i++) {
+ if (pg_offset >= MMU_N_PAGE) {
+ /* move to next directory */
+ dir_offset++;
+ /* using its first page */
+ pg_offset = 0;
+
+ to_update[n_pgs_to_update] = dir_offset;
+ n_pgs_to_update++;
+ }
+
+  /*
+   * if this page table object no longer exists, something
+   * destroyed it while the mapping was still using it; only
+   * invalidate entries in tables that are still present
+   */
+ if (mmu_dir->dir_page_table[dir_offset]) {
+ mmu_dir->mmu_info_cfg.pfn_page_write
+ (mmu_dir->dir_page_table[dir_offset]->page,
+ pg_offset, 0,
+ MMU_FLAG_INVALID);
+ mmu_dir->dir_page_table[dir_offset]->valid_entries--;
+ }
+
+ pg_offset++;
+ }
+
+ mmu_dir->num_mapping--;
+
+ if (mmu_dir->mmu_info_cfg.pfn_page_update)
+ while (n_pgs_to_update > 0) {
+ unsigned int idx = to_update[n_pgs_to_update - 1];
+ struct mmu_page_cfg_table *tbl = mmu_dir->dir_page_table[idx];
+
+ mmu_dir->mmu_info_cfg.pfn_page_update(tbl->page);
+ n_pgs_to_update--;
+ }
+
+ /* mapping does not own the given virtual address */
+ kfree(map);
+ kfree(to_update);
+ return 0;
+}
+
+unsigned int mmu_directory_get_pagetable_entry(struct mmu_directory *mmu_dir,
+ unsigned long dev_virt_addr)
+{
+ unsigned int dir_entry = 0;
+ unsigned int table_entry = 0;
+ struct mmu_page_cfg_table *tbl;
+ struct mmu_page_cfg_table **dir_pgtbl = NULL;
+ unsigned int *page_mem;
+
+ if (!mmu_dir) {
+ pr_err("mmu directory table is NULL\n");
+ return 0xFFFFFF;
+ }
+
+ dir_pgtbl = mmu_dir->dir_page_table;
+
+ dir_entry = mmu_dir_entry(dev_virt_addr);
+ table_entry = mmu_pg_entry(dev_virt_addr);
+
+ tbl = dir_pgtbl[dir_entry];
+ if (!tbl) {
+ pr_err("page table entry is NULL\n");
+ return 0xFFFFFF;
+ }
+
+ page_mem = (unsigned int *)tbl->page->cpu_virt_addr;
+
+#if defined(DEBUG_DECODER_DRIVER) || defined(DEBUG_ENCODER_DRIVER)
+ pr_info("Page table value@dir_entry:table_entry[%d : %d] = %x\n",
+ dir_entry, table_entry, page_mem[table_entry]);
+#endif
+
+ return page_mem[table_entry];
+}
diff --git a/drivers/media/platform/vxe-vxd/common/imgmmu.h b/drivers/media/platform/vxe-vxd/common/imgmmu.h
new file mode 100644
index 000000000000..b35256d09e24
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/imgmmu.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC MMU Library
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef IMG_DEC_MMU_MMU_H
+#define IMG_DEC_MMU_MMU_H
+
+#include <linux/types.h>
+
+#ifndef MMU_PHYS_SIZE
+/* @brief MMU physical address size in bits */
+#define MMU_PHYS_SIZE 40
+#endif
+
+#ifndef MMU_VIRT_SIZE
+/* @brief MMU virtual address size in bits */
+#define MMU_VIRT_SIZE 32
+#endif
+
+#ifndef MMU_PAGE_SIZE
+/* @brief Page size in bytes */
+#define MMU_PAGE_SIZE 4096u
+#define MMU_PAGE_SHIFT 12
+#define MMU_DIR_SHIFT 22
+#endif
+
+#if MMU_VIRT_SIZE == 32
+/* @brief max number of pagetable that can be stored in the directory entry */
+#define MMU_N_TABLE (MMU_PAGE_SIZE / 4u)
+/* @brief max number of page mapping in the pagetable */
+#define MMU_N_PAGE (MMU_PAGE_SIZE / 4u)
+#endif
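+
+/*
+ * Worked out for the default 4 KiB pages: 1024 directory entries x 1024
+ * page table entries x 4096 bytes per page covers the full 4 GiB of
+ * 32 bit virtual address space.
+ */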
+
+/* @brief Memory flags used to mark a page mapping as valid/invalid */
+#define MMU_FLAG_VALID 0x1
+#define MMU_FLAG_INVALID 0x0
+
+/*
+ * This type defines MMU variant.
+ */
+enum mmu_etype {
+ MMU_TYPE_NONE = 0,
+ MMU_TYPE_32BIT,
+ MMU_TYPE_36BIT,
+ MMU_TYPE_40BIT,
+ MMU_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* @brief Page offset mask in virtual address - bottom bits */
+static const unsigned long VIRT_PAGE_OFF_MASK = ((1 << MMU_PAGE_SHIFT) - 1);
+/* @brief Page table index mask in virtual address - middle bits */
+static const unsigned long VIRT_PAGE_TBL_MASK =
+ (((1 << MMU_DIR_SHIFT) - 1) & ~(((1 << MMU_PAGE_SHIFT) - 1)));
+/* @brief Directory index mask in virtual address - high bits */
+static const unsigned long VIRT_DIR_IDX_MASK = (~((1 << MMU_DIR_SHIFT) - 1));
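+
+/*
+ * Illustrative decomposition with the default MMU_PAGE_SHIFT = 12 and
+ * MMU_DIR_SHIFT = 22:
+ *
+ *	vaddr = 0x12345678
+ *	directory index = (vaddr & VIRT_DIR_IDX_MASK)  >> MMU_DIR_SHIFT  = 0x48
+ *	page tbl index  = (vaddr & VIRT_PAGE_TBL_MASK) >> MMU_PAGE_SHIFT = 0x345
+ *	page offset     =  vaddr & VIRT_PAGE_OFF_MASK                    = 0x678
+ */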
+
+/*
+ * struct mmu_heap_alloc - information about a virtual mem heap allocation
+ * @virt_addr: pointer to start of the allocation
+ * @alloc_size: size in bytes
+ */
+struct mmu_heap_alloc {
+ unsigned long virt_addr;
+ unsigned long alloc_size;
+};
+
+/*
+ * struct mmu_page_cfg - mmu_page configuration
+ * @phys_addr: physical address - unsigned long long is used to support extended physical
+ * address on 32bit system
+ * @cpu_virt_addr: CPU virtual address pointer
+ */
+struct mmu_page_cfg {
+ unsigned long long phys_addr;
+ unsigned long cpu_virt_addr;
+};
+
+/*
+ * typedef mmu_pfn_page_alloc - page table allocation function
+ *
+ * Pointer to a function implemented by the used allocator to create 1
+ * page table (used for the MMU mapping - directory page and mapping page)
+ *
+ * Return:
+ * * A populated mmu_page_cfg structure with the result of the page alloc.
+ * * NULL if the allocation failed.
+ */
+typedef struct mmu_page_cfg *(*mmu_pfn_page_alloc) (void *);
+
+/*
+ * typedef mmu_pfn_page_free
+ * @arg1: pointer to the mmu_page_cfg that is allocated using mmu_pfn_page_alloc
+ *
+ * Pointer to a function to free the allocated page table used for MMU mapping.
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_free) (struct mmu_page_cfg *arg1);
+
+/*
+ * typedef mmu_pfn_page_update
+ * @arg1: pointer to the mmu_page_cfg that is allocated using mmu_pfn_page_alloc
+ *
+ * Pointer to a function to update Device memory on non Unified Memory
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_update) (struct mmu_page_cfg *arg1);
+
+/*
+ * typedef mmu_pfn_page_write
+ * @mmu_page: mmu page configuration to be written
+ * @offset: offset in entries (32b word)
+ * @pa_to_write: physical address to write
+ * @flags: bottom part of the entry used as flags for the MMU (including
+ * the valid flag)
+ *
+ * Pointer to a function to write to a device address
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_write) (struct mmu_page_cfg *mmu_page,
+ unsigned int offset,
+ unsigned long long pa_to_write, unsigned int flags);
+
+/*
+ * struct mmu_info
+ * @pfn_page_alloc: function pointer for allocating a physical page used in
+ * MMU mapping
+ * @alloc_ctx: allocation context handler
+ * @pfn_page_free: function pointer for freeing a physical page used in
+ * MMU mapping
+ * @pfn_page_write: function pointer to write a physical address onto a page.
+ * If NULL, then internal function is used. Internal function
+ * assumes that MMU_PHYS_SIZE is the MMU size.
+ * @pfn_page_update: function pointer to update a physical page on device if
+ * non UMA.
+ */
+struct mmu_info {
+ mmu_pfn_page_alloc pfn_page_alloc;
+ void *alloc_ctx;
+ mmu_pfn_page_free pfn_page_free;
+ mmu_pfn_page_write pfn_page_write;
+ mmu_pfn_page_update pfn_page_update;
+};
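+
+/*
+ * Illustrative wiring (a sketch mirroring how img_mem_man.c fills its
+ * static mmu_info; my_page_alloc, my_page_free and my_ctx are
+ * placeholders, not part of this API):
+ *
+ *	struct mmu_info info = {
+ *		.pfn_page_alloc = my_page_alloc,
+ *		.pfn_page_free = my_page_free,
+ *		.alloc_ctx = my_ctx,
+ *	};
+ *	struct mmu_directory *dir = mmu_create_directory(&info);
+ *
+ * pfn_page_write may be left NULL to fall back to the internal default
+ * write function; the returned pointer encodes an error value and is
+ * checked with IS_ERR_VALUE() by the callers in this driver.
+ */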
+
+/*
+ * mmu_get_page_size() - Access the compilation specified page size of the
+ * MMU (in Bytes)
+ */
+static inline unsigned long mmu_get_page_size(void)
+{
+ return MMU_PAGE_SIZE;
+}
+
+struct mmu_directory *mmu_create_directory(const struct mmu_info *mmu_info_ops);
+int mmu_destroy_directory(struct mmu_directory *mmu_dir);
+
+struct mmu_page_cfg *mmu_directory_get_page(struct mmu_directory *mmu_dir);
+
+struct mmu_map *mmu_directory_map_sg(struct mmu_directory *mmu_dir,
+ void *phys_page_sg,
+ const struct mmu_heap_alloc *dev_va,
+ unsigned int map_flag);
+int mmu_directory_unmap(struct mmu_map *map);
+
+unsigned int mmu_directory_get_pagetable_entry(struct mmu_directory *mmu_dir,
+ unsigned long dev_virt_addr);
+
+#endif /* IMG_DEC_MMU_MMU_H */
diff --git a/drivers/media/platform/vxe-vxd/common/lst.c b/drivers/media/platform/vxe-vxd/common/lst.c
new file mode 100644
index 000000000000..bb047ab6d598
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/lst.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * List processing primitives.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+
+#include "lst.h"
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+void lst_add(struct lst_t *list, void *item)
+{
+ if (!list->first) {
+ list->first = item;
+ list->last = item;
+ } else {
+ *list->last = item;
+ list->last = item;
+ }
+ *((void **)item) = NULL;
+}
+
+void lst_addhead(struct lst_t *list, void *item)
+{
+ if (!list->first) {
+ list->first = item;
+ list->last = item;
+ *((void **)item) = NULL;
+ } else {
+ *((void **)item) = list->first;
+ list->first = item;
+ }
+}
+
+int lst_empty(struct lst_t *list)
+{
+ if (!list->first)
+ return 1;
+ else
+ return 0;
+}
+
+void *lst_first(struct lst_t *list)
+{
+ return list->first;
+}
+
+void lst_init(struct lst_t *list)
+{
+ list->first = NULL;
+ list->last = NULL;
+}
+
+void *lst_last(struct lst_t *list)
+{
+ return list->last;
+}
+
+void *lst_next(void *item)
+{
+ return *((void **)item);
+}
+
+void *lst_removehead(struct lst_t *list)
+{
+ void **temp = list->first;
+
+ if (temp) {
+ list->first = *temp;
+ if (!list->first)
+ list->last = NULL;
+ }
+ return temp;
+}
+
+void *lst_remove(struct lst_t *list, void *item)
+{
+ void **p;
+ void **q;
+
+ p = (void **)list;
+ q = *p;
+ while (q) {
+ if (q == item) {
+ *p = *q;
+ if (list->last == q)
+ list->last = p;
+ return item;
+ }
+ p = q;
+ q = *p;
+ }
+
+ return NULL;
+}
+
+int lst_check(struct lst_t *list, void *item)
+{
+ void **p;
+ void **q;
+
+ p = (void **)list;
+ q = *p;
+ while (q) {
+ if (q == item)
+ return 1;
+ p = q;
+ q = *p;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/lst.h b/drivers/media/platform/vxe-vxd/common/lst.h
new file mode 100644
index 000000000000..ccf6eed19019
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/lst.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * List processing primitives.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+#ifndef __LIST_H__
+#define __LIST_H__
+
+#include <linux/types.h>
+
+struct lst_t {
+ void **first;
+ void **last;
+};
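+
+/*
+ * Items linked by these primitives must start with a pointer-sized link
+ * field, which the list code uses as the "next" pointer. Illustrative
+ * sketch (my_item is a placeholder type):
+ *
+ *	struct my_item {
+ *		void *link;	-- must be the first member
+ *		int payload;
+ *	};
+ *
+ *	struct my_item item;
+ *	struct lst_t list;
+ *
+ *	lst_init(&list);
+ *	lst_add(&list, &item);
+ *	... lst_removehead(&list) now returns &item ...
+ */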
+
+void lst_add(struct lst_t *list, void *item);
+void lst_addhead(struct lst_t *list, void *item);
+
+/**
+ * lst_empty - Is list empty?
+ * @list: pointer to list
+ */
+int lst_empty(struct lst_t *list);
+void *lst_first(struct lst_t *list);
+void lst_init(struct lst_t *list);
+void *lst_last(struct lst_t *list);
+void *lst_next(void *item);
+void *lst_remove(struct lst_t *list, void *item);
+void *lst_removehead(struct lst_t *list);
+int lst_check(struct lst_t *list, void *item);
+
+#endif /* __LIST_H__ */
diff --git a/drivers/media/platform/vxe-vxd/common/pool.c b/drivers/media/platform/vxe-vxd/common/pool.c
new file mode 100644
index 000000000000..c0cb1e465c50
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/pool.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Object Pool Memory Allocator
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "pool.h"
+
+#define BUFF_MAX_SIZE 4096
+#define BUFF_MAX_GROW 32
+
+/* Mask for 64-bit (8-byte) alignment */
+#define ALIGN_SIZE (sizeof(long long) - 1)
+
+struct pool {
+ unsigned char *name;
+ unsigned int size;
+ unsigned int grow;
+ struct buffer *buffers;
+ struct object *objects;
+};
+
+struct buffer {
+ struct buffer *next;
+};
+
+struct object {
+ struct object *next;
+};
+
+static inline unsigned char *strdup_cust(const unsigned char *str)
+{
+ unsigned char *r = kmalloc(strlen((const char *)str) + 1, GFP_KERNEL);
+
+ if (r)
+ strcpy((char *)r, (const char *)str);
+ return r;
+}
+
+/**
+ * pool_create - Create an object pool
+ * @name: Name of the object pool, for diagnostic purposes
+ * @obj_size: size of each object in the pool in bytes
+ * @pool_hdnl: Will contain NULL or the object pool handle
+ *
+ * This function creates an object pool.
+ */
+int pool_create(const unsigned char * const name,
+ unsigned int obj_size,
+ struct pool ** const pool_hdnl)
+{
+ struct pool *local_pool = NULL;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!name || !pool_hdnl) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ local_pool = kmalloc((sizeof(*local_pool)), GFP_KERNEL);
+ if (!local_pool) {
+ result = IMG_ERROR_MALLOC_FAILED;
+ return result;
+ }
+
+ local_pool->name = strdup_cust((unsigned char *)name);
+ if (!local_pool->name) {
+ kfree(local_pool);
+ return IMG_ERROR_MALLOC_FAILED;
+ }
+ local_pool->size = obj_size;
+ local_pool->buffers = NULL;
+ local_pool->objects = NULL;
+ local_pool->grow =
+ (BUFF_MAX_SIZE - sizeof(struct buffer)) /
+ (obj_size + ALIGN_SIZE);
+
+ if (local_pool->grow == 0)
+ local_pool->grow = 1;
+ else if (local_pool->grow > BUFF_MAX_GROW)
+ local_pool->grow = BUFF_MAX_GROW;
+
+ *pool_hdnl = local_pool;
+ result = IMG_SUCCESS;
+
+ return result;
+}
+
+/*
+ * @Function pool_delete
+ * @Description
+ * Delete an object pool. All objects allocated from the pool must
+ * be freed with pool_free() before deleting the object pool.
+ * @Input pool_arg : Object Pool pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_delete(struct pool * const pool_arg)
+{
+ struct buffer *local_buf = NULL;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!pool_arg) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ local_buf = pool_arg->buffers;
+ while (local_buf) {
+ local_buf = local_buf->next;
+ kfree(pool_arg->buffers);
+ pool_arg->buffers = local_buf;
+ }
+
+ kfree(pool_arg->name);
+ pool_arg->name = NULL;
+
+ kfree(pool_arg);
+ result = IMG_SUCCESS;
+
+ return result;
+}
+
+/*
+ * @Function pool_alloc
+ * @Description
+ * Allocate an sObject from an sObject pool.
+ * @Input pool_arg : Object Pool
+ * @Output obj_hndl : Pointer containing the handle to the
+ * object created or IMG_NULL
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_alloc(struct pool * const pool_arg,
+ void ** const obj_hndl)
+{
+ struct object *local_obj1 = NULL;
+ struct buffer *local_buf = NULL;
+ unsigned int idx = 0;
+ unsigned int sz = 0;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!pool_arg || !obj_hndl) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ if (!pool_arg->objects) {
+ /* One buffer header plus 'grow' aligned objects */
+ sz = sizeof(struct buffer) +
+ (pool_arg->grow * (pool_arg->size + ALIGN_SIZE));
+ local_buf = kmalloc(sz, GFP_KERNEL);
+ if (!local_buf) {
+ result = IMG_ERROR_MALLOC_FAILED;
+ return result;
+ }
+
+ local_buf->next = pool_arg->buffers;
+ pool_arg->buffers = local_buf;
+
+ for (idx = 0; idx < pool_arg->grow; idx++) {
+ struct object *local_obj2;
+ unsigned char *temp_ptr = NULL;
+
+ local_obj2 = (struct object *)(((unsigned char *)(local_buf + 1))
+ + (idx * (pool_arg->size + ALIGN_SIZE)));
+
+ temp_ptr = (unsigned char *)local_obj2;
+ if ((unsigned long)temp_ptr & ALIGN_SIZE) {
+ temp_ptr += ((ALIGN_SIZE + 1)
+ - ((unsigned long)temp_ptr & ALIGN_SIZE));
+ local_obj2 = (struct object *)temp_ptr;
+ }
+
+ local_obj2->next = pool_arg->objects;
+ pool_arg->objects = local_obj2;
+ }
+ }
+
+ if (!pool_arg->objects) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ return result;
+ }
+
+ local_obj1 = pool_arg->objects;
+ pool_arg->objects = local_obj1->next;
+
+ *obj_hndl = (void *)(local_obj1);
+ result = IMG_SUCCESS;
+
+ return result;
+}
+
+/*
+ * @Function pool_free
+ * @Description
+ * Free an object previously allocated from an object pool.
+ * @Input pool_arg : Object Pool pointer.
+ * @Input obj_hndl : Handle to the object to be freed.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_free(struct pool * const pool_arg,
+ void * const obj_hndl)
+{
+ struct object *object = NULL;
+ unsigned int result = IMG_ERROR_FATAL;
+
+ if (!pool_arg || !obj_hndl) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ return result;
+ }
+
+ object = (struct object *)obj_hndl;
+ object->next = pool_arg->objects;
+ pool_arg->objects = object;
+
+ result = IMG_SUCCESS;
+
+ return result;
+}
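
A minimal sketch of the pool.c lifecycle above — illustrative only, not part
of the patch. The struct item type and pool_example() function are
assumptions; IMG_SUCCESS and friends come from img_errors.h, which is outside
this hunk.

	struct item {
		unsigned int state;	/* arbitrary payload */
	};

	static int pool_example(void)
	{
		struct pool *pool;
		struct item *obj;
		int ret;

		ret = pool_create((const unsigned char *)"example-pool",
				  sizeof(struct item), &pool);
		if (ret != IMG_SUCCESS)
			return ret;

		/* Grows the pool by up to BUFF_MAX_GROW objects on demand */
		ret = pool_alloc(pool, (void **)&obj);
		if (ret == IMG_SUCCESS) {
			obj->state = 1;
			pool_free(pool, obj);	/* back onto the free list */
		}

		/* All objects must be freed before the pool is deleted */
		return pool_delete(pool);
	}
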
diff --git a/drivers/media/platform/vxe-vxd/common/pool.h b/drivers/media/platform/vxe-vxd/common/pool.h
new file mode 100644
index 000000000000..d22d15a2af54
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/pool.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Object Pool Memory Allocator header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _pool_h_
+#define _pool_h_
+
+#include <linux/types.h>
+
+struct pool;
+
+/**
+ * pool_create - Create an object pool
+ * @name: Name of the object pool, for diagnostic purposes
+ * @obj_size: size of each object in the pool in bytes
+ * @pool: Will contain NULL or the object pool handle
+ *
+ * Return IMG_SUCCESS or an error code.
+ */
+int pool_create(const unsigned char * const name,
+ unsigned int obj_size,
+ struct pool ** const pool);
+
+/*
+ * @Function pool_delete
+ * @Description
+ * Delete an object pool. All objects allocated from the pool must
+ * be freed with pool_free() before deleting the object pool.
+ * @Input pool : Object Pool pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_delete(struct pool * const pool);
+
+/*
+ * @Function pool_alloc
+ * @Description
+ * Allocate an Object from an Object pool.
+ * @Input pool : Object Pool
+ * @Output obj_hdnl : Pointer containing the handle to the
+ * object created or IMG_NULL
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_alloc(struct pool * const pool,
+ void ** const obj_hdnl);
+
+/*
+ * @Function pool_free
+ * @Description
+ * Free an object previously allocated from an object pool.
+ * @Input pool : Object Pool pointer.
+ * @Input obj_hdnl : Handle to the object to be freed.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_free(struct pool * const pool,
+ void * const obj_hdnl);
+
+#endif /* _pool_h_ */
diff --git a/drivers/media/platform/vxe-vxd/common/pool_api.c b/drivers/media/platform/vxe-vxd/common/pool_api.c
new file mode 100644
index 000000000000..eecadc665005
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/pool_api.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource pool manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/types.h>
+
+#include "idgen_api.h"
+#include "lst.h"
+#include "pool_api.h"
+#include "img_errors.h"
+
+/*
+ * The pool list can be modified by different instances, so acquire
+ * the mutex lock before touching it.
+ */
+static struct mutex *shared_res_mutex_handle;
+
+/*
+ * Maximum resource ID.
+ */
+#define POOL_IDGEN_MAX_ID (0xFFFFFFFF)
+/*
+ * Size of blocks used for IDs.
+ */
+#define POOL_IDGEN_BLOCK_SIZE (50)
+
+/*
+ * Indicates whether the pool API has been initialized:
+ * 0 if not, 1 if done.
+ */
+static int poolinitdone;
+
+/* list of resource pool */
+static struct lst_t poollist = {0};
+
+/**
+ * struct poollist - Structure containing resource list information.
+ * @link: link pointer, so the structure can be part of a singly linked list
+ * @pool_mutex: lock
+ * @freereslst: list of free resource structures
+ * @actvreslst: list of active resource structures
+ * @pfnfree: pool free callback function
+ * @idgenhandle: ID generator context handle
+ */
+struct poollist {
+ void **link;
+ struct mutex *pool_mutex; /* Mutex lock */
+ struct lst_t freereslst;
+ struct lst_t actvreslst;
+ pfrecalbkpntr pfnfree;
+ void *idgenhandle;
+};
+
+/*
+ * This structure contains pool resource.
+ */
+struct poolres {
+ void **link; /* link pointer, to be part of a singly linked list */
+ /* Resource id */
+ unsigned int resid;
+ /* Pointer to destructor function */
+ pdestcallbkptr desfunc;
+ /* resource param */
+ void *resparam;
+ /* size of resource param in bytes */
+ unsigned int resparmsize;
+ /* pointer to resource pool list */
+ struct poollist *respoollst;
+ /* 1 if this is a clone of the original resource */
+ int isclone;
+ /* pointer to original resource */
+ struct poolres *origres;
+ /* list of cloned resource structures. Only used on the original */
+ struct lst_t clonereslst;
+ /* reference count. Only used on the original resource */
+ unsigned int refcnt;
+ void *cb_handle;
+};
+
+/*
+ * This function initializes the list if not done earlier.
+ */
+int pool_init(void)
+{
+ /* Check if list already initialized */
+ if (!poolinitdone) {
+ /* Allocate and initialize the mutex guarding the pool list */
+
+ shared_res_mutex_handle = kzalloc(sizeof(*shared_res_mutex_handle), GFP_KERNEL);
+ if (!shared_res_mutex_handle)
+ return -ENOMEM;
+
+ mutex_init(shared_res_mutex_handle);
+
+ /* initialize the list of pools */
+ lst_init(&poollist);
+ /* Get initialized flag to true */
+ poolinitdone = 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function de-initializes the list.
+ */
+void pool_deinit(void)
+{
+ struct poollist *respoollist;
+
+ /* Check if list initialized */
+ if (poolinitdone) {
+ /* destroy any active pools */
+ respoollist = (struct poollist *)lst_first(&poollist);
+ while (respoollist) {
+ pool_destroy(respoollist);
+ respoollist = (struct poollist *)lst_first(&poollist);
+ }
+
+ /* Destroy mutex */
+ mutex_destroy(shared_res_mutex_handle);
+ kfree(shared_res_mutex_handle);
+ shared_res_mutex_handle = NULL;
+
+ /* set initialized flag to 0 */
+ poolinitdone = 0;
+ }
+}
+
+/*
+ * This function creates a pool.
+ */
+int pool_api_create(void **poolhndle)
+{
+ struct poollist *respoollist;
+ unsigned int result = 0;
+
+ /* Allocate a pool structure */
+ respoollist = kzalloc(sizeof(*respoollist), GFP_KERNEL);
+ if (!respoollist)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Initialize the pool info */
+ lst_init(&respoollist->freereslst);
+ lst_init(&respoollist->actvreslst);
+
+ /* Create mutex */
+ respoollist->pool_mutex = kzalloc(sizeof(*respoollist->pool_mutex), GFP_KERNEL);
+ if (!respoollist->pool_mutex) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_create_context;
+ }
+ mutex_init(respoollist->pool_mutex);
+
+ /* Create context for the Id generator */
+ result = idgen_createcontext(POOL_IDGEN_MAX_ID,
+ POOL_IDGEN_BLOCK_SIZE, 0,
+ &respoollist->idgenhandle);
+ if (result != IMG_SUCCESS)
+ goto error_create_context;
+
+ /* Lock the global pool list */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_POOL_RES);
+
+ /* Add to list of pools */
+ lst_add(&poollist, respoollist);
+
+ /* Unlock the global pool list */
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* Return handle to pool */
+ *poolhndle = respoollist;
+
+ return IMG_SUCCESS;
+
+ /* Error handling. */
+error_create_context:
+ if (respoollist->pool_mutex) {
+ mutex_destroy(respoollist->pool_mutex);
+ kfree(respoollist->pool_mutex);
+ }
+ kfree(respoollist);
+
+ return result;
+}
+
+/*
+ * This function destroys the pool.
+ */
+int pool_destroy(void *poolhndle)
+{
+ struct poollist *respoollist = poolhndle;
+ struct poolres *respool;
+ struct poolres *clonerespool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respoollist) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /*
+ * Before deleting, make sure the pool list is not in use by
+ * another process: take the global pool-list mutex.
+ */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_POOL_RES);
+
+ /* Remove the pool from the active list */
+ lst_remove(&poollist, respoollist);
+
+ /* Unlock the global pool list */
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* Destroy any resources in the free list */
+ respool = (struct poolres *)lst_removehead(&respoollist->freereslst);
+ while (respool) {
+ respool->desfunc(respool->resparam, respool->cb_handle);
+ kfree(respool);
+ respool = (struct poolres *)
+ lst_removehead(&respoollist->freereslst);
+ }
+
+ /* Destroy any resources in the active list */
+ respool = (struct poolres *)lst_removehead(&respoollist->actvreslst);
+ while (respool) {
+ clonerespool = (struct poolres *)
+ lst_removehead(&respool->clonereslst);
+ while (clonerespool) {
+ /*
+ * If we created a copy of the resources pvParam
+ * then free it.
+ * kfree(NULL) is safe and this check is probably not
+ * required
+ */
+ kfree(clonerespool->resparam);
+
+ kfree(clonerespool);
+ clonerespool = (struct poolres *)
+ lst_removehead(&respool->clonereslst);
+ }
+
+ /* Call the resource destructor */
+ respool->desfunc(respool->resparam, respool->cb_handle);
+ kfree(respool);
+ respool = (struct poolres *)
+ lst_removehead(&respoollist->actvreslst);
+ }
+ /* Destroy the context for the Id generator */
+ if (respoollist->idgenhandle)
+ result = idgen_destroycontext(respoollist->idgenhandle);
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* Destroy mutex */
+ mutex_destroy(respoollist->pool_mutex);
+ kfree(respoollist->pool_mutex);
+ respoollist->pool_mutex = NULL;
+
+ /* Free the pool structure */
+ kfree(respoollist);
+
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_setfreecalbck(void *poolhndle, pfrecalbkpntr pfnfree)
+{
+ struct poollist *respoollist = poolhndle;
+ struct poolres *respool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respoollist) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ respoollist->pfnfree = pfnfree;
+
+ /* If free callback set */
+ if (respoollist->pfnfree) {
+ /* Move resources from free to active list */
+ respool = (struct poolres *)
+ lst_removehead(&respoollist->freereslst);
+ while (respool) {
+ /* Add to active list */
+ lst_add(&respoollist->actvreslst, respool);
+ respool->refcnt++;
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* Call the free callback */
+ respoollist->pfnfree(respool->resid, respool->resparam);
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /* Get next free resource */
+ respool = (struct poolres *)
+ lst_removehead(&respoollist->freereslst);
+ }
+ }
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_resreg(void *poolhndle, pdestcallbkptr fndestructor,
+ void *resparam, unsigned int resparamsize,
+ int balloc, unsigned int *residptr,
+ void **poolreshndle, void *cb_handle)
+{
+ struct poollist *respoollist = poolhndle;
+ struct poolres *respool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respoollist) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ /* Allocate a resource structure */
+ respool = kzalloc(sizeof(*respool), GFP_KERNEL);
+ if (!respool)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Setup the resource */
+ respool->desfunc = fndestructor;
+ respool->cb_handle = cb_handle;
+ respool->resparam = resparam;
+ respool->resparmsize = resparamsize;
+ respool->respoollst = respoollist;
+ lst_init(&respool->clonereslst);
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /* Set resource id */
+ result = idgen_allocid(respoollist->idgenhandle,
+ (void *)respool, &respool->resid);
+ if (result != IMG_SUCCESS) {
+ kfree(respool);
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+ return result;
+ }
+
+ /* If allocated, or a free callback is set */
+ if (balloc || respoollist->pfnfree) {
+ /* Add to active list */
+ lst_add(&respoollist->actvreslst, respool);
+ respool->refcnt++;
+ } else {
+ /* Add to free list */
+ lst_add(&respoollist->freereslst, respool);
+ }
+
+ /* Return the resource id */
+ if (residptr)
+ *residptr = respool->resid;
+
+ /* Return the handle to the resource */
+ if (poolreshndle)
+ *poolreshndle = respool;
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* If free callback set */
+ if (respoollist->pfnfree) {
+ /* Call the free callback */
+ respoollist->pfnfree(respool->resid, respool->resparam);
+ }
+
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_resdestroy(void *poolreshndle, int bforce)
+{
+ struct poolres *respool = poolreshndle;
+ struct poollist *respoollist;
+ struct poolres *origrespool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respool) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ respoollist = respool->respoollst;
+
+ /* If this is a clone */
+ if (respool->isclone) {
+ /* Get access to the original */
+ origrespool = respool->origres;
+ if (!origrespool) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ goto error_nolock;
+ }
+
+ if (origrespool->isclone) {
+ result = IMG_ERROR_UNEXPECTED_STATE;
+ goto error_nolock;
+ }
+
+ /* Remove from the clone list */
+ lst_remove(&origrespool->clonereslst, respool);
+
+ /* Free resource id */
+ result = idgen_freeid(respoollist->idgenhandle,
+ respool->resid);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /*
+ * If we created a copy of the resources pvParam then free it
+ * kfree(NULL) is safe and this check is probably not required.
+ */
+ kfree(respool->resparam);
+
+ /* Free the clone resource structure */
+ kfree(respool);
+
+ /* Set resource to be "freed" to the original */
+ respool = origrespool;
+ }
+
+ /* If there are still outstanding references */
+ if (!bforce && respool->refcnt != 0) {
+ /*
+ * We may need to mark the resource and destroy it when
+ * there are no outstanding references
+ */
+ return IMG_SUCCESS;
+ }
+
+ /* Has the resource outstanding references */
+ if (respool->refcnt != 0) {
+ /* Remove the resource from the active list */
+ lst_remove(&respoollist->actvreslst, respool);
+ } else {
+ /* Remove the resource from the free list */
+ lst_remove(&respoollist->freereslst, respool);
+ }
+
+ /* Free resource id */
+ result = idgen_freeid(respoollist->idgenhandle,
+ respool->resid);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Call the resource destructor */
+ respool->desfunc(respool->resparam, respool->cb_handle);
+ kfree(respool);
+
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_resalloc(void *poolhndle, void *poolreshndle)
+{
+ struct poollist *respoollist = poolhndle;
+ struct poolres *respool = poolreshndle;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respoollist || !poolreshndle) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /* Remove resource from free list */
+ lst_remove(&respoollist->freereslst, respool);
+
+ /* Add to active list */
+ lst_add(&respoollist->actvreslst, respool);
+ respool->refcnt++;
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_resfree(void *poolreshndle)
+{
+ struct poolres *respool = poolreshndle;
+ struct poollist *respoollist;
+ struct poolres *origrespool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respool) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ respoollist = respool->respoollst;
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /* If this is a clone */
+ if (respool->isclone) {
+ /* Get access to the original */
+ origrespool = respool->origres;
+ if (!origrespool) {
+ mutex_unlock(respoollist->pool_mutex);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Remove from the clone list */
+ lst_remove(&origrespool->clonereslst, respool);
+
+ /* Free resource id */
+ result = idgen_freeid(respoollist->idgenhandle,
+ respool->resid);
+ if (result != IMG_SUCCESS) {
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+ return result;
+ }
+
+ /*
+ * If we created a copy of the resources pvParam then free it
+ * kfree(NULL) is safe and this check is probably not required.
+ */
+ kfree(respool->resparam);
+
+ /* Free the clone resource structure */
+ kfree(respool);
+
+ /* Set resource to be "freed" to the original */
+ respool = origrespool;
+ }
+
+ /* Update the reference count */
+ respool->refcnt--;
+
+ /* If there are still outstanding references */
+ if (respool->refcnt != 0) {
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+ }
+
+ /* Remove the resource from the active list */
+ lst_remove(&respoollist->actvreslst, respool);
+
+ /* If free callback set */
+ if (respoollist->pfnfree) {
+ /* Add to active list */
+ lst_add(&respoollist->actvreslst, respool);
+ respool->refcnt++;
+ } else {
+ /* Add to free list */
+ lst_add(&respoollist->freereslst, respool);
+ }
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* If free callback set */
+ if (respoollist->pfnfree) {
+ /* Call the free callback */
+ respoollist->pfnfree(respool->resid, respool->resparam);
+ }
+
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+
+error_nolock:
+ return result;
+}
+
+int pool_resclone(void *poolreshndle, void **clonereshndle, void **resparam)
+{
+ struct poolres *respool = poolreshndle;
+ struct poollist *respoollist;
+ struct poolres *origrespool = respool;
+ struct poolres *clonerespool;
+ unsigned int result = 0;
+
+ if (!poolinitdone || !respool) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error_nolock;
+ }
+
+ /* Allocate a resource structure */
+ clonerespool = kzalloc(sizeof(*clonerespool), GFP_KERNEL);
+ if (!clonerespool)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ respoollist = respool->respoollst;
+ if (!respoollist) {
+ kfree(clonerespool);
+ return IMG_ERROR_FATAL;
+ }
+
+ /* Lock the pool */
+ mutex_lock_nested(respoollist->pool_mutex, SUBCLASS_POOL);
+
+ /* Set resource id */
+ result = idgen_allocid(respoollist->idgenhandle,
+ (void *)clonerespool, &clonerespool->resid);
+ if (result != IMG_SUCCESS)
+ goto error_alloc_id;
+
+ /* If this is a clone, set the original */
+ if (respool->isclone)
+ origrespool = respool->origres;
+
+ /* Setup the cloned resource */
+ clonerespool->isclone = 1;
+ clonerespool->respoollst = respoollist;
+ clonerespool->origres = origrespool;
+
+ /* Add to clone list */
+ lst_add(&origrespool->clonereslst, clonerespool);
+ origrespool->refcnt++;
+
+ /* If the caller requested a copy of the resource params */
+ if (resparam) {
+ /* If the original resource has no params */
+ if (origrespool->resparmsize == 0) {
+ *resparam = NULL;
+ } else {
+ /*
+ * Duplicate the original params: kmemdup allocates
+ * resparmsize bytes and copies resparam into them.
+ */
+ *resparam = kmemdup(origrespool->resparam,
+ origrespool->resparmsize,
+ GFP_KERNEL);
+ if (!(*resparam)) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_copy_param;
+ }
+ }
+ }
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+ /* Return the cloned resource */
+ *clonereshndle = clonerespool;
+
+ /* Return IMG_SUCCESS */
+ return IMG_SUCCESS;
+
+ /* Error handling. */
+error_copy_param:
+ lst_remove(&origrespool->clonereslst, clonerespool);
+ origrespool->refcnt--;
+error_alloc_id:
+ kfree(clonerespool);
+
+ /* Unlock the pool */
+ mutex_unlock(respoollist->pool_mutex);
+
+error_nolock:
+ return result;
+}
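
A sketch of the pool_api.c flow above — illustrative only, not part of the
patch: register a resource with a destructor, clone it, then release both
references. example_destructor() and pool_api_example() are assumptions made
for illustration; the IMG_* constants come from img_errors.h.

	static void example_destructor(void *resparam, void *cb_handle)
	{
		kfree(resparam);	/* release the per-resource data */
	}

	static int pool_api_example(void)
	{
		void *pool, *res, *clone, *cloneparam;
		unsigned int resid;
		int *param;
		int ret;

		ret = pool_init();
		if (ret)
			return ret;

		ret = pool_api_create(&pool);
		if (ret != IMG_SUCCESS)
			return ret;

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			pool_destroy(pool);
			return -ENOMEM;
		}

		/* Register as allocated (balloc = 1); destructor runs on destroy */
		ret = pool_resreg(pool, example_destructor, param, sizeof(*param),
				  1, &resid, &res, NULL);
		if (ret != IMG_SUCCESS) {
			kfree(param);
			pool_destroy(pool);
			return ret;
		}

		/* Clone: bumps the original's refcount and copies resparam */
		ret = pool_resclone(res, &clone, &cloneparam);
		if (ret == IMG_SUCCESS)
			pool_resfree(clone);	/* drops the clone's reference */

		pool_resfree(res);	/* last reference: res moves to the free list */
		return pool_destroy(pool);
	}
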
diff --git a/drivers/media/platform/vxe-vxd/common/pool_api.h b/drivers/media/platform/vxe-vxd/common/pool_api.h
new file mode 100644
index 000000000000..1e7803abb715
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/pool_api.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resource pool manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __POOLAPI_H__
+#define __POOLAPI_H__
+
+#include "img_errors.h"
+#include "lst.h"
+
+/*
+ * This is the prototype for "free" callback functions. This function
+ * is called when resources are returned to the pools list of free resources.
+ * NOTE: The "freed" resource is then allocated and passed to the callback
+ * function.
+ */
+typedef void (*pfrecalbkpntr)(unsigned int ui32resid, void *resparam);
+
+/*
+ * This is the prototype for "destructor" callback functions. This function
+ * is called when a resource registered with the resource pool manager is to
+ * be destroyed.
+ */
+typedef void (*pdestcallbkptr)(void *resparam, void *cb_handle);
+
+/*
+ * pool_init - Initializes the resource pool manager component.
+ * Must be called at start-up.
+ */
+int pool_init(void);
+
+/*
+ * This function deinitializes the resource pool manager component
+ * and would normally be called at shutdown.
+ */
+void pool_deinit(void);
+
+/*
+ * This function is used to create a resource pool into which resources can be
+ * placed.
+ */
+int pool_api_create(void **poolhndle);
+
+/*
+ * This function is used to destroy a resource pool.
+ * NOTE: Destroying a resource pool destroys all of the resources within the
+ * pool by calling the associated destructor function defined when the
+ * resource was registered using pool_resreg().
+ *
+ * NOTE: All of the pool's resources must be in the pool's free list - the
+ * allocated list must be empty.
+ */
+int pool_destroy(void *poolhndle);
+
+/*
+ * This function is used to set or remove a free callback function on a pool.
+ * The free callback function gets called for any resources already in the
+ * pool's free list, and for any resources that subsequently get freed.
+ * NOTE: The resource passed to the callback function has been allocated before
+ * the callback is made.
+ */
+int pool_setfreecalbck(void *poolhndle, pfrecalbkpntr pfnfree);
+
+/*
+ * This function is used to register a resource within a resource pool. The
+ * resource is added to the pool's allocated or free list based on the value
+ * of balloc.
+ */
+int pool_resreg(void *poolhndle, pdestcallbkptr fndestructor,
+ void *resparam, unsigned int resparamsize,
+ int balloc, unsigned int *residptr,
+ void **poolreshndle, void *cb_handle);
+
+/*
+ * This function is used to destroy a resource.
+ */
+int pool_resdestroy(void *poolreshndle, int bforce);
+
+/*
+ * This function is used to get/allocate a resource from a pool. This moves
+ * the resource from the free to allocated list.
+ */
+int pool_resalloc(void *poolhndle, void *poolreshndle);
+
+/*
+ * This function is used to free a resource and return it to the pool's list
+ * of free resources.
+ * NOTE: The resource is only moved to the free list when all references to
+ * the resource have been freed.
+ */
+int pool_resfree(void *poolreshndle);
+
+/*
+ * This function is used to clone a resource - this creates an additional
+ * reference to the resource.
+ * NOTE: The resource is only moved to the free list when all references to
+ * the resource have been freed.
+ * NOTE: If this function is used to clone the resource's param data then
+ * the clone of the data is freed when the clone of the resource is freed.
+ * The resource destructor is NOT used for this - the data is simply kfree'd.
+ */
+int pool_resclone(void *poolreshndle, void **clonereshndle, void **resparam);
+
+#endif /* __POOLAPI_H__ */
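
One note on the free-callback contract declared above — illustrative only, not
part of the patch: per the NOTEs, the resource has already been moved to the
allocated list when the callback runs, and it is the callback's owner that
eventually returns it with pool_resfree(). example_free_cb() is an assumption
made for illustration.

	/* Called with the resource already on the allocated list */
	static void example_free_cb(unsigned int resid, void *resparam)
	{
		/* hand the resource data to its consumer here */
	}

	/* ... later: pool_setfreecalbck(pool, example_free_cb); */
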
diff --git a/drivers/media/platform/vxe-vxd/common/ra.c b/drivers/media/platform/vxe-vxd/common/ra.c
new file mode 100644
index 000000000000..ac07737f351b
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/ra.c
@@ -0,0 +1,972 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements generic resource allocation.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hash.h"
+#include "img_errors.h"
+#include "pool.h"
+#include "ra.h"
+
+static unsigned char global_init;
+
+/* pool of struct arena's */
+static struct pool *global_pool_arena;
+
+/* pool of struct boundary tag */
+static struct pool *global_pool_bt;
+
+/**
+ * ra_request_alloc_fail - Default import allocator that always fails
+ * @import_hdnl : Callback handle.
+ * @requested_size : Requested allocation size.
+ * @actual_size : Pointer to contain the actual allocated size.
+ * @ref : Pointer to user reference data.
+ * @alloc_flags : Allocation flags.
+ * @base_addr : Allocation base (always 0, since the request fails).
+ *
+ * Default callback allocator used if no callback is specified, always fails
+ * to allocate further resources to the arena.
+ */
+static int ra_request_alloc_fail(void *import_hdnl,
+ unsigned long long requested_size,
+ unsigned long long *actual_size,
+ void **ref,
+ unsigned int alloc_flags,
+ unsigned long long *base_addr)
+{
+ if (base_addr)
+ *base_addr = 0;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function ra_log2
+ * @Description
+ * Calculates the Log2(n) with n being a 64-bit value.
+ *
+ * @Input value : Input value.
+ * @Output None
+ * @Return result : Log2(value).
+ */
+
+static unsigned int ra_log2(unsigned long long value)
+{
+ int res = 0;
+
+ value >>= 1;
+ while (value > 0) {
+ value >>= 1;
+ res++;
+ }
+ return res;
+}
+
+/*
+ * @Function ra_segment_list_insert_after
+ * @Description Insert a boundary tag into an arena segment list after a
+ * specified boundary tag.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_here_arg : The boundary tag after which
+ * bt_to_insert_arg will be added.
+ * @Input bt_to_insert_arg : The boundary tag to insert.
+ * @Output None
+ * @Return None
+ */
+static void ra_segment_list_insert_after(struct arena *arena_arg,
+ struct btag *bt_here_arg,
+ struct btag *bt_to_insert_arg)
+{
+ bt_to_insert_arg->nxt_seg = bt_here_arg->nxt_seg;
+ bt_to_insert_arg->prv_seg = bt_here_arg;
+
+ if (!bt_here_arg->nxt_seg)
+ arena_arg->tail_seg = bt_to_insert_arg;
+ else
+ bt_here_arg->nxt_seg->prv_seg = bt_to_insert_arg;
+
+ bt_here_arg->nxt_seg = bt_to_insert_arg;
+}
+
+/*
+ * @Function ra_segment_list_insert
+ * @Description
+ * Insert a boundary tag into an arena segment list at the appropriate point.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_to_insert_arg : The boundary tag to insert.
+ * @Output None
+ * @Return None
+ */
+static void ra_segment_list_insert(struct arena *arena_arg,
+ struct btag *bt_to_insert_arg)
+{
+ /* insert into the segment chain */
+ if (!arena_arg->head_seg) {
+ arena_arg->head_seg = bt_to_insert_arg;
+ arena_arg->tail_seg = bt_to_insert_arg;
+ bt_to_insert_arg->nxt_seg = NULL;
+ bt_to_insert_arg->prv_seg = NULL;
+ } else {
+ struct btag *bt_scan = arena_arg->head_seg;
+
+ while (bt_scan->nxt_seg &&
+ bt_to_insert_arg->base >=
+ bt_scan->nxt_seg->base) {
+ bt_scan = bt_scan->nxt_seg;
+ }
+ ra_segment_list_insert_after(arena_arg,
+ bt_scan,
+ bt_to_insert_arg);
+ }
+}
+
+/*
+ * @Function ra_segment_list_remove
+ * @Description
+ * Remove a boundary tag from an arena segment list.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_to_remove_arg : The boundary tag to remove.
+ * @Output None
+ * @Return None
+ */
+static void ra_segment_list_remove(struct arena *arena_arg,
+ struct btag *bt_to_remove_arg)
+{
+ if (!bt_to_remove_arg->prv_seg)
+ arena_arg->head_seg = bt_to_remove_arg->nxt_seg;
+ else
+ bt_to_remove_arg->prv_seg->nxt_seg = bt_to_remove_arg->nxt_seg;
+
+ if (!bt_to_remove_arg->nxt_seg)
+ arena_arg->tail_seg = bt_to_remove_arg->prv_seg;
+ else
+ bt_to_remove_arg->nxt_seg->prv_seg = bt_to_remove_arg->prv_seg;
+}
+
+/*
+ * @Function ra_segment_split
+ * @Description
+ * Split a segment into two, maintaining the arena segment list.
+ * The boundary tag should not be in the free table. Neither the original
+ * nor the new boundary tag will be in the free table.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_to_split_arg : The boundary tag to split.
+ * @Input size : The required size of bt_to_split_arg after the split.
+ * @Output None
+ * @Return btag *: New boundary tag.
+ */
+static struct btag *ra_segment_split(struct arena *arena_arg,
+ struct btag *bt_to_split_arg,
+ unsigned long long size)
+{
+ struct btag *local_bt_neighbour = NULL;
+ int res = IMG_ERROR_FATAL;
+
+ res = pool_alloc(global_pool_bt, ((void **)&local_bt_neighbour));
+ if (res != IMG_SUCCESS)
+ return NULL;
+
+ local_bt_neighbour->prv_seg = bt_to_split_arg;
+ local_bt_neighbour->nxt_seg = bt_to_split_arg->nxt_seg;
+ local_bt_neighbour->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+ local_bt_neighbour->size = (bt_to_split_arg->size - size);
+ local_bt_neighbour->base = (bt_to_split_arg->base + size);
+ local_bt_neighbour->nxt_free = NULL;
+ local_bt_neighbour->prv_free = NULL;
+ local_bt_neighbour->ref = bt_to_split_arg->ref;
+
+ if (!bt_to_split_arg->nxt_seg)
+ arena_arg->tail_seg = local_bt_neighbour;
+ else
+ bt_to_split_arg->nxt_seg->prv_seg = local_bt_neighbour;
+
+ bt_to_split_arg->nxt_seg = local_bt_neighbour;
+ bt_to_split_arg->size = size;
+
+ return local_bt_neighbour;
+}
+
+/*
+ * @Function ra_free_list_insert
+ * @Description
+ * Insert a boundary tag into an arena free table.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_arg : The boundary tag to insert into an arena
+ * free table.
+ * @Output None
+ * @Return None
+ */
+static void ra_free_list_insert(struct arena *arena_arg,
+ struct btag *bt_arg)
+{
+ unsigned int index = ra_log2(bt_arg->size);
+
+ bt_arg->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+ if (index < FREE_TABLE_LIMIT)
+ bt_arg->nxt_free = arena_arg->head_free[index];
+ else
+ bt_arg->nxt_free = NULL;
+
+ bt_arg->prv_free = NULL;
+
+ if (index < FREE_TABLE_LIMIT) {
+ if (arena_arg->head_free[index])
+ arena_arg->head_free[index]->prv_free = bt_arg;
+ }
+
+ if (index < FREE_TABLE_LIMIT)
+ arena_arg->head_free[index] = bt_arg;
+}
+
+/*
+ * @Function ra_free_list_remove
+ * @Description
+ * Remove a boundary tag from an arena free table.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_arg : The boundary tag to remove from
+ * an arena free table.
+ * @Output None
+ * @Return None
+ */
+static void ra_free_list_remove(struct arena *arena_arg,
+ struct btag *bt_arg)
+{
+ unsigned int index = ra_log2(bt_arg->size);
+
+ if (bt_arg->nxt_free)
+ bt_arg->nxt_free->prv_free = bt_arg->prv_free;
+
+ if (!bt_arg->prv_free && index < FREE_TABLE_LIMIT)
+ arena_arg->head_free[index] = bt_arg->nxt_free;
+ else if (bt_arg->prv_free)
+ bt_arg->prv_free->nxt_free = bt_arg->nxt_free;
+}
+
+/*
+ * @Function ra_build_span_marker
+ * @Description
+ * Construct a span marker boundary tag.
+ * @Input base : The base of the boundary tag.
+ * @Output None
+ * @Return btag * : New span marker boundary tag
+ */
+static struct btag *ra_build_span_marker(unsigned long long base)
+{
+ struct btag *local_bt = NULL;
+ int res = IMG_ERROR_FATAL;
+
+ res = pool_alloc(global_pool_bt, ((void **)&local_bt));
+ if (res != IMG_SUCCESS)
+ return NULL;
+
+ local_bt->bt_type = RA_BOUNDARY_TAG_TYPE_SPAN;
+ local_bt->base = base;
+ local_bt->size = 0;
+ local_bt->nxt_seg = NULL;
+ local_bt->prv_seg = NULL;
+ local_bt->nxt_free = NULL;
+ local_bt->prv_free = NULL;
+ local_bt->ref = NULL;
+
+ return local_bt;
+}
+
+/*
+ * @Function ra_build_bt
+ * @Description
+ * Construct a boundary tag for a free segment.
+ * @Input base : The base of the resource segment.
+ * @Input size : The extent of the resource segment.
+ * @Output None
+ * @Return btag * : New boundary tag
+ */
+static struct btag *ra_build_bt(unsigned long long base, unsigned long long size)
+{
+ struct btag *local_bt = NULL;
+ int res = IMG_ERROR_FATAL;
+
+ res = pool_alloc(global_pool_bt, ((void **)&local_bt));
+
+ if (res != IMG_SUCCESS)
+ return local_bt;
+
+ local_bt->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+ local_bt->base = base;
+ local_bt->size = size;
+ local_bt->nxt_seg = NULL;
+ local_bt->prv_seg = NULL;
+ local_bt->nxt_free = NULL;
+ local_bt->prv_free = NULL;
+ local_bt->ref = NULL;
+
+ return local_bt;
+}
+
+/*
+ * @Function ra_insert_resource
+ * @Description
+ * Add a free resource segment to an arena.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input base : The base of the resource segment.
+ * @Input size : The size of the resource segment.
+ * @Output None
+ * @Return IMG_SUCCESS or an error code.
+ */
+static int ra_insert_resource(struct arena *arena_arg,
+ unsigned long long base,
+ unsigned long long size)
+{
+ struct btag *local_bt = NULL;
+
+ local_bt = ra_build_bt(base, size);
+ if (!local_bt)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ ra_segment_list_insert(arena_arg, local_bt);
+ ra_free_list_insert(arena_arg, local_bt);
+ arena_arg->max_idx = ra_log2(size);
+ if (1ULL << arena_arg->max_idx < size)
+ arena_arg->max_idx++;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function ra_insert_resource_span
+ * @Description
+ * Add a free resource span to an arena, complete with span markers.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input base : The base of the resource segment.
+ * @Input size : The size of the resource segment.
+ * @Output None
+ * @Return btag * : The boundary tag representing
+ * the free resource segment.
+ */
+static struct btag *ra_insert_resource_span(struct arena *arena_arg,
+ unsigned long long base,
+ unsigned long long size)
+{
+ struct btag *local_bt = NULL;
+ struct btag *local_bt_span_start = NULL;
+ struct btag *local_bt_span_end = NULL;
+
+ local_bt_span_start = ra_build_span_marker(base);
+ if (!local_bt_span_start)
+ return NULL;
+
+ local_bt_span_end = ra_build_span_marker(base + size);
+ if (!local_bt_span_end) {
+ pool_free(global_pool_bt, local_bt_span_start);
+ return NULL;
+ }
+
+ local_bt = ra_build_bt(base, size);
+ if (!local_bt) {
+ pool_free(global_pool_bt, local_bt_span_end);
+ pool_free(global_pool_bt, local_bt_span_start);
+ return NULL;
+ }
+
+ ra_segment_list_insert(arena_arg, local_bt_span_start);
+ ra_segment_list_insert_after(arena_arg,
+ local_bt_span_start,
+ local_bt);
+ ra_free_list_insert(arena_arg, local_bt);
+ ra_segment_list_insert_after(arena_arg,
+ local_bt,
+ local_bt_span_end);
+
+ return local_bt;
+}
+
+/*
+ * @Function ra_free_bt
+ * @Description
+ * Free a boundary tag taking care of the segment list and the
+ * boundary tag free table.
+ * @Input arena_arg : Pointer to the input arena.
+ * @Input bt_arg : The boundary tag to free.
+ * @Output None
+ * @Return None
+ */
+static void ra_free_bt(struct arena *arena_arg,
+ struct btag *bt_arg)
+{
+ struct btag *bt_neibr;
+
+ /* try to coalesce with the left neighbour */
+ bt_neibr = bt_arg->prv_seg;
+ if (bt_neibr &&
+ bt_neibr->bt_type == RA_BOUNDARY_TAG_TYPE_FREE &&
+ bt_neibr->base + bt_neibr->size == bt_arg->base) {
+ ra_free_list_remove(arena_arg, bt_neibr);
+ ra_segment_list_remove(arena_arg, bt_neibr);
+ bt_arg->base = bt_neibr->base;
+ bt_arg->size += bt_neibr->size;
+ pool_free(global_pool_bt, bt_neibr);
+ }
+
+ /* try to coalesce with the right neighbour */
+ bt_neibr = bt_arg->nxt_seg;
+ if (bt_neibr &&
+ bt_neibr->bt_type == RA_BOUNDARY_TAG_TYPE_FREE &&
+ bt_arg->base + bt_arg->size == bt_neibr->base) {
+ ra_free_list_remove(arena_arg, bt_neibr);
+ ra_segment_list_remove(arena_arg, bt_neibr);
+ bt_arg->size += bt_neibr->size;
+ pool_free(global_pool_bt, bt_neibr);
+ }
+
+ if (bt_arg->nxt_seg &&
+ bt_arg->nxt_seg->bt_type == RA_BOUNDARY_TAG_TYPE_SPAN &&
+ bt_arg->prv_seg && bt_arg->prv_seg->bt_type ==
+ RA_BOUNDARY_TAG_TYPE_SPAN) {
+ struct btag *ps_bt_nxt = bt_arg->nxt_seg;
+ struct btag *ps_bt_prev = bt_arg->prv_seg;
+
+ ra_segment_list_remove(arena_arg, ps_bt_nxt);
+ ra_segment_list_remove(arena_arg, ps_bt_prev);
+ ra_segment_list_remove(arena_arg, bt_arg);
+ arena_arg->import_free_fxn(arena_arg->import_hdnl,
+ bt_arg->base,
+ bt_arg->ref);
+ pool_free(global_pool_bt, ps_bt_nxt);
+ pool_free(global_pool_bt, ps_bt_prev);
+ pool_free(global_pool_bt, bt_arg);
+ } else {
+ ra_free_list_insert(arena_arg, bt_arg);
+ }
+}
+
+static int ra_check_btag(struct arena *arena_arg,
+ unsigned long long size_arg,
+ void **ref,
+ struct btag *bt_arg,
+ unsigned long long align_arg,
+ unsigned long long *base_arg,
+ unsigned int align_log2)
+{
+ unsigned long long local_align_base;
+ int res = IMG_ERROR_FATAL;
+
+ while (bt_arg) {
+ if (align_arg > 1ULL)
+ local_align_base = ((bt_arg->base + align_arg - 1)
+ >> align_log2) << align_log2;
+ else
+ local_align_base = bt_arg->base;
+
+ if ((bt_arg->base + bt_arg->size) >=
+ (local_align_base + size_arg)) {
+ ra_free_list_remove(arena_arg, bt_arg);
+
+ /*
+ * with align_arg we might need to discard the front of
+ * this segment
+ */
+ if (local_align_base > bt_arg->base) {
+ struct btag *btneighbor;
+
+ btneighbor = ra_segment_split(arena_arg,
+ bt_arg,
+ (local_align_base -
+ bt_arg->base));
+ /*
+ * Partition the buffer, create a new boundary
+ * tag
+ */
+ if (!btneighbor)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ ra_free_list_insert(arena_arg, bt_arg);
+ bt_arg = btneighbor;
+ }
+
+ /*
+ * The segment might be too big, if so, discard the back
+ * of the segment
+ */
+ if (bt_arg->size > size_arg) {
+ struct btag *btneighbor;
+
+ btneighbor = ra_segment_split(arena_arg,
+ bt_arg,
+ size_arg);
+ /*
+ * Partition the buffer, create a new boundary
+ * tag
+ */
+ if (!btneighbor)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ ra_free_list_insert(arena_arg, btneighbor);
+ }
+
+ bt_arg->bt_type = RA_BOUNDARY_TAG_TYPE_LIVE;
+
+ res = vid_hash_insert(arena_arg->hash_tbl,
+ bt_arg->base,
+ (unsigned long)bt_arg);
+ if (res != IMG_SUCCESS) {
+ ra_free_bt(arena_arg, bt_arg);
+ *base_arg = 0;
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ if (ref)
+ *ref = bt_arg->ref;
+
+ *base_arg = bt_arg->base;
+ return IMG_SUCCESS;
+ }
+ bt_arg = bt_arg->nxt_free;
+ }
+
+ return res;
+}
+
+/*
+ * @Function ra_attempt_alloc_aligned
+ * @Description Attempt to allocate from an arena
+ * @Input arena_arg: Pointer to the input arena
+ * @Input size_arg: The requested allocation size
+ * @Input ref: The user references associated with the allocated
+ * segment
+ * @Input align_arg: Required alignment
+ * @Output base_arg: The base of the allocated resource
+ * @Return IMG_SUCCESS or an error code
+ */
+static int ra_attempt_alloc_aligned(struct arena *arena_arg,
+ unsigned long long size_arg,
+ void **ref,
+ unsigned long long align_arg,
+ unsigned long long *base_arg)
+{
+ unsigned int index;
+ unsigned int align_log2;
+ int res = IMG_ERROR_FATAL;
+
+ if (!arena_arg || !base_arg)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /*
+ * Take the log of the alignment to get the number of bits to shift
+ * left/right for multiply/divide. The alignment is assumed to be a
+ * power-of-2 value.
+ */
+ align_log2 = ra_log2(align_arg);
+
+ /*
+ * Search for a near fit free boundary tag, start looking at the
+ * log2 free table for our required size and work on up the table.
+ */
+ index = ra_log2(size_arg);
+
+ /*
+ * If the size required is exactly 2**n then use bucket n, because
+ * every free block in that bucket is at least 2**n bytes; otherwise
+ * start at the next bucket up.
+ */
+ if (size_arg > (1ull << index))
+ index++;
+
+ while ((index < FREE_TABLE_LIMIT) && !arena_arg->head_free[index])
+ index++;
+
+ if (index >= FREE_TABLE_LIMIT) {
+ pr_err("requested allocation size doesn't fit in the arena. Increase MMU HEAP Size\n");
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ while (index < FREE_TABLE_LIMIT) {
+ if (arena_arg->head_free[index]) {
+ /* we have a cached free boundary tag */
+ struct btag *local_bt =
+ arena_arg->head_free[index];
+
+ res = ra_check_btag(arena_arg,
+ size_arg,
+ ref,
+ local_bt,
+ align_arg,
+ base_arg,
+ align_log2);
+ if (res != IMG_SUCCESS)
+ return res;
+ }
+ index++;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function vid_ra_initialise
+ * @Description Initializes the RA module. Must be called before any other
+ * ra API function
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_initialise(void)
+{
+ int res = IMG_ERROR_FATAL;
+
+ if (!global_init) {
+ res = pool_create("img-arena",
+ sizeof(struct arena),
+ &global_pool_arena);
+ if (res != IMG_SUCCESS)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ res = pool_create("img-bt",
+ sizeof(struct btag),
+ &global_pool_bt);
+ if (res != IMG_SUCCESS) {
+ res = pool_delete(global_pool_arena);
+ global_pool_arena = NULL;
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ global_init = 1;
+ res = IMG_SUCCESS;
+ }
+
+ return res;
+}
+
+/*
+ * @Function vid_ra_deinit
+ * @Description Deinitializes the RA module
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_deinit(void)
+{
+ int res = IMG_ERROR_FATAL;
+
+ if (global_init) {
+ if (global_pool_arena) {
+ res = pool_delete(global_pool_arena);
+ global_pool_arena = NULL;
+ }
+ if (global_pool_bt) {
+ res = pool_delete(global_pool_bt);
+ global_pool_bt = NULL;
+ }
+ global_init = 0;
+ res = IMG_SUCCESS;
+ }
+ return res;
+}
+
+/*
+ * @Function vid_ra_create
+ * @Description Used to create a resource arena.
+ * @Input name: The name of the arena for diagnostic purposes
+ * @Input base_arg: The base of an initial resource span or 0
+ * @Input size_arg: The size of an initial resource span or 0
+ * @Input quantum: The arena allocation quantum
+ * @Input (*import_alloc_fxn): A resource allocation callback or NULL
+ * @Input (*import_free_fxn): A resource de-allocation callback or NULL
+ * @Input import_hdnl: Handle passed to alloc and free or NULL
+ * @Output arena_hndl: The handle for the arena being created, or NULL
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_create(const unsigned char * const name,
+ unsigned long long base_arg,
+ unsigned long long size_arg,
+ unsigned long quantum,
+ int (*import_alloc_fxn)(void * const import_hdnl,
+ unsigned long long req_sz,
+ unsigned long long * const actl_sz,
+ void ** const ref,
+ unsigned int alloc_flags,
+ unsigned long long * const base_arg),
+ int (*import_free_fxn)(void * const import_hdnl,
+ unsigned long long import_base,
+ void * const import_ref),
+ void *import_hdnl,
+ void **arena_hndl)
+{
+ struct arena *local_arena = NULL;
+ unsigned int idx = 0;
+ int res = IMG_ERROR_FATAL;
+
+ if (!arena_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ *(arena_hndl) = NULL;
+
+ if (global_init) {
+ res = pool_alloc(global_pool_arena, ((void **)&local_arena));
+ if (!local_arena || res != IMG_SUCCESS)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ local_arena->name = NULL;
+ if (name)
+ local_arena->name = kstrdup((const char *)name,
+ GFP_KERNEL);
+ if (import_alloc_fxn)
+ local_arena->import_alloc_fxn = import_alloc_fxn;
+ else
+ local_arena->import_alloc_fxn = ra_request_alloc_fail;
+
+ local_arena->import_free_fxn = import_free_fxn;
+ local_arena->import_hdnl = import_hdnl;
+
+ for (idx = 0; idx < FREE_TABLE_LIMIT; idx++)
+ local_arena->head_free[idx] = NULL;
+
+ local_arena->head_seg = NULL;
+ local_arena->tail_seg = NULL;
+ local_arena->quantum = quantum;
+
+ res = vid_hash_create(MINIMUM_HASH_SIZE,
+ &local_arena->hash_tbl);
+
+ if (res != IMG_SUCCESS || !local_arena->hash_tbl) {
+ kfree(local_arena->name);
+ local_arena->name = NULL;
+ pool_free(global_pool_arena, local_arena);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ if (size_arg > 0ULL) {
+ /* Round the initial span up to a whole quantum */
+ size_arg = (size_arg + quantum - 1) / quantum * quantum;
+
+ res = ra_insert_resource(local_arena,
+ base_arg,
+ size_arg);
+ if (res != IMG_SUCCESS) {
+ vid_hash_delete(local_arena->hash_tbl);
+ kfree(local_arena->name);
+ local_arena->name = NULL;
+ pool_free(global_pool_arena, local_arena);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ }
+ *(arena_hndl) = local_arena;
+ res = IMG_SUCCESS;
+ }
+
+ return res;
+}
+
+/*
+ * @Function vid_ra_delete
+ * @Description Used to delete a resource arena. All resources allocated from
+ * the arena must be freed before deleting the arena
+ * @Input arena_hndl: The handle to the arena to delete
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_delete(void * const arena_hndl)
+{
+ int res = IMG_ERROR_FATAL;
+ struct arena *local_arena = NULL;
+ unsigned int idx;
+
+ if (!arena_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (global_init) {
+ local_arena = (struct arena *)arena_hndl;
+ kfree(local_arena->name);
+ local_arena->name = NULL;
+ for (idx = 0; idx < FREE_TABLE_LIMIT; idx++)
+ local_arena->head_free[idx] = NULL;
+
+ while (local_arena->head_seg) {
+ struct btag *local_bt = local_arena->head_seg;
+
+ ra_segment_list_remove(local_arena, local_bt);
+ pool_free(global_pool_bt, local_bt);
+ }
+ res = vid_hash_delete(local_arena->hash_tbl);
+ if (res != IMG_SUCCESS)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ res = pool_free(global_pool_arena, local_arena);
+ if (res != IMG_SUCCESS)
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+
+ return res;
+}
+
+/*
+ * @Function vid_ra_add
+ * @Description Used to add a resource span to an arena. The span must not
+ * overlap with any span previously added to the arena
+ * @Input arena_hndl: The handle to the arena to add the span to
+ * @Input base_arg: The base of the span
+ * @Input size_arg: The size of the span
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_add(void * const arena_hndl, unsigned long long base_arg, unsigned long long size_arg)
+{
+ int res = IMG_ERROR_FATAL;
+ struct arena *local_arena = NULL;
+
+ if (!arena_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (global_init) {
+ local_arena = (struct arena *)arena_hndl;
+ size_arg = (size_arg + local_arena->quantum - 1) /
+ local_arena->quantum * local_arena->quantum;
+
+ res = ra_insert_resource(local_arena, base_arg, size_arg);
+ if (res != IMG_SUCCESS)
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ return res;
+}
+
+/*
+ * @Function vid_ra_alloc
+ * @Description Used to allocate resource from an arena
+ * @Input arena_hndl: The handle to the arena to create the resource
+ * @Input request_size: The requested size of resource segment
+ * @Output actl_sz: The actual (quantum-rounded) size of the resource segment
+ * @Input ref: The user reference associated with the allocated resource
+ * span
+ * @Input alloc_flags: Allocation flags influencing the allocation policy
+ * @Input alignarg: The alignment constraint required for the allocated
+ * segment
+ * @Output basearg: The base of the allocated resource
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_alloc(void * const arena_hndl,
+ unsigned long long request_size,
+ unsigned long long * const actl_sz,
+ void ** const ref,
+ unsigned int alloc_flags,
+ unsigned long long alignarg,
+ unsigned long long * const basearg)
+{
+ int res = IMG_ERROR_FATAL;
+ struct arena *arn_ctx = NULL;
+ unsigned long long loc_size = request_size;
+
+ if (!arena_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (global_init) {
+ arn_ctx = (struct arena *)arena_hndl;
+ loc_size = ((loc_size + arn_ctx->quantum - 1) /
+ arn_ctx->quantum) * arn_ctx->quantum;
+
+ if (actl_sz)
+ *actl_sz = loc_size;
+
+ /*
+ * If the allocation failed then we might have an import source
+ * which can provide more resources, else we have to fail
+ * the allocation to the caller.
+ */
+ if (alloc_flags == RA_SEQUENTIAL_ALLOCATION)
+ res = ra_attempt_alloc_aligned(arn_ctx,
+ loc_size,
+ ref,
+ alignarg,
+ basearg);
+
+ if (res != IMG_SUCCESS) {
+ void *import_ref = NULL;
+ unsigned long long import_base = 0ULL;
+ unsigned long long locimprt_reqsz = loc_size;
+ unsigned long long locimprt_actsz = 0ULL;
+
+ res = arn_ctx->import_alloc_fxn(arn_ctx->import_hdnl,
+ locimprt_reqsz,
+ &locimprt_actsz,
+ &import_ref,
+ alloc_flags,
+ &import_base);
+
+ if (res == IMG_SUCCESS) {
+ struct btag *local_bt =
+ ra_insert_resource_span(arn_ctx,
+ import_base,
+ locimprt_actsz);
+
+ /*
+ * Successfully import more resource, create a
+ * span to represent it and retry the allocation
+ * attempt
+ */
+ if (!local_bt) {
+ /*
+ * Insufficient resources to insert the
+ * newly acquired span, so free it back
+ */
+ arn_ctx->import_free_fxn(arn_ctx->import_hdnl,
+ import_base,
+ import_ref);
+ return IMG_ERROR_UNEXPECTED_STATE;
+ }
+ local_bt->ref = import_ref;
+ if (alloc_flags == RA_SEQUENTIAL_ALLOCATION) {
+ res = ra_attempt_alloc_aligned(arn_ctx,
+ loc_size,
+ ref,
+ alignarg,
+ basearg);
+ }
+ }
+ }
+ }
+
+ return res;
+}
+
+/*
+ * @Function vid_ra_free
+ * @Description Used to free a resource segment
+ * @Input arena_hndl: The arena the segment was originally allocated from
+ * @Input base_arg: The base of the span
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_free(void * const arena_hndl, unsigned long long base_arg)
+{
+ int res = IMG_ERROR_FATAL;
+ struct arena *local_arena = NULL;
+ struct btag *local_bt = NULL;
+ unsigned long uip_res;
+
+ if (!arena_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (global_init) {
+ local_arena = (struct arena *)arena_hndl;
+
+ res = vid_hash_remove(local_arena->hash_tbl,
+ base_arg,
+ &uip_res);
+ if (res != IMG_SUCCESS)
+ return res;
+ local_bt = (struct btag *)uip_res;
+
+ ra_free_bt(local_arena, local_bt);
+ }
+
+ return res;
+}
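
A minimal sketch of the arena API above — illustrative only, not part of the
patch. ra_example() and the base/size/quantum values are assumptions made for
illustration; with no import callbacks the initial span is the only resource
the arena can hand out, and the IMG_* constants come from img_errors.h.

	static int ra_example(void)
	{
		void *arena = NULL;
		unsigned long long base = 0, actual = 0;
		int ret;

		ret = vid_ra_initialise();
		if (ret != IMG_SUCCESS)
			return ret;

		/* 1 MiB span at 0x80000000, 4 KiB quantum, no import callbacks */
		ret = vid_ra_create((const unsigned char *)"example-arena",
				    0x80000000ULL, 0x100000ULL, 4096,
				    NULL, NULL, NULL, &arena);
		if (ret != IMG_SUCCESS)
			goto deinit;

		/* 8 KiB allocation, 4 KiB aligned, carved from the span */
		ret = vid_ra_alloc(arena, 8192, &actual, NULL,
				   RA_SEQUENTIAL_ALLOCATION, 4096, &base);
		if (ret == IMG_SUCCESS)
			vid_ra_free(arena, base);

		vid_ra_delete(arena);
	deinit:
		vid_ra_deinit();
		return ret;
	}
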
diff --git a/drivers/media/platform/vxe-vxd/common/ra.h b/drivers/media/platform/vxe-vxd/common/ra.h
new file mode 100644
index 000000000000..a4d529d635d7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/ra.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Implements generic resource allocation.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _RA_H_
+#define _RA_H_
+
+#define MINIMUM_HASH_SIZE (64)
+#define FREE_TABLE_LIMIT (64)
+
+/* Defines whether sequential or random allocation is used */
+enum {
+ RA_SEQUENTIAL_ALLOCATION = 0,
+ RA_RANDOM_ALLOCATION,
+ RA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Defines boundary tag type */
+enum eboundary_tag_type {
+ RA_BOUNDARY_TAG_TYPE_SPAN = 0,
+ RA_BOUNDARY_TAG_TYPE_FREE,
+ RA_BOUNDARY_TAG_TYPE_LIVE,
+ RA_BOUNDARY_TAG_TYPE_MAX,
+ RA_BOUNDARY_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * @Description
+ * Boundary tags, used to describe a resource segment
+ *
+ * @bt_type: segment type, one of the RA_BOUNDARY_TAG_TYPE_* values
+ * (span marker, free segment or allocated segment)
+ * @base, @size: the base resource of this segment and its extent
+ * @nxt_seg, @prv_seg: doubly linked ordered list of all segments
+ * within the arena
+ * @nxt_free, @prv_free: doubly linked un-ordered list of free segments
+ * @ref: a user reference associated with this span; user
+ * references are currently only provided through
+ * the import callback mechanism
+ */
+struct btag {
+ unsigned int bt_type;
+ unsigned long long base;
+ unsigned long long size;
+ struct btag *nxt_seg;
+ struct btag *prv_seg;
+ struct btag *nxt_free;
+ struct btag *prv_free;
+ void *ref;
+};
+
+/*
+ * @Description
+ * resource allocation arena
+ *
+ * @name: arena name for diagnostics output
+ * @quantum: allocations within this arena are quantum sized
+ * @max_idx: index of the last position in the head_free table
+ * with available free space
+ * @import_alloc_fxn: import interface, if provided
+ * @import_free_fxn: import interface, if provided
+ * @import_hdnl: import interface, if provided
+ * @head_free: heads of the free boundary tag lists, indexed by Log2
+ * of the boundary tag size: a power-of-two table of free lists
+ * @head_seg, @tail_seg: resource-ordered segment list
+ * @hash_tbl: segment address to boundary tag hash table
+ */
+struct arena {
+ unsigned char *name;
+ unsigned long quantum;
+ unsigned int max_idx;
+ int (*import_alloc_fxn)(void *import_hdnl,
+ unsigned long long requested_size,
+ unsigned long long *actual_size,
+ void **ref,
+ unsigned int alloc_flags,
+ unsigned long long *base_addr);
+ int (*import_free_fxn)(void *import_hdnl,
+ unsigned long long base,
+ void *ref);
+ void *import_hdnl;
+ struct btag *head_free[FREE_TABLE_LIMIT];
+ struct btag *head_seg;
+ struct btag *tail_seg;
+ struct hash *hash_tbl;
+};
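+
+/*
+ * Editor's sketch (not part of the original patch): a minimal example of
+ * how a free segment of a given size could map onto the power-of-two
+ * head_free[] table documented above, assuming the index is
+ * floor(log2(size)) clamped to FREE_TABLE_LIMIT - 1. Illustrative only.
+ */
+#if 0 /* illustrative only */
+static unsigned int ra_example_freelist_index(unsigned long long size)
+{
+ unsigned int idx = 0;
+
+ /* idx becomes floor(log2(size)) for size >= 1 */
+ while (size >>= 1)
+ idx++;
+
+ return (idx < FREE_TABLE_LIMIT) ? idx : (FREE_TABLE_LIMIT - 1);
+}
+#endif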
+
+/*
+ * @Function vid_ra_init
+ * @Description Initializes the RA module. Must be called before any other
+ * ra API function
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_initialise(void);
+
+/*
+ * @Function vid_ra_deinit
+ * @Description Deinitializes the RA module
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_deinit(void);
+
+/*
+ * @Function vid_ra_create
+ * @Description Used to create a resource arena.
+ * @Input name: The name of the arena for diagnostic purposes
+ * @Input base_arg: The base of an initial resource span or 0
+ * @Input size_arg: The size of an initial resource span or 0
+ * @Input quantum: The arena allocation quantum
+ * @Input (*import_alloc_fxn): A resource allocation callback or NULL
+ * @Input (*import_free_fxn): A resource de-allocation callback or NULL
+ * @Input import_hdnl: Handle passed to alloc and free or NULL
+ * @Output arena_hndl: The handle for the arena being created, or NULL
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_create(const unsigned char * const name,
+ unsigned long long base_arg,
+ unsigned long long size_arg,
+ unsigned long quantum,
+ int (*import_alloc_fxn)(void * const import_hdnl,
+ unsigned long long req_sz,
+ unsigned long long * const actl_sz,
+ void ** const ref,
+ unsigned int alloc_flags,
+ unsigned long long * const base_arg),
+ int (*import_free_fxn)(void * const import_hdnl,
+ unsigned long long import_base,
+ void * const import_ref),
+ void *import_hdnl,
+ void **arena_hndl);
+
+/*
+ * @Function vid_ra_delete
+ * @Description Used to delete a resource arena. All resources allocated from
+ * the arena must be freed before deleting the arena
+ * @Input arena_hndl: The handle to the arena to delete
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_delete(void * const arena_hndl);
+
+/*
+ * @Function vid_ra_add
+ * @Description Used to add a resource span to an arena. The span must not
+ *              overlap with any span previously added to the arena
+ * @Input arena_hndl: The handle to the arena to add the span to
+ * @Input base_arg: The base of the span
+ * @Input size_arg: The size of the span
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_add(void * const arena_hndl, unsigned long long base_arg, unsigned long long size_arg);
+
+/*
+ * @Function vid_ra_alloc
+ * @Description Used to allocate a resource from an arena
+ * @Input arena_hndl: The handle to the arena to allocate the resource from
+ * @Input request_size: The requested size of the resource segment
+ * @Output actl_sz: The actual size of the resource segment
+ * @Input ref: The user reference associated with the allocated resource
+ *             span
+ * @Input alloc_flags: Allocation flags influencing the allocation policy
+ * @Input align_arg: The alignment constraint required for the allocated
+ *                   segment
+ * @Output base_arg: The base of the allocated resource
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_alloc(void * const arena_hndl,
+ unsigned long long request_size,
+ unsigned long long * const actl_sz,
+ void ** const ref,
+ unsigned int alloc_flags,
+ unsigned long long align_arg,
+ unsigned long long * const base_arg);
+
+/*
+ * @Function vid_ra_free
+ * @Description Used to free a resource segment
+ * @Input arena_hndl: The arena the segment was originally allocated from
+ * @Input base_arg: The base of the span
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_free(void * const arena_hndl, unsigned long long base_arg);
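+
+/*
+ * Editor's sketch (not part of the original patch): a minimal usage
+ * example of the vid_ra API declared above, assuming a caller-managed
+ * span of 1 MiB at base 0 with a 4 KiB quantum. IMG_SUCCESS comes from
+ * img_errors.h; error handling is abbreviated. Illustrative only.
+ */
+#if 0 /* illustrative only */
+static int vid_ra_example(void)
+{
+ void *arena = NULL;
+ unsigned long long actual = 0, base = 0;
+ void *ref = NULL;
+ int ret;
+
+ ret = vid_ra_initialise();
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ ret = vid_ra_create((const unsigned char *)"example", 0, 0x100000,
+ 0x1000, NULL, NULL, NULL, &arena);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* allocate a 4 KiB-aligned, 16 KiB segment sequentially */
+ ret = vid_ra_alloc(arena, 0x4000, &actual, &ref,
+ RA_SEQUENTIAL_ALLOCATION, 0x1000, &base);
+ if (ret == IMG_SUCCESS)
+ vid_ra_free(arena, base);
+
+ vid_ra_delete(arena);
+ return vid_ra_deinit();
+}
+#endif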
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/common/resource.c b/drivers/media/platform/vxe-vxd/common/resource.c
new file mode 100644
index 000000000000..c3dd6d010d73
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/resource.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD DEC Resource manager implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dq.h"
+#include "img_errors.h"
+#include "lst.h"
+#include "resource.h"
+
+struct resource_list_elem {
+ struct dq_linkage_t link;
+ void *item;
+ unsigned int id;
+ unsigned int *refcnt;
+};
+
+/*
+ * marks an item as used by incrementing the reference count
+ */
+int resource_item_use(unsigned int *refcnt)
+{
+ if (refcnt)
+ (*refcnt)++;
+
+ return 0;
+}
+
+/*
+ * returns an item by decrementing the reference count
+ */
+void resource_item_return(unsigned int *refcnt)
+{
+ if (refcnt && *refcnt > 0)
+ (*refcnt)--;
+}
+
+/*
+ * releases an item by setting reference count to 1 (original owner)
+ */
+int resource_item_release(unsigned int *refcnt)
+{
+ if (refcnt)
+ *refcnt = 1;
+
+ return 0;
+}
+
+/*
+ * indicates whether an item is free to be used (no owners)
+ */
+int resource_item_isavailable(unsigned int *refcnt)
+{
+ if (refcnt)
+ return (*refcnt == 0) ? 1 : 0;
+ else
+ return 0;
+}
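+
+/*
+ * Editor's sketch (not part of the original patch): the reference-count
+ * contract implemented by the four helpers above. Illustrative only.
+ */
+#if 0 /* illustrative only */
+static void refcnt_example(void)
+{
+ unsigned int refcnt = 0; /* item starts unowned */
+
+ resource_item_use(&refcnt); /* refcnt == 1: item in use */
+ resource_item_isavailable(&refcnt); /* returns 0: not available */
+ resource_item_return(&refcnt); /* refcnt == 0 again */
+ resource_item_isavailable(&refcnt); /* returns 1: available */
+}
+#endif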
+
+/*
+ * adds an item (and associated id) to a resource list
+ */
+int resource_list_add_img(struct lst_t *list, void *item, unsigned int id, unsigned int *refcnt)
+{
+ struct resource_list_elem *listelem = NULL;
+ int bfound = 0;
+ unsigned int result = 0;
+
+ if (!list || !item) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * Decrement the reference count on the item
+ * to signal that the owner has relinquished it.
+ */
+ resource_item_return(refcnt);
+
+ /*
+ * Determine whether this buffer is already in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (listelem->item == item) {
+ bfound = 1;
+ break;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+ if (!bfound) {
+ /*
+ * allocate the image buffer list element structure.
+ */
+ listelem = kzalloc(sizeof(*listelem), GFP_KERNEL);
+ if (!listelem) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ /*
+ * setup the list element.
+ */
+ listelem->item = item;
+ listelem->id = id;
+ listelem->refcnt = refcnt;
+
+ /*
+ * add the element to the list.
+ */
+ lst_add(list, (void *)listelem);
+ }
+
+ return 0;
+
+error:
+ return result;
+}
+
+/*
+ * obtains pointer to item at head of resource list
+ */
+void *resource_list_pickhead(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ void *item = NULL;
+
+ if (!list)
+ goto error;
+ /*
+ * peek the head item of the list.
+ */
+ listelem = lst_first(list);
+ if (listelem)
+ item = listelem->item;
+
+error:
+ return item;
+}
+
+/*
+ * removes item from resource list
+ */
+int resource_list_remove(struct lst_t *list, void *item)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int result = 0;
+
+ if (!list || !item) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * find the specified item in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (listelem->item == item) {
+ if (*listelem->refcnt != 0)
+ pr_warn("item removed from list while still in use\n");
+
+ /*
+ * Remove the item from the list.
+ */
+ lst_remove(list, listelem);
+ /*
+ * Free the stream unit queue element.
+ */
+ kfree(listelem);
+ listelem = NULL;
+ return 0;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+#if defined(DEBUG_DECODER_DRIVER) || defined(DEBUG_ENCODER_DRIVER)
+ pr_info("item could not be located to remove from RESOURCE list\n");
+#endif
+
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+error:
+ return result;
+}
+
+/*
+ * resource_list_removehead - removes item at head of resource list
+ * @list: head of resource list
+ */
+void *resource_list_removehead(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ void *item = NULL;
+
+ if (!list)
+ goto error;
+
+ /*
+ * peek the head item of the list.
+ */
+ listelem = lst_removehead(list);
+ if (listelem) {
+ item = listelem->item;
+ kfree(listelem);
+ listelem = NULL;
+ }
+
+error:
+ return item;
+}
+
+/*
+ * removes next available item from resource list.
+ * item is freed if no longer used
+ */
+int resource_list_remove_nextavail(struct lst_t *list,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+ if (!list) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * find the next unused item in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (resource_item_isavailable(listelem->refcnt)) {
+ resource_item_return(listelem->refcnt);
+
+ if (*listelem->refcnt == 0) {
+ if (fn_freeitem)
+ fn_freeitem(listelem->item, free_cb_param);
+ else
+ kfree(listelem->item);
+
+ listelem->item = NULL;
+ }
+
+ /*
+ * get the next element from the list.
+ */
+ lst_remove(list, listelem);
+
+ /*
+ * free the buffer list element.
+ */
+ kfree(listelem);
+ listelem = NULL;
+
+ result = 0;
+ break;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+ if (result == IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE)
+ pr_debug("failed to locate an available resource element to remove\n");
+
+error:
+ return result;
+}
+
+/*
+ * obtains pointer to an available item from the resource list
+ */
+void *resource_list_get_avail(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ void *item = NULL;
+
+ if (!list)
+ goto error;
+
+ /*
+ * find the next unused item in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (resource_item_isavailable(listelem->refcnt)) {
+ resource_item_use(listelem->refcnt);
+ item = listelem->item;
+ break;
+ }
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return item;
+}
+
+/*
+ * signals duplicate use of the specified item within a resource list
+ */
+void *resource_list_reuseitem(struct lst_t *list, void *item)
+{
+ struct resource_list_elem *listelem = NULL;
+ void *ret_item = NULL;
+
+ if (!list || !item)
+ goto error;
+
+ /*
+ * find the specified item in the list.
+ */
+ listelem = lst_first(list);
+
+ while (listelem) {
+ if (listelem->item == item) {
+ resource_item_use(listelem->refcnt);
+ ret_item = item;
+ break;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return ret_item;
+}
+
+/*
+ * obtain pointer to item from resource list with id
+ */
+void *resource_list_getbyid(struct lst_t *list, unsigned int id)
+{
+ struct resource_list_elem *listelem = NULL;
+ void *item = NULL;
+
+ if (!list)
+ goto error;
+
+ /*
+ * find the item with the requested id in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (listelem->id == id) {
+ resource_item_use(listelem->refcnt);
+ item = listelem->item;
+ break;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return item;
+}
+
+/*
+ * obtain the number of available (unused) items within list.
+ */
+int resource_list_getnumavail(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int num_items = 0;
+
+ if (!list)
+ goto error;
+
+ /*
+ * count the available (unused) items in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (resource_item_isavailable(listelem->refcnt))
+ num_items++;
+
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return num_items;
+}
+
+/*
+ * Obtain the number of items within list
+ */
+int resource_list_getnum(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int num_items = 0;
+
+ if (!list)
+ goto error;
+
+ /*
+ * count all the items in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ num_items++;
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return num_items;
+}
+
+/*
+ * replaces an item (of specified id) within a resource list
+ */
+int resource_list_replace(struct lst_t *list, void *item, unsigned int id, unsigned int *refcnt,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int result = 0;
+
+ if (!list || !item) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * determine whether this sequence header is already in the list
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ if (listelem->id == id) {
+ resource_item_return(listelem->refcnt);
+ if (*listelem->refcnt == 0) {
+ if (fn_freeitem)
+ fn_freeitem(listelem->item,
+ free_cb_param);
+ else
+ kfree(listelem->item);
+ listelem->item = NULL;
+ }
+
+ lst_remove(list, listelem);
+ break;
+ }
+
+ listelem = lst_next(listelem);
+ }
+
+ if (!listelem) {
+ /*
+ * Allocate the sequence header list element structure.
+ */
+ listelem = kzalloc(sizeof(*listelem), GFP_KERNEL);
+ if (!listelem) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+ }
+
+ /*
+ * setup the sequence header list element.
+ */
+ resource_item_use(refcnt);
+
+ listelem->item = item;
+ listelem->id = id;
+ listelem->refcnt = refcnt;
+
+ /*
+ * Add the sequence header list element to the sequence header list.
+ */
+ lst_add(list, (void *)listelem);
+
+ return 0;
+
+error:
+ return result;
+}
+
+/*
+ * removes all items from a resource list.
+ */
+int resource_list_empty(struct lst_t *list, unsigned int release_item,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int result = 0;
+
+ if (!list) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * remove all the buffer list elements from the image buffer list
+ */
+ listelem = lst_removehead(list);
+ while (listelem) {
+ if (release_item) {
+ resource_item_release(listelem->refcnt);
+ } else {
+ /*
+ * Return and free.
+ */
+ resource_item_return(listelem->refcnt);
+
+ if (!listelem->refcnt || *listelem->refcnt == 0) {
+ if (fn_freeitem)
+ fn_freeitem(listelem->item,
+ free_cb_param);
+ else
+ kfree(listelem->item);
+ listelem->item = NULL;
+ }
+ }
+
+ /*
+ * free the buffer list element.
+ */
+ kfree(listelem);
+ listelem = NULL;
+
+ /*
+ * Get the next element from the list.
+ */
+ listelem = lst_removehead(list);
+ }
+
+ return 0;
+
+error:
+ return result;
+}
+
+/*
+ * obtain the number of pictures within list
+ */
+int resource_getnumpict(struct lst_t *list)
+{
+ struct resource_list_elem *listelem = NULL;
+ unsigned int num_pict = 0;
+
+ if (!list)
+ goto error;
+
+ /*
+ * count all the items (pictures) in the list.
+ */
+ listelem = lst_first(list);
+ while (listelem) {
+ num_pict++;
+ listelem = lst_next(listelem);
+ }
+
+error:
+ return num_pict;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/resource.h b/drivers/media/platform/vxe-vxd/common/resource.h
new file mode 100644
index 000000000000..b041ff918e23
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/resource.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC SYSDEV and UI Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _VXD_RESOURCE_H
+#define _VXD_RESOURCE_H
+
+typedef int (*resource_pfn_freeitem)(void *item, void *free_cb_param);
+
+int resource_item_use(unsigned int *refcnt);
+
+void resource_item_return(unsigned int *refcnt);
+
+int resource_item_release(unsigned int *refcnt);
+
+int resource_item_isavailable(unsigned int *refcnt);
+
+int resource_list_add_img(struct lst_t *list, void *item, unsigned int id, unsigned int *refcnt);
+
+void *resource_list_pickhead(struct lst_t *list);
+
+int resource_list_remove(struct lst_t *list, void *item);
+
+/**
+ * resource_list_removehead - removes item at head of resource list
+ * @list: head of resource list
+ */
+void *resource_list_removehead(struct lst_t *list);
+
+int resource_list_remove_nextavail(struct lst_t *list,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param);
+
+void *resource_list_get_avail(struct lst_t *list);
+
+void *resource_list_reuseitem(struct lst_t *list, void *item);
+
+void *resource_list_getbyid(struct lst_t *list, unsigned int id);
+
+int resource_list_getnumavail(struct lst_t *list);
+
+int resource_list_getnum(struct lst_t *list);
+
+int resource_list_replace(struct lst_t *list, void *item, unsigned int id, unsigned int *refcnt,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param);
+
+int resource_list_empty(struct lst_t *list, unsigned int release_item,
+ resource_pfn_freeitem fn_freeitem,
+ void *free_cb_param);
+
+int resource_getnumpict(struct lst_t *list);
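+
+/*
+ * Editor's sketch (not part of the original patch): a minimal usage
+ * example of the list helpers declared above, assuming a caller-
+ * initialised struct lst_t (lst.h) and linux/slab.h for kmalloc().
+ * Illustrative only; error handling is abbreviated.
+ */
+#if 0 /* illustrative only */
+static void resource_list_example(struct lst_t *list)
+{
+ unsigned int refcnt = 1; /* the owner holds one reference */
+ void *item, *buf = kmalloc(64, GFP_KERNEL);
+
+ if (!buf)
+ return;
+
+ /* hand the buffer to the list; this drops the owner's reference */
+ resource_list_add_img(list, buf, 42, &refcnt);
+
+ /* claim the first available item, then give it back */
+ item = resource_list_get_avail(list);
+ if (item)
+ resource_item_return(&refcnt);
+
+ /* tear down: each item is freed with kfree() (no callback given) */
+ resource_list_empty(list, 0, NULL, NULL);
+}
+#endif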
+
+#endif /* _VXD_RESOURCE_H */
diff --git a/drivers/media/platform/vxe-vxd/common/rman_api.c b/drivers/media/platform/vxe-vxd/common/rman_api.c
new file mode 100644
index 000000000000..c6c3630e47b5
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/rman_api.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This component is used to track decoder resources,
+ * and share them across other components.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dq.h"
+#include "idgen_api.h"
+#include "rman_api.h"
+#include "img_errors.h"
+
+/*
+ * The following macros are used to build/decompose the composite resource Id
+ * made up from the bucket index + 1 and the allocated resource Id.
+ */
+#define RMAN_CRESID_BUCKET_INDEX_BITS (8)
+#define RMAN_CRESID_RES_ID_BITS (32 - RMAN_CRESID_BUCKET_INDEX_BITS)
+#define RMAN_CRESID_MAX_RES_ID ((1 << RMAN_CRESID_RES_ID_BITS) - 1)
+#define RMAN_CRESID_RES_ID_MASK (RMAN_CRESID_MAX_RES_ID)
+#define RMAN_CRESID_BUCKET_SHIFT (RMAN_CRESID_RES_ID_BITS)
+#define RMAN_CRESID_MAX_BUCKET_INDEX \
+ ((1 << RMAN_CRESID_BUCKET_INDEX_BITS) - 1)
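+
+/*
+ * Editor's note (worked example): with 8 bucket-index bits the top 8 bits
+ * of a composite Id carry (bucket index + 1) and the low 24 bits carry
+ * the IDGEN id, e.g. bucket index 2 with resource id 5 composes as
+ * ((2 + 1) << RMAN_CRESID_BUCKET_SHIFT) | 5 == 0x03000005, and
+ * decomposes as bucket = (id >> 24) - 1, res = id & 0x00FFFFFF.
+ */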
+
+#define RMAN_MAX_ID 4096
+#define RMAN_ID_BLOCKSIZE 256
+
+/* global state variables */
+static unsigned char inited;
+static struct rman_bucket *bucket_array[RMAN_CRESID_MAX_BUCKET_INDEX] = {0};
+static struct rman_bucket *global_res_bucket;
+static struct rman_bucket *shared_res_bucket;
+static struct mutex *shared_res_mutex_handle;
+static struct mutex *global_mutex;
+
+/*
+ * This structure contains the bucket information.
+ */
+struct rman_bucket {
+ void **link; /* to be part of single linked list */
+ struct dq_linkage_t res_list;
+ unsigned int bucket_idx;
+ void *id_gen;
+ unsigned int res_cnt;
+};
+
+/*
+ * This structure contains the resource details for a resource registered with
+ * the resource manager.
+ */
+struct rman_res {
+ struct dq_linkage_t link; /* to be part of double linked list */
+ struct rman_bucket *bucket;
+ unsigned int type_id;
+ rman_fn_free fn_free;
+ void *param;
+ unsigned int res_id;
+ struct mutex *mutex_handle; /*resource mutex */
+ unsigned char *res_name;
+ struct rman_res *shared_res;
+ unsigned int ref_cnt;
+};
+
+/*
+ * initialization
+ */
+int rman_initialise(void)
+{
+ unsigned int ret;
+
+ if (!inited) {
+ shared_res_mutex_handle = kzalloc(sizeof(*shared_res_mutex_handle), GFP_KERNEL);
+ if (!shared_res_mutex_handle)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ mutex_init(shared_res_mutex_handle);
+
+ /* Set initialised flag */
+ inited = TRUE;
+
+ /* Create the global resource bucket */
+ ret = rman_create_bucket((void **)&global_res_bucket);
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Create the shared resource bucket */
+ ret = rman_create_bucket((void **)&shared_res_bucket);
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ global_mutex = kzalloc(sizeof(*global_mutex), GFP_KERNEL);
+ if (!global_mutex)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ mutex_init(global_mutex);
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * deinitialization
+ */
+void rman_deinitialise(void)
+{
+ unsigned int i;
+
+ if (inited) {
+ /* Destroy the global resource bucket */
+ rman_destroy_bucket(global_res_bucket);
+
+ /* Destroy the shared resource bucket */
+ rman_destroy_bucket(shared_res_bucket);
+
+ /* Make sure we destroy the mutex after destroying the bucket */
+ mutex_destroy(global_mutex);
+ kfree(global_mutex);
+ global_mutex = NULL;
+
+ /* Destroy mutex */
+ mutex_destroy(shared_res_mutex_handle);
+ kfree(shared_res_mutex_handle);
+ shared_res_mutex_handle = NULL;
+
+ /* Check all buckets destroyed */
+ for (i = 0; i < RMAN_CRESID_MAX_BUCKET_INDEX; i++)
+ IMG_DBG_ASSERT(!bucket_array[i]);
+
+ /* Reset initialised flag */
+ inited = FALSE;
+ }
+}
+
+int rman_create_bucket(void **res_bucket_handle)
+{
+ struct rman_bucket *bucket;
+ unsigned int i;
+ int ret;
+
+ IMG_DBG_ASSERT(inited);
+
+ /* Allocate a bucket structure */
+ bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
+ IMG_DBG_ASSERT(bucket);
+ if (!bucket)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Initialise the resource list */
+ dq_init(&bucket->res_list);
+
+ /* Start allocating resource ids from the first one */
+ ret = idgen_createcontext(RMAN_MAX_ID, RMAN_ID_BLOCKSIZE, FALSE,
+ &bucket->id_gen);
+ if (ret != IMG_SUCCESS) {
+ kfree(bucket);
+ IMG_DBG_ASSERT("failed to create IDGEN context" == NULL);
+ return ret;
+ }
+
+ /* Locate free bucket index within the table */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+ for (i = 0; i < RMAN_CRESID_MAX_BUCKET_INDEX; i++) {
+ if (!bucket_array[i])
+ break;
+ }
+ if (i >= RMAN_CRESID_MAX_BUCKET_INDEX) {
+ mutex_unlock(shared_res_mutex_handle);
+ idgen_destroycontext(bucket->id_gen);
+ kfree(bucket);
+ IMG_DBG_ASSERT("No free buckets left" == NULL);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ /* Allocate bucket index */
+ bucket->bucket_idx = i;
+ bucket_array[i] = bucket;
+
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* Return the bucket handle */
+ *res_bucket_handle = bucket;
+
+ return IMG_SUCCESS;
+}
+
+void rman_destroy_bucket(void *res_bucket_handle)
+{
+ struct rman_bucket *bucket = (struct rman_bucket *)res_bucket_handle;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(bucket);
+ if (!bucket)
+ return;
+
+ IMG_DBG_ASSERT(bucket->bucket_idx < RMAN_CRESID_MAX_BUCKET_INDEX);
+ IMG_DBG_ASSERT(bucket_array[bucket->bucket_idx]);
+
+ /* Free all resources from the bucket */
+ rman_free_resources(res_bucket_handle, RMAN_TYPE_P1);
+ rman_free_resources(res_bucket_handle, RMAN_TYPE_P2);
+ rman_free_resources(res_bucket_handle, RMAN_TYPE_P3);
+ rman_free_resources(res_bucket_handle, RMAN_ALL_TYPES);
+
+ /* free sticky resources last: other resources are dependent on them */
+ rman_free_resources(res_bucket_handle, RMAN_STICKY);
+ /* Use proper locking around global buckets. */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+
+ /* Free from array of bucket pointers */
+ bucket_array[bucket->bucket_idx] = NULL;
+
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* Free the bucket itself */
+ idgen_destroycontext(bucket->id_gen);
+ kfree(bucket);
+}
+
+void *rman_get_global_bucket(void)
+{
+ IMG_DBG_ASSERT(inited);
+ IMG_DBG_ASSERT(global_res_bucket);
+
+ /* Return the handle of the global resource bucket */
+ return global_res_bucket;
+}
+
+int rman_register_resource(void *res_bucket_handle, unsigned int type_id,
+ rman_fn_free fnfree, void *param,
+ void **res_handle, unsigned int *res_id)
+{
+ struct rman_bucket *bucket = (struct rman_bucket *)res_bucket_handle;
+ struct rman_res *res;
+ int ret;
+
+ IMG_DBG_ASSERT(inited);
+ IMG_DBG_ASSERT(type_id != RMAN_ALL_TYPES);
+
+ IMG_DBG_ASSERT(res_bucket_handle);
+ if (!res_bucket_handle)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ /* Allocate a resource structure */
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ IMG_DBG_ASSERT(res);
+ if (!res)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Fill in the resource structure */
+ res->bucket = bucket;
+ res->type_id = type_id;
+ res->fn_free = fnfree;
+ res->param = param;
+
+ /* Allocate resource Id */
+ mutex_lock_nested(global_mutex, SUBCLASS_RMAN);
+ ret = idgen_allocid(bucket->id_gen, res, &res->res_id);
+ mutex_unlock(global_mutex);
+ if (ret != IMG_SUCCESS) {
+ IMG_DBG_ASSERT("failed to allocate RMAN id" == NULL);
+ return ret;
+ }
+ IMG_DBG_ASSERT(res->res_id <= RMAN_CRESID_MAX_RES_ID);
+
+ /* add this resource to the bucket */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+ dq_addtail(&bucket->res_list, res);
+
+ /* Update count of resources */
+ bucket->res_cnt++;
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* If resource handle required */
+ if (res_handle)
+ *res_handle = res;
+
+ /* If resource id required */
+ if (res_id)
+ *res_id = rman_get_resource_id(res);
+
+ return IMG_SUCCESS;
+}
+
+unsigned int rman_get_resource_id(void *res_handle)
+{
+ struct rman_res *res = res_handle;
+ unsigned int ext_res_id;
+
+ IMG_DBG_ASSERT(res_handle);
+ if (!res_handle)
+ return 0;
+
+ IMG_DBG_ASSERT(res->res_id <= RMAN_CRESID_MAX_RES_ID);
+ IMG_DBG_ASSERT(res->bucket->bucket_idx < RMAN_CRESID_MAX_BUCKET_INDEX);
+ if (res->bucket->bucket_idx >= RMAN_CRESID_MAX_BUCKET_INDEX)
+ return 0;
+
+ ext_res_id = (((res->bucket->bucket_idx + 1) <<
+ RMAN_CRESID_BUCKET_SHIFT) | res->res_id);
+
+ return ext_res_id;
+}
+
+static void *rman_getresource_int(void *res_bucket_handle, unsigned int res_id,
+ unsigned int type_id, void **res_handle)
+{
+ struct rman_bucket *bucket = (struct rman_bucket *)res_bucket_handle;
+ struct rman_res *res;
+ int ret;
+
+ IMG_DBG_ASSERT(res_id <= RMAN_CRESID_MAX_RES_ID);
+
+ /* Look up the resource for this id within the bucket */
+ mutex_lock_nested(global_mutex, SUBCLASS_RMAN);
+ ret = idgen_gethandle(bucket->id_gen, res_id, (void **)&res);
+ mutex_unlock(global_mutex);
+ if (ret != IMG_SUCCESS) {
+ IMG_DBG_ASSERT("failed to get RMAN resource" == NULL);
+ return NULL;
+ }
+
+ /* If the resource handle is required */
+ if (res_handle)
+ *res_handle = res; /* Return it */
+
+ /* If the resource was not found */
+ IMG_DBG_ASSERT(res);
+ IMG_DBG_ASSERT((void *)res != &bucket->res_list);
+ if (!res || ((void *)res == &bucket->res_list))
+ return NULL;
+
+ /* Cross check the type */
+ IMG_DBG_ASSERT(type_id == res->type_id);
+
+ /* Return the resource. */
+ return res->param;
+}
+
+int rman_get_resource(unsigned int res_id, unsigned int type_id, void **param,
+ void **res_handle)
+{
+ unsigned int bucket_idx = (res_id >> RMAN_CRESID_BUCKET_SHIFT) - 1;
+ unsigned int int_res_id = (res_id & RMAN_CRESID_RES_ID_MASK);
+ void *local_param;
+
+ IMG_DBG_ASSERT(bucket_idx < RMAN_CRESID_MAX_BUCKET_INDEX);
+ if (bucket_idx >= RMAN_CRESID_MAX_BUCKET_INDEX)
+ return IMG_ERROR_INVALID_ID; /* Happens when the bucket field of res_id is 0 */
+
+ IMG_DBG_ASSERT(bucket_array[bucket_idx]);
+ if (!bucket_array[bucket_idx])
+ return IMG_ERROR_INVALID_ID;
+
+ local_param = rman_getresource_int(bucket_array[bucket_idx],
+ int_res_id, type_id,
+ res_handle);
+
+ /* If we didn't find the resource */
+ if (!local_param)
+ return IMG_ERROR_INVALID_ID;
+
+ /* Return the resource */
+ if (param)
+ *param = local_param;
+
+ return IMG_SUCCESS;
+}
+
+int rman_get_named_resource(unsigned char *res_name, rman_fn_alloc fn_alloc,
+ void *alloc_info, void *res_bucket_handle,
+ unsigned int type_id, rman_fn_free fn_free,
+ void **param, void **res_handle, unsigned int *res_id)
+{
+ struct rman_bucket *bucket = res_bucket_handle;
+ struct rman_res *res;
+ unsigned int ret;
+ void *local_param;
+ unsigned char found = FALSE;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(res_bucket_handle);
+ if (!res_bucket_handle)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ /* Lock the shared resources */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+ res = (struct rman_res *)dq_first(&bucket->res_list);
+ while (res && ((void *)res != &bucket->res_list)) {
+ /* If resource already in the shared list */
+ if (res->res_name && (strcmp(res_name,
+ res->res_name) == 0)) {
+ IMG_DBG_ASSERT(res->fn_free == fn_free);
+ found = TRUE;
+ break;
+ }
+
+ /* Move to next resource */
+ res = (struct rman_res *)dq_next(res);
+ }
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* If the named resource was not found */
+ if (!found) {
+ /* Allocate the resource */
+ ret = fn_alloc(alloc_info, &local_param);
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Register the named resource */
+ ret = rman_register_resource(res_bucket_handle, type_id,
+ fn_free, local_param,
+ (void **)&res, NULL);
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+ res->res_name = res_name;
+ mutex_unlock(shared_res_mutex_handle);
+ }
+
+ /* Return the pvParam value */
+ *param = res->param;
+
+ /* If resource handle required */
+ if (res_handle)
+ *res_handle = res;
+
+ /* If resource id required */
+ if (res_id)
+ *res_id = rman_get_resource_id(res);
+
+ /* Exit */
+ return IMG_SUCCESS;
+}
+
+static void rman_free_resource_int(struct rman_res *res)
+{
+ struct rman_bucket *bucket = res->bucket;
+
+ /* Remove the resource from the active list */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+
+ /* Remove from list */
+ dq_remove(res);
+
+ /* Update count of resources */
+ bucket->res_cnt--;
+
+ mutex_unlock(shared_res_mutex_handle);
+
+ /* If mutex associated with the resource */
+ if (res->mutex_handle) {
+ /* Destroy mutex */
+ mutex_destroy(res->mutex_handle);
+ kfree(res->mutex_handle);
+ res->mutex_handle = NULL;
+ }
+
+ /* If this resource is not already shared */
+ if (res->shared_res) {
+ /* Lock the shared resources */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+
+ /* Update the reference count */
+ IMG_DBG_ASSERT(res->shared_res->ref_cnt != 0);
+ res->shared_res->ref_cnt--;
+
+ /* If this is the last free for the shared resource */
+ if (res->shared_res->ref_cnt == 0)
+ /* Free the shared resource */
+ rman_free_resource_int(res->shared_res);
+
+ /* UnLock the shared resources */
+ mutex_unlock(shared_res_mutex_handle);
+ } else {
+ /* If there is a free callback function. */
+ if (res->fn_free)
+ /* Call resource free callback */
+ res->fn_free(res->param);
+ }
+
+ /* If the resource has a name then free it */
+ kfree(res->res_name);
+
+ /* Free the resource ID. */
+ mutex_lock_nested(global_mutex, SUBCLASS_RMAN);
+ idgen_freeid(bucket->id_gen, res->res_id);
+ mutex_unlock(global_mutex);
+
+ /* Free a resource structure */
+ kfree(res);
+}
+
+void rman_free_resource(void *res_handle)
+{
+ struct rman_res *res;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(res_handle);
+ if (!res_handle)
+ return;
+
+ /* Get access to the resource structure */
+ res = (struct rman_res *)res_handle;
+
+ /* Free resource */
+ rman_free_resource_int(res);
+}
+
+void rman_lock_resource(void *res_handle)
+{
+ struct rman_res *res;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(res_handle);
+ if (!res_handle)
+ return;
+
+ /* Get access to the resource structure */
+ res = (struct rman_res *)res_handle;
+
+ /* If this is a shared resource */
+ if (res->shared_res)
+ /* We need to lock/unlock the underlying shared resource */
+ res = res->shared_res;
+
+ /* If no mutex associated with this resource */
+ if (!res->mutex_handle) {
+ /* Create one */
+
+ res->mutex_handle = kzalloc(sizeof(*res->mutex_handle), GFP_KERNEL);
+ if (!res->mutex_handle)
+ return;
+
+ mutex_init(res->mutex_handle);
+ }
+
+ /* lock it */
+ mutex_lock(res->mutex_handle);
+}
+
+void rman_unlock_resource(void *res_handle)
+{
+ struct rman_res *res;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(res_handle);
+ if (!res_handle)
+ return;
+
+ /* Get access to the resource structure */
+ res = (struct rman_res *)res_handle;
+
+ /* If this is a shared resource */
+ if (res->shared_res)
+ /* We need to lock/unlock the underlying shared resource */
+ res = res->shared_res;
+
+ IMG_DBG_ASSERT(res->mutex_handle);
+
+ /* Unlock mutex */
+ mutex_unlock(res->mutex_handle);
+}
+
+void rman_free_resources(void *res_bucket_handle, unsigned int type_id)
+{
+ struct rman_bucket *bucket = (struct rman_bucket *)res_bucket_handle;
+ struct rman_res *res;
+
+ IMG_DBG_ASSERT(inited);
+
+ IMG_DBG_ASSERT(res_bucket_handle);
+ if (!res_bucket_handle)
+ return;
+
+ /* Scan the active list looking for the resources to be freed */
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+ res = (struct rman_res *)dq_first(&bucket->res_list);
+ while ((res) && ((void *)res != &bucket->res_list)) {
+ /* If this resource is to be removed */
+ if ((type_id == RMAN_ALL_TYPES &&
+ res->type_id != RMAN_STICKY) ||
+ res->type_id == type_id) {
+ /* Yes, remove it, Free current resource */
+ mutex_unlock(shared_res_mutex_handle);
+ rman_free_resource_int(res);
+ mutex_lock_nested(shared_res_mutex_handle, SUBCLASS_RMAN);
+
+ /* Restart from the beginning of the list */
+ res = (struct rman_res *)dq_first(&bucket->res_list);
+ } else {
+ /* Move to next resource */
+ res = (struct rman_res *)dq_next(res);
+ }
+ }
+ mutex_unlock(shared_res_mutex_handle);
+}
diff --git a/drivers/media/platform/vxe-vxd/common/rman_api.h b/drivers/media/platform/vxe-vxd/common/rman_api.h
new file mode 100644
index 000000000000..baadc7f22eff
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/rman_api.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This component is used to track decoder resources,
+ * and share them across other components.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __RMAN_API_H__
+#define __RMAN_API_H__
+
+#include <linux/types.h>
+
+#include "img_errors.h"
+#include "lst.h"
+
+#define RMAN_ALL_TYPES (0xFFFFFFFF)
+#define RMAN_TYPE_P1 (0xFFFFFFFE)
+#define RMAN_TYPE_P2 (0xFFFFFFFE)
+#define RMAN_TYPE_P3 (0xFFFFFFFE)
+#define RMAN_STICKY (0xFFFFFFFD)
+
+int rman_initialise(void);
+
+void rman_deinitialise(void);
+
+int rman_create_bucket(void **res_handle);
+
+void rman_destroy_bucket(void *res_handle);
+
+void *rman_get_global_bucket(void);
+
+typedef void (*rman_fn_free) (void *param);
+
+int rman_register_resource(void *res_handle, unsigned int type_id, rman_fn_free fn_free,
+ void *param, void **res_handle_ptr,
+ unsigned int *res_id);
+
+typedef int (*rman_fn_alloc) (void *alloc_info, void **param);
+
+int rman_get_named_resource(unsigned char *res_name, rman_fn_alloc fn_alloc,
+ void *alloc_info, void *res_bucket_handle,
+ unsigned int type_id, rman_fn_free fn_free,
+ void **param, void **res_handle, unsigned int *res_id);
+
+unsigned int rman_get_resource_id(void *res_handle);
+
+int rman_get_resource(unsigned int res_id, unsigned int type_id, void **param,
+ void **res_handle);
+
+void rman_free_resource(void *res_handle);
+
+void rman_lock_resource(void *res_handle);
+
+void rman_unlock_resource(void *res_handle);
+
+void rman_free_resources(void *res_bucket_handle, unsigned int type_id);
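+
+/*
+ * Editor's sketch (not part of the original patch): a minimal usage
+ * example of the RMAN API declared above, assuming a hypothetical
+ * resource type id MY_TYPE, a kzalloc'd payload (linux/slab.h) and
+ * abbreviated error handling. Illustrative only.
+ */
+#if 0 /* illustrative only */
+#define MY_TYPE 0x1
+
+static void my_free(void *param)
+{
+ kfree(param);
+}
+
+static int rman_example(void)
+{
+ void *bucket, *res_handle, *param;
+ unsigned int res_id;
+
+ if (rman_initialise() != IMG_SUCCESS)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ if (rman_create_bucket(&bucket) != IMG_SUCCESS)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ param = kzalloc(32, GFP_KERNEL);
+ rman_register_resource(bucket, MY_TYPE, my_free, param,
+ &res_handle, &res_id);
+
+ /* later: look the resource up again by its composite id */
+ if (rman_get_resource(res_id, MY_TYPE, &param, NULL) == IMG_SUCCESS)
+ rman_free_resource(res_handle); /* also invokes my_free() */
+
+ rman_destroy_bucket(bucket);
+ rman_deinitialise();
+ return IMG_SUCCESS;
+}
+#endif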
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/common/talmmu_api.c b/drivers/media/platform/vxe-vxd/common/talmmu_api.c
new file mode 100644
index 000000000000..04ddcc33505c
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/talmmu_api.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TAL MMU Extensions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "lst.h"
+#include "talmmu_api.h"
+
+static int global_init;
+static struct lst_t gl_dmtmpl_lst = {0};
+static struct mutex *global_lock;
+
+static int talmmu_devmem_free(void *mem_hndl)
+{
+ struct talmmu_memory *mem = mem_hndl;
+ struct talmmu_devmem_heap *mem_heap;
+
+ if (!mem_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ mem_heap = mem->devmem_heap;
+
+ if (!mem->ext_dev_virtaddr)
+ addr_cx_free(&mem_heap->ctx, "", mem->dev_virtoffset);
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ lst_remove(&mem_heap->memory_list, mem);
+
+ mutex_unlock(global_lock);
+
+ kfree(mem);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * talmmu_devmem_heap_empty - free all allocations on a device memory heap
+ * @devmem_heap_hndl: device memory heap handle
+ *
+ * This function is used for emptying the device memory heap list
+ */
+int talmmu_devmem_heap_empty(void *devmem_heap_hndl)
+{
+ struct talmmu_devmem_heap *devmem_heap = devmem_heap_hndl;
+
+ if (!devmem_heap)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ while (!lst_empty(&devmem_heap->memory_list))
+ talmmu_devmem_free(lst_first(&devmem_heap->memory_list));
+
+ addr_cx_deinitialise(&devmem_heap->ctx);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_devmem_heap_destroy
+ *
+ * @Description This function is used for freeing the device memory heap
+ *
+ * @Input devmem_heap_hndl: device memory heap handle
+ *
+ */
+static void talmmu_devmem_heap_destroy(void *devmem_heap_hndl)
+{
+ struct talmmu_devmem_heap *devmem_heap = devmem_heap_hndl;
+
+ talmmu_devmem_heap_empty(devmem_heap_hndl);
+ kfree(devmem_heap);
+}
+
+/*
+ * @Function talmmu_init
+ *
+ * @Description This function is used to initialize the TALMMU component.
+ *
+ * @Input None.
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_init(void)
+{
+ if (!global_init) {
+ /* If no mutex associated with this resource */
+ if (!global_lock) {
+ /* Create one */
+ global_lock = kzalloc(sizeof(*global_lock), GFP_KERNEL);
+ if (!global_lock)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ mutex_init(global_lock);
+ }
+
+ lst_init(&gl_dmtmpl_lst);
+ global_init = 1;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_deinit
+ *
+ * @Description This function is used to de-initialize the TALMMU component.
+ *
+ * @Input None.
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_deinit(void)
+{
+ struct talmmu_dm_tmpl *t;
+
+ if (global_init) {
+ while (!lst_empty(&gl_dmtmpl_lst)) {
+ t = (struct talmmu_dm_tmpl *)lst_first(&gl_dmtmpl_lst);
+ talmmu_devmem_template_destroy((void *)t);
+ }
+ mutex_destroy(global_lock);
+ kfree(global_lock);
+ global_lock = NULL;
+ global_init = 0;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_devmem_template_create
+ *
+ * @Description This function is used to create a device memory template
+ *
+ * @Input devmem_info: A pointer to a talmmu_devmem_info structure.
+ *
+ * @Output devmem_template_hndl: A pointer used to return the template
+ * handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_template_create(struct talmmu_devmem_info *devmem_info,
+ void **devmem_template_hndl)
+{
+ struct talmmu_dm_tmpl *devmem_template;
+
+ if (!devmem_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ devmem_template = kzalloc(sizeof(*devmem_template), GFP_KERNEL);
+ if (!devmem_template)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ devmem_template->devmem_info = *devmem_info;
+
+ lst_init(&devmem_template->devmem_ctx_list);
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ devmem_template->page_num_shift = 12;
+ devmem_template->byte_in_pagemask = 0xFFF;
+ devmem_template->heap_alignment = 0x400000;
+ devmem_template->pagetable_entries_perpage =
+ (devmem_template->devmem_info.page_size / sizeof(unsigned int));
+ devmem_template->pagetable_num_shift = 10;
+ devmem_template->index_in_pagetable_mask = 0x3FF;
+ devmem_template->pagedir_num_shift = 22;
+
+ lst_add(&gl_dmtmpl_lst, devmem_template);
+
+ mutex_unlock(global_lock);
+
+ *devmem_template_hndl = devmem_template;
+
+ return IMG_SUCCESS;
+}
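+
+/*
+ * Editor's note (worked example): with the 4 KiB-page constants set
+ * above, a 32-bit device virtual address decomposes as
+ * pagedir index = addr >> 22,
+ * pagetable index = (addr >> 12) & 0x3FF,
+ * byte-in-page = addr & 0xFFF;
+ * e.g. 0x00403008 -> directory 1, page table entry 3, byte offset 8.
+ */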
+
+/*
+ * @Function talmmu_devmem_template_destroy
+ *
+ * @Description This function is used to obtain the template from the list and
+ * destroy
+ *
+ * @Input devmem_tmplt_hndl: Device memory template handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_template_destroy(void *devmem_tmplt_hndl)
+{
+ struct talmmu_dm_tmpl *dm_tmpl = devmem_tmplt_hndl;
+ unsigned int i;
+
+ if (!devmem_tmplt_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ while (!lst_empty(&dm_tmpl->devmem_ctx_list))
+ talmmu_devmem_ctx_destroy(lst_first(&dm_tmpl->devmem_ctx_list));
+
+ for (i = 0; i < dm_tmpl->num_heaps; i++)
+ talmmu_devmem_heap_destroy(dm_tmpl->devmem_heap[i]);
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ lst_remove(&gl_dmtmpl_lst, dm_tmpl);
+
+ mutex_unlock(global_lock);
+
+ kfree(dm_tmpl);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_create_heap
+ *
+ * @Description This function is used to create a device memory heap
+ *
+ * @Input
+ *
+ * @Output
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+static int talmmu_create_heap(void *devmem_tmplt_hndl,
+ struct talmmu_heap_info *heap_info_arg,
+ unsigned char isfull,
+ struct talmmu_devmem_heap **devmem_heap_arg)
+{
+ struct talmmu_dm_tmpl *devmem_template = devmem_tmplt_hndl;
+ struct talmmu_devmem_heap *devmem_heap;
+
+ /* Allocating memory for device memory heap */
+ devmem_heap = kzalloc(sizeof(*devmem_heap), GFP_KERNEL);
+ if (!devmem_heap)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * Update the device memory heap structure members
+ * Update the device memory template
+ */
+ devmem_heap->devmem_template = devmem_template;
+ /* Update the device memory heap information */
+ devmem_heap->heap_info = *heap_info_arg;
+
+ /* Initialize the device memory heap list */
+ lst_init(&devmem_heap->memory_list);
+
+ /* If full structure required */
+ if (isfull) {
+ addr_cx_initialise(&devmem_heap->ctx);
+ devmem_heap->regions.base_addr = 0;
+ devmem_heap->regions.size = devmem_heap->heap_info.size;
+ addr_cx_define_mem_region(&devmem_heap->ctx,
+ &devmem_heap->regions);
+ }
+
+ *devmem_heap_arg = devmem_heap;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_devmem_heap_add
+ *
+ * @Description This function is for creating and adding the heap to the
+ * device memory template
+ *
+ * @Input devmem_tmplt_hndl: device memory template handle
+ *
+ * @Input heap_info_arg: pointer to the heap info structure
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_heap_add(void *devmem_tmplt_hndl,
+ struct talmmu_heap_info *heap_info_arg)
+{
+ struct talmmu_dm_tmpl *devmem_template = devmem_tmplt_hndl;
+ struct talmmu_devmem_heap *devmem_heap;
+ unsigned int res;
+
+ if (!devmem_tmplt_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!heap_info_arg)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ res = talmmu_create_heap(devmem_tmplt_hndl,
+ heap_info_arg,
+ 1,
+ &devmem_heap);
+ if (res != IMG_SUCCESS)
+ return res;
+
+ devmem_template->devmem_heap[devmem_template->num_heaps] = devmem_heap;
+ devmem_template->num_heaps++;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_devmem_ctx_create
+ *
+ * @Description This function is used to create a device memory context
+ *
+ * @Input devmem_tmplt_hndl: pointer to the device memory template handle
+ *
+ * @Input mmu_ctx_id: MMU context ID used with the TAL
+ *
+ * @Output devmem_ctx_hndl: pointer to the device memory context handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_ctx_create(void *devmem_tmplt_hndl,
+ unsigned int mmu_ctx_id,
+ void **devmem_ctx_hndl)
+{
+ struct talmmu_dm_tmpl *dm_tmpl = devmem_tmplt_hndl;
+ struct talmmu_devmem_ctx *dm_ctx;
+ struct talmmu_devmem_heap *dm_heap;
+ int i;
+ unsigned int res = IMG_SUCCESS;
+
+ if (!devmem_tmplt_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Allocate memory for device memory context */
+ dm_ctx = kzalloc(sizeof(*dm_ctx), GFP_KERNEL);
+ if (!dm_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * Update the device memory context structure members
+ * Update the device memory template
+ */
+ dm_ctx->devmem_template = dm_tmpl;
+ /* Update MMU context ID */
+ dm_ctx->mmu_ctx_id = mmu_ctx_id;
+
+ /* Check for PTD Alignment */
+ if (dm_tmpl->devmem_info.ptd_alignment == 0)
+ /*
+ * Make sure alignment is a multiple of page size.
+ * Set up PTD alignment to Page Size
+ */
+ dm_tmpl->devmem_info.ptd_alignment =
+ dm_tmpl->devmem_info.page_size;
+
+ /* Reference or create heaps for this context */
+ for (i = 0; i < dm_tmpl->num_heaps; i++) {
+ dm_heap = dm_tmpl->devmem_heap[i];
+ if (!dm_heap)
+ goto error_heap_create;
+
+ switch (dm_heap->heap_info.heap_type) {
+ case TALMMU_HEAP_PERCONTEXT:
+ res = talmmu_create_heap(dm_tmpl,
+ &dm_heap->heap_info,
+ 1,
+ &dm_ctx->devmem_heap[i]);
+ if (res != IMG_SUCCESS)
+ goto error_heap_create;
+ break;
+
+ default:
+ break;
+ }
+
+ dm_ctx->num_heaps++;
+ }
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ /* Add the device memory context to the list */
+ lst_add(&dm_tmpl->devmem_ctx_list, dm_ctx);
+
+ dm_tmpl->num_ctxs++;
+
+ mutex_unlock(global_lock);
+
+ *devmem_ctx_hndl = dm_ctx;
+
+ return IMG_SUCCESS;
+
+error_heap_create:
+ /* Destroy the device memory heaps which were already created */
+ for (i--; i >= 0; i--) {
+ dm_heap = dm_ctx->devmem_heap[i];
+ if (dm_heap->heap_info.heap_type == TALMMU_HEAP_PERCONTEXT)
+ talmmu_devmem_heap_destroy(dm_heap);
+
+ dm_ctx->num_heaps--;
+ }
+ kfree(dm_ctx);
+ return res;
+}
+
+/*
+ * @Function talmmu_devmem_ctx_destroy
+ *
+ * @Description This function is used to get the device memory context from
+ * the list and destroy
+ *
+ * @Input devmem_ctx_hndl: device memory context handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_ctx_destroy(void *devmem_ctx_hndl)
+{
+ struct talmmu_devmem_ctx *devmem_ctx = devmem_ctx_hndl;
+ struct talmmu_dm_tmpl *devmem_template;
+ struct talmmu_devmem_heap *devmem_heap;
+ unsigned int i;
+
+ if (!devmem_ctx_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ devmem_template = devmem_ctx->devmem_template;
+
+ for (i = 0; i < devmem_ctx->num_heaps; i++) {
+ devmem_heap = devmem_ctx->devmem_heap[i];
+ if (!devmem_heap)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ talmmu_devmem_heap_destroy(devmem_heap);
+ }
+
+ devmem_ctx->pagedir = NULL;
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ lst_remove(&devmem_template->devmem_ctx_list, devmem_ctx);
+
+ devmem_ctx->devmem_template->num_ctxs--;
+
+ mutex_unlock(global_lock);
+
+ kfree(devmem_ctx);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_get_heap_handle
+ *
+ * @Description This function is used to get the device memory heap handle
+ *
+ * @Input hid: heap id
+ *
+ * @Input devmem_ctx_hndl: device memory context handle
+ *
+ * @Output devmem_heap_hndl: pointer to the device memory heap handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_get_heap_handle(unsigned int hid,
+ void *devmem_ctx_hndl,
+ void **devmem_heap_hndl)
+{
+ struct talmmu_devmem_ctx *devmem_ctx = devmem_ctx_hndl;
+ unsigned int i;
+
+ if (!devmem_ctx_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ for (i = 0; i < devmem_ctx->num_heaps; i++) {
+ /*
+ * Checking for requested heap id match and return the device
+ * memory heap handle
+ */
+ if (devmem_ctx->devmem_heap[i]->heap_info.heap_id == hid) {
+ *devmem_heap_hndl = devmem_ctx->devmem_heap[i];
+ return IMG_SUCCESS;
+ }
+ }
+
+ return IMG_ERROR_GENERIC_FAILURE;
+}
+
+/*
+ * @Function talmmu_devmem_heap_options
+ *
+ * @Description This function is used to set additional heap options
+ *
+ * @Input devmem_heap_hndl: Handle for heap
+ *
+ * @Input heap_opt_id: Heap options ID
+ *
+ * @Input heap_options: Heap options
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+void talmmu_devmem_heap_options(void *devmem_heap_hndl,
+ enum talmmu_heap_option_id heap_opt_id,
+ union talmmu_heap_options heap_options)
+{
+ struct talmmu_devmem_heap *dm_heap = devmem_heap_hndl;
+
+ switch (heap_opt_id) {
+ case TALMMU_HEAP_OPT_ADD_GUARD_BAND:
+ dm_heap->guardband = heap_options.guardband_opt.guardband;
+ break;
+ default:
+ break;
+ }
+}
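+
+/*
+ * Editor's sketch (not part of the original patch): setting the
+ * guard-band option on a heap; each later allocation from the heap then
+ * reserves guardband extra bytes of device virtual space (see
+ * talmmu_devmem_alloc_nonmap() below). Illustrative only.
+ */
+#if 0 /* illustrative only */
+static void heap_options_example(void *devmem_heap_hndl)
+{
+ union talmmu_heap_options opt;
+
+ opt.guardband_opt.guardband = 4096; /* one 4 KiB page */
+ talmmu_devmem_heap_options(devmem_heap_hndl,
+ TALMMU_HEAP_OPT_ADD_GUARD_BAND, opt);
+}
+#endif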
+
+/*
+ * @Function talmmu_devmem_alloc_nonmap
+ *
+ * @Description This function allocates device virtual space from a heap
+ *              without mapping any physical pages
+ *
+ * @Input devmem_ctx_hndl: device memory context handle
+ * @Input devmem_heap_hndl: device memory heap handle
+ * @Input size: size of the allocation in bytes
+ * @Input align: required alignment of the allocation
+ * @Input dev_virt_ofset: externally supplied device virtual offset, if any
+ * @Input ext_dev_vaddr: true if the device virtual address is external
+ *
+ * @Output mem_hndl: pointer used to return the memory handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+static int talmmu_devmem_alloc_nonmap(void *devmem_ctx_hndl,
+ void *devmem_heap_hndl,
+ unsigned int size,
+ unsigned int align,
+ unsigned int dev_virt_ofset,
+ unsigned char ext_dev_vaddr,
+ void **mem_hndl)
+{
+ struct talmmu_devmem_ctx *dm_ctx = devmem_ctx_hndl;
+ struct talmmu_dm_tmpl *dm_tmpl;
+ struct talmmu_devmem_heap *dm_heap = devmem_heap_hndl;
+ struct talmmu_memory *mem;
+ unsigned long long ui64_dev_offset = 0;
+ int res = IMG_SUCCESS;
+
+ if (!dm_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!devmem_heap_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ dm_tmpl = dm_ctx->devmem_template;
+
+ /* Allocate memory for memory structure */
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ mem->devmem_heap = dm_heap;
+ mem->devmem_ctx = dm_ctx;
+ mem->ext_dev_virtaddr = ext_dev_vaddr;
+
+ /* We always force allocations to be at least page aligned */
+ if (align >= dm_tmpl->devmem_info.page_size)
+ /*
+ * alignment is larger than page size - make sure alignment is
+ * a multiple of page size
+ */
+ mem->alignment = align;
+ else
+ /*
+ * alignment is smaller than page size - make sure page size is
+ * a multiple of alignment. Now round up alignment to one page
+ */
+ mem->alignment = dm_tmpl->devmem_info.page_size;
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % dm_tmpl->devmem_info.page_size) != 0)
+ mem->size = ((size / dm_tmpl->devmem_info.page_size)
+ + 1) * dm_tmpl->devmem_info.page_size;
+ else
+ mem->size = size;
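+ /*
+ * Editor's note (worked example): with a 4 KiB page size a request
+ * of 0x2100 bytes rounds up to 0x3000 here, while 0x2000 is already
+ * page aligned and is left unchanged.
+ */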
+
+ /* If the device virtual address was externally defined */
+ if (mem->ext_dev_virtaddr) {
+ res = IMG_ERROR_INVALID_PARAMETERS;
+ goto free_mem;
+ }
+
+ res = addr_cx_malloc_align_res(&dm_heap->ctx, "",
+ (mem->size + dm_heap->guardband),
+ mem->alignment,
+ &ui64_dev_offset);
+
+ mem->dev_virtoffset = (unsigned int)ui64_dev_offset;
+ if (res != IMG_SUCCESS)
+ /*
+ * If heap space is unavailable, return the error; the
+ * caller must handle this condition
+ */
+ goto free_virt;
+
+ mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+ /*
+ * Add memory allocation to the list for this heap...
+ * If the heap is empty...
+ */
+ if (lst_empty(&dm_heap->memory_list))
+ /*
+ * Save flag to indicate whether the device virtual address
+ * is allocated internally or externally...
+ */
+ dm_heap->ext_dev_virtaddr = mem->ext_dev_virtaddr;
+
+ /*
+ * Once we have started allocating in one way ensure that we continue
+ * to do this...
+ */
+ lst_add(&dm_heap->memory_list, mem);
+
+ mutex_unlock(global_lock);
+
+ *mem_hndl = mem;
+
+ return IMG_SUCCESS;
+
+free_virt:
+ addr_cx_free(&dm_heap->ctx, "", mem->dev_virtoffset);
+free_mem:
+ kfree(mem);
+
+ return res;
+}
+
+/*
+ * @Function talmmu_devmem_addr_alloc
+ *
+ * @Description This function allocates device virtual address space from
+ *              a heap, without mapping physical memory
+ *
+ * @Input devmem_ctx_hndl: device memory context handle
+ * @Input devmem_heap_hndl: device memory heap handle
+ * @Input size: size of the allocation in bytes
+ * @Input align: required alignment of the allocation
+ *
+ * @Output mem_hndl: pointer used to return the memory handle
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_addr_alloc(void *devmem_ctx_hndl,
+ void *devmem_heap_hndl,
+ unsigned int size,
+ unsigned int align,
+ void **mem_hndl)
+{
+ unsigned int res;
+ void *mem;
+
+ res = talmmu_devmem_alloc_nonmap(devmem_ctx_hndl,
+ devmem_heap_hndl,
+ size,
+ align,
+ 0,
+ 0,
+ &mem);
+ if (res != IMG_SUCCESS)
+ return res;
+
+ *mem_hndl = mem;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function talmmu_devmem_addr_free
+ *
+ * @Description This function is used to free device memory allocated using
+ * talmmu_devmem_addr_alloc().
+ *
+ * @Input mem_hndl : Handle for the memory object
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_addr_free(void *mem_hndl)
+{
+ unsigned int res;
+
+ if (!mem_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* free device memory allocated by calling talmmu_devmem_free() */
+ res = talmmu_devmem_free(mem_hndl);
+
+ return res;
+}
+
+/*
+ * @Function talmmu_get_dev_virt_addr
+ *
+ * @Description This function is used to obtain the device (virtual)
+ *              memory address, which may be required as a device virtual
+ *              address in some of the TAL image functions
+ *
+ * @Input mem_hndl : Handle for the memory object
+ *
+ * @Output dev_virt: A pointer used to return the device virtual address
+ *
+ * @Return IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_get_dev_virt_addr(void *mem_hndl,
+ unsigned int *dev_virt)
+{
+ struct talmmu_memory *mem = mem_hndl;
+ struct talmmu_devmem_heap *devmem_heap;
+
+ if (!mem_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ devmem_heap = mem->devmem_heap;
+
+ /*
+ * Device virtual address is addition of the specific device virtual
+ * offset and the base device virtual address from the heap information
+ */
+ *dev_virt = (devmem_heap->heap_info.basedev_virtaddr +
+ mem->dev_virtoffset);
+
+ return IMG_SUCCESS;
+}
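+
+/*
+ * Editor's sketch (not part of the original patch): a minimal
+ * end-to-end example of the TALMMU calls defined above, assuming a
+ * caller-populated struct talmmu_devmem_info and struct talmmu_heap_info.
+ * Illustrative only; error handling is abbreviated.
+ */
+#if 0 /* illustrative only */
+static int talmmu_example(struct talmmu_devmem_info *info,
+ struct talmmu_heap_info *heap_info)
+{
+ void *tmpl, *ctx, *heap, *mem;
+ unsigned int dev_virt;
+
+ talmmu_init();
+ talmmu_devmem_template_create(info, &tmpl);
+ talmmu_devmem_heap_add(tmpl, heap_info);
+ talmmu_devmem_ctx_create(tmpl, 0, &ctx);
+ talmmu_get_heap_handle(heap_info->heap_id, ctx, &heap);
+
+ /* reserve 16 KiB of device virtual space, 4 KiB aligned */
+ talmmu_devmem_addr_alloc(ctx, heap, 0x4000, 0x1000, &mem);
+ talmmu_get_dev_virt_addr(mem, &dev_virt);
+
+ talmmu_devmem_addr_free(mem);
+ talmmu_devmem_ctx_destroy(ctx);
+ talmmu_devmem_template_destroy(tmpl);
+ return talmmu_deinit();
+}
+#endif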
diff --git a/drivers/media/platform/vxe-vxd/common/talmmu_api.h b/drivers/media/platform/vxe-vxd/common/talmmu_api.h
new file mode 100644
index 000000000000..f37f78394d54
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/talmmu_api.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TAL MMU Extensions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#include "addr_alloc.h"
+#include "ra.h"
+#include "lst.h"
+
+#ifndef __TALMMU_API_H__
+#define __TALMMU_API_H__
+
+#define TALMMU_MAX_DEVICE_HEAPS (32)
+#define TALMMU_MAX_TEMPLATES (32)
+
+/* MMU type */
+enum talmmu_mmu_type {
+ /* 4kb pages and 32-bit address range */
+ TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR = 0x1,
+ /* variable size pages and 32-bit address */
+ TALMMU_MMUTYPE_VAR_PAGES_32BIT_ADDR,
+ /* 4kb pages and 36-bit address range */
+ TALMMU_MMUTYPE_4K_PAGES_36BIT_ADDR,
+ /* 4kb pages and 40-bit address range */
+ TALMMU_MMUTYPE_4K_PAGES_40BIT_ADDR,
+ /* variable size pages and 40-bit address range */
+ TALMMU_MMUTYPE_VP_40BIT,
+ TALMMU_MMUTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Device flags */
+enum talmmu_dev_flags {
+ TALMMU_DEVFLAGS_NONE = 0x0,
+ TALMMU_DEVFLAGS_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Heap type */
+enum talmmu_heap_type {
+ TALMMU_HEAP_SHARED_EXPORTED,
+ TALMMU_HEAP_PERCONTEXT,
+ TALMMU_HEAP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Heap flags */
+enum talmmu_eheapflags {
+ TALMMU_HEAPFLAGS_NONE = 0x0,
+ TALMMU_HEAPFLAGS_SET_CACHE_CONSISTENCY = 0x00000001,
+ TALMMU_HEAPFLAGS_128BYTE_INTERLEAVE = 0x00000002,
+ TALMMU_HEAPFLAGS_256BYTE_INTERLEAVE = 0x00000004,
+ TALMMU_HEAPFLAGS_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Contains the device memory information */
+struct talmmu_devmem_info {
+ /* device id */
+ unsigned int device_id;
+ /* mmu type */
+ enum talmmu_mmu_type mmu_type;
+ /* Device flags - bit flags that can be combined */
+ enum talmmu_dev_flags dev_flags;
+ /* Name of the memory space for page directory allocations */
+ unsigned char *pagedir_memspace_name;
+ /* Name of the memory space for page table allocations */
+ unsigned char *pagetable_memspace_name;
+ /* Page size in bytes */
+ unsigned int page_size;
+ /* PTD alignment, must be multiple of Page size */
+ unsigned int ptd_alignment;
+};
+
+struct talmmu_heap_info {
+ /* heap id */
+ unsigned int heap_id;
+ /* heap type */
+ enum talmmu_heap_type heap_type;
+ /* heap flags - bit flags that can be combined */
+ enum talmmu_eheapflags heap_flags;
+ /* Name of the memory space for memory allocations */
+ unsigned char *memspace_name;
+ /* Base device virtual address */
+ unsigned int basedev_virtaddr;
+ /* size in bytes */
+ unsigned int size;
+};
+
+/* Device memory template information */
+struct talmmu_dm_tmpl {
+ /* list */
+ struct lst_t list;
+ /* Copy of device memory info structure */
+ struct talmmu_devmem_info devmem_info;
+ /* Memory space ID for PTD allocations */
+ void *ptd_memspace_hndl;
+ /* Memory space ID for Page Table allocations */
+ void *ptentry_memspace_hndl;
+ /* number of heaps */
+ unsigned int num_heaps;
+ /* Array of heap pointers */
+ struct talmmu_devmem_heap *devmem_heap[TALMMU_MAX_DEVICE_HEAPS];
+ /* Number of active contexts */
+ unsigned int num_ctxs;
+ /* List of device memory context created from this template */
+ struct lst_t devmem_ctx_list;
+ /* Number of bits to shift right to obtain page number */
+ unsigned int page_num_shift;
+ /* Mask to extract byte-within-page */
+ unsigned int byte_in_pagemask;
+ /* Heap alignment */
+ unsigned int heap_alignment;
+ /* Page table entries/page */
+ unsigned int pagetable_entries_perpage;
+ /* Number of bits to shift right to obtain page table number */
+ unsigned int pagetable_num_shift;
+ /* Mask to extract index-within-page-table */
+ unsigned int index_in_pagetable_mask;
+ /* Number of bits to shift right to obtain page dir number */
+ unsigned int pagedir_num_shift;
+};
+
+/* Device memory heap information */
+struct talmmu_devmem_heap {
+ /* list item */
+ struct lst_t list;
+ /* Copy of the heap info structure */
+ struct talmmu_heap_info heap_info;
+ /* Pointer to the device memory template */
+ struct talmmu_dm_tmpl *devmem_template;
+ /* true if device virtual address offset allocated externally by user */
+ unsigned int ext_dev_virtaddr;
+ /* list of memory allocations */
+ struct lst_t memory_list;
+ /* Memory space ID for memory allocations */
+ void *memspace_hndl;
+ /* Address context structure */
+ struct addr_context ctx;
+ /* Regions structure */
+ struct addr_region regions;
+ /* size of heap guard band */
+ unsigned int guardband;
+};
+
+struct talmmu_devmem_ctx {
+ /* list item */
+ struct lst_t list;
+ /* Pointer to device template */
+ struct talmmu_dm_tmpl *devmem_template;
+ /* No. of heaps */
+ unsigned int num_heaps;
+ /* Array of heap pointers */
+ struct talmmu_devmem_heap *devmem_heap[TALMMU_MAX_DEVICE_HEAPS];
+ /* The MMU context id */
+ unsigned int mmu_ctx_id;
+ /* Pointer to the memory that represents Page directory */
+ unsigned int *pagedir;
+};
+
+struct talmmu_memory {
+ /* list item */
+ struct lst_t list;
+ /* Heap from which memory was allocated */
+ struct talmmu_devmem_heap *devmem_heap;
+ /* Context through which memory was allocated */
+ struct talmmu_devmem_ctx *devmem_ctx;
+ /* size */
+ unsigned int size;
+ /* alignment */
+ unsigned int alignment;
+ /* device virtual offset of allocation */
+ unsigned int dev_virtoffset;
+ /* true if device virtual address offset allocated externally by user */
+ unsigned int ext_dev_virtaddr;
+};
+
+/* This type defines the event types for the TALMMU callbacks */
+enum talmmu_event {
+ /* Function to flush the cache. */
+ TALMMU_EVENT_FLUSH_CACHE,
+ /*! Function to write the page directory address to the device */
+ TALMMU_EVENT_WRITE_PAGE_DIRECTORY_REF,
+ /* Placeholder*/
+ TALMMU_NO_OF_EVENTS
+};
+
+enum talmmu_heap_option_id {
+ /* Add guard band to all mallocs */
+ TALMMU_HEAP_OPT_ADD_GUARD_BAND,
+ TALMMU_HEAP_OPT_SET_MEM_ATTRIB,
+ TALMMU_HEAP_OPT_SET_MEM_POOL,
+
+ /* Placeholder */
+ TALMMU_NO_OF_OPTIONS,
+ TALMMU_NO_OF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct talmmu_guardband_options {
+ unsigned int guardband;
+};
+
+union talmmu_heap_options {
+ /* Guardband parameters */
+ struct talmmu_guardband_options guardband_opt;
+};
+
+int talmmu_init(void);
+int talmmu_deinit(void);
+int talmmu_devmem_template_create(struct talmmu_devmem_info *devmem_info,
+ void **devmem_template_hndl);
+int talmmu_devmem_heap_add(void *devmem_tmplt_hndl,
+ struct talmmu_heap_info *heap_info_arg);
+int talmmu_devmem_template_destroy(void *devmem_tmplt_hndl);
+int talmmu_devmem_ctx_create(void *devmem_tmplt_hndl,
+ unsigned int mmu_ctx_id,
+ void **devmem_ctx_hndl);
+int talmmu_devmem_ctx_destroy(void *devmem_ctx_hndl);
+int talmmu_get_heap_handle(unsigned int hid,
+ void *devmem_ctx_hndl,
+ void **devmem_heap_hndl);
+/**
+ * talmmu_devmem_heap_empty - empty the device memory heap list
+ * @devmem_heap_hndl: device memory heap handle
+ *
+ * This function is used for emptying the device memory heap list.
+ */
+int talmmu_devmem_heap_empty(void *devmem_heap_hndl);
+void talmmu_devmem_heap_options(void *devmem_heap_hndl,
+ enum talmmu_heap_option_id heap_opt_id,
+ union talmmu_heap_options heap_options);
+int talmmu_devmem_addr_alloc(void *devmem_ctx_hndl,
+ void *devmem_heap_hndl,
+ unsigned int size,
+ unsigned int align,
+ void **mem_hndl);
+int talmmu_devmem_addr_free(void *mem_hndl);
+int talmmu_get_dev_virt_addr(void *mem_hndl,
+ unsigned int *dev_virt);
+
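+/*
+ * Typical call sequence (an illustrative sketch only, with error handling
+ * omitted; dev_info, heap_info, mmu_ctx_id, heap_id, size and align stand
+ * for caller-supplied values):
+ *
+ * void *tmpl, *ctx, *heap, *mem;
+ * unsigned int dev_virt;
+ *
+ * talmmu_init();
+ * talmmu_devmem_template_create(&dev_info, &tmpl);
+ * talmmu_devmem_heap_add(tmpl, &heap_info);
+ * talmmu_devmem_ctx_create(tmpl, mmu_ctx_id, &ctx);
+ * talmmu_get_heap_handle(heap_id, ctx, &heap);
+ * talmmu_devmem_addr_alloc(ctx, heap, size, align, &mem);
+ * talmmu_get_dev_virt_addr(mem, &dev_virt);
+ * talmmu_devmem_addr_free(mem);
+ * talmmu_devmem_ctx_destroy(ctx);
+ * talmmu_devmem_template_destroy(tmpl);
+ * talmmu_deinit();
+ */
+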
+#endif /* __TALMMU_API_H__ */
diff --git a/drivers/media/platform/vxe-vxd/common/vid_buf.h b/drivers/media/platform/vxe-vxd/common/vid_buf.h
new file mode 100644
index 000000000000..ac0e4f9b4894
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/vid_buf.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Low-level VXD interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VID_BUF_H
+#define _VID_BUF_H
+
+/*
+ * struct vidio_ddbufinfo - contains information about virtual address
+ * @buf_size: the size of the buffer (in bytes).
+ * @cpu_virt: the cpu virtual address (mapped into the local cpu mmu)
+ * @dev_virt: device virtual address (pages mapped into IMG H/W mmu)
+ * @hndl_memory: handle to device mmu mapping
+ * @buff_id: buffer id used in communication with interface
+ * @is_internal: true, if the buffer is allocated internally
+ * @ref_count: reference count (number of users)
+ * @kmstr_id: stream id
+ * @core_id: core id
+ */
+struct vidio_ddbufinfo {
+ unsigned int buf_size;
+ void *cpu_virt;
+ unsigned int dev_virt;
+ void *hndl_memory;
+ unsigned int buff_id;
+ unsigned int is_internal;
+ unsigned int ref_count;
+ unsigned int kmstr_id;
+ unsigned int core_id;
+};
+
+#endif /* _VID_BUF_H */
diff --git a/drivers/media/platform/vxe-vxd/common/work_queue.c b/drivers/media/platform/vxe-vxd/common/work_queue.c
new file mode 100644
index 000000000000..6bd91a7fdbf4
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/work_queue.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Work Queue Handling for Linux
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "work_queue.h"
+
+/* Mutex protecting the work and delayed-work lists below */
+static DEFINE_MUTEX(mutex);
+
+struct node {
+ void **key;
+ struct node *next;
+};
+
+static struct node *work_head;
+static struct node *delayed_work_head;
+
+void init_work(void **work_args, void *work_fn, uint8_t hwa_id)
+{
+ struct work_struct **work = (struct work_struct **)work_args;
+ //create a link
+ struct node *link = kmalloc(sizeof(*link), GFP_KERNEL);
+
+ if (!link) {
+ pr_err("Memory allocation failed for work queue list node\n");
+ *work = NULL;
+ return;
+ }
+
+ /* allocate the work_struct itself, not just a pointer to it */
+ *work = kzalloc(sizeof(**work), GFP_KERNEL);
+ if (!(*work)) {
+ pr_err("Memory allocation failed for work_queue\n");
+ kfree(link);
+ return;
+ }
+ INIT_WORK(*work, work_fn);
+
+ link->key = (void **)work;
+ mutex_lock(&mutex);
+ //point it to old first node
+ link->next = work_head;
+
+ //point first to new first node
+ work_head = link;
+ mutex_unlock(&mutex);
+}
+
+void init_delayed_work(void **work_args, void *work_fn, uint8_t hwa_id)
+{
+ struct delayed_work **work = (struct delayed_work **)work_args;
+ //create a link
+ struct node *link = kmalloc(sizeof(*link), GFP_KERNEL);
+
+ if (!link) {
+ pr_err("Memory allocation failed for delayed work list node\n");
+ *work = NULL;
+ return;
+ }
+
+ /* allocate the delayed_work itself, not just a pointer to it */
+ *work = kzalloc(sizeof(**work), GFP_KERNEL);
+ if (!(*work)) {
+ pr_err("Memory allocation failed for delayed_work_queue\n");
+ kfree(link);
+ return;
+ }
+ INIT_DELAYED_WORK(*work, work_fn);
+
+ link->key = (void **)work;
+ mutex_lock(&mutex);
+ //point it to old first node
+ link->next = delayed_work_head;
+
+ //point first to new first node
+ delayed_work_head = link;
+ mutex_unlock(&mutex);
+}
+
+/**
+ * get_work_buff - return the base address stored for the given work pointer
+ * @key: the work_struct pointer to look up
+ * @flag: if non-zero, also remove the matching node from the list
+ */
+void *get_work_buff(void *key, signed char flag)
+{
+ struct node *data = NULL;
+ void *work_new = NULL;
+ struct node *temp = NULL;
+ struct node *previous = NULL;
+ struct work_struct **work = NULL;
+
+ //start from the first link
+ mutex_lock(&mutex);
+ temp = work_head;
+
+ //if list is empty
+ if (!work_head) {
+ mutex_unlock(&mutex);
+ return NULL;
+ }
+
+ work = ((struct work_struct **)(temp->key));
+ //navigate through list
+ while (*work != key) {
+ //if it is last node
+ if (!temp->next) {
+ mutex_unlock(&mutex);
+ return NULL;
+ }
+ //store reference to current link
+ previous = temp;
+ //move to next link
+ temp = temp->next;
+ work = ((struct work_struct **)(temp->key));
+ }
+
+ if (flag) {
+ //found a match, update the link
+ if (temp == work_head) {
+ //change first to point to next link
+ work_head = work_head->next;
+ } else {
+ //bypass the current link
+ previous->next = temp->next;
+ }
+ }
+
+ mutex_unlock(&mutex);
+ data = temp;
+ if (data) {
+ work_new = data->key;
+ if (flag)
+ kfree(data);
+ }
+ return work_new;
+}
+
+void *get_delayed_work_buff(void *key, signed char flag)
+{
+ void *dwork_new = NULL;
+ struct node *temp = NULL;
+ struct delayed_work **dwork = NULL;
+
+ if (flag) {
+ /* This Condition is true when kernel module is removed */
+ return delayed_work_head;
+ }
+ //start from the first link
+ mutex_lock(&mutex);
+ temp = delayed_work_head;
+
+ //if list is empty
+ if (!delayed_work_head) {
+ mutex_unlock(&mutex);
+ return NULL;
+ }
+
+ dwork = ((struct delayed_work **)(temp->key));
+ //navigate through list
+ while (&(*dwork)->work != key) {
+ //if it is last node
+ if (!temp->next) {
+ mutex_unlock(&mutex);
+ return NULL;
+ }
+ //move to next link
+ temp = temp->next;
+ dwork = ((struct delayed_work **)(temp->key));
+ }
+
+ mutex_unlock(&mutex);
+ /* flag is always zero here (handled above), so the node is kept */
+ if (temp)
+ dwork_new = temp->key;
+ return dwork_new;
+}
diff --git a/drivers/media/platform/vxe-vxd/common/work_queue.h b/drivers/media/platform/vxe-vxd/common/work_queue.h
new file mode 100644
index 000000000000..44ed423334e2
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/common/work_queue.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Work Queue Related Definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef WORKQUEUE_H_
+#define WORKQUEUE_H_
+
+#include <linux/types.h>
+
+enum {
+ HWA_DECODER = 0,
+ HWA_ENCODER = 1,
+ HWA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * init_work - initialize a work_struct and save the given pointer
+ * (work_args) in the internal linked list
+ * @work_args: location where the allocated work_struct is returned
+ * @work_fn: work handler function pointer
+ * @hwa_id: hardware accelerator id (HWA_DECODER or HWA_ENCODER)
+ *
+ * This function allocates and initializes the work item and sets the
+ * handler function (passed by the user).
+ */
+void init_work(void **work_args, void *work_fn, uint8_t hwa_id);
+
+/*
+ * init_delayed_work - initialize a delayed_work and save the given pointer
+ * (work_args) in the internal linked list
+ * @work_args: location where the allocated delayed_work is returned
+ * @work_fn: work handler function pointer
+ * @hwa_id: hardware accelerator id (HWA_DECODER or HWA_ENCODER)
+ *
+ * This function allocates and initializes the delayed work item and sets
+ * the handler function (passed by the user).
+ */
+void init_delayed_work(void **work_args, void *work_fn, uint8_t hwa_id);
+
+/*
+ * get_delayed_work_buff - This function returns the base address of the
+ * given pointer
+ * @key: the given work struct pointer
+ * @flag: if TRUE, return the whole list head instead (module removal path)
+ *
+ * Return: base address of the given input buffer.
+ */
+void *get_delayed_work_buff(void *key, signed char flag);
+
+/**
+ * get_work_buff - This function returns the base address of the given pointer
+ * @key: the given work struct pointer
+ * @flag: if TRUE, delete the matching node from the linked list
+ *
+ * Return: base address of the given input buffer.
+ */
+void *get_work_buff(void *key, signed char flag);
+
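+/*
+ * Usage sketch (illustrative only; my_work_fn stands for a caller-supplied
+ * handler):
+ *
+ * struct work_struct *work;
+ *
+ * init_work((void **)&work, my_work_fn, HWA_DECODER);
+ * if (work)
+ * schedule_work(work);
+ *
+ * The handler can later recover (and, with a non-zero flag, unlink) the
+ * stored base pointer:
+ *
+ * struct work_struct **base = get_work_buff(work, 1);
+ */
+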
+#endif /* WORKQUEUE_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/bspp.c b/drivers/media/platform/vxe-vxd/decoder/bspp.c
new file mode 100644
index 000000000000..82b5f0d93bd8
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/bspp.c
@@ -0,0 +1,2483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Bitstream Buffer Pre-Parser
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "h264_secure_parser.h"
+#include "hevc_secure_parser.h"
+#ifdef HAS_JPEG
+#include "jpeg_secure_parser.h"
+#endif
+#include "lst.h"
+#include "swsr.h"
+#include "vdecdd_defs.h"
+#include "img_errors.h"
+
+#define BSPP_ERR_MSG_LENGTH 1024
+
+/*
+ * This type defines the exception flags used to catch errors; if more catch
+ * blocks are required to catch different kinds of error, more enum values
+ * can be added
+ * @brief BSPP exception handler to catch the errors
+ */
+enum bspp_exception_handler {
+ /* BSPP parse exception handler */
+ BSPP_EXCEPTION_HANDLER_NONE = 0x00,
+ /* Jump at exception (external use) */
+ BSPP_EXCEPTION_HANDLER_JUMP,
+ BSPP_EXCEPTION_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains bitstream buffer information.
+ * @brief BSPP Bitstream Buffer Information
+ */
+struct bspp_bitstream_buffer {
+ void **lst_link;
+ struct bspp_ddbuf_info ddbuf_info;
+ unsigned int data_size;
+ unsigned int bufmap_id;
+ enum vdec_bstr_element_type bstr_element_type;
+ unsigned long long bytes_read;
+ void *pict_tag_param;
+};
+
+/*
+ * This structure contains shift-register state.
+ * @brief BSPP Shift-register State
+ */
+struct bspp_parse_ctx {
+ void *swsr_context;
+ enum swsr_exception exception;
+};
+
+/*
+ * This structure contains context for the current picture.
+ * @brief BSPP Picture Context
+ */
+struct bspp_pict_ctx {
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+ int closed_gop;
+ struct bspp_pict_hdr_info pict_hdr_info[VDEC_H264_MVC_MAX_VIEWS];
+ struct bspp_sequence_hdr_info *ext_sequ_hdr_info;
+ int present;
+ int invalid;
+ int unsupported;
+ int finished;
+ unsigned int new_pict_signalled;
+};
+
+/*
+ * This structure contains resources allocated for the stream.
+ * @brief BSPP Stream Resource Allocations
+ */
+struct bspp_stream_alloc_data {
+ struct lst_t sequence_data_list[SEQUENCE_SLOTS];
+ struct lst_t pps_data_list[PPS_SLOTS];
+ struct lst_t available_sequence_list;
+ struct lst_t available_ppss_list;
+ struct lst_t raw_data_list_available;
+ struct lst_t raw_data_list_used;
+ struct lst_t vps_data_list[VPS_SLOTS];
+ struct lst_t raw_sei_alloc_list;
+ struct lst_t available_vps_list;
+};
+
+struct bspp_raw_sei_alloc {
+ void **lst_link;
+ struct vdec_raw_bstr_data raw_sei_data;
+};
+
+/*
+ * This structure contains bitstream parsing state information for the current
+ * group of buffers.
+ * @brief BSPP Bitstream Parsing State Information
+ */
+struct bspp_grp_bstr_ctx {
+ enum vdec_vid_std vid_std;
+ int disable_mvc;
+ int delim_present;
+ void *swsr_context;
+ enum bspp_unit_type unit_type;
+ enum bspp_unit_type last_unit_type;
+ int not_pic_unit_yet;
+ int not_ext_pic_unit_yet;
+ unsigned int total_data_size;
+ unsigned int total_bytes_read;
+ struct lst_t buffer_chain;
+ struct lst_t in_flight_bufs;
+ struct lst_t *pre_pict_seg_list[3];
+ struct lst_t *pict_seg_list[3];
+ void **pict_tag_param_array[3];
+ struct lst_t *segment_list;
+ void **pict_tag_param;
+ struct lst_t *free_segments;
+ unsigned int segment_offset;
+ int insert_start_code;
+ unsigned char start_code_suffix;
+ unsigned char current_view_idx;
+};
+
+/*
+ * This structure contains the stream context information.
+ * @brief BSPP Stream Context Information
+ */
+struct bspp_str_context {
+ enum vdec_vid_std vid_std;
+ int disable_mvc;
+ int full_scan;
+ int immediate_decode;
+ enum vdec_bstr_format bstr_format;
+ struct vdec_codec_config codec_config;
+ unsigned int user_str_id;
+ struct bspp_vid_std_features vid_std_features;
+ struct bspp_swsr_ctx swsr_ctx;
+ struct bspp_parser_callbacks parser_callbacks;
+ struct bspp_stream_alloc_data str_alloc;
+ unsigned int sequ_hdr_id;
+ unsigned char *sequ_hdr_info;
+ unsigned char *secure_sequence_info;
+ unsigned char *pps_info;
+ unsigned char *secure_pps_info;
+ unsigned char *raw_data;
+ struct bspp_grp_bstr_ctx grp_bstr_ctx;
+ struct bspp_parse_ctx parse_ctx;
+ struct bspp_inter_pict_data inter_pict_data;
+ struct lst_t decoded_pictures_list;
+ /* Mutex for secure access */
+ struct mutex *bspp_mutex;
+ int intra_frame_closed_gop;
+ struct bspp_pict_ctx pict_ctx;
+ struct bspp_parse_state parse_state;
+};
+
+/*
+ * This structure contains the standard related parser functions.
+ * @brief BSPP Standard Related Functions
+ */
+struct bspp_parser_functions {
+ /* Pointer to standard-specific parser configuration function */
+ bspp_cb_set_parser_config set_parser_config;
+ /* Pointer to standard-specific unit type determining function */
+ bspp_cb_determine_unit_type determine_unit_type;
+};
+
+static struct bspp_parser_functions parser_fxns[VDEC_STD_MAX] = {
+ /* VDEC_STD_UNDEFINED */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_MPEG2 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_MPEG4 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_H263 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_H264 */
+ { .set_parser_config = bspp_h264_set_parser_config,
+ .determine_unit_type = bspp_h264_determine_unittype },
+ /* VDEC_STD_VC1 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_AVS */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_REAL */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_JPEG */
+#ifdef HAS_JPEG
+ { .set_parser_config = bspp_jpeg_setparser_config,
+ .determine_unit_type = bspp_jpeg_determine_unit_type },
+#else
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+#endif
+ /* VDEC_STD_VP6 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_VP8 */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_SORENSON */
+ { .set_parser_config = NULL, .determine_unit_type = NULL },
+ /* VDEC_STD_HEVC */
+ { .set_parser_config = bspp_hevc_set_parser_config,
+ .determine_unit_type = bspp_hevc_determine_unittype },
+};
+
+/*
+ * @Function bspp_get_pps_hdr
+ * @Description Obtains the most recent PPS header of a given Id.
+ */
+struct bspp_pps_info *bspp_get_pps_hdr(void *str_res_handle, unsigned int pps_id)
+{
+ struct bspp_stream_alloc_data *alloc_data =
+ (struct bspp_stream_alloc_data *)str_res_handle;
+
+ if (pps_id >= PPS_SLOTS || !alloc_data)
+ return NULL;
+
+ return lst_last(&alloc_data->pps_data_list[pps_id]);
+}
+
+/*
+ * @Function bspp_get_sequ_hdr
+ * @Description Obtains the most recent sequence header of a given Id.
+ */
+struct bspp_sequence_hdr_info *bspp_get_sequ_hdr(void *str_res_handle,
+ unsigned int sequ_id)
+{
+ struct bspp_stream_alloc_data *alloc_data =
+ (struct bspp_stream_alloc_data *)str_res_handle;
+
+ if (sequ_id >= SEQUENCE_SLOTS || !alloc_data)
+ return NULL;
+
+ return lst_last(&alloc_data->sequence_data_list[sequ_id]);
+}
+
+/*
+ * @Function bspp_free_bitstream_elem
+ * @Description Frees a bitstream chain element.
+ */
+static void bspp_free_bitstream_elem(struct bspp_bitstream_buffer *bstr_buf)
+{
+ memset(bstr_buf, 0, sizeof(struct bspp_bitstream_buffer));
+
+ kfree(bstr_buf);
+}
+
+/*
+ * @Function bspp_create_segment
+ * @Description Constructs a bitstream segment for the current unit and adds
+ * it to the list.
+ */
+static int bspp_create_segment(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+ struct bspp_bitstream_buffer *cur_buf)
+{
+ struct bspp_bitstr_seg *segment;
+ unsigned int result;
+
+ /*
+ * Only create a segment when data (not in a previous segment) has been
+ * parsed from the buffer.
+ */
+ if (cur_buf->bytes_read != grp_btsr_ctx->segment_offset) {
+ /* Take a segment structure from the pool of free segments */
+ segment = lst_removehead(grp_btsr_ctx->free_segments);
+ if (!segment) {
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto error;
+ }
+ memset(segment, 0, sizeof(struct bspp_bitstr_seg));
+
+ segment->bufmap_id = cur_buf->bufmap_id;
+ segment->data_size = (unsigned int)cur_buf->bytes_read
+ - grp_btsr_ctx->segment_offset;
+ segment->data_byte_offset = grp_btsr_ctx->segment_offset;
+
+ if (cur_buf->bytes_read == cur_buf->data_size) {
+ /* This is the last segment in the buffer. */
+ segment->bstr_seg_flag |= VDECDD_BSSEG_LASTINBUFF;
+ }
+
+ /*
+ * Next segment will start part way through the buffer
+ * (current read position).
+ */
+ grp_btsr_ctx->segment_offset = (unsigned int)cur_buf->bytes_read;
+
+ if (grp_btsr_ctx->insert_start_code) {
+ segment->bstr_seg_flag |= VDECDD_BSSEG_INSERT_STARTCODE;
+ segment->start_code_suffix = grp_btsr_ctx->start_code_suffix;
+ grp_btsr_ctx->insert_start_code = 0;
+ }
+
+ lst_add(grp_btsr_ctx->segment_list, segment);
+
+ /*
+ * If multiple segments correspond to the same (picture)
+ * stream-unit, update it only the first time
+ */
+ if (cur_buf->pict_tag_param && grp_btsr_ctx->pict_tag_param &&
+ (grp_btsr_ctx->segment_list ==
+ grp_btsr_ctx->pict_seg_list[0] ||
+ grp_btsr_ctx->segment_list ==
+ grp_btsr_ctx->pict_seg_list[1] ||
+ grp_btsr_ctx->segment_list ==
+ grp_btsr_ctx->pict_seg_list[2]))
+ *grp_btsr_ctx->pict_tag_param = cur_buf->pict_tag_param;
+ }
+
+ return IMG_SUCCESS;
+error:
+ return result;
+}
+
+/*
+ * @Function bspp_determine_unit_type
+ *
+ */
+static int bspp_determine_unit_type(enum vdec_vid_std vid_std,
+ unsigned char unit_type,
+ int disable_mvc,
+ enum bspp_unit_type *unit_type_enum)
+{
+ /* Determine the unit type from the NAL type. */
+ if (vid_std < VDEC_STD_MAX && parser_fxns[vid_std].determine_unit_type)
+ parser_fxns[vid_std].determine_unit_type(unit_type, disable_mvc, unit_type_enum);
+ else
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function bspp_shift_reg_cb
+ *
+ */
+static void bspp_shift_reg_cb(enum swsr_cbevent event,
+ struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+ unsigned char nal_type,
+ unsigned char **data_buffer,
+ unsigned long long *data_size)
+{
+ unsigned int result;
+
+ switch (event) {
+ case SWSR_EVENT_INPUT_BUFFER_START: {
+ struct bspp_bitstream_buffer *next_buf;
+
+ /* Take the next bitstream buffer for use in shift-register. */
+ next_buf = lst_removehead(&grp_btsr_ctx->buffer_chain);
+
+ if (next_buf && data_buffer && data_size) {
+ lst_add(&grp_btsr_ctx->in_flight_bufs, next_buf);
+
+ *data_buffer = next_buf->ddbuf_info.cpu_virt_addr;
+ *data_size = next_buf->data_size;
+
+ next_buf->bytes_read = 0;
+ } else {
+ goto error;
+ }
+ }
+ break;
+ case SWSR_EVENT_OUTPUT_BUFFER_END: {
+ struct bspp_bitstream_buffer *cur_buf;
+
+ cur_buf = lst_removehead(&grp_btsr_ctx->in_flight_bufs);
+
+ if (cur_buf) {
+ /*
+ * Indicate that the whole buffer content has been
+ * used.
+ */
+ cur_buf->bytes_read = cur_buf->data_size;
+ grp_btsr_ctx->total_bytes_read += (unsigned int)cur_buf->bytes_read;
+
+ /*
+ * Construct segment for current buffer and add to
+ * active list.
+ */
+ result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+ if (result != IMG_SUCCESS)
+ goto error;
+
+ /*
+ * Next segment will start at the beginning of the next
+ * buffer.
+ */
+ grp_btsr_ctx->segment_offset = 0;
+
+ /* Destroy the bitstream element. */
+ bspp_free_bitstream_elem(cur_buf);
+ } else {
+ goto error;
+ }
+ }
+ break;
+
+ case SWSR_EVENT_DELIMITER_NAL_TYPE:
+ /*
+ * Initialise the unit type with the last one seen (unclassified
+ * or unsupported types are not retained since they do not affect
+ * the segment lists).
+ */
+ grp_btsr_ctx->unit_type = grp_btsr_ctx->last_unit_type;
+
+ /*
+ * Determine the unit type without consuming any data (start
+ * code) from shift-register. Segments are created automatically
+ * when a new buffer is requested by the shift-register so the
+ * unit type must be known in order to switch over the segment
+ * list.
+ */
+ result = bspp_determine_unit_type(grp_btsr_ctx->vid_std, nal_type,
+ grp_btsr_ctx->disable_mvc,
+ &grp_btsr_ctx->unit_type);
+
+ /*
+ * Only look to change bitstream segment list when the unit type
+ * is different and the current unit contains data that could be
+ * placed in a new list.
+ */
+ if (grp_btsr_ctx->last_unit_type != grp_btsr_ctx->unit_type &&
+ grp_btsr_ctx->unit_type != BSPP_UNIT_UNSUPPORTED &&
+ grp_btsr_ctx->unit_type != BSPP_UNIT_UNCLASSIFIED) {
+ int prev_pict_data;
+ int curr_pict_data;
+
+ prev_pict_data = (grp_btsr_ctx->last_unit_type == BSPP_UNIT_PICTURE ||
+ grp_btsr_ctx->last_unit_type ==
+ BSPP_UNIT_SKIP_PICTURE) ? 1 : 0;
+
+ curr_pict_data = (grp_btsr_ctx->unit_type == BSPP_UNIT_PICTURE ||
+ grp_btsr_ctx->unit_type ==
+ BSPP_UNIT_SKIP_PICTURE) ? 1 : 0;
+
+ /*
+ * When switching between picture and non-picture
+ * units.
+ */
+ if ((prev_pict_data && !curr_pict_data) ||
+ (!prev_pict_data && curr_pict_data)) {
+ /*
+ * Only delimit unit change when we're not the
+ * first unit and we're not already in the last
+ * segment list.
+ */
+ if (grp_btsr_ctx->last_unit_type != BSPP_UNIT_NONE &&
+ grp_btsr_ctx->segment_list !=
+ grp_btsr_ctx->pict_seg_list[2]) {
+ struct bspp_bitstream_buffer *cur_buf =
+ lst_first(&grp_btsr_ctx->in_flight_bufs);
+ if (!cur_buf)
+ goto error;
+
+ /*
+ * Update the offset within current buf.
+ */
+ swsr_get_byte_offset_curbuf(grp_btsr_ctx->swsr_context,
+ &cur_buf->bytes_read);
+
+ /*
+ * Create the last segment of the
+ * previous type (which may split a
+ * buffer into two). If the unit is
+ * exactly at the start of a buffer this
+ * will not create a zero-byte segment.
+ */
+ result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+ if (result != IMG_SUCCESS)
+ goto error;
+ }
+
+ /* Point at the next segment list. */
+ if (grp_btsr_ctx->segment_list
+ == grp_btsr_ctx->pre_pict_seg_list[0]) {
+ grp_btsr_ctx->segment_list =
+ grp_btsr_ctx->pict_seg_list[0];
+ grp_btsr_ctx->pict_tag_param =
+ grp_btsr_ctx->pict_tag_param_array[0];
+ } else if (grp_btsr_ctx->segment_list
+ == grp_btsr_ctx->pict_seg_list[0])
+ grp_btsr_ctx->segment_list =
+ grp_btsr_ctx->pre_pict_seg_list[1];
+ else if (grp_btsr_ctx->segment_list
+ == grp_btsr_ctx->pre_pict_seg_list[1]) {
+ grp_btsr_ctx->segment_list =
+ grp_btsr_ctx->pict_seg_list[1];
+ grp_btsr_ctx->pict_tag_param =
+ grp_btsr_ctx->pict_tag_param_array[1];
+ } else if (grp_btsr_ctx->segment_list
+ == grp_btsr_ctx->pict_seg_list[1])
+ grp_btsr_ctx->segment_list =
+ grp_btsr_ctx->pre_pict_seg_list[2];
+ else if (grp_btsr_ctx->segment_list
+ == grp_btsr_ctx->pre_pict_seg_list[2]) {
+ grp_btsr_ctx->segment_list =
+ grp_btsr_ctx->pict_seg_list[2];
+ grp_btsr_ctx->pict_tag_param =
+ grp_btsr_ctx->pict_tag_param_array[2];
+ }
+ }
+
+ grp_btsr_ctx->last_unit_type = grp_btsr_ctx->unit_type;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+error:
+ return;
+}
+
+/*
+ * @Function bspp_exception_handler
+ *
+ */
+static void bspp_exception_handler(enum swsr_exception exception, void *parse_ctx_handle)
+{
+ struct bspp_parse_ctx *parse_ctx = (struct bspp_parse_ctx *)parse_ctx_handle;
+
+ /* Store the exception. */
+ parse_ctx->exception = exception;
+
+ switch (parse_ctx->exception) {
+ case SWSR_EXCEPT_NO_EXCEPTION:
+ break;
+ case SWSR_EXCEPT_ENCAPULATION_ERROR1:
+ break;
+ case SWSR_EXCEPT_ENCAPULATION_ERROR2:
+ break;
+ case SWSR_EXCEPT_ACCESS_INTO_SCP:
+ break;
+ case SWSR_EXCEPT_ACCESS_BEYOND_EOD:
+ break;
+ case SWSR_EXCEPT_EXPGOULOMB_ERROR:
+ break;
+ case SWSR_EXCEPT_WRONG_CODEWORD_ERROR:
+ break;
+ case SWSR_EXCEPT_NO_SCP:
+ break;
+ case SWSR_EXCEPT_INVALID_CONTEXT:
+ break;
+
+ default:
+ break;
+ }
+
+ /* Clear the exception. */
+ swsr_check_exception(parse_ctx->swsr_context);
+}
+
+/*
+ * @Function bspp_reset_sequence
+ *
+ */
+static void bspp_reset_sequence(struct bspp_str_context *str_ctx,
+ struct bspp_sequence_hdr_info *sequ_hdr_info)
+{
+ /* Temporarily store relevant sequence fields. */
+ struct bspp_ddbuf_array_info aux_fw_sequence = sequ_hdr_info->fw_sequence;
+ void *aux_secure_sequence_info_hndl = sequ_hdr_info->secure_sequence_info;
+
+ struct bspp_ddbuf_array_info *tmp = &sequ_hdr_info->fw_sequence;
+
+ /* Reset all related structures. */
+ memset(((unsigned char *)tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset), 0x00,
+ sequ_hdr_info->fw_sequence.buf_element_size);
+
+ if (str_ctx->parser_callbacks.reset_data_cb)
+ str_ctx->parser_callbacks.reset_data_cb(BSPP_UNIT_SEQUENCE,
+ sequ_hdr_info->secure_sequence_info);
+ else
+ memset(aux_secure_sequence_info_hndl, 0, str_ctx->vid_std_features.seq_size);
+
+ memset(sequ_hdr_info, 0, sizeof(*sequ_hdr_info));
+
+ /* Restore relevant sequence fields. */
+ sequ_hdr_info->fw_sequence = aux_fw_sequence;
+ sequ_hdr_info->sequ_hdr_info.bufmap_id = aux_fw_sequence.ddbuf_info.bufmap_id;
+ sequ_hdr_info->sequ_hdr_info.buf_offset = aux_fw_sequence.buf_offset;
+ sequ_hdr_info->secure_sequence_info = aux_secure_sequence_info_hndl;
+}
+
+/*
+ * @Function bspp_reset_pps
+ *
+ */
+static void bspp_reset_pps(struct bspp_str_context *str_ctx,
+ struct bspp_pps_info *pps_info)
+{
+ /* Temporarily store relevant PPS fields. */
+ struct bspp_ddbuf_array_info aux_fw_pps = pps_info->fw_pps;
+ void *aux_secure_pps_info_hndl = pps_info->secure_pps_info;
+ struct bspp_ddbuf_array_info *tmp = &pps_info->fw_pps;
+
+ /* Reset all related structures. */
+ memset(((unsigned char *)tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset), 0x00,
+ pps_info->fw_pps.buf_element_size);
+
+ /* Reset the parser specific data. */
+ if (str_ctx->parser_callbacks.reset_data_cb)
+ str_ctx->parser_callbacks.reset_data_cb(BSPP_UNIT_PPS, pps_info->secure_pps_info);
+
+ /* Reset the common data. */
+ memset(pps_info, 0, sizeof(*pps_info));
+
+ /* Restore relevant PPS fields. */
+ pps_info->fw_pps = aux_fw_pps;
+ pps_info->bufmap_id = aux_fw_pps.ddbuf_info.bufmap_id;
+ pps_info->buf_offset = aux_fw_pps.buf_offset;
+ pps_info->secure_pps_info = aux_secure_pps_info_hndl;
+}
+
+/*
+ * @Function bspp_stream_submit_buffer
+ *
+ */
+int bspp_stream_submit_buffer(void *str_context_handle,
+ const struct bspp_ddbuf_info *ddbuf_info,
+ unsigned int bufmap_id,
+ unsigned int data_size,
+ void *pict_tag_param,
+ enum vdec_bstr_element_type bstr_element_type)
+{
+ struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+ struct bspp_bitstream_buffer *bstr_buf;
+ unsigned int result = IMG_SUCCESS;
+
+ if (!str_context_handle) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ if (bstr_element_type == VDEC_BSTRELEMENT_UNDEFINED ||
+ bstr_element_type >= VDEC_BSTRELEMENT_MAX) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /*
+ * Check that the new bitstream buffer is compatible with those
+ * before.
+ */
+ bstr_buf = lst_last(&str_ctx->grp_bstr_ctx.buffer_chain);
+ if (bstr_buf && bstr_buf->bstr_element_type != bstr_element_type) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ /* Allocate a zeroed bitstream buffer chain element structure */
+ bstr_buf = kzalloc(sizeof(*bstr_buf), GFP_KERNEL);
+ if (!bstr_buf) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ /* Queue buffer in a chain since units might span buffers. */
+ if (ddbuf_info)
+ bstr_buf->ddbuf_info = *ddbuf_info;
+
+ bstr_buf->data_size = data_size;
+ bstr_buf->bstr_element_type = bstr_element_type;
+ bstr_buf->pict_tag_param = pict_tag_param;
+ bstr_buf->bufmap_id = bufmap_id;
+ lst_add(&str_ctx->grp_bstr_ctx.buffer_chain, bstr_buf);
+
+ str_ctx->grp_bstr_ctx.total_data_size += data_size;
+
+error:
+ return result;
+}
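+
+/*
+ * Typical usage (an illustrative sketch only; the argument values are
+ * placeholders): each bitstream buffer belonging to a picture is queued
+ * with bspp_stream_submit_buffer(), and once the final buffer has been
+ * queued the whole chain is parsed in one go with end_of_pic set:
+ *
+ * bspp_stream_submit_buffer(str_ctx, &ddbuf_info, bufmap_id,
+ * data_size, pict_tag, element_type);
+ * ...
+ * bspp_stream_preparse_buffers(str_ctx, NULL, 0, &segments,
+ * &preparsed_data, 1);
+ */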
+
+/*
+ * @Function bspp_obtain_sequence_hdr
+ *
+ */
+static struct bspp_sequence_hdr_info *bspp_obtain_sequence_hdr(struct bspp_str_context *str_ctx)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+
+ /*
+ * Obtain any partially filled sequence data else provide a new one
+ * (always new for H.264 and HEVC)
+ */
+ sequ_hdr_info = lst_last(&str_alloc->sequence_data_list[BSPP_DEFAULT_SEQUENCE_ID]);
+ if (!sequ_hdr_info || sequ_hdr_info->ref_count > 0 || str_ctx->vid_std == VDEC_STD_H264 ||
+ str_ctx->vid_std == VDEC_STD_HEVC) {
+ /* Get Sequence resource. */
+ sequ_hdr_info = lst_removehead(&str_alloc->available_sequence_list);
+ if (sequ_hdr_info) {
+ bspp_reset_sequence(str_ctx, sequ_hdr_info);
+ sequ_hdr_info->sequ_hdr_info.sequ_hdr_id = BSPP_INVALID;
+ }
+ }
+
+ return sequ_hdr_info;
+}
+
+/*
+ * @Function bspp_submit_picture_decoded
+ *
+ */
+int bspp_submit_picture_decoded(void *str_context_handle,
+ struct bspp_picture_decoded *picture_decoded)
+{
+ struct bspp_picture_decoded *picture_decoded_elem;
+ struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+
+ /* Validate input arguments. */
+ if (!str_context_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ picture_decoded_elem = kmalloc(sizeof(*picture_decoded_elem), GFP_KERNEL);
+ if (!picture_decoded_elem)
+ return IMG_ERROR_MALLOC_FAILED;
+
+ *picture_decoded_elem = *picture_decoded;
+
+ /* Lock access to the list for adding a picture - HIGH PRIORITY */
+ mutex_lock_nested(str_ctx->bspp_mutex, SUBCLASS_BSPP);
+
+ lst_add(&str_ctx->decoded_pictures_list, picture_decoded_elem);
+
+ /* Unlock access to the list for adding a picture - HIGH PRIORITY */
+ mutex_unlock(str_ctx->bspp_mutex);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function bspp_check_and_detach_pps_info
+ *
+ */
+static void bspp_check_and_detach_pps_info(struct bspp_stream_alloc_data *str_alloc,
+ unsigned int pps_id)
+{
+ if (pps_id != BSPP_INVALID) {
+ struct bspp_pps_info *pps_info = lst_first(&str_alloc->pps_data_list[pps_id]);
+
+ if (!pps_info) /* Invalid id */
+ return;
+
+ pps_info->ref_count--;
+ /* If nothing references it any more */
+ if (pps_info->ref_count == 0) {
+ struct bspp_pps_info *next_pps_info = lst_next(pps_info);
+
+ /*
+ * If it is not the last sequence in the slot list
+ * remove it and return it to the pool-list
+ */
+ if (next_pps_info) {
+ lst_remove(&str_alloc->pps_data_list[pps_id], pps_info);
+ lst_addhead(&str_alloc->available_ppss_list, pps_info);
+ }
+ }
+ }
+}
+
+/*
+ * @Function bspp_picture_decoded
+ *
+ */
+static int bspp_picture_decoded(struct bspp_str_context *str_ctx,
+ struct bspp_picture_decoded *picture_decoded)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+ /* Manage Sequence */
+ if (picture_decoded->sequ_hdr_id != BSPP_INVALID) {
+ struct bspp_sequence_hdr_info *seq =
+ lst_first(&str_alloc->sequence_data_list[picture_decoded->sequ_hdr_id]);
+
+ if (!seq)
+ return IMG_ERROR_INVALID_ID;
+
+ if (picture_decoded->not_decoded) {
+ /* Release sequence data. */
+ if (str_ctx->parser_callbacks.release_data_cb)
+ str_ctx->parser_callbacks.release_data_cb((void *)str_alloc,
+ BSPP_UNIT_SEQUENCE, seq->secure_sequence_info);
+ }
+
+ seq->ref_count--;
+ /* If nothing references it any more */
+ if (seq->ref_count == 0) {
+ struct bspp_sequence_hdr_info *next_sequ_hdr_info = lst_next(seq);
+
+ /*
+ * If it is not the last sequence in the slot list
+ * remove it and return it to the pool-list
+ */
+ if (next_sequ_hdr_info) {
+ lst_remove(&str_alloc->sequence_data_list
+ [picture_decoded->sequ_hdr_id], seq);
+ /* Release sequence data. */
+ if (str_ctx->parser_callbacks.release_data_cb)
+ str_ctx->parser_callbacks.release_data_cb((void *)str_alloc,
+ BSPP_UNIT_SEQUENCE, seq->secure_sequence_info);
+
+ lst_addhead(&str_alloc->available_sequence_list, seq);
+ }
+ }
+ }
+
+ /*
+ * Expect at least one valid PPS for H.264 and always invalid for all
+ * others
+ */
+ bspp_check_and_detach_pps_info(str_alloc, picture_decoded->pps_id);
+ bspp_check_and_detach_pps_info(str_alloc, picture_decoded->second_pps_id);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function bspp_service_pictures_decoded
+ *
+ */
+static int bspp_service_pictures_decoded(struct bspp_str_context *str_ctx)
+{
+ struct bspp_picture_decoded *picture_decoded;
+
+ while (1) {
+ /*
+ * Lock access to the list for removing a picture -
+ * LOW PRIORITY
+ */
+ mutex_lock_nested(str_ctx->bspp_mutex, SUBCLASS_BSPP);
+
+ picture_decoded = lst_removehead(&str_ctx->decoded_pictures_list);
+
+ /*
+ * Unlock access to the list for removing a picture -
+ * LOW PRIORITY
+ */
+ mutex_unlock(str_ctx->bspp_mutex);
+
+ if (!picture_decoded)
+ break;
+
+ bspp_picture_decoded(str_ctx, picture_decoded);
+ kfree(picture_decoded);
+ }
+
+ return IMG_SUCCESS;
+}
+
+static void bspp_remove_unused_vps(struct bspp_str_context *str_ctx, unsigned int vps_id)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+ struct bspp_vps_info *temp_vps_info = NULL;
+ struct bspp_vps_info *next_temp_vps_info = NULL;
+
+ /*
+ * Check the whole VPS slot list for any unused VPSs
+ * BEFORE ADDING THE NEW ONE; if found, remove them
+ */
+ next_temp_vps_info = lst_first(&str_alloc->vps_data_list[vps_id]);
+ while (next_temp_vps_info) {
+ /* Set Temp, it is the one which we will potentially remove */
+ temp_vps_info = next_temp_vps_info;
+ /*
+ * Set Next Temp, it is the one for the next iteration
+ * (we cannot ask for next after removing it)
+ */
+ next_temp_vps_info = lst_next(temp_vps_info);
+ /* If it is not used remove it */
+ if (temp_vps_info->ref_count == 0 && next_temp_vps_info) {
+ /* Return resource to the available pool */
+ lst_remove(&str_alloc->vps_data_list[vps_id], temp_vps_info);
+ lst_addhead(&str_alloc->available_vps_list, temp_vps_info);
+ }
+ }
+}
+
+static void bspp_remove_unused_pps(struct bspp_str_context *str_ctx, unsigned int pps_id)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+ struct bspp_pps_info *temp_pps_info = NULL;
+ struct bspp_pps_info *next_temp_pps_info = NULL;
+
+ /*
+ * Check the whole PPS slot list for any unused PPSs BEFORE ADDING
+ * THE NEW ONE, if found remove them
+ */
+ next_temp_pps_info = lst_first(&str_alloc->pps_data_list[pps_id]);
+ while (next_temp_pps_info) {
+ /* Set Temp, it is the one which we will potentially remove */
+ temp_pps_info = next_temp_pps_info;
+ /*
+ * Set Next Temp, it is the one for the next iteration
+ * (we cannot ask for next after removing it)
+ */
+ next_temp_pps_info = lst_next(temp_pps_info);
+ /* If it is not used remove it */
+ if (temp_pps_info->ref_count == 0 && next_temp_pps_info) {
+ /* Return resource to the available pool */
+ lst_remove(&str_alloc->pps_data_list[pps_id], temp_pps_info);
+ lst_addhead(&str_alloc->available_ppss_list, temp_pps_info);
+ }
+ }
+}
+
+static void bspp_remove_unused_sequence(struct bspp_str_context *str_ctx, unsigned int sps_id)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+ struct bspp_sequence_hdr_info *seq = NULL;
+ struct bspp_sequence_hdr_info *next_seq = NULL;
+
+ /*
+ * Check the whole sequence slot list for any unused sequences,
+ * if found remove them
+ */
+ next_seq = lst_first(&str_alloc->sequence_data_list[sps_id]);
+ while (next_seq) {
+ /* Set Temp, it is the one which we will potentially remove */
+ seq = next_seq;
+ /*
+ * Set Next Temp, it is the one for the next iteration (we
+ * cannot ask for next after removing it)
+ */
+ next_seq = lst_next(seq);
+
+ /*
+ * If the head is no longer used and there is something after,
+ * remove it
+ */
+ if (seq->ref_count == 0 && next_seq) {
+ /* Return resource to the pool-list */
+ lst_remove(&str_alloc->sequence_data_list[sps_id], seq);
+ if (str_ctx->parser_callbacks.release_data_cb) {
+ str_ctx->parser_callbacks.release_data_cb
+ ((void *)str_alloc,
+ BSPP_UNIT_SEQUENCE,
+ seq->secure_sequence_info);
+ }
+ lst_addhead(&str_alloc->available_sequence_list, seq);
+ }
+ }
+}
+
+/*
+ * @Function bspp_return_or_store_sequence_hdr
+ *
+ */
+static int bspp_return_or_store_sequence_hdr(struct bspp_str_context *str_ctx,
+ enum bspp_error_type parse_error,
+ struct bspp_sequence_hdr_info *sequ_hdr_info)
+{
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+ struct bspp_sequence_hdr_info *prev_sequ_hdr_info;
+
+ if (((parse_error & BSPP_ERROR_UNRECOVERABLE) || (parse_error & BSPP_ERROR_UNSUPPORTED)) &&
+ sequ_hdr_info->sequ_hdr_info.sequ_hdr_id != BSPP_INVALID) {
+ prev_sequ_hdr_info =
+ lst_last(&str_alloc->sequence_data_list
+ [sequ_hdr_info->sequ_hdr_info.sequ_hdr_id]);
+
+ /* check if it's not the same pointer */
+ if (prev_sequ_hdr_info && prev_sequ_hdr_info != sequ_hdr_info) {
+ /*
+ * Throw away corrupted sequence header if a previous "good" one exists.
+ */
+ sequ_hdr_info->sequ_hdr_info.sequ_hdr_id = BSPP_INVALID;
+ }
+ }
+
+ /* Store or return Sequence resource. */
+ if (sequ_hdr_info->sequ_hdr_info.sequ_hdr_id != BSPP_INVALID) {
+ /* Only add when not already in list. */
+ if (sequ_hdr_info != lst_last(&str_alloc->sequence_data_list
+ [sequ_hdr_info->sequ_hdr_info.sequ_hdr_id])) {
+ /*
+ * Add new sequence header (not already in list) to end
+ * of the slot-list.
+ */
+ lst_add(&str_alloc->sequence_data_list
+ [sequ_hdr_info->sequ_hdr_info.sequ_hdr_id], sequ_hdr_info);
+ }
+
+ bspp_remove_unused_sequence(str_ctx, sequ_hdr_info->sequ_hdr_info.sequ_hdr_id);
+ } else {
+ /*
+ * if unit was not a sequence info, add resource to the
+ * pool-list
+ */
+ lst_addhead(&str_alloc->available_sequence_list, sequ_hdr_info);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function bspp_get_resource
+ *
+ */
+static int bspp_get_resource(struct bspp_str_context *str_ctx,
+ struct bspp_pict_hdr_info *pict_hdr_info,
+ struct bspp_unit_data *unit_data)
+{
+ int result = IMG_SUCCESS;
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+ switch (unit_data->unit_type) {
+ case BSPP_UNIT_VPS:
+ /* Get VPS resource (HEVC only). */
+ if (unit_data->vid_std != VDEC_STD_HEVC)
+ break;
+ unit_data->out.vps_info = lst_removehead(&str_alloc->available_vps_list);
+ if (!unit_data->out.vps_info) {
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ } else {
+ unit_data->out.vps_info->vps_id = BSPP_INVALID;
+ unit_data->out.vps_info->ref_count = 0;
+ }
+ break;
+ case BSPP_UNIT_SEQUENCE:
+ unit_data->out.sequ_hdr_info = bspp_obtain_sequence_hdr(str_ctx);
+ if (!unit_data->out.sequ_hdr_info)
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+ break;
+
+ case BSPP_UNIT_PPS:
+ /* Get PPS resource (H.264 only). */
+ unit_data->out.pps_info = lst_removehead(&str_alloc->available_ppss_list);
+ /* allocate and return extra resources */
+ if (!unit_data->out.pps_info) {
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ } else {
+ bspp_reset_pps(str_ctx, unit_data->out.pps_info);
+ unit_data->out.pps_info->pps_id = BSPP_INVALID;
+ }
+ break;
+
+ case BSPP_UNIT_PICTURE:
+ case BSPP_UNIT_SKIP_PICTURE:
+ unit_data->out.pict_hdr_info = pict_hdr_info;
+#ifdef HAS_JPEG
+ if (unit_data->vid_std == VDEC_STD_JPEG) {
+ unit_data->impl_sequ_hdr_info = bspp_obtain_sequence_hdr(str_ctx);
+ if (!unit_data->impl_sequ_hdr_info)
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+#endif
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
+/*
+ * @Function bspp_file_resource
+ * @Description Stores or returns all resources provided to parse unit.
+ */
+static int bspp_file_resource(struct bspp_str_context *str_ctx, struct bspp_unit_data *unit_data)
+{
+ unsigned int result = IMG_SUCCESS;
+ struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+ switch (unit_data->unit_type) {
+ case BSPP_UNIT_VPS:
+ /* Store or return VPS resource (HEVC only) */
+ if (unit_data->vid_std != VDEC_STD_HEVC)
+ break;
+
+ if (unit_data->out.vps_info->vps_id != BSPP_INVALID) {
+ lst_add(&str_alloc->vps_data_list[unit_data->out.vps_info->vps_id],
+ unit_data->out.vps_info);
+
+ bspp_remove_unused_vps(str_ctx, unit_data->out.vps_info->vps_id);
+ } else {
+ lst_addhead(&str_alloc->available_vps_list, unit_data->out.vps_info);
+ }
+ break;
+ case BSPP_UNIT_SEQUENCE:
+ result = bspp_return_or_store_sequence_hdr(str_ctx, unit_data->parse_error,
+ unit_data->out.sequ_hdr_info);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ break;
+
+ case BSPP_UNIT_PPS:
+ /* Store or return PPS resource (H.264 only). */
+ if (unit_data->out.pps_info->pps_id != BSPP_INVALID) {
+ /*
+ * if unit was a PPS info, add resource to the slot-list
+ * AFTER REMOVING THE UNUSED ONES otherwise this will be
+ * removed along the rest unless special provision for
+ * last is made
+ */
+ lst_add(&str_alloc->pps_data_list[unit_data->out.pps_info->pps_id],
+ unit_data->out.pps_info);
+
+ bspp_remove_unused_pps(str_ctx, unit_data->out.pps_info->pps_id);
+ } else {
+ /*
+ * if unit was not a PPS info, add resource to the
+ * pool-list
+ */
+ lst_addhead(&str_alloc->available_ppss_list, unit_data->out.pps_info);
+ }
+ break;
+
+ case BSPP_UNIT_PICTURE:
+ case BSPP_UNIT_SKIP_PICTURE:
+#ifdef HAS_JPEG
+ if (unit_data->vid_std == VDEC_STD_JPEG) {
+ result = bspp_return_or_store_sequence_hdr(str_ctx,
+ unit_data->parse_error,
+ unit_data->impl_sequ_hdr_info);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ }
+#endif
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
+/*
+ * @Function bspp_process_unit
+ *
+ */
+static int bspp_process_unit(struct bspp_str_context *str_ctx,
+ unsigned int size_delim_bits,
+ struct bspp_pict_ctx *pict_ctx,
+ struct bspp_parse_state *parse_state)
+{
+ struct bspp_unit_data unit_data;
+ unsigned long long unit_size = 0; /* Unit size (in bytes, size delimited only). */
+ unsigned int result;
+ unsigned char vidx = str_ctx->grp_bstr_ctx.current_view_idx;
+ struct bspp_pict_hdr_info *curr_pict_hdr_info;
+
+ /*
+ * Set up default unit data. For size-delimited streams, unit_size is
+ * populated during the call to swsr_consume_delim() below.
+ */
+ memset(&unit_data, 0, sizeof(struct bspp_unit_data));
+
+ if (str_ctx->grp_bstr_ctx.delim_present) {
+ /* Consume delimiter and catch any exceptions. */
+ /*
+ * Consume the bitstream unit delimiter (size or
+ * start code prefix).
+ * When size-delimited the unit size is also returned
+ * so that the next unit can be found.
+ */
+ result = swsr_consume_delim(str_ctx->swsr_ctx.swsr_context,
+ str_ctx->swsr_ctx.emulation_prevention,
+ size_delim_bits, &unit_size);
+ if (result != IMG_SUCCESS)
+ goto error;
+ }
+
+ unit_data.unit_type = str_ctx->grp_bstr_ctx.unit_type;
+ unit_data.vid_std = str_ctx->vid_std;
+ unit_data.delim_present = str_ctx->grp_bstr_ctx.delim_present;
+ unit_data.codec_config = &str_ctx->codec_config;
+ unit_data.parse_state = parse_state;
+ unit_data.pict_sequ_hdr_id = str_ctx->sequ_hdr_id;
+ unit_data.str_res_handle = &str_ctx->str_alloc;
+ unit_data.unit_data_size = str_ctx->grp_bstr_ctx.total_data_size;
+ unit_data.intra_frm_as_closed_gop = str_ctx->intra_frame_closed_gop;
+
+ /* point to picture headers, check boundaries */
+ curr_pict_hdr_info = vidx < VDEC_H264_MVC_MAX_VIEWS ?
+ &pict_ctx->pict_hdr_info[vidx] : NULL;
+ unit_data.parse_state->next_pict_hdr_info =
+ vidx + 1 < VDEC_H264_MVC_MAX_VIEWS ?
+ &pict_ctx->pict_hdr_info[vidx + 1] : NULL;
+ unit_data.parse_state->is_prefix = 0;
+
+ /* Obtain output data containers. */
+ result = bspp_get_resource(str_ctx, curr_pict_hdr_info, &unit_data);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Process Unit and catch any exceptions. */
+ /*
+ * Call the standard-specific function to parse the bitstream
+ * unit.
+ */
+ result = str_ctx->parser_callbacks.parse_unit_cb(str_ctx->swsr_ctx.swsr_context,
+ &unit_data);
+ if (result != IMG_SUCCESS) {
+ pr_err("Failed to process unit, error = %d", unit_data.parse_error);
+ goto error;
+ }
+
+ if (unit_data.parse_error != BSPP_ERROR_NONE)
+ pr_err("Issues found while processing unit, error = %d\n", unit_data.parse_error);
+
+ /* Store or return resource used for parsing unit. */
+ result = bspp_file_resource(str_ctx, &unit_data);
+
+ if (!str_ctx->inter_pict_data.seen_closed_gop &&
+ str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_PICTURE &&
+ unit_data.slice &&
+ (unit_data.out.pict_hdr_info &&
+ unit_data.out.pict_hdr_info->intra_coded) &&
+ str_ctx->vid_std != VDEC_STD_H264)
+ unit_data.new_closed_gop = 1;
+
+ if (unit_data.new_closed_gop) {
+ str_ctx->inter_pict_data.seen_closed_gop = 1;
+ str_ctx->inter_pict_data.new_closed_gop = 1;
+ }
+
+ /*
+ * Post-process unit (use local context in case the parse
+ * function tried to change the unit type).
+ */
+ if (str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_PICTURE ||
+ str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_SKIP_PICTURE) {
+ if (str_ctx->inter_pict_data.new_closed_gop) {
+ pict_ctx->closed_gop = 1;
+ str_ctx->inter_pict_data.new_closed_gop = 0;
+ }
+
+ if (unit_data.ext_slice && str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet &&
+ unit_data.pict_sequ_hdr_id != BSPP_INVALID) {
+ unsigned int id = unit_data.pict_sequ_hdr_id;
+
+ str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet = 0;
+ pict_ctx->ext_sequ_hdr_info =
+ lst_last(&str_ctx->str_alloc.sequence_data_list[id]);
+ }
+
+ if (unit_data.slice) {
+ if (!curr_pict_hdr_info) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+ if (str_ctx->grp_bstr_ctx.not_pic_unit_yet &&
+ unit_data.pict_sequ_hdr_id != BSPP_INVALID) {
+ str_ctx->grp_bstr_ctx.not_pic_unit_yet = 0;
+
+ /*
+ * downstream processing depends upon the picture header being
+ * populated (in addition to slice data).
+ */
+ pict_ctx->present = 1;
+
+ /*
+ * Update the picture context from the last unit parsed.
+ * This context must be stored since a non-picture unit may follow.
+ * Obtain current instance of sequence data for given ID.
+ */
+ if (!pict_ctx->sequ_hdr_info) {
+ unsigned int id = unit_data.pict_sequ_hdr_id;
+
+ pict_ctx->sequ_hdr_info =
+ lst_last(&str_ctx->str_alloc.sequence_data_list[id]);
+
+ /* Do the sequence flagging/reference-counting */
+ pict_ctx->sequ_hdr_info->ref_count++;
+ }
+
+ /* Override the field here. */
+ if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_NONE) {
+ if (str_ctx->grp_bstr_ctx.unit_type ==
+ BSPP_UNIT_SKIP_PICTURE) {
+ /* VDECFW_SKIPPED_PICTURE; */
+ curr_pict_hdr_info->parser_mode =
+ VDECFW_SKIPPED_PICTURE;
+ curr_pict_hdr_info->pic_data_size = 0;
+ } else {
+ /* VDECFW_SIZE_SIDEBAND; */
+ curr_pict_hdr_info->parser_mode =
+ VDECFW_SIZE_SIDEBAND;
+ curr_pict_hdr_info->pic_data_size =
+ str_ctx->grp_bstr_ctx.total_data_size;
+ }
+ } else if (str_ctx->swsr_ctx.sr_config.delim_type ==
+ SWSR_DELIM_SIZE) {
+ if (str_ctx->swsr_ctx.sr_config.delim_length <= 8)
+ /* VDECFW_SIZE_DELIMITED_1_ONLY; */
+ curr_pict_hdr_info->parser_mode =
+ VDECFW_SIZE_DELIMITED_1_ONLY;
+ else if (str_ctx->swsr_ctx.sr_config.delim_length <= 16)
+ /* VDECFW_SIZE_DELIMITED_2_ONLY; */
+ curr_pict_hdr_info->parser_mode =
+ VDECFW_SIZE_DELIMITED_2_ONLY;
+ else if (str_ctx->swsr_ctx.sr_config.delim_length <= 32)
+ /* VDECFW_SIZE_DELIMITED_4_ONLY; */
+ curr_pict_hdr_info->parser_mode =
+ VDECFW_SIZE_DELIMITED_4_ONLY;
+
+ curr_pict_hdr_info->pic_data_size +=
+ ((unsigned int)unit_size
+ + (size_delim_bits / 8));
+ } else if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_SCP)
+ /* VDECFW_SCP_ONLY; */
+ curr_pict_hdr_info->parser_mode = VDECFW_SCP_ONLY;
+ }
+
+ /*
+ * for MVC, the Slice Extension should also have the
+ * same ParserMode as the Base view.
+ */
+ if (unit_data.parse_state->next_pict_hdr_info) {
+ unit_data.parse_state->next_pict_hdr_info->parser_mode =
+ curr_pict_hdr_info->parser_mode;
+ }
+
+ if (unit_data.parse_error & BSPP_ERROR_UNSUPPORTED) {
+ pict_ctx->invalid = 1;
+ pict_ctx->unsupported = 1;
+ } else if (!str_ctx->full_scan) {
+ /*
+ * Only parse up to and including the first
+ * valid video slice unless full scanning.
+ */
+ pict_ctx->finished = 1;
+ }
+ }
+ }
+
+ if (unit_data.extracted_all_data) {
+ enum swsr_found found;
+
+ swsr_byte_align(str_ctx->swsr_ctx.swsr_context);
+
+ found = swsr_check_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+ if (found != SWSR_FOUND_DELIM && found != SWSR_FOUND_EOD) {
+ /*
+ * Should already be at the next delimiter or EOD.
+ * Any bits left at the end of the unit could indicate
+ * corrupted syntax or erroneous parsing.
+ */
+ }
+ }
+
+ return IMG_SUCCESS;
+
+error:
+ if (unit_data.unit_type == BSPP_UNIT_PICTURE ||
+ unit_data.unit_type == BSPP_UNIT_SKIP_PICTURE)
+ pict_ctx->invalid = 1;
+
+ /*
+ * Tidy-up resources.
+ * Store or return resource used for parsing unit.
+ */
+ bspp_file_resource(str_ctx, &unit_data);
+
+ return result;
+}
+
+/*
+ * @Function bspp_terminate_buffer
+ *
+ */
+static int bspp_terminate_buffer(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+ struct bspp_bitstream_buffer *buf)
+{
+ int result = -1;
+
+ /* Indicate that all the data in buffer should be added to segment. */
+ buf->bytes_read = buf->data_size;
+
+ result = bspp_create_segment(grp_btsr_ctx, buf);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Next segment will start at the beginning of the next buffer. */
+ grp_btsr_ctx->segment_offset = 0;
+
+ bspp_free_bitstream_elem(buf);
+
+ return result;
+}
+
+/*
+ * @Function bspp_jump_to_next_view
+ *
+ */
+static int bspp_jump_to_next_view(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+ struct bspp_preparsed_data *preparsed_data,
+ struct bspp_parse_state *parse_state)
+{
+ struct bspp_bitstream_buffer *cur_buf;
+ int result;
+ unsigned int i;
+ unsigned char vidx;
+
+ if (!grp_btsr_ctx || !parse_state || !preparsed_data) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ vidx = grp_btsr_ctx->current_view_idx;
+
+ if (vidx >= VDEC_H264_MVC_MAX_VIEWS) {
+ result = IMG_ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+
+ /* get current buffer */
+ cur_buf = (struct bspp_bitstream_buffer *)lst_first(&grp_btsr_ctx->in_flight_bufs);
+ if (!cur_buf) {
+ result = IMG_ERROR_CANCELLED;
+ goto error;
+ }
+
+ if (cur_buf->bufmap_id != parse_state->prev_buf_map_id) {
+ /*
+ * If we moved to the next buffer while parsing the slice
+ * header of the new view we have to reduce the size of
+ * the last segment up to the beginning of the new view slice
+ * and create a new segment from that point up to the end of
+ * the buffer. The new segment should belong to the new view.
+ * THIS ONLY WORKS IF THE SLICE HEADER DOES NOT SPAN MORE THAN
+ * TWO BUFFERS. If we want to support the case where the slice
+ * header of the new view spans multiple buffers, we either have
+ * to remove here all the segments up to the point where we find
+ * the buffer we are looking for, then adjust the size of this
+ * segment and add the segments we removed to the next view
+ * list, or we can implement a mechanism like the one that peeks
+ * for the NAL unit type and delimits the next view segment
+ * before parsing the first slice of the view.
+ */
+ struct bspp_bitstr_seg *segment;
+
+ segment = lst_last(grp_btsr_ctx->segment_list);
+ if (segment && segment->bufmap_id == parse_state->prev_buf_map_id) {
+ struct bspp_bitstream_buffer prev_buf;
+
+ segment->data_size -= parse_state->prev_buf_data_size
+ - parse_state->prev_byte_offset_buf;
+ segment->bstr_seg_flag &= ~VDECDD_BSSEG_LASTINBUFF;
+
+ /*
+ * Change the segment_offset value to the value it
+ * would have had if we had delimited the segment
+ * correctly beforehand.
+ */
+ grp_btsr_ctx->segment_offset = parse_state->prev_byte_offset_buf;
+
+ /* set lists of segments to new view... */
+ for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+ grp_btsr_ctx->pre_pict_seg_list[i] =
+ &preparsed_data->ext_pictures_data[vidx].pre_pict_seg_list
+ [i];
+ grp_btsr_ctx->pict_seg_list[i] =
+ &preparsed_data->ext_pictures_data[vidx].pict_seg_list[i];
+
+ lst_init(grp_btsr_ctx->pre_pict_seg_list[i]);
+ lst_init(grp_btsr_ctx->pict_seg_list[i]);
+ }
+ /* and current segment list */
+ grp_btsr_ctx->segment_list = grp_btsr_ctx->pict_seg_list[0];
+
+ memset(&prev_buf, 0, sizeof(struct bspp_bitstream_buffer));
+ prev_buf.bufmap_id = segment->bufmap_id;
+ prev_buf.data_size = parse_state->prev_buf_data_size;
+ prev_buf.bytes_read = prev_buf.data_size;
+
+ /* Create the segment for the first part of the next view */
+ result = bspp_create_segment(grp_btsr_ctx, &prev_buf);
+ if (result != IMG_SUCCESS)
+ goto error;
+ } else {
+ result = IMG_ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+ } else {
+ /*
+ * The data just parsed belongs to the new view, so use the
+ * previous byte offset.
+ */
+ cur_buf->bytes_read = parse_state->prev_byte_offset_buf;
+
+ /* Create the segment for previous view */
+ result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+ if (result != IMG_SUCCESS)
+ goto error;
+
+ /* set lists of segments to new view */
+ for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+ grp_btsr_ctx->pre_pict_seg_list[i] =
+ &preparsed_data->ext_pictures_data[vidx].pre_pict_seg_list[i];
+ grp_btsr_ctx->pict_seg_list[i] =
+ &preparsed_data->ext_pictures_data[vidx].pict_seg_list[i];
+
+ lst_init(grp_btsr_ctx->pre_pict_seg_list[i]);
+ lst_init(grp_btsr_ctx->pict_seg_list[i]);
+ }
+ /* and current segment list */
+ grp_btsr_ctx->segment_list = grp_btsr_ctx->pict_seg_list[0];
+ }
+
+ /* update prefix flag */
+ preparsed_data->ext_pictures_data[vidx].is_prefix = parse_state->is_prefix;
+ /* and view index */
+ grp_btsr_ctx->current_view_idx++;
+
+ /* set number of extended pictures */
+ preparsed_data->num_ext_pictures = grp_btsr_ctx->current_view_idx;
+
+error:
+ return result;
+}
+
+static void bspp_reset_pict_state(struct bspp_str_context *str_ctx, struct bspp_pict_ctx *pict_ctx,
+ struct bspp_parse_state *parse_state)
+{
+ memset(pict_ctx, 0, sizeof(struct bspp_pict_ctx));
+ memset(parse_state, 0, sizeof(struct bspp_parse_state));
+
+ /* Setup group buffer processing state. */
+ parse_state->inter_pict_ctx = &str_ctx->inter_pict_data;
+ parse_state->prev_bottom_pic_flag = (unsigned char)BSPP_INVALID;
+ parse_state->next_pic_is_new = 1;
+ parse_state->prev_frame_num = BSPP_INVALID;
+ parse_state->second_field_flag = 0;
+ parse_state->first_chunk = 1;
+}
+
+/*
+ * @Function bspp_stream_preparse_buffers
+ * @Description The buffer list cannot be processed immediately, since units
+ * in the last buffer may be incomplete. Processing must wait until a buffer
+ * is provided with end-of-picture signalled. Once a buffer indicates that
+ * units will not span beyond it, the bitstream buffer chain can be processed.
+ */
+int bspp_stream_preparse_buffers(void *str_context_handle,
+ const struct bspp_ddbuf_info *contig_buf_info,
+ unsigned int contig_buf_map_id, struct lst_t *segments,
+ struct bspp_preparsed_data *preparsed_data,
+ int end_of_pic)
+{
+ struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+ struct bspp_pict_ctx *pict_ctx = &str_ctx->pict_ctx;
+ struct bspp_parse_state *parse_state = &str_ctx->parse_state;
+ int i;
+ unsigned int unit_count = 0, num_arrays = 0;
+ unsigned int size_delim_bits = 0;
+ enum swsr_found found = SWSR_FOUND_NONE;
+ unsigned int result;
+ struct bspp_bitstr_seg *segment;
+ struct lst_t temp_list;
+
+ if (!str_context_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!segments || !preparsed_data)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /*
+ * Since this is a new picture, reset the picture context status to
+ * the beginning. The arguments are validated first so that a NULL
+ * stream context is never dereferenced.
+ */
+ /* TODO: revisit this */
+ pict_ctx->finished = 0;
+ pict_ctx->new_pict_signalled = 0;
+
+ /* Check that bitstream buffers have been registered. */
+ if (!lst_last(&str_ctx->grp_bstr_ctx.buffer_chain))
+ return IMG_ERROR_OPERATION_PROHIBITED;
+
+ /* Initialise the output data. */
+ memset(preparsed_data, 0, sizeof(struct bspp_preparsed_data));
+
+ if (!parse_state->initialised) {
+ bspp_reset_pict_state(str_ctx, pict_ctx, parse_state);
+ parse_state->initialised = 1;
+ }
+
+ for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+ lst_init(&preparsed_data->picture_data.pre_pict_seg_list[i]);
+ lst_init(&preparsed_data->picture_data.pict_seg_list[i]);
+ }
+
+ /* Initialise parsing for this video standard. */
+ if (str_ctx->parser_callbacks.initialise_parsing_cb && parse_state->first_chunk)
+ str_ctx->parser_callbacks.initialise_parsing_cb(parse_state);
+
+ parse_state->first_chunk = 0;
+
+ for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS; i++) {
+ pict_ctx->pict_hdr_info[i].pict_aux_data.id = BSPP_INVALID;
+ pict_ctx->pict_hdr_info[i].second_pict_aux_data.id = BSPP_INVALID;
+ }
+
+ /* Setup buffer group bitstream context. */
+ str_ctx->grp_bstr_ctx.vid_std = str_ctx->vid_std;
+ str_ctx->grp_bstr_ctx.disable_mvc = str_ctx->disable_mvc;
+ str_ctx->grp_bstr_ctx.delim_present = 1;
+ str_ctx->grp_bstr_ctx.swsr_context = str_ctx->swsr_ctx.swsr_context;
+ str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_NONE;
+ str_ctx->grp_bstr_ctx.last_unit_type = BSPP_UNIT_NONE;
+ str_ctx->grp_bstr_ctx.not_pic_unit_yet = 1;
+ str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet = 1;
+ str_ctx->grp_bstr_ctx.total_bytes_read = 0;
+ str_ctx->grp_bstr_ctx.current_view_idx = 0;
+
+ for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+ str_ctx->grp_bstr_ctx.pre_pict_seg_list[i] =
+ &preparsed_data->picture_data.pre_pict_seg_list[i];
+ str_ctx->grp_bstr_ctx.pict_seg_list[i] =
+ &preparsed_data->picture_data.pict_seg_list[i];
+ str_ctx->grp_bstr_ctx.pict_tag_param_array[i] =
+ &preparsed_data->picture_data.pict_tag_param[i];
+ }
+ str_ctx->grp_bstr_ctx.segment_list = str_ctx->grp_bstr_ctx.pre_pict_seg_list[0];
+ str_ctx->grp_bstr_ctx.pict_tag_param = str_ctx->grp_bstr_ctx.pict_tag_param_array[0];
+ str_ctx->grp_bstr_ctx.free_segments = segments;
+ str_ctx->grp_bstr_ctx.segment_offset = 0;
+ str_ctx->grp_bstr_ctx.insert_start_code = 0;
+
+ /*
+ * Before processing the units, service all the picture-decoded
+ * events to free their resources.
+ */
+ bspp_service_pictures_decoded(str_ctx);
+
+ /*
+ * A picture currently being parsed is already decoded (may happen
+ * after dwr in low latency mode) and its resources were freed. Skip
+ * the rest of the picture.
+ */
+ if (pict_ctx->sequ_hdr_info && pict_ctx->sequ_hdr_info->ref_count == 0) {
+ pict_ctx->present = 0;
+ pict_ctx->finished = 1;
+ }
+
+ /*
+ * For bitstreams without unit delimiters treat all the buffers as
+ * a single unit whose type is defined by the first buffer element.
+ */
+ if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_NONE) {
+ struct bspp_bitstream_buffer *cur_buf =
+ lst_first(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+ /* If there is no picture data, the picture must be skipped. */
+ if (!cur_buf || cur_buf->data_size == 0) {
+ str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_SKIP_PICTURE;
+ } else if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_CODEC_CONFIG) {
+ str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_SEQUENCE;
+ } else if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_PICTURE_DATA ||
+ cur_buf->bstr_element_type == VDEC_BSTRELEMENT_UNSPECIFIED) {
+ str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_PICTURE;
+ str_ctx->grp_bstr_ctx.segment_list = str_ctx->grp_bstr_ctx.pict_seg_list[0];
+ }
+
+ str_ctx->grp_bstr_ctx.delim_present = 0;
+ }
+
+ /*
+ * Load the first section (buffer) of the bitstream into the software
+ * shift-register. BSPP maps "buffer" to "section" and allows for
+ * contiguous parsing of all buffers since unit boundaries are not
+ * known up-front. Unit parsing and segment creation happen in a
+ * single pass.
+ */
+ result = swsr_start_bitstream(str_ctx->swsr_ctx.swsr_context,
+ &str_ctx->swsr_ctx.sr_config,
+ str_ctx->grp_bstr_ctx.total_data_size,
+ str_ctx->swsr_ctx.emulation_prevention);
+
+ /* Seek for next delimiter or end of data and catch any exceptions. */
+ if (str_ctx->grp_bstr_ctx.delim_present) {
+ /* Locate the first bitstream unit. */
+ found = swsr_seek_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+ }
+
+ if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_SIZE) {
+ struct bspp_bitstream_buffer *cur_buf =
+ lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+ if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_CODEC_CONFIG &&
+ str_ctx->parser_callbacks.parse_codec_config_cb) {
+ /* Parse codec config header and catch any exceptions */
+ str_ctx->parser_callbacks.parse_codec_config_cb
+ (str_ctx->swsr_ctx.swsr_context,
+ &unit_count,
+ &num_arrays,
+ &str_ctx->swsr_ctx.sr_config.delim_length,
+ &size_delim_bits);
+ } else {
+ size_delim_bits = str_ctx->swsr_ctx.sr_config.delim_length;
+ }
+ }
+
+ /* Process all the bitstream units until the picture is located. */
+ while (found != SWSR_FOUND_EOD && !pict_ctx->finished) {
+ struct bspp_bitstream_buffer *cur_buf =
+ lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+ if (!cur_buf) {
+ pr_err("%s: cur_buf pointer is NULL\n", __func__);
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ if (str_ctx->swsr_ctx.sr_config.delim_type ==
+ SWSR_DELIM_SIZE && cur_buf->bstr_element_type ==
+ VDEC_BSTRELEMENT_CODEC_CONFIG &&
+ str_ctx->parser_callbacks.update_unit_counts_cb) {
+ /*
+ * Parse middle part of codec config header and catch
+ * any exceptions.
+ */
+ str_ctx->parser_callbacks.update_unit_counts_cb
+ (str_ctx->swsr_ctx.swsr_context,
+ &unit_count,
+ &num_arrays);
+ }
+
+ /* Process the next unit. */
+ result = bspp_process_unit(str_ctx, size_delim_bits, pict_ctx, parse_state);
+ if (result == IMG_ERROR_NOT_SUPPORTED)
+ goto error;
+
+ if (str_ctx->swsr_ctx.sr_config.delim_type != SWSR_DELIM_NONE)
+ str_ctx->grp_bstr_ctx.delim_present = 1;
+
+ /* jump to the next view */
+ if (parse_state->new_view) {
+ result = bspp_jump_to_next_view(&str_ctx->grp_bstr_ctx,
+ preparsed_data,
+ parse_state);
+ if (result != IMG_SUCCESS)
+ goto error;
+
+ parse_state->new_view = 0;
+ }
+
+ if (!pict_ctx->finished) {
+ /*
+ * Seek for next delimiter or end of data and catch any
+ * exceptions.
+ */
+ /* Locate the next bitstream unit or end of data */
+ found = swsr_seek_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+
+ {
+ struct bspp_bitstream_buffer *buf;
+ /* Update the offset within current buffer. */
+ swsr_get_byte_offset_curbuf(str_ctx->grp_bstr_ctx.swsr_context,
+ &parse_state->prev_byte_offset_buf);
+ buf = lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+ if (buf) {
+ parse_state->prev_buf_map_id = buf->bufmap_id;
+ parse_state->prev_buf_data_size = buf->data_size;
+ }
+ }
+ }
+ }
+
+ /* Finalize parsing for this video standard. */
+ if (str_ctx->parser_callbacks.finalise_parsing_cb && end_of_pic) {
+ str_ctx->parser_callbacks.finalise_parsing_cb((void *)&str_ctx->str_alloc,
+ parse_state);
+ }
+
+ /*
+ * Create segments for each buffer held by the software shift register
+ * (and not yet processed).
+ */
+ while (lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs)) {
+ struct bspp_bitstream_buffer *buf =
+ lst_removehead(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+ result = bspp_terminate_buffer(&str_ctx->grp_bstr_ctx, buf);
+ }
+
+ /*
+ * Create segments for each buffer not yet requested by the shift
+ * register.
+ */
+ while (lst_first(&str_ctx->grp_bstr_ctx.buffer_chain)) {
+ struct bspp_bitstream_buffer *buf =
+ lst_removehead(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+ result = bspp_terminate_buffer(&str_ctx->grp_bstr_ctx, buf);
+ }
+
+ /*
+ * Populate the parsed data information for picture only if one is
+ * present. The anonymous data has already been added to the
+ * appropriate segment list.
+ */
+ if (pict_ctx->present && !pict_ctx->invalid) {
+ if (!pict_ctx->new_pict_signalled) {
+ /*
+ * Provide data about the sequence used by the picture.
+ * Signal "new sequence" if the sequence header is new or
+ * has changed. Always switch sequences when changing
+ * between base and additional views.
+ */
+ if (pict_ctx->sequ_hdr_info) {
+ if (pict_ctx->sequ_hdr_info->sequ_hdr_info.sequ_hdr_id !=
+ str_ctx->sequ_hdr_id ||
+ pict_ctx->sequ_hdr_info->ref_count == 1 ||
+ pict_ctx->ext_sequ_hdr_info ||
+ pict_ctx->closed_gop) {
+ preparsed_data->new_sequence = 1;
+ preparsed_data->sequ_hdr_info =
+ pict_ctx->sequ_hdr_info->sequ_hdr_info;
+ }
+ }
+
+ /* Signal "new subsequence" and its common header information. */
+ if (pict_ctx->ext_sequ_hdr_info) {
+ preparsed_data->new_sub_sequence = 1;
+ preparsed_data->ext_sequ_hdr_info =
+ pict_ctx->ext_sequ_hdr_info->sequ_hdr_info;
+
+ for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS - 1; i++) {
+ /*
+ * prefix is always the last one
+ * do not attach any header info to it
+ */
+ if (preparsed_data->ext_pictures_data[i].is_prefix)
+ break;
+
+ /* attach headers */
+ preparsed_data->ext_pictures_data[i].sequ_hdr_id =
+ pict_ctx->ext_sequ_hdr_info->sequ_hdr_info.sequ_hdr_id;
+ pict_ctx->ext_sequ_hdr_info->ref_count++;
+ preparsed_data->ext_pictures_data[i].pict_hdr_info =
+ pict_ctx->pict_hdr_info[i + 1];
+ }
+
+ preparsed_data->ext_pictures_data[0].pict_hdr_info.first_pic_of_sequence =
+ preparsed_data->new_sub_sequence;
+
+ /*
+ * Update the base view common sequence info
+ * with the number of views that the stream has.
+ * Otherwise the number of views is inconsistent
+ * between base view sequence and dependent view
+ * sequences. Also base view sequence appears
+ * with one view and the driver calculates the
+ * wrong number of resources.
+ */
+ preparsed_data->sequ_hdr_info.com_sequ_hdr_info.num_views =
+ preparsed_data->ext_sequ_hdr_info.com_sequ_hdr_info.num_views;
+ }
+
+ /* Signal if this picture is the first in a closed GOP */
+ if (pict_ctx->closed_gop) {
+ preparsed_data->closed_gop = 1;
+ preparsed_data->sequ_hdr_info.com_sequ_hdr_info.not_dpb_flush =
+ str_ctx->inter_pict_data.not_dpb_flush;
+ }
+
+ /*
+ * Signal "new picture" and its common header
+ * information.
+ */
+ preparsed_data->new_picture = 1;
+ if (pict_ctx->sequ_hdr_info) {
+ preparsed_data->picture_data.sequ_hdr_id =
+ pict_ctx->sequ_hdr_info->sequ_hdr_info.sequ_hdr_id;
+ }
+ preparsed_data->picture_data.pict_hdr_info = pict_ctx->pict_hdr_info[0];
+
+ preparsed_data->picture_data.pict_hdr_info.first_pic_of_sequence =
+ preparsed_data->new_sequence;
+ if (contig_buf_info)
+ preparsed_data->picture_data.pict_hdr_info.fragmented_data = 1;
+ else
+ preparsed_data->picture_data.pict_hdr_info.fragmented_data = 0;
+
+ str_ctx->sequ_hdr_id = preparsed_data->picture_data.sequ_hdr_id;
+
+ pict_ctx->new_pict_signalled = 1;
+
+ /*
+ * aso/fmo supported only when a frame is submitted as
+ * a whole
+ */
+ if (parse_state->discontinuous_mb && !end_of_pic)
+ result = IMG_ERROR_NOT_SUPPORTED;
+ } else {
+ preparsed_data->new_fragment = 1;
+
+ if (parse_state->discontinuous_mb)
+ result = IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ lst_init(&temp_list);
+
+ segment = lst_removehead(&preparsed_data->picture_data.pict_seg_list[0]);
+ while (segment) {
+ lst_add(&temp_list, segment);
+ segment = lst_removehead(&preparsed_data->picture_data.pict_seg_list[0]);
+ }
+
+ segment = lst_removehead(&str_ctx->inter_pict_data.pic_prefix_seg);
+ while (segment) {
+ lst_add(&preparsed_data->picture_data.pict_seg_list[0],
+ segment);
+ segment = lst_removehead(&str_ctx->inter_pict_data.pic_prefix_seg);
+ }
+
+ segment = lst_removehead(&temp_list);
+ while (segment) {
+ lst_add(&preparsed_data->picture_data.pict_seg_list[0],
+ segment);
+ segment = lst_removehead(&temp_list);
+ }
+
+ for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS; i++) {
+ unsigned int j;
+ struct bspp_picture_data *ext_pic_data =
+ &preparsed_data->ext_pictures_data[i];
+
+ if (preparsed_data->ext_pictures_data[i].is_prefix) {
+ for (j = 0; j < BSPP_MAX_PICTURES_PER_BUFFER; j++) {
+ segment = lst_removehead(&ext_pic_data->pict_seg_list[j]);
+ while (segment) {
+ lst_add(&str_ctx->inter_pict_data.pic_prefix_seg,
+ segment);
+ segment = lst_removehead(&ext_pic_data->pict_seg_list[j]);
+ }
+ }
+ preparsed_data->num_ext_pictures--;
+ break;
+ }
+ }
+ } else if (pict_ctx->present && pict_ctx->sequ_hdr_info) {
+ /*
+ * Reduce the reference count since this picture will not be
+ * decoded.
+ */
+ pict_ctx->sequ_hdr_info->ref_count--;
+ /* Release sequence data. */
+ if (str_ctx->parser_callbacks.release_data_cb) {
+ str_ctx->parser_callbacks.release_data_cb((void *)&str_ctx->str_alloc,
+ BSPP_UNIT_SEQUENCE,
+ pict_ctx->sequ_hdr_info->secure_sequence_info);
+ }
+ }
+
+ /* Reset the group bitstream context. */
+ memset(&str_ctx->grp_bstr_ctx, 0, sizeof(str_ctx->grp_bstr_ctx));
+ lst_init(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+ /*
+ * for now: return IMG_ERROR_NOT_SUPPORTED only if explicitly set by
+ * parser
+ */
+ result = (result == IMG_ERROR_NOT_SUPPORTED) ?
+ IMG_ERROR_NOT_SUPPORTED : IMG_SUCCESS;
+
+ if (end_of_pic)
+ parse_state->initialised = 0;
+
+ return result;
+
+error:
+ /* Free the SWSR list of buffers */
+ while (lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs))
+ lst_removehead(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+ return result;
+}
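+
+/*
+ * Typical driving sequence (illustrative sketch only; the caller-side
+ * variable names are assumptions, not part of this API):
+ *
+ *      result = bspp_stream_create(&str_cfg_data, &handle, fw_sequ, fw_pps);
+ *      result = bspp_stream_submit_buffer(handle, &buf_info, bufmap_id,
+ *                                         data_size, pict_tag, element_type);
+ *      result = bspp_stream_preparse_buffers(handle, NULL, 0, &free_segments,
+ *                                            &preparsed_data, 1);
+ *
+ * Passing end_of_pic as 1 with the last buffer of a picture is what allows
+ * the whole buffer chain to be parsed in a single pass, as described above.
+ */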
+
+/*
+ * @Function bspp_stream_destroy
+ *
+ */
+int bspp_stream_destroy(void *str_context_handle)
+{
+ struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+ unsigned int i;
+ unsigned int sps_id;
+ unsigned int pps_id;
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+ struct bspp_pps_info *pps_info;
+ unsigned int result;
+
+ /* Validate input arguments. */
+ if (!str_context_handle) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ swsr_deinitialise(str_ctx->swsr_ctx.swsr_context);
+
+ /*
+ * Service all the picture decoded events and free any unused
+ * resources.
+ */
+ bspp_service_pictures_decoded(str_ctx);
+ for (sps_id = 0; sps_id < SEQUENCE_SLOTS; sps_id++)
+ bspp_remove_unused_sequence(str_ctx, sps_id);
+
+ if (str_ctx->vid_std_features.uses_pps) {
+ for (pps_id = 0; pps_id < PPS_SLOTS; pps_id++)
+ bspp_remove_unused_pps(str_ctx, pps_id);
+ }
+
+ if (str_ctx->vid_std_features.uses_vps) {
+ struct bspp_vps_info *vps_info;
+
+ for (i = 0; i < VPS_SLOTS; ++i) {
+ vps_info = lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+
+ if (vps_info)
+ lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+
+ /*
+ * When we are done with the stream we should have at most one
+ * VPS per slot, so after removing this one we should have none.
+ * In case of "decoded frames" this is not true, because we send
+ * more pictures for decode than we expect to receive back, which
+ * means that additional sequences/PPS may potentially be in the
+ * list.
+ */
+ vps_info = lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+ if (vps_info) {
+ do {
+ lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+ vps_info =
+ lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+ } while (vps_info);
+ }
+ VDEC_ASSERT(lst_empty(&str_ctx->str_alloc.vps_data_list[i]));
+ }
+
+ vps_info = NULL;
+ for (i = 0; i < MAX_VPSS; ++i) {
+ VDEC_ASSERT(!lst_empty(&str_ctx->str_alloc.available_vps_list));
+ vps_info = lst_removehead(&str_ctx->str_alloc.available_vps_list);
+ if (vps_info) {
+ kfree(vps_info->secure_vpsinfo);
+ kfree(vps_info);
+ } else {
+ VDEC_ASSERT(vps_info);
+ pr_err("vps still active at shutdown\n");
+ }
+ }
+ VDEC_ASSERT(lst_empty(&str_ctx->str_alloc.available_vps_list));
+ }
+
+ /* Free the memory required for this stream. */
+ for (i = 0; i < SEQUENCE_SLOTS; i++) {
+ sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+ if (sequ_hdr_info) {
+ if (str_ctx->parser_callbacks.release_data_cb)
+ str_ctx->parser_callbacks.release_data_cb
+ ((void *)&str_ctx->str_alloc,
+ BSPP_UNIT_SEQUENCE,
+ sequ_hdr_info->secure_sequence_info);
+ lst_add(&str_ctx->str_alloc.available_sequence_list,
+ sequ_hdr_info);
+ }
+
+ /*
+ * When we are done with the stream we should have at most one
+ * sequence per slot, so after removing this one we should have
+ * none. In case of "decoded frames" this is not true, because
+ * we send more pictures for decode than we expect to receive
+ * back, which means that additional sequences/PPS may
+ * potentially be in the list.
+ */
+ sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+ if (sequ_hdr_info) {
+ unsigned int count_extra_sequences = 0;
+
+ do {
+ count_extra_sequences++;
+ if (str_ctx->parser_callbacks.release_data_cb) {
+ str_ctx->parser_callbacks.release_data_cb
+ ((void *)&str_ctx->str_alloc,
+ BSPP_UNIT_SEQUENCE,
+ sequ_hdr_info->secure_sequence_info);
+ }
+ lst_add(&str_ctx->str_alloc.available_sequence_list,
+ sequ_hdr_info);
+ sequ_hdr_info =
+ lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+ } while (sequ_hdr_info);
+ }
+ }
+
+ if (str_ctx->vid_std_features.uses_pps) {
+ for (i = 0; i < PPS_SLOTS; i++) {
+ pps_info = lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+ if (pps_info)
+ lst_add(&str_ctx->str_alloc.available_ppss_list, pps_info);
+
+ /*
+ * When we are done with the stream we should have at most
+ * one PPS per slot, so after removing this one we should
+ * have none. In case of "decoded frames" this is not true,
+ * because we send more pictures for decode than we expect
+ * to receive back, which means that additional
+ * sequences/PPS may potentially be in the list.
+ */
+ pps_info = lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+ if (pps_info) {
+ unsigned int count_extra_ppss = 0;
+
+ do {
+ count_extra_ppss++;
+ lst_add(&str_ctx->str_alloc.available_ppss_list,
+ pps_info);
+ pps_info =
+ lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+ } while (pps_info);
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_SEQUENCES; i++) {
+ sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.available_sequence_list);
+ if (sequ_hdr_info && str_ctx->parser_callbacks.destroy_data_cb)
+ str_ctx->parser_callbacks.destroy_data_cb
+ (BSPP_UNIT_SEQUENCE, sequ_hdr_info->secure_sequence_info);
+ }
+
+ kfree(str_ctx->secure_sequence_info);
+ str_ctx->secure_sequence_info = NULL;
+ kfree(str_ctx->sequ_hdr_info);
+ str_ctx->sequ_hdr_info = NULL;
+
+ if (str_ctx->vid_std_features.uses_pps) {
+ for (i = 0; i < MAX_PPSS; i++) {
+ pps_info = lst_removehead(&str_ctx->str_alloc.available_ppss_list);
+ if (pps_info && str_ctx->parser_callbacks.destroy_data_cb)
+ str_ctx->parser_callbacks.destroy_data_cb
+ (BSPP_UNIT_PPS, pps_info->secure_pps_info);
+ }
+
+ kfree(str_ctx->secure_pps_info);
+ str_ctx->secure_pps_info = NULL;
+ kfree(str_ctx->pps_info);
+ str_ctx->pps_info = NULL;
+ }
+
+ /* destroy mutex */
+ mutex_destroy(str_ctx->bspp_mutex);
+ kfree(str_ctx->bspp_mutex);
+ str_ctx->bspp_mutex = NULL;
+
+ kfree(str_ctx);
+
+ return IMG_SUCCESS;
+error:
+ return result;
+}
+
+/*
+ * @Function bspp_set_codec_config
+ *
+ */
+int bspp_set_codec_config(const void *str_context_handle,
+ const struct vdec_codec_config *codec_config)
+{
+ struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+ unsigned int result = IMG_SUCCESS;
+
+ /* Validate input arguments. */
+ if (!str_context_handle || !codec_config) {
+ result = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+
+ switch (str_ctx->vid_std) {
+ default:
+ result = IMG_ERROR_NOT_SUPPORTED;
+ break;
+ }
+error:
+ return result;
+}
+
+/*
+ * @Function bspp_stream_create
+ *
+ */
+int bspp_stream_create(const struct vdec_str_configdata *str_config_data,
+ void **str_ctx_handle,
+ struct bspp_ddbuf_array_info fw_sequence[],
+ struct bspp_ddbuf_array_info fw_pps[])
+{
+ struct bspp_str_context *str_ctx;
+ unsigned int result = IMG_SUCCESS;
+ unsigned int i;
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+ struct bspp_pps_info *pps_info;
+ struct bspp_parse_state *parse_state;
+
+ /* Allocate and zero a stream structure */
+ str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+ if (!str_ctx) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ /* Initialise the stream context structure. */
+ str_ctx->sequ_hdr_id = BSPP_INVALID;
+ str_ctx->vid_std = str_config_data->vid_std;
+ str_ctx->bstr_format = str_config_data->bstr_format;
+ str_ctx->disable_mvc = str_config_data->disable_mvc;
+ str_ctx->full_scan = str_config_data->full_scan;
+ str_ctx->immediate_decode = str_config_data->immediate_decode;
+ str_ctx->intra_frame_closed_gop = str_config_data->intra_frame_closed_gop;
+
+ parse_state = &str_ctx->parse_state;
+
+ /* Setup group buffer processing state. */
+ parse_state->inter_pict_ctx = &str_ctx->inter_pict_data;
+ parse_state->prev_bottom_pic_flag = (unsigned char)BSPP_INVALID;
+ parse_state->next_pic_is_new = 1;
+ parse_state->prev_frame_num = BSPP_INVALID;
+ parse_state->second_field_flag = 0;
+
+ lst_init(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+ if (str_ctx->vid_std < VDEC_STD_MAX && parser_fxns[str_ctx->vid_std].set_parser_config) {
+ parser_fxns[str_ctx->vid_std].set_parser_config(str_ctx->bstr_format,
+ &str_ctx->vid_std_features,
+ &str_ctx->swsr_ctx,
+ &str_ctx->parser_callbacks,
+ &str_ctx->inter_pict_data);
+ } else {
+ result = IMG_ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+
+ /* Allocate the memory required for this stream for Sequence/PPS info */
+ lst_init(&str_ctx->str_alloc.available_sequence_list);
+
+ str_ctx->sequ_hdr_info = kzalloc((MAX_SEQUENCES * sizeof(struct bspp_sequence_hdr_info)),
+ GFP_KERNEL);
+ if (!str_ctx->sequ_hdr_info) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ str_ctx->secure_sequence_info =
+ kzalloc((MAX_SEQUENCES * str_ctx->vid_std_features.seq_size),
+ GFP_KERNEL);
+ if (!str_ctx->secure_sequence_info) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ sequ_hdr_info = (struct bspp_sequence_hdr_info *)(str_ctx->sequ_hdr_info);
+ for (i = 0; i < MAX_SEQUENCES; i++) {
+ /* Deal with the device memory for FW SPS data */
+ sequ_hdr_info->fw_sequence = fw_sequence[i];
+ sequ_hdr_info->sequ_hdr_info.bufmap_id =
+ fw_sequence[i].ddbuf_info.bufmap_id;
+ sequ_hdr_info->sequ_hdr_info.buf_offset =
+ fw_sequence[i].buf_offset;
+ sequ_hdr_info->secure_sequence_info = (void *)(str_ctx->secure_sequence_info +
+ (i * str_ctx->vid_std_features.seq_size));
+
+ lst_add(&str_ctx->str_alloc.available_sequence_list,
+ sequ_hdr_info);
+ sequ_hdr_info++;
+ }
+
+ if (str_ctx->vid_std_features.uses_pps) {
+ lst_init(&str_ctx->str_alloc.available_ppss_list);
+ str_ctx->pps_info = kzalloc((MAX_PPSS * sizeof(struct bspp_pps_info)), GFP_KERNEL);
+ if (!str_ctx->pps_info) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+ str_ctx->secure_pps_info = kzalloc((MAX_PPSS * str_ctx->vid_std_features.pps_size),
+ GFP_KERNEL);
+ if (!str_ctx->secure_pps_info) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ pps_info = (struct bspp_pps_info *)(str_ctx->pps_info);
+ for (i = 0; i < MAX_PPSS; i++) {
+ /* Deal with the device memory for FW PPS data */
+ pps_info->fw_pps = fw_pps[i];
+ pps_info->bufmap_id = fw_pps[i].ddbuf_info.bufmap_id;
+ pps_info->buf_offset = fw_pps[i].buf_offset;
+
+ /*
+ * We have no container for the PPS that passes down to the kernel,
+ * for this reason the h264 secure parser needs to populate that
+ * info into the picture header (Second)PictAuxData.
+ */
+ pps_info->secure_pps_info = (void *)(str_ctx->secure_pps_info + (i *
+ str_ctx->vid_std_features.pps_size));
+
+ lst_add(&str_ctx->str_alloc.available_ppss_list, pps_info);
+ pps_info++;
+ }
+
+ /* As only standards that use PPS also use VUI, initialise
+ * the appropriate data structures here.
+ * Initialise the list of raw bitstream data containers.
+ */
+ lst_init(&str_ctx->str_alloc.raw_data_list_available);
+ lst_init(&str_ctx->str_alloc.raw_data_list_used);
+ }
+
+ if (str_ctx->vid_std_features.uses_vps) {
+ struct bspp_vps_info *vps_info;
+
+ lst_init(&str_ctx->str_alloc.available_vps_list);
+ for (i = 0; i < MAX_VPSS; ++i) {
+ vps_info = kzalloc(sizeof(*vps_info), GFP_KERNEL);
+ VDEC_ASSERT(vps_info);
+ if (!vps_info) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ /*
+ * for VPS we do not allocate device memory since (at least for now)
+ * there is no need to pass any data from VPS directly to FW
+ */
+ /* Allocate memory for BSPP local VPS data structure. */
+ vps_info->secure_vpsinfo =
+ kzalloc(str_ctx->vid_std_features.vps_size, GFP_KERNEL);
+
+ VDEC_ASSERT(vps_info->secure_vpsinfo);
+ if (!vps_info->secure_vpsinfo) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+ }
+ }
+
+ /* ... and initialise the lists that will use this data */
+ for (i = 0; i < SEQUENCE_SLOTS; i++)
+ lst_init(&str_ctx->str_alloc.sequence_data_list[i]);
+
+ if (str_ctx->vid_std_features.uses_pps)
+ for (i = 0; i < PPS_SLOTS; i++)
+ lst_init(&str_ctx->str_alloc.pps_data_list[i]);
+
+ str_ctx->bspp_mutex = kzalloc(sizeof(*str_ctx->bspp_mutex), GFP_KERNEL);
+ if (!str_ctx->bspp_mutex) {
+ result = -ENOMEM;
+ goto error;
+ }
+ mutex_init(str_ctx->bspp_mutex);
+
+ /* Initialise the software shift-register */
+ swsr_initialise(bspp_exception_handler, &str_ctx->parse_ctx,
+ (swsr_callback_fxn) bspp_shift_reg_cb,
+ &str_ctx->grp_bstr_ctx,
+ &str_ctx->swsr_ctx.swsr_context);
+
+ /* Setup the parse context */
+ str_ctx->parse_ctx.swsr_context = str_ctx->swsr_ctx.swsr_context;
+
+ *str_ctx_handle = str_ctx;
+
+ return IMG_SUCCESS;
+
+error:
+ if (str_ctx) {
+ kfree(str_ctx->sequ_hdr_info);
+ kfree(str_ctx->secure_sequence_info);
+ kfree(str_ctx->pps_info);
+ kfree(str_ctx->secure_pps_info);
+ kfree(str_ctx);
+ }
+
+ return result;
+}
+
+void bspp_freeraw_sei_datacontainer(const void *str_res,
+ struct vdec_raw_bstr_data *rawsei_datacontainer)
+{
+ struct bspp_raw_sei_alloc *rawsei_alloc = NULL;
+
+ /* Check input params. */
+ if (str_res && rawsei_datacontainer) {
+ struct bspp_stream_alloc_data *alloc_data =
+ (struct bspp_stream_alloc_data *)str_res;
+
+ rawsei_alloc = container_of(rawsei_datacontainer,
+ struct bspp_raw_sei_alloc,
+ raw_sei_data);
+ memset(&rawsei_alloc->raw_sei_data, 0, sizeof(rawsei_alloc->raw_sei_data));
+ lst_remove(&alloc_data->raw_sei_alloc_list, rawsei_alloc);
+ kfree(rawsei_alloc);
+ }
+}
+
+void bspp_freeraw_sei_datalist(const void *str_res, struct vdec_raw_bstr_data *rawsei_datalist)
+{
+ /* Check input params. */
+ if (rawsei_datalist && str_res) {
+ struct vdec_raw_bstr_data *sei_raw_datacurr = NULL;
+
+ /* Start from the first element... */
+ sei_raw_datacurr = rawsei_datalist;
+ /* Free all the linked raw SEI data containers. */
+ while (sei_raw_datacurr) {
+ struct vdec_raw_bstr_data *seiraw_datanext =
+ sei_raw_datacurr->next;
+ bspp_freeraw_sei_datacontainer(str_res, sei_raw_datacurr);
+ sei_raw_datacurr = seiraw_datanext;
+ }
+ }
+}
+
+void bspp_streamrelese_rawbstrdataplain(const void *str_res, const void *rawdata)
+{
+ struct bspp_stream_alloc_data *str_alloc =
+ (struct bspp_stream_alloc_data *)str_res;
+ struct bspp_raw_bitstream_data *rawbstrdata =
+ (struct bspp_raw_bitstream_data *)rawdata;
+
+ if (rawbstrdata) {
+ /* Decrement the raw bitstream data reference count. */
+ rawbstrdata->ref_count--;
+ /* If no entity is referencing the raw
+ * bitstream data any more
+ */
+ if (rawbstrdata->ref_count == 0) {
+ /* ... free the raw bitstream data buffer... */
+ kfree(rawbstrdata->raw_bitstream_data.data);
+ memset(&rawbstrdata->raw_bitstream_data, 0,
+ sizeof(rawbstrdata->raw_bitstream_data));
+ /* ...and return it to the list. */
+ lst_remove(&str_alloc->raw_data_list_used, rawbstrdata);
+ lst_add(&str_alloc->raw_data_list_available, rawbstrdata);
+ }
+ }
+}
+
+struct bspp_vps_info *bspp_get_vpshdr(void *str_res, unsigned int vps_id)
+{
+ struct bspp_stream_alloc_data *alloc_data =
+ (struct bspp_stream_alloc_data *)str_res;
+
+ if (vps_id >= VPS_SLOTS || !alloc_data)
+ return NULL;
+
+ return lst_last(&alloc_data->vps_data_list[vps_id]);
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/bspp.h b/drivers/media/platform/vxe-vxd/decoder/bspp.h
new file mode 100644
index 000000000000..2198d9d6966e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/bspp.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Bitstream Buffer Pre-Parser
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __BSPP_H__
+#define __BSPP_H__
+
+#include <linux/types.h>
+
+#include "h264fw_data.h"
+#include "lst.h"
+#include "vdec_defs.h"
+
+/*
+ * There are up to 2 pictures in each buffer
+ * (plus trailing data for the next picture, e.g. PPS).
+ */
+#define BSPP_MAX_PICTURES_PER_BUFFER 3
+
+#define BSPP_INVALID ((unsigned int)(-1))
+
+/*
+ * This enables signalling of a closed GOP at every I-frame. It adds
+ * resilience to the seeking functionality.
+ */
+#define I_FRAME_SIGNALS_CLOSED_GOP
+
+/*
+ * enum bspp_error_type - enumeration of parsing errors; a different error
+ * flag for each type of data unit
+ */
+enum bspp_error_type {
+ /* No Error in parsing. */
+ BSPP_ERROR_NONE = (0),
+ /* Correction in VSH: the faulty VSH was replaced */
+ BSPP_ERROR_CORRECTION_VSH = (1 << 0),
+ /*
+ * Correction in parsed value: the value is clamped if it goes
+ * beyond the limit
+ */
+ BSPP_ERROR_CORRECTION_VALIDVALUE = (1 << 1),
+ /* Error in Aux data (i.e. PPS in H.264) parsing */
+ BSPP_ERROR_AUXDATA = (1 << 2),
+ /* Error in parsing, more data remains in VSH data unit after parsing */
+ BSPP_ERROR_DATA_REMAINS = (1 << 3),
+ /* Error in parsing, parsed codeword is invalid */
+ BSPP_ERROR_INVALID_VALUE = (1 << 4),
+ /* Error in parsing, generic decode error */
+ BSPP_ERROR_DECODE = (1 << 5),
+ /* reference frame is not available for decoding */
+ BSPP_ERROR_NO_REF_FRAME = (1 << 6),
+ /* Non IDR frame loss detected */
+ BSPP_ERROR_NONIDR_FRAME_LOSS = (1 << 7),
+ /* IDR frame loss detected */
+ BSPP_ERROR_IDR_FRAME_LOSS = (1 << 8),
+ /* Error in parsing, insufficient data to complete parsing */
+ BSPP_ERROR_INSUFFICIENT_DATA = (1 << 9),
+ /* Severe error, indicates no support for this picture data */
+ BSPP_ERROR_UNSUPPORTED = (1 << 10),
+ /* Severe error, an error from which we could not recover */
+ BSPP_ERROR_UNRECOVERABLE = (1 << 11),
+ /* Severe error, indicates that the NAL header is absent after SCP */
+ BSPP_ERROR_NO_NALHEADER = (1 << 12),
+ BSPP_ERROR_NO_SEQUENCE_HDR = (1 << 13),
+ BSPP_ERROR_SIGNALED_IN_STREAM = (1 << 14),
+ BSPP_ERROR_UNKNOWN_DATAUNIT_DETECTED = (1 << 15),
+ BSPP_ERROR_NO_PPS = (1 << 16),
+ BSPP_ERROR_NO_VPS = (1 << 17),
+ BSPP_ERROR_OUT_OF_MEMORY = (1 << 18),
+ /* The shift value of the last error bit */
+ BSPP_ERROR_MAX_SHIFT = 18,
+ BSPP_ERROR_FORCE32BITS = 0x7FFFFFFFU
+};
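+
+/*
+ * The values above are bit flags, so several parse errors can accumulate in
+ * one flags word. A minimal sketch of how a caller might test them (the
+ * handling calls are hypothetical):
+ *
+ *      if (error_flags & (BSPP_ERROR_IDR_FRAME_LOSS | BSPP_ERROR_UNRECOVERABLE))
+ *              drop_picture();
+ *      else if (error_flags & BSPP_ERROR_CORRECTION_VALIDVALUE)
+ *              use_clamped_values();
+ */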
+
+/*
+ * struct bspp_ddbuf_info - Buffer info
+ * @buf_size: The size of the buffer (in bytes)
+ * @cpu_virt_addr: The CPU virtual address (mapped into the local cpu MMU)
+ * @mem_attrib: Memory attributes
+ * @buf_id: Buffer id
+ * @bufmap_id: Buffer mapping id
+ */
+struct bspp_ddbuf_info {
+ unsigned int buf_size;
+ void *cpu_virt_addr;
+ enum sys_emem_attrib mem_attrib;
+ unsigned int buf_id;
+ unsigned int bufmap_id;
+};
+
+/*
+ * struct bspp_ddbuf_array_info - Buffer array info
+ * @ddbuf_info: Buffer info (container)
+ * @buf_element_size: Size of each element
+ * @buf_offset: Offset for each element
+ */
+struct bspp_ddbuf_array_info {
+ struct bspp_ddbuf_info ddbuf_info;
+ unsigned int buf_element_size;
+ unsigned int buf_offset;
+};
+
+/**
+ * struct bspp_bitstr_seg - Bitstream segment
+ * @lst_padding: list link pointer (first member, used internally by lst_t)
+ * @data_size: Size of data
+ * @data_byte_offset: Offset for data
+ * @bstr_seg_flag: flag indicating the bitstream segment type
+ * @start_code_suffix: start code suffix
+ * @bufmap_id: Buffer map ID
+ */
+struct bspp_bitstr_seg {
+ void *lst_padding;
+ unsigned int data_size;
+ unsigned int data_byte_offset;
+ unsigned int bstr_seg_flag;
+ unsigned char start_code_suffix;
+ unsigned int bufmap_id;
+};
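+
+/*
+ * Segments travel on lst_t lists, with @lst_padding acting as the link, so
+ * consumers drain them head-first. A minimal sketch (the consumer function
+ * is hypothetical):
+ *
+ *      struct bspp_bitstr_seg *seg;
+ *
+ *      while ((seg = lst_removehead(&pict_seg_list)) != NULL)
+ *              submit_segment_to_hw(seg);
+ */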
+
+/*
+ * struct bspp_pict_data - Picture Header Data Information
+ * @bufmap_id: Buffer ID to use inside kernel #VXDIO_sDdBufInfo
+ * @buf_offset: Buffer offset (for packed device buffers, e.g. PPS)
+ * @pic_data: Picture data
+ * @size: Size (in bytes) of data.
+ * @id: Data identifier.
+ */
+struct bspp_pict_data {
+ unsigned int bufmap_id;
+ unsigned int buf_offset;
+ void *pic_data;
+ unsigned int size;
+ unsigned int id;
+};
+
+/*
+ * struct bspp_pict_hdr_info - Picture Header Information
+ */
+struct bspp_pict_hdr_info {
+ /*
+ * Picture is entirely intra-coded and doesn't use any reference data.
+ * NOTE: should be IMG_FALSE if this cannot be determined.
+ */
+ int intra_coded;
+ /* Picture might be referenced by subsequent pictures. */
+ int ref;
+ /* Picture is a field as part of a frame. */
+ int field;
+ /* Emulation prevention bytes are present in picture data. */
+ int emulation_prevention;
+ /* Post Processing */
+ int post_processing;
+ /* Macroblocks within the picture may not occur in raster-scan order */
+ int discontinuous_mbs;
+ /* Flag to indicate that data spans across multiple buffers. */
+ int fragmented_data;
+ /* SOS fields count value */
+ unsigned char sos_count;
+ /* Whether this picture is the first of the sequence */
+ int first_pic_of_sequence;
+
+ enum vdecfw_parsermode parser_mode;
+ /* Total size of picture data which is going to be submitted. */
+ unsigned int pic_data_size;
+ /* Size of coded frame as specified in the bitstream. */
+ struct vdec_pict_size coded_frame_size;
+ /* Display information for picture */
+ struct vdec_pict_disp_info disp_info;
+
+ /* Picture auxiliary data (e.g. H.264 SPS/PPS) */
+ struct bspp_pict_data pict_aux_data;
+ /* Picture auxiliary data (e.g. H.264 SPS/PPS) for 2nd picture */
+ struct bspp_pict_data second_pict_aux_data;
+ /* Slice group-map data. */
+ struct bspp_pict_data pict_sgm_data;
+#ifdef HAS_JPEG
+ /* JPEG specific picture header information.*/
+ struct vdec_jpeg_pict_hdr_info jpeg_pict_hdr_info;
+#endif
+
+ struct h264_pict_hdr_info {
+ void *raw_vui_data;
+ void *raw_sei_data_list_first_field;
+ void *raw_sei_data_list_second_field;
+ unsigned char nal_ref_idc;
+ unsigned short frame_num;
+ } h264_pict_hdr_info;
+
+ struct { /* HEVC specific frame information.*/
+ int range_ext_present;
+ int is_full_range_ext;
+ void *raw_vui_data;
+ void *raw_sei_datalist_firstfield;
+ void *raw_sei_datalist_secondfield;
+ } hevc_pict_hdr_info;
+};
+
+/*
+ * struct bspp_sequ_hdr_info - Sequence header information
+ */
+struct bspp_sequ_hdr_info {
+ unsigned int sequ_hdr_id;
+ unsigned int ref_count;
+ struct vdec_comsequ_hdrinfo com_sequ_hdr_info;
+ unsigned int bufmap_id;
+ unsigned int buf_offset;
+};
+
+/*
+ * struct bspp_picture_data - Picture data
+ */
+struct bspp_picture_data {
+ /* Anonymous */
+ /*
+ * Bitstream segments that contain other (non-picture) data before
+ * the picture in the buffer (elements of type #VDECDD_sBitStrSeg).
+ */
+ struct lst_t pre_pict_seg_list[BSPP_MAX_PICTURES_PER_BUFFER];
+ /* Picture */
+ unsigned int sequ_hdr_id;
+ struct bspp_pict_hdr_info pict_hdr_info;
+ /*
+ * Bitstream segments that contain picture data, one for each field
+ * (if present) in the same group of buffers (elements of type
+ * #VDECDD_sBitStrSeg).
+ */
+ struct lst_t pict_seg_list[BSPP_MAX_PICTURES_PER_BUFFER];
+ void *pict_tag_param[BSPP_MAX_PICTURES_PER_BUFFER];
+ int is_prefix;
+};
+
+/*
+ * struct bspp_preparsed_data - Pre-parsed buffer information
+ */
+struct bspp_preparsed_data {
+ /* Sequence */
+ int new_sequence;
+ struct bspp_sequ_hdr_info sequ_hdr_info;
+ int sequence_end;
+
+ /* Closed GOP */
+ int closed_gop;
+
+ /* Picture */
+ int new_picture;
+ int new_fragment;
+ struct bspp_picture_data picture_data;
+
+ /* Additional pictures (MVC extension) */
+ int new_sub_sequence;
+ struct bspp_sequ_hdr_info ext_sequ_hdr_info;
+ /* non-base view pictures + picture prefix for next frame */
+ struct bspp_picture_data ext_pictures_data[VDEC_H264_MVC_MAX_VIEWS];
+ unsigned int num_ext_pictures;
+
+ /*
+ * Additional information
+ * Flags word to indicate error in parsing/decoding - see
+ * #VDEC_eErrorType
+ */
+ unsigned int error_flags;
+};
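+
+/*
+ * Sketch of a hypothetical consumer of this structure (illustrative only):
+ * the new_sequence/new_picture flags gate which members hold valid data.
+ *
+ *      if (preparsed_data.new_sequence)
+ *              apply_sequence(&preparsed_data.sequ_hdr_info);
+ *      if (preparsed_data.new_picture)
+ *              decode_picture(&preparsed_data.picture_data);
+ *      if (preparsed_data.error_flags & BSPP_ERROR_UNRECOVERABLE)
+ *              abort_picture();
+ */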
+
+/*
+ * struct bspp_picture_decoded - used to store picture-decoded information for
+ * resource handling (sequences/PPSs)
+ */
+struct bspp_picture_decoded {
+ void **lst_link;
+ unsigned int sequ_hdr_id;
+ unsigned int pps_id;
+ unsigned int second_pps_id;
+ int not_decoded;
+ struct vdec_raw_bstr_data *sei_raw_data_first_field;
+ struct vdec_raw_bstr_data *sei_raw_data_second_field;
+};
+
+/*
+ * @Function bspp_stream_create
+ * @Description Creates a stream context for which to pre-parse bitstream
+ * buffers. The following allocations will take place:
+ * - Local storage for high-level header parameters (secure)
+ * - Host memory for common sequence information (insecure)
+ * - Device memory for Sequence information (secure)
+ * - Device memory for PPS (secure, H.264 only)
+ * @Input str_config_data : config data corresponding to bitstream
+ * @Output str_context : A pointer used to return the stream context handle
+ * @Input fw_sequ: FW sequence data
+ * @Input fw_pps: FW pps data
+ * @Return This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_stream_create(const struct vdec_str_configdata *str_config_data,
+ void **str_context,
+ struct bspp_ddbuf_array_info fw_sequ[],
+ struct bspp_ddbuf_array_info fw_pps[]);
+
+/*
+ * @Function bspp_set_codec_config
+ * @Description This function is used to set the out-of-band codec config data.
+ * @Input str_context_handle : Stream context handle.
+ * @Input codec_config : Codec-config data
+ * @Return This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_set_codec_config(const void *str_context_handle,
+ const struct vdec_codec_config *codec_config);
+
+/*
+ * @Function bspp_stream_destroy
+ * @Description Destroys a stream context used to pre-parse bitstream buffers.
+ * @Input str_context_handle : Stream context handle.
+ * @Return This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_stream_destroy(void *str_context_handle);
+
+/*
+ * @Function bspp_submit_picture_decoded
+ */
+int bspp_submit_picture_decoded(void *str_context_handle,
+ struct bspp_picture_decoded *picture_decoded);
+
+/*
+ * @Function bspp_stream_submit_buffer
+ */
+int bspp_stream_submit_buffer(void *str_context_handle,
+ const struct bspp_ddbuf_info *ddbuf_info,
+ unsigned int bufmap_id,
+ unsigned int data_size,
+ void *pict_tag_param,
+ enum vdec_bstr_element_type bstr_element_type);
+
+/*
+ * @Function bspp_stream_preparse_buffers
+ * @Description Pre-parses bitstream buffers and returns picture information
+ * in a structure that also signals when the buffer is last in picture.
+ * @Input str_context_handle: Stream context handle.
+ * @Input contiguous_buf_info : Contiguous buffer information (for data made
+ * up of multiple segments that may be non-contiguous in memory)
+ * @Input contiguous_buf_map_id : Contiguous buffer map id
+ * @Input segments: Pointer to a list of segments (see #VDECDD_sBitStrSeg)
+ * @Output preparsed_data: Container to return picture information. Only
+ * provide when buffer is last in picture (see #bForceEop in
+ * function #VDEC_StreamSubmitBstrBuf)
+ * @Input eos_flag: flag indicating the end of picture
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+ */
+int bspp_stream_preparse_buffers
+ (void *str_context_handle,
+ const struct bspp_ddbuf_info *contiguous_buf_info,
+ unsigned int contiguous_buf_map_id,
+ struct lst_t *segments,
+ struct bspp_preparsed_data *preparsed_data,
+ int eos_flag);
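+
+/*
+ * Note: judging from the implementation, the segments list is used as a pool
+ * of free #bspp_bitstr_seg containers that the pre-parser draws from when
+ * creating segments, so the caller pre-populates it. A minimal sketch (array
+ * name and count are hypothetical):
+ *
+ *      lst_init(&free_segments);
+ *      for (i = 0; i < num_free_segs; i++)
+ *              lst_add(&free_segments, &seg_pool[i]);
+ */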
+
+#endif /* __BSPP_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/bspp_int.h b/drivers/media/platform/vxe-vxd/decoder/bspp_int.h
new file mode 100644
index 000000000000..e37c8c9c415b
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/bspp_int.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Bitstream Buffer Pre-Parser Internal
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __BSPP_INT_H__
+#define __BSPP_INT_H__
+
+#include "bspp.h"
+#include "swsr.h"
+
+#define VDEC_MB_DIMENSION (16)
+#define MAX_COMPONENTS (4)
+
+#define print_value(a, ...)
+
+#define BSPP_DEFAULT_SEQUENCE_ID (0)
+
+enum bspp_unit_type {
+ BSPP_UNIT_NONE = 0,
+ /* Only relevant for HEVC. */
+ BSPP_UNIT_VPS,
+ /* Only relevant for h.264 and HEVC */
+ BSPP_UNIT_SEQUENCE, BSPP_UNIT_PPS,
+ /*
+ * Data from these units should be placed in non-picture bitstream
+ * segment lists. In conformant streams these units should not occur
+ * in-between the picture data.
+ */
+ BSPP_UNIT_PICTURE,
+ BSPP_UNIT_SKIP_PICTURE,
+ BSPP_UNIT_NON_PICTURE,
+ BSPP_UNIT_UNCLASSIFIED,
+ /* Unit is unsupported, don't change segment list */
+ BSPP_UNIT_UNSUPPORTED,
+ BSPP_UNIT_MAX,
+ BSPP_UNIT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct bspp_raw_bitstream_data {
+ void **lst_link;
+ unsigned int ref_count;
+ struct vdec_raw_bstr_data raw_bitstream_data;
+};
+
+/*
+ * struct bspp_h264_inter_pict_ctx
+ * @Brief: This structure contains H264 state to be retained between pictures.
+ */
+struct bspp_h264_inter_pict_ctx {
+ /*
+ * The following get applied to every picture until updated
+ * (bitstream properties)
+ */
+ int disable_vdmc_filt;
+ int b4x4transform_mb_unavailable;
+ /*
+ * The following get applied to the next picture only
+ * (picture properties)
+ */
+ int repeat_first_field;
+ unsigned int max_frm_repeat;
+ /*
+ * Control variable to decide when to attach the SEI info
+ * (picture properties) to a picture
+ */
+ int sei_info_attached_to_pic;
+ /*
+ * The following variable is an approximation because we cannot
+ * parse out-of-order, it takes value as described:
+ * 1) Initially it is BSPP_INVALID
+ * 2) The first SPS sets it to its SPSid
+ * 3) The last bspp_H264SeiBufferingPeriod sets it, and it is used
+ * for every SEI parsing until updated by another
+ * bspp_H264SeiBufferingPeriod message
+ */
+ unsigned int active_sps_for_sei_parsing;
+ unsigned short current_view_id;
+ struct vdec_raw_bstr_data *sei_raw_data_list;
+};
+
+/* This structure contains HEVC state to be retained between pictures. */
+struct bspp_hevc_inter_pict_ctx {
+ /* Picture count in a sequence */
+ unsigned int seq_pic_count;
+ struct {
+ /* There was EOS NAL detected and no new picture yet */
+ unsigned eos_detected : 1;
+ /* This is first picture after EOS NAL */
+ unsigned first_after_eos : 1;
+ };
+
+ /* control variable to decide when to attach the SEI info
+ * (picture properties) to a picture.
+ */
+ unsigned char sei_info_attached_to_pic;
+ /* Raw SEI list to be attached to a picture. */
+ struct vdec_raw_bstr_data *sei_rawdata_list;
+ /* Handle to a picture header field to attach the raw SEI list to. */
+ void **hndl_pichdr_sei_rawdata_list;
+};
+
+/*
+ * struct bspp_inter_pict_data
+ * @Brief This structure contains state to be retained between pictures.
+ */
+struct bspp_inter_pict_data {
+ /* A closed GOP has occurred in the bitstream. */
+ int seen_closed_gop;
+ /* Closed GOP has been signaled by a unit before the next picture */
+ int new_closed_gop;
+ /* Indicates whether or not DPB flush is needed */
+ int not_dpb_flush;
+ struct lst_t pic_prefix_seg;
+ union {
+ struct bspp_h264_inter_pict_ctx h264_ctx;
+ struct bspp_hevc_inter_pict_ctx hevc_ctx;
+ };
+};
+
+/*
+ * struct bspp_parse_state
+ * @Brief This structure contains parse state
+ */
+struct bspp_parse_state {
+ struct bspp_inter_pict_data *inter_pict_ctx;
+ int initialised;
+
+ /* Input/Output (H264 etc. state). */
+ /* For SCP ASO detection we need to log 3 components */
+ unsigned int prev_first_mb_in_slice[MAX_COMPONENTS];
+ struct bspp_pict_hdr_info *next_pict_hdr_info;
+ unsigned char prev_bottom_pic_flag;
+ unsigned char second_field_flag;
+ unsigned char next_pic_is_new;
+ unsigned int prev_frame_num;
+ unsigned int prev_pps_id;
+ unsigned int prev_field_pic_flag;
+ unsigned int prev_nal_ref_idc;
+ unsigned int prev_pic_order_cnt_lsb;
+ int prev_delta_pic_order_cnt_bottom;
+ int prev_delta_pic_order_cnt[2];
+ int prev_nal_unit_type;
+ int prev_idr_pic_id;
+ int discontinuous_mb;
+ /* Position in bitstream before parsing a unit */
+ unsigned long long prev_byte_offset_buf;
+ unsigned int prev_buf_map_id;
+ unsigned int prev_buf_data_size;
+ /*
+ * Flags word to indicate error in parsing/decoding
+ * - see #VDEC_eErrorType.
+ */
+ unsigned int error_flags;
+ /* Outputs. */
+ int new_closed_gop;
+ unsigned char new_view;
+ unsigned char is_prefix;
+ int first_chunk;
+};
+
+/*
+ * struct bspp_pps_info
+ * @Brief Contains PPS information
+ */
+struct bspp_pps_info {
+ void **lst_link;
+ /* PPS Id. INSECURE MEMORY HOST */
+ unsigned int pps_id;
+ /* Reference count for PPS. INSECURE MEMORY HOST */
+ unsigned int ref_count;
+ struct bspp_ddbuf_array_info fw_pps;
+ /* Buffer ID to be used in Kernel */
+ unsigned int bufmap_id;
+ /* Parsing Info. SECURE MEMORY HOST */
+ void *secure_pps_info;
+ /* Buffer Offset to be used in kernel */
+ unsigned int buf_offset;
+};
+
+/*
+ * struct bspp_sequence_hdr_info
+ * @Brief Contains SPS information
+ */
+struct bspp_sequence_hdr_info {
+ void **lst_link;
+ /* Reference count for sequence header */
+ unsigned int ref_count;
+ struct bspp_sequ_hdr_info sequ_hdr_info;
+ struct bspp_ddbuf_array_info fw_sequence;
+ /* Parsing Info. SECURE MEMORY HOST */
+ void *secure_sequence_info;
+};
+
+enum bspp_element_status {
+ BSPP_UNALLOCATED = 0,
+ BSPP_AVAILABLE,
+ BSPP_UNAVAILABLE,
+ BSPP_STATUSMAX,
+ BSPP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct bspp_vps_info {
+ void **lst_link;
+ /* VPS Id INSECURE MEMORY HOST */
+ unsigned int vps_id;
+ /* Reference count for video header. INSECURE MEMORY HOST */
+ unsigned int ref_count;
+ /* Parsing Info. SECURE MEMORY HOST */
+ void *secure_vpsinfo;
+};
+
+/*
+ * struct bspp_unit_data
+ * @Brief Contains bitstream unit data
+ */
+struct bspp_unit_data {
+ /* Input. */
+ /* Indicates which output data to populate */
+ enum bspp_unit_type unit_type;
+ /* Video Standard of unit to parse */
+ enum vdec_vid_std vid_std;
+ /* Indicates whether delimiter is present for unit */
+ int delim_present;
+ /* Codec configuration used by this stream */
+ const struct vdec_codec_config *codec_config;
+ void *str_res_handle;
+ /* Needed for calculating the size of the last fragment */
+ unsigned int unit_data_size;
+ /* Input/Output. */
+ struct bspp_parse_state *parse_state;
+ /* Output */
+ /* eVidStd == VDEC_STD_H263 && BSPP_UNIT_PICTURE. */
+ struct bspp_sequence_hdr_info *impl_sequ_hdr_info;
+ /* Union of output data for each of the unit types. */
+ union {
+ /* BSPP_UNIT_SEQUENCE. */
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+ /* BSPP_UNIT_PPS. */
+ struct bspp_pps_info *pps_info;
+ /* BSPP_UNIT_PICTURE. */
+ struct bspp_pict_hdr_info *pict_hdr_info;
+ /* For Video Header (HEVC) */
+ struct bspp_vps_info *vps_info;
+ } out;
+
+ /*
+ * For picture it should give the SequenceHdrId, for anything
+ * else it should contain BSPP_INVALID. This value is pre-loaded
+ * with the sequence ID of the last picture.
+ */
+ unsigned int pict_sequ_hdr_id;
+ /* State: output. */
+ /*
+ * Picture unit (BSPP_UNIT_PICTURE) contains slice data.
+ * Picture header information must be populated once this unit has been
+ * parsed.
+ */
+ int slice;
+ int ext_slice; /* Current slice belongs to non-base view (MVC only) */
+ /*
+ * True if we meet a unit that signifies closed gop, different
+ * for each standard.
+ */
+ int new_closed_gop;
+ /* True if the end of a sequence of pictures has been reached. */
+ int sequence_end;
+ /*
+ * Extracted all data from unit whereby shift-register should now
+ * be at the next delimiter or end of data (when byte-aligned).
+ */
+ int extracted_all_data;
+ /* Indicates the presence of any errors while processing this unit. */
+ enum bspp_error_type parse_error;
+ /* To turn on/off considering I-Frames as ClosedGop boundaries. */
+ int intra_frm_as_closed_gop;
+};
+
+/*
+ * struct bspp_swsr_ctx
+ * @brief BSPP Software Shift Register Context Information
+ */
+struct bspp_swsr_ctx {
+ /*
+ * Default configuration for the shift-register for this
+ * stream. The delimiter type may be adjusted for each unit
+ * where the buffer requires it. Information about how to
+ * process each unit will be passed down with the picture
+ * header information.
+ */
+ struct swsr_config sr_config;
+ /*
+ * Emulation prevention scheme present in bitstream. This is
+ * sometimes not ascertained (e.g. VC-1) until the first
+ * bitstream buffer (often codec configuration) has been
+ * received.
+ */
+ enum swsr_emprevent emulation_prevention;
+ /* Software shift-register context. */
+ void *swsr_context;
+};
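+
+/*
+ * Sketch (illustrative only) of how a standard-specific parser might prime
+ * this context for a size-delimited stream; the 32-bit delimiter length is
+ * an assumption, not a requirement of the interface:
+ *
+ *      swsr_ctx->sr_config.delim_type = SWSR_DELIM_SIZE;
+ *      swsr_ctx->sr_config.delim_length = 32;
+ */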
+
+/*
+ * struct bspp_vid_std_features
+ * @brief BSPP Video Standard Specific Features and Information
+ */
+struct bspp_vid_std_features {
+ /* The size of the sequence header structure for this video standard */
+ unsigned long seq_size;
+ /* This video standard uses Picture Parameter Sets. */
+ int uses_pps;
+ /*
+ * The size of the Picture Parameter Sets structure for
+ * this video standard.
+ */
+ unsigned long pps_size;
+ /* This video standard uses Video Parameter Sets. */
+ int uses_vps;
+ /*
+ * The size of the Video Parameter Sets structure for
+ * this video standard
+ */
+ unsigned long vps_size;
+};
+
+/*
+ * @Function bspp_cb_parse_unit
+ * @Description Function prototype for the parse unit callback functions.
+ * @Input swsr_context_handle: A handle to software shift-register context
+ * @InOut unit_data: A pointer to unit data which includes input and output
+ * parameters as defined by the structure.
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_parse_unit)(void *swsr_context_handle,
+ struct bspp_unit_data *unit_data);
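+
+/*
+ * A parse-unit callback fills the member of the output union that matches
+ * the requested unit_type. Minimal sketch (the function and its internals
+ * are hypothetical):
+ *
+ *      static int xyz_parse_unit(void *swsr_ctx, struct bspp_unit_data *ud)
+ *      {
+ *              if (ud->unit_type == BSPP_UNIT_PICTURE) {
+ *                      ud->slice = 1;
+ *                      ud->out.pict_hdr_info->intra_coded = 1;
+ *              }
+ *              return IMG_SUCCESS;
+ *      }
+ */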
+
+/*
+ * @Function bspp_pfnReleaseData
+ * @Description This is a function prototype for the data releasing callback
+ * functions.
+ * @Input str_alloc_handle : A handle to stream related resources.
+ * @Input data_type : A type of data which is to be released.
+ * @Input data_handle : A handle for data which is to be released.
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_release_data)(void *str_alloc_handle,
+ enum bspp_unit_type data_type,
+ void *data_handle);
+
+/*
+ * @Function bspp_cb_reset_data
+ * @Description This is a function prototype for the data resetting callback
+ * functions.
+ * @Input data_type : A type of data which is to be reset.
+ * @InOut data_handle : A handle for data which is to be reset.
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_reset_data)(enum bspp_unit_type data_type,
+ void *data_handle);
+
+/*
+ * @Function bspp_cb_destroy_data
+ * @Description This is a function prototype for the data destruction callback
+ * functions.
+ * @Input data_type : A type of data which is to be destroyed.
+ * @InOut data_handle : A handle for data which is to be destroyed.
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_destroy_data)(enum bspp_unit_type data_type,
+ void *data_handle);
+
+/*
+ * @Function bspp_cb_parse_codec_config
+ * @Description This is a function prototype for parsing codec config bitstream
+ * element for size delimited bitstreams.
+ * @Input swsr_context_handle: A handle to Shift Register processing
+ * current bitstream.
+ * @Output unit_count: A pointer to variable in which to return unit count.
+ * @Output unit_array_count: A pointer to variable in which to return unit
+ * array count.
+ * @Output delim_length: A pointer to variable in which to return NAL
+ * delimiter length in bits.
+ * @Output size_delim_length: A pointer to variable in which to return size
+ * delimiter length in bits.
+ * @Return None.
+ */
+typedef void (*bspp_cb_parse_codec_config)(void *swsr_context_handle,
+ unsigned int *unit_count,
+ unsigned int *unit_array_count,
+ unsigned int *delim_length,
+ unsigned int *size_delim_length);
+
+/*
+ * @Function bspp_cb_update_unit_counts
+ * @Description This is a function prototype for updating unit counts for size
+ * delimited bitstreams.
+ * @Input swsr_context_handle: A handle to Shift Register processing
+ * current bitstream.
+ * @InOut unit_count: A pointer to variable holding current unit count
+ * @InOut unit_array_count: A pointer to variable holding current unit
+ * array count.
+ * @Return None.
+ */
+typedef void (*bspp_cb_update_unit_counts)(void *swsr_context_handle,
+ unsigned int *unit_count,
+ unsigned int *unit_array_count);
+
+/*
+ * @Function bspp_cb_initialise_parsing
+ * @Description This prototype is for unit group parsing initialization.
+ * @InOut parse_state: The current unit group parsing state.
+ * @Return None.
+ */
+typedef void (*bspp_cb_initialise_parsing)(struct bspp_parse_state *parse_state);
+
+/*
+ * @Function bspp_cb_finalise_parsing
+ * @Description This is a function prototype for unit group parsing
+ * finalization.
+ * @Input str_alloc_handle: A handle to stream related resources.
+ * @InOut parse_state: The current unit group parsing state.
+ * @Return None.
+ */
+typedef void (*bspp_cb_finalise_parsing)(void *str_alloc_handle,
+ struct bspp_parse_state *parse_state);
+
+/*
+ * struct bspp_parser_callbacks
+ * @brief BSPP Standard Related Parser Callback Functions
+ */
+struct bspp_parser_callbacks {
+ /* Pointer to standard-specific unit parsing callback function. */
+ bspp_cb_parse_unit parse_unit_cb;
+ /* Pointer to standard-specific data releasing callback function. */
+ bspp_cb_release_data release_data_cb;
+ /* Pointer to standard-specific data resetting callback function. */
+ bspp_cb_reset_data reset_data_cb;
+ /* Pointer to standard-specific data destruction callback function. */
+ bspp_cb_destroy_data destroy_data_cb;
+ /* Pointer to standard-specific codec config parsing callback function */
+ bspp_cb_parse_codec_config parse_codec_config_cb;
+ /* Pointer to standard-specific unit count updating callback function */
+ bspp_cb_update_unit_counts update_unit_counts_cb;
+ /*
+ * Pointer to standard-specific unit group parsing initialization
+ * function.
+ */
+ bspp_cb_initialise_parsing initialise_parsing_cb;
+ /*
+ * Pointer to standard-specific unit group parsing finalization
+ * function
+ */
+ bspp_cb_finalise_parsing finalise_parsing_cb;
+};
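+
+/*
+ * Illustrative sketch (not part of this interface): a standard-specific
+ * backend is expected to populate this table from its
+ * bspp_cb_set_parser_config implementation, leaving unused slots NULL,
+ * e.g. (the callback names below are hypothetical):
+ *
+ *	parser_callbacks->parse_unit_cb = my_std_parse_unit;
+ *	parser_callbacks->release_data_cb = my_std_release_data;
+ */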
+
+/*
+ * @Function bspp_cb_set_parser_config
+ * @Description This is a function prototype for the parser configuration
+ * setting callback functions.
+ * @Input bstr_format: Input bitstream format.
+ * @Output vid_std_features: Features of video standard for this bitstream.
+ * @Output swsr_ctx: Software Shift Register settings for this bitstream.
+ * @Output parser_callbacks: Parser functions to be used for parsing this
+ * bitstream.
+ * @Output inter_pict_data: Inter-picture settings specific for this
+ * bitstream.
+ * @Return int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_set_parser_config)(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *vid_std_features,
+ struct bspp_swsr_ctx *swsr_ctx,
+ struct bspp_parser_callbacks *parser_callbacks,
+ struct bspp_inter_pict_data *inter_pict_data);
+
+/*
+ * @Function bspp_cb_determine_unit_type
+ * @Description This is a function prototype for determining the BSPP unit type
+ * based on the bitstream (video standard specific) unit type
+ * callback functions.
+ * @Input bitstream_unit_type: Bitstream (video standard specific) unit
+ * type.
+ * @Input disable_mvc: Skip MVC related units (relevant for standards
+ * that support it).
+ * @InOut bspp_unit_type *: Last BSPP unit type on input. Current BSPP
+ * unit type on output.
+ * @Return None.
+ */
+typedef void (*bspp_cb_determine_unit_type)(unsigned char bitstream_unit_type,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unit_type);
+
+struct bspp_pps_info *bspp_get_pps_hdr(void *str_res_handle, unsigned int pps_id);
+
+struct bspp_sequence_hdr_info *bspp_get_sequ_hdr(void *str_res_handle,
+ unsigned int sequ_id);
+
+struct bspp_vps_info *bspp_get_vpshdr(void *str_res, unsigned int vps_id);
+
+void bspp_streamrelese_rawbstrdataplain(const void *str_res,
+ const void *rawdata);
+
+void bspp_freeraw_sei_datacontainer(const void *str_res,
+ struct vdec_raw_bstr_data *rawsei_datacontainer);
+
+void bspp_freeraw_sei_datalist(const void *str_res,
+ struct vdec_raw_bstr_data *rawsei_datalist);
+
+#endif /* __BSPP_INT_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/core.c b/drivers/media/platform/vxe-vxd/decoder/core.c
new file mode 100644
index 000000000000..e6887ee93abc
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/core.c
@@ -0,0 +1,3719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Decoder Core component function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include "core.h"
+#include "decoder.h"
+#include "img_errors.h"
+#include "img_pixfmts.h"
+#include "img_profiles_levels.h"
+#include "lst.h"
+#include "resource.h"
+#include "rman_api.h"
+#include "vdecdd_utils.h"
+#include "vdec_mmu_wrapper.h"
+#include "vxd_dec.h"
+
+#ifdef HAS_HEVC
+#define SEQ_RES_NEEDED
+#define GENC_BUFF_COUNT 4
+#endif
+
+/*
+ * This enum defines resource availability masks.
+ * @brief Resource Availability
+ */
+enum core_availability {
+ CORE_AVAIL_PICTBUF = (1 << 0),
+ CORE_AVAIL_PICTRES = (1 << 1),
+ CORE_AVAIL_CORE = (1 << 2),
+ CORE_AVAIL_MAX,
+ CORE_AVAIL_FORCE32BITS = 0x7FFFFFFFU
+};
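+
+/*
+ * Illustrative use of the masks above: a picture can only be submitted
+ * for decode once every required resource bit is set, i.e. when
+ *
+ *	(avail & (CORE_AVAIL_PICTBUF | CORE_AVAIL_PICTRES | CORE_AVAIL_CORE))
+ *		== (CORE_AVAIL_PICTBUF | CORE_AVAIL_PICTRES | CORE_AVAIL_CORE)
+ *
+ * where avail is the mask computed by core_get_resource_availability().
+ */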
+
+struct core_mbparam_alloc_info {
+ unsigned char alloc_mbparam_bufs;
+ unsigned int mbparam_size;
+ unsigned int overalloc_mbnum;
+};
+
+static struct core_mbparam_alloc_info mbparam_allocinfo[VDEC_STD_MAX - 1] = {
+ /* AllocFlag MBParamSize Overalloc */
+ /* MPEG2 */ { TRUE, 0xc8, 0 },
+ /* MPEG4 */ { TRUE, 0xc8, 0 },
+ /* H263 */ { TRUE, 0xc8, 0 },
+ /* H264 */ { TRUE, 0x80, 0 },
+ /* VC1 */ { TRUE, 0x80, (4096 * 2) / 0x80 },
+ /* AVS */ { TRUE, 0x80, 0 },
+ /* REAL */ { TRUE, 0x80, 0 },
+ /* JPEG */ { FALSE, 0x00, 0 },
+ /* VP6 */ { TRUE, 0x80, 0 },
+ /* VP8 */ { TRUE, 0x80, 0 },
+ /* SORENSON */ { TRUE, 0xc8, 0 },
+ /* HEVC */ { TRUE, 0x40, 0 },
+};
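+
+/*
+ * Note on the table above: the VC1 entry over-allocates
+ * (4096 * 2) / 0x80 = 64 extra MB entries on top of the per-picture MB
+ * count, while every other standard that needs MB parameter buffers
+ * uses the exact MB count for the coded picture size.
+ */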
+
+struct vxdio_mempool {
+ unsigned int mem_heap_id;
+ enum sys_emem_attrib mem_attrib;
+};
+
+static unsigned int global_avail_slots;
+static unsigned char is_core_initialized;
+
+/*
+ * This structure contains the core Context.
+ * @brief core Context
+ */
+struct core_context {
+ struct vdecdd_dddev_context *dev_ctx;
+ /* List of stream context structures */
+ struct lst_t core_str_ctx;
+ vxd_cb vxd_str_processed_cb;
+};
+
+/* Global Core Context */
+static struct core_context *global_core_ctx;
+
+/*
+ * This structure contains the picture buffer size info.
+ * @brief Picture Buffer Size Info
+ */
+struct core_pict_bufsize_info {
+ unsigned int mbparams_bufsize;
+
+#ifdef HAS_HEVC
+ union {
+ struct hevc_bufsize_pict {
+ /* Size of GENC fragment buffer for HEVC */
+ unsigned int genc_fragment_bufsize;
+ } hevc_bufsize_pict;
+ };
+#endif
+};
+
+/*
+ * This structure contains the sequence resource info.
+ * @brief Sequence Resource Info
+ */
+struct core_seq_resinfo {
+ union {
+#ifdef HAS_HEVC
+ struct hevc_bufsize_seqres {
+			unsigned int genc_bufsize; /* Size of GENC buffers for HEVC */
+			unsigned int intra_bufsize; /* Size of the intra buffer for HEVC */
+			unsigned int aux_bufsize; /* Size of the auxiliary buffer for HEVC */
+ } hevc_bufsize_seqres;
+#endif
+
+#ifndef SEQ_RES_NEEDED
+ unsigned int dummy;
+#endif
+ };
+};
+
+struct core_pict_resinfo {
+ unsigned int pict_res_num;
+ struct core_pict_bufsize_info size_info;
+ unsigned char is_valid;
+};
+
+/*
+ * This structure contains the standard specific part of the stream context.
+ * @brief Standard Specific Context
+ */
+struct core_std_spec_context {
+ union {
+#ifdef HAS_HEVC
+ struct hevc_ctx {
+ /* Counts genc buffer allocations */
+ unsigned short genc_id_gen;
+ } hevc_ctx;
+#else
+ unsigned int dummy;
+#endif
+ };
+};
+
+struct core_stream_context;
+
+struct core_std_spec_operations {
+ /* Allocates standard specific picture buffers. */
+ int (*alloc_picture_buffers)(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pict_resint,
+ struct vxdio_mempool mem_pool,
+ struct core_pict_resinfo *pict_res_info);
+
+ /* Frees standard specific picture buffers. */
+ int (*free_picture_resource)(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pic_res_int);
+
+ /* Allocates standard specific sequence buffers. */
+ int (*alloc_sequence_buffers)(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seq_res_int,
+ struct vxdio_mempool mem_pool,
+ struct core_seq_resinfo *seq_res_info);
+
+ /* Frees standard specific sequence buffers. */
+ int (*free_sequence_resource)(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seq_res_int);
+
+ /* Returns buffer's sizes (common and standard specific). */
+ int (*bufs_get_size)(struct core_stream_context *core_strctx,
+ const struct vdec_comsequ_hdrinfo *seq_hdrinfo,
+ struct vdec_pict_size *max_pict_size,
+ struct core_pict_bufsize_info *size_info,
+ struct core_seq_resinfo *seq_resinfo,
+ unsigned char *resource_needed);
+
+ /* Checks whether resource is still suitable. */
+ unsigned char (*is_stream_resource_suitable)(struct core_pict_resinfo *pict_resinfo,
+ struct core_pict_resinfo *old_pict_resinfo,
+ struct core_seq_resinfo *seq_resinfo,
+ struct core_seq_resinfo *old_seq_resinfo);
+};
+
+/*
+ * This structure contains the core Stream Context.
+ * @brief core Stream Context
+ */
+struct core_stream_context {
+	void **link; /* to be part of a singly linked list */
+ struct core_context *core_ctx;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct vxd_dec_ctx *vxd_dec_context;
+
+ /* list of picture buffers */
+ struct lst_t pict_buf_list;
+
+ /* List of picture resources allocated for this stream */
+ struct lst_t pict_res_list;
+ struct lst_t old_pict_res_list;
+
+ struct lst_t aux_pict_res_list;
+
+#ifdef SEQ_RES_NEEDED
+ /* List of active sequence resources that are allocated for this stream. */
+ struct lst_t seq_res_list;
+ /*
+ * List of sequence resources that are allocated for this stream but no
+ * longer suitable for new sequence(s).
+ */
+ struct lst_t old_seq_res_list;
+#endif
+
+ /* List of sequence header information */
+ struct lst_t seq_hdr_list;
+ /* Queue of stream units to be processed */
+ struct lst_t str_unit_list;
+
+ struct vdec_comsequ_hdrinfo comseq_hdr_info;
+ unsigned char opcfg_set;
+ /* Picture buffer layout to use for decoding. */
+ struct vdecdd_ddpict_buf disp_pict_buf;
+ struct vdec_str_opconfig op_cfg;
+ unsigned char new_seq;
+ unsigned char new_op_cfg;
+ unsigned char no_prev_refs_used;
+ unsigned int avail_slots;
+ unsigned int res_avail;
+ unsigned char stopped;
+ struct core_pict_resinfo pict_resinfo;
+ /* Current sequence resource info. */
+ struct core_seq_resinfo seq_resinfo;
+
+ /* Reconstructed picture buffer */
+ struct vdecdd_ddpict_buf recon_pictbuf;
+ /* Coded picture size of last reconfiguration */
+ struct vdec_pict_size coded_pict_size;
+ /* Standard specific operations. */
+ struct core_std_spec_operations *std_spec_ops;
+ /* Standard specific context. */
+ struct core_std_spec_context std_spec_context;
+};
+
+#ifdef HAS_HEVC
+static int core_free_hevc_picture_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pic_res_int);
+
+static int core_free_hevc_sequence_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seq_res_int);
+
+static int core_hevc_bufs_get_size(struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *seq_hdr_info,
+ struct vdec_pict_size *max_pict_size,
+ struct core_pict_bufsize_info *size_info,
+ struct core_seq_resinfo *seq_res_info,
+ unsigned char *resource_needed);
+
+static unsigned char core_is_hevc_stream_resource_suitable
+ (struct core_pict_resinfo *pict_res_info,
+ struct core_pict_resinfo *old_pict_res_info,
+ struct core_seq_resinfo *seq_res_info,
+ struct core_seq_resinfo *old_seq_res_info);
+
+static int core_alloc_hevc_specific_seq_buffers(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seq_res_int,
+ struct vxdio_mempool mempool,
+ struct core_seq_resinfo *seq_res_info);
+
+static int core_alloc_hevc_specific_pict_buffers(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pict_res_int,
+ struct vxdio_mempool mempool,
+ struct core_pict_resinfo *pict_res_info);
+#endif
+
+static int
+core_common_bufs_getsize(struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ struct vdec_pict_size *max_pict_size,
+ struct core_pict_bufsize_info *size_info,
+ struct core_seq_resinfo *seq_res_info, unsigned char *res_needed);
+
+static struct core_std_spec_operations std_specific_ops[VDEC_STD_MAX - 1] = {
+ /* AllocPicture FreePicture AllocSeq FreeSeq BufsGetSize IsStreamResourceSuitable */
+ /* MPEG2 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* MPEG4 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* H263 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* H264 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = core_common_bufs_getsize,
+ .is_stream_resource_suitable = NULL},
+
+ /* VC1 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* AVS */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* REAL */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* JPEG */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* VP6 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* VP8 */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+ /* SORENSON */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+
+#ifdef HAS_HEVC
+ /* HEVC*/ { .alloc_picture_buffers = core_alloc_hevc_specific_pict_buffers,
+ .free_picture_resource = core_free_hevc_picture_resource,
+ .alloc_sequence_buffers = core_alloc_hevc_specific_seq_buffers,
+ .free_sequence_resource = core_free_hevc_sequence_resource,
+ .bufs_get_size = core_hevc_bufs_get_size,
+ .is_stream_resource_suitable = core_is_hevc_stream_resource_suitable},
+#else
+ /* HEVC */ { .alloc_picture_buffers = NULL,
+ .free_picture_resource = NULL,
+ .alloc_sequence_buffers = NULL,
+ .free_sequence_resource = NULL,
+ .bufs_get_size = NULL,
+ .is_stream_resource_suitable = NULL},
+#endif
+};
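+
+/*
+ * Every slot in the table above is optional: callers such as
+ * core_free_picture_resource() and core_pict_res_getinfo() NULL-check
+ * each operation before invoking it, so standards without
+ * standard-specific buffers leave all slots NULL.
+ */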
+
+#ifdef ERROR_CONCEALMENT
+/*
+ * This structure contains the Error Recovery Frame Store info.
+ * @brief Error Recovery Frame Store Info
+ */
+struct core_err_recovery_frame_info {
+ /* Flag to indicate if Error Recovery Frame Store is enabled for standard. */
+ unsigned char enabled;
+ /* Limitation for maximum frame size based on dimensions. */
+ unsigned int max_size;
+};
+
+static struct core_err_recovery_frame_info err_recovery_frame_info[VDEC_STD_MAX - 1] = {
+ /* enabled max_frame_size */
+ /* MPEG2 */ { TRUE, ~0 },
+ /* MPEG4 */ { TRUE, ~0 },
+ /* H263 */ { FALSE, 0 },
+ /* H264 */ { TRUE, ~0 },
+ /* VC1 */ { FALSE, 0 },
+ /* AVS */ { FALSE, 0 },
+ /* REAL */ { FALSE, 0 },
+ /* JPEG */ { FALSE, 0 },
+ /* VP6 */ { FALSE, 0 },
+ /* VP8 */ { FALSE, 0 },
+ /* SORENSON */ { FALSE, 0 },
+ /* HEVC */ { TRUE, ~0 },
+};
+#endif
+
+static void core_fw_response_cb(int res_str_id, unsigned int *msg, unsigned int msg_size,
+ unsigned int msg_flags)
+{
+ struct core_stream_context *core_str_ctx;
+ int ret;
+
+	/* Look up the core stream context (core_str_ctx) from res_str_id */
+ VDEC_ASSERT(res_str_id);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+	if (ret != IMG_SUCCESS) {
+		pr_err("could not extract core_str_context\n");
+		return;
+	}
+
+ ret = decoder_service_firmware_response(core_str_ctx->dd_str_ctx->dec_ctx,
+ msg, msg_size, msg_flags);
+	VDEC_ASSERT((ret == IMG_SUCCESS) || (ret == IMG_ERROR_FATAL));
+ if (ret != IMG_SUCCESS)
+ pr_err("decoder_service_firmware_response failed\n");
+}
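+
+/*
+ * Summary of the firmware response path: core_fw_response_cb() is
+ * registered as the decode callback in core_stream_create(); for each
+ * firmware message the stream context is recovered from res_str_id via
+ * the resource manager and the raw message is forwarded to
+ * decoder_service_firmware_response() for interpretation.
+ */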
+
+/*
+ * @Function core_initialise
+ */
+int core_initialise(void *dev_handle, unsigned int int_heap_id, void *vxd_cb_ptr)
+{
+ struct vdecdd_dd_devconfig dev_cfg_local;
+ unsigned int num_pipes_local;
+ int ret;
+
+ if (is_core_initialized)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ is_core_initialized = TRUE;
+
+ global_core_ctx = kzalloc(sizeof(*global_core_ctx), GFP_KERNEL);
+ if (!global_core_ctx) {
+ is_core_initialized = FALSE;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ global_core_ctx->dev_ctx = kzalloc(sizeof(*global_core_ctx->dev_ctx), GFP_KERNEL);
+ if (!global_core_ctx->dev_ctx) {
+ kfree(global_core_ctx);
+ global_core_ctx = NULL;
+ is_core_initialized = FALSE;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Initialise device context. */
+	global_core_ctx->dev_ctx->dev_handle = dev_handle; /* V4L2 dev handle */
+ global_core_ctx->vxd_str_processed_cb = (vxd_cb)vxd_cb_ptr;
+
+ ret = decoder_initialise(global_core_ctx->dev_ctx, int_heap_id,
+ &dev_cfg_local, &num_pipes_local,
+ &global_core_ctx->dev_ctx->dec_context);
+ if (ret != IMG_SUCCESS)
+ goto decoder_init_error;
+
+ global_core_ctx->dev_ctx->internal_heap_id = int_heap_id;
+
+#ifdef DEBUG_DECODER_DRIVER
+ /* Dump codec config */
+	pr_info("Decode slots per pipe: %d\n", dev_cfg_local.num_slots_per_pipe);
+#endif
+
+ lst_init(&global_core_ctx->core_str_ctx);
+
+ /* Ensure the resource manager is initialised.. */
+ ret = rman_initialise();
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto rman_init_error;
+
+ /* Create resource bucket.. */
+ ret = rman_create_bucket(&global_core_ctx->dev_ctx->res_buck_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto create_bucket_error;
+
+ return IMG_SUCCESS;
+
+create_bucket_error:
+ rman_deinitialise();
+
+rman_init_error:
+ decoder_deinitialise(global_core_ctx->dev_ctx->dec_context);
+
+decoder_init_error:
+ kfree(global_core_ctx->dev_ctx);
+ global_core_ctx->dev_ctx = NULL;
+ kfree(global_core_ctx);
+ global_core_ctx = NULL;
+
+ is_core_initialized = FALSE;
+
+ return ret;
+}
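+
+/*
+ * Typical bring-up order (illustrative only; argument names are
+ * placeholders):
+ *
+ *	core_initialise(dev, heap_id, cb);
+ *	core_stream_create(ctx, &str_cfg, &str_id);
+ *	core_stream_set_output_config(str_id, &op_cfg, &buf_cfg);
+ *	core_stream_play(str_id);
+ *	...
+ *	core_stream_stop(str_id);
+ *	core_stream_destroy(str_id);
+ *	core_deinitialise();
+ */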
+
+/*
+ * @Function core_check_decoder_support
+ * @Description
+ * This function determines whether Decoder supports bitstream and
+ * configuration.
+ */
+static int
+core_check_decoder_support(const struct vdecdd_dddev_context *dd_dev_ctx,
+ const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_comsequ_hdrinfo *prev_seq_hdrinfo,
+ const struct bspp_pict_hdr_info *prev_pict_hdrinfo,
+ const struct vdecdd_mapbuf_info *map_bufinfo,
+ struct vdecdd_supp_check *supp_check)
+{
+ int ret;
+ struct vdec_unsupp_flags unsupported;
+ struct vdec_pict_rendinfo disp_pict_rend_info;
+
+ memset(&disp_pict_rend_info, 0, sizeof(struct vdec_pict_rendinfo));
+
+ /*
+ * If output picture buffer information is provided create another
+ * with properties required by bitstream so that it can be compared.
+ */
+ if (supp_check->disp_pictbuf) {
+ struct vdec_pict_rend_config pict_rend_cfg;
+
+ memset(&pict_rend_cfg, 0, sizeof(pict_rend_cfg));
+
+ /*
+ * Cannot validate the display picture buffer layout without
+ * knowing the pixel format required for the output and the
+ * sequence information.
+ */
+ if (supp_check->comseq_hdrinfo && supp_check->op_cfg) {
+ pict_rend_cfg.coded_pict_size =
+ supp_check->comseq_hdrinfo->max_frame_size;
+
+ pict_rend_cfg.byte_interleave =
+ supp_check->disp_pictbuf->buf_config.byte_interleave;
+
+ pict_rend_cfg.packed =
+ supp_check->disp_pictbuf->buf_config.packed;
+
+ pict_rend_cfg.stride_alignment =
+ supp_check->disp_pictbuf->buf_config.stride_alignment;
+
+ /*
+ * Recalculate render picture layout based upon
+ * sequence and output config.
+ */
+ vdecddutils_pictbuf_getinfo(str_cfg_data,
+ &pict_rend_cfg,
+ supp_check->op_cfg,
+ &disp_pict_rend_info);
+ }
+ }
+ /* Check that the decoder supports the picture. */
+ ret = decoder_check_support(dd_dev_ctx->dec_context, str_cfg_data,
+ supp_check->op_cfg,
+ supp_check->disp_pictbuf,
+ (disp_pict_rend_info.rendered_size) ?
+ &disp_pict_rend_info : NULL,
+ supp_check->comseq_hdrinfo,
+ supp_check->pict_hdrinfo,
+ prev_seq_hdrinfo,
+ prev_pict_hdrinfo,
+ supp_check->non_cfg_req,
+ &unsupported,
+ &supp_check->features);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+	if (ret == IMG_ERROR_NOT_SUPPORTED)
+		supp_check->unsupp_flags = unsupported;
+
+ return ret;
+}
+
+/*
+ * @Function core_supported_features
+ */
+int core_supported_features(struct vdec_features *features)
+{
+ struct vdecdd_dddev_context *dd_dev_ctx;
+
+ VDEC_ASSERT(global_core_ctx);
+
+ dd_dev_ctx = global_core_ctx->dev_ctx;
+ VDEC_ASSERT(dd_dev_ctx);
+ if (!dd_dev_ctx)
+ return IMG_ERROR_NOT_INITIALISED;
+
+ return decoder_supported_features(dd_dev_ctx->dec_context, features);
+}
+
+/*
+ * @Function core_stream_stop
+ */
+int core_stream_stop(unsigned int res_str_id)
+{
+ int ret = IMG_SUCCESS;
+ struct vdecdd_str_unit *stop_unit;
+ struct vdecdd_ddstr_ctx *ddstr_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+
+ if (res_str_id == 0) {
+ pr_err("Invalid params passed to %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(core_str_ctx);
+
+ ddstr_ctx = core_str_ctx->dd_str_ctx;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(ddstr_ctx);
+
+ /*
+ * Disregard this stop request if the stream is currently
+ * stopped or being stopped.
+ */
+ if (ddstr_ctx->dd_str_state == VDECDD_STRSTATE_PLAYING) {
+		ret = vdecddutils_create_strunit(&stop_unit, NULL);
+		if (ret != IMG_SUCCESS || !stop_unit) {
+ pr_err("Failed to allocate memory for stop unit\n");
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ memset(stop_unit, 0, sizeof(*stop_unit));
+
+ stop_unit->str_unit_type = VDECDD_STRUNIT_STOP;
+ stop_unit->str_unit_tag = NULL;
+ stop_unit->decode = FALSE;
+
+ /*
+ * Since the stop is now to be passed to the decoder signal
+ * that we're stopping.
+ */
+ ddstr_ctx->dd_str_state = VDECDD_STRSTATE_STOPPING;
+ decoder_stream_process_unit(ddstr_ctx->dec_ctx, stop_unit);
+ core_str_ctx->stopped = TRUE;
+ vdecddutils_free_strunit(stop_unit);
+ }
+
+ return ret;
+}
+
+/*
+ * @Function core_is_stream_idle
+ */
+static unsigned char core_is_stream_idle(struct vdecdd_ddstr_ctx *dd_str_ctx)
+{
+ unsigned char is_stream_idle;
+
+ is_stream_idle = decoder_is_stream_idle(dd_str_ctx->dec_ctx);
+
+ return is_stream_idle;
+}
+
+/*
+ * @Function core_stream_destroy
+ */
+int core_stream_destroy(unsigned int res_str_id)
+{
+ struct vdecdd_ddstr_ctx *ddstr_ctx;
+ struct core_stream_context *core_str_ctx;
+ int ret;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+
+ if (res_str_id == 0) {
+ pr_err("Invalid params passed to %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(core_str_ctx);
+
+ ddstr_ctx = core_str_ctx->dd_str_ctx;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(ddstr_ctx);
+
+ ret = core_stream_stop(res_str_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ lst_remove(&global_core_ctx->core_str_ctx, core_str_ctx);
+
+ /* Destroy stream if idle otherwise wait and do it later */
+ if (core_is_stream_idle(ddstr_ctx))
+ rman_free_resource(ddstr_ctx->res_handle);
+
+	pr_debug("Core stream destroyed successfully\n");
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+static int
+core_picture_attach_resources(struct core_stream_context *core_str_ctx,
+ struct vdecdd_str_unit *str_unit, unsigned char check)
+{
+ unsigned int ret = IMG_SUCCESS;
+
+ /*
+ * Take sequence header from cache.
+ * Note: sequence header id must be set in PICTURE_START unit
+ */
+ str_unit->seq_hdr_info = resource_list_getbyid(&core_str_ctx->seq_hdr_list,
+ str_unit->seq_hdr_id);
+
+ /* Check is not needed e.g. when freeing resources at stream destroy */
+ if (check && !str_unit->seq_hdr_info) {
+		pr_err("[USERSID=0x%08X] Sequence header not available for current picture while attaching\n",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id);
+ ret = IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ return ret;
+}
+
+/*
+ * @Function core_handle_processed_unit
+ */
+static int core_handle_processed_unit(struct core_stream_context *c_str_ctx,
+ struct vdecdd_str_unit *str_unit)
+{
+ struct bspp_bitstr_seg *bstr_seg;
+ struct vdecdd_ddstr_ctx *dd_str_ctx = c_str_ctx->dd_str_ctx;
+ int ret;
+ struct core_context *g_ctx = global_core_ctx;
+
+ pr_debug("%s stream unit type = %d\n", __func__, str_unit->str_unit_type);
+ /* check for type of the unit */
+ switch (str_unit->str_unit_type) {
+ case VDECDD_STRUNIT_SEQUENCE_START:
+		/* Nothing to be done as the SPS is maintained until it changes */
+ break;
+
+ case VDECDD_STRUNIT_PICTURE_START:
+ /* Loop over bit stream segments.. */
+ bstr_seg = (struct bspp_bitstr_seg *)
+ lst_removehead(&str_unit->bstr_seg_list);
+
+ while (bstr_seg) {
+ lst_add(&c_str_ctx->vxd_dec_context->seg_list, bstr_seg);
+ if (bstr_seg->bstr_seg_flag & VDECDD_BSSEG_LASTINBUFF &&
+ dd_str_ctx->dd_str_state != VDECDD_STRSTATE_STOPPED) {
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ /* Get access to map info context.. */
+ ret = rman_get_resource(bstr_seg->bufmap_id, VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ g_ctx->vxd_str_processed_cb(c_str_ctx->vxd_dec_context,
+ VXD_CB_STRUNIT_PROCESSED,
+ bstr_seg->bufmap_id);
+ }
+ /* Get next segment. */
+ bstr_seg = (struct bspp_bitstr_seg *)
+ lst_removehead(&str_unit->bstr_seg_list);
+ }
+ break;
+
+ case VDECDD_STRUNIT_PICTURE_END:
+ g_ctx->vxd_str_processed_cb(c_str_ctx->vxd_dec_context,
+ VXD_CB_PICT_END, 0xFFFF);
+ break;
+
+ case VDECDD_STRUNIT_STOP:
+ /*
+ * Signal that the stream has been stopped in the
+ * device driver.
+ */
+ dd_str_ctx->dd_str_state = VDECDD_STRSTATE_STOPPED;
+
+ break;
+
+ default:
+ pr_err("Invalid stream unit type passed\n");
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[SID=0x%08X] [UTYPE=0x%08X] PROCESSED",
+ dd_str_ctx->res_str_id,
+ str_unit->str_unit_type);
+#endif
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+static int
+core_handle_decoded_picture(struct core_stream_context *core_str_ctx,
+ struct vdecdd_picture *picture, unsigned int type)
+{
+ /* Pick the client image buffer. */
+ struct vdecdd_ddbuf_mapinfo *pictbuf_mapinfo = picture->disp_pict_buf.pict_buf;
+
+ VDEC_ASSERT(pictbuf_mapinfo);
+ if (!pictbuf_mapinfo)
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+ global_core_ctx->vxd_str_processed_cb(core_str_ctx->vxd_dec_context,
+ (enum vxd_cb_type)type, pictbuf_mapinfo->buf_map_id);
+ return IMG_SUCCESS;
+}
+
+static int core_stream_processed_cb(void *handle, int cb_type, void *cb_item)
+{
+ int ret;
+ struct core_stream_context *core_str_ctx =
+ (struct core_stream_context *)handle;
+ VDEC_ASSERT(core_str_ctx);
+ if (!core_str_ctx) {
+ pr_err("NULL handle passed to core callback\n");
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ pr_debug("%s callback type = %d\n", __func__, cb_type);
+ /* Based on callback type, retrieve the item */
+ switch (cb_type) {
+ case VXD_CB_STRUNIT_PROCESSED:
+ {
+ struct vdecdd_str_unit *str_unit =
+ (struct vdecdd_str_unit *)cb_item;
+ VDEC_ASSERT(str_unit);
+ if (!str_unit) {
+ pr_err("NULL item passed to core callback type STRUNIT_PROCESSED\n");
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+ ret = core_handle_processed_unit(core_str_ctx, str_unit);
+ if (ret != IMG_SUCCESS) {
+ pr_err("core_handle_processed_unit returned error\n");
+ return ret;
+ }
+ break;
+ }
+
+ case VXD_CB_PICT_DECODED:
+ case VXD_CB_PICT_DISPLAY:
+ case VXD_CB_PICT_RELEASE:
+ {
+ struct vdecdd_picture *picture = (struct vdecdd_picture *)cb_item;
+
+ if (!picture) {
+ pr_err("NULL item passed to core callback type PICTURE_DECODED\n");
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+ ret = core_handle_decoded_picture(core_str_ctx, picture, cb_type);
+ break;
+ }
+
+ case VXD_CB_STR_END:
+ global_core_ctx->vxd_str_processed_cb(core_str_ctx->vxd_dec_context,
+ (enum vxd_cb_type)cb_type, 0);
+ ret = IMG_SUCCESS;
+
+ break;
+
+ case VXD_CB_ERROR_FATAL:
+ /*
+ * Whenever the error case occurs, we need to handle the error case.
+ * Need to forward this error to v4l2 glue layer.
+ */
+ global_core_ctx->vxd_str_processed_cb(core_str_ctx->vxd_dec_context,
+ (enum vxd_cb_type)cb_type, *((unsigned int *)cb_item));
+ ret = IMG_SUCCESS;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return ret;
+}
+
+static int core_decoder_queries(void *handle, int query, void *item)
+{
+ struct core_stream_context *core_str_ctx =
+ (struct core_stream_context *)handle;
+ VDEC_ASSERT(core_str_ctx);
+ if (!core_str_ctx) {
+ pr_err("NULL handle passed to %s callback\n", __func__);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ switch (query) {
+ case DECODER_CORE_GET_RES_LIMIT:
+ {
+ unsigned int num_img_bufs;
+ unsigned int num_res;
+
+ num_img_bufs = resource_list_getnum(&core_str_ctx->pict_buf_list);
+
+ /* Return the number of internal resources. */
+ num_res = core_str_ctx->pict_resinfo.pict_res_num;
+
+ /* Return the minimum of the two. */
+ *((unsigned int *)item) = vdec_size_min(num_img_bufs, num_res);
+ }
+ break;
+
+ default:
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+ return IMG_SUCCESS;
+}
+
+static int
+core_free_common_picture_resource(struct core_stream_context *core_str_ctx,
+ struct vdecdd_pict_resint *pict_resint)
+{
+ int ret = IMG_SUCCESS;
+
+ if (pict_resint->mb_param_buf && pict_resint->mb_param_buf->ddbuf_info.hndl_memory) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("mmu_free for buff_id[%d]\n",
+ pict_resint->mb_param_buf->ddbuf_info.buff_id);
+#endif
+ ret = mmu_free_mem(core_str_ctx->dd_str_ctx->mmu_str_handle,
+ &pict_resint->mb_param_buf->ddbuf_info);
+ if (ret != IMG_SUCCESS)
+			pr_err("MMU_Free for MBParam buffer failed with error %u\n", ret);
+
+ kfree(pict_resint->mb_param_buf);
+ pict_resint->mb_param_buf = NULL;
+ }
+ return ret;
+}
+
+static int core_free_resbuf(struct vdecdd_ddbuf_mapinfo **buf_handle, void *mmu_handle)
+{
+ int ret = IMG_SUCCESS;
+ struct vdecdd_ddbuf_mapinfo *buf = *buf_handle;
+
+ if (buf) {
+ if (buf->ddbuf_info.hndl_memory) {
+ ret = mmu_free_mem(mmu_handle, &buf->ddbuf_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+ kfree(buf);
+ *buf_handle = NULL;
+ }
+ return ret;
+}
+
+/*
+ * @Function core_free_picture_resource
+ */
+static int
+core_free_picture_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pict_resint)
+{
+ int result = IMG_SUCCESS;
+
+ /* Check input arguments */
+ if (!core_strctx || !pict_resint) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ result = core_free_common_picture_resource(core_strctx, pict_resint);
+
+ VDEC_ASSERT(core_strctx->std_spec_ops);
+ if (core_strctx->std_spec_ops->free_picture_resource)
+ core_strctx->std_spec_ops->free_picture_resource(core_strctx,
+ pict_resint);
+
+#ifdef SEQ_RES_NEEDED
+ if (pict_resint->seq_resint) {
+ resource_item_return(&pict_resint->seq_resint->ref_count);
+ pict_resint->seq_resint = 0;
+ }
+#endif
+
+ if (result == IMG_SUCCESS)
+ kfree(pict_resint);
+
+ return result;
+}
+
+/*
+ * @Function core_free_sequence_resource
+ */
+#ifdef SEQ_RES_NEEDED
+static int
+core_free_common_sequence_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seqres_int)
+{
+ int result;
+
+ result = core_free_resbuf(&seqres_int->err_pict_buf,
+ core_strctx->dd_str_ctx->mmu_str_handle);
+ if (result != IMG_SUCCESS)
+		pr_err("MMU_Free for Error Recovery Frame Store buffer failed with error %u\n",
+ result);
+
+ return result;
+}
+
+static void
+core_free_sequence_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seqres_int)
+{
+ VDEC_ASSERT(core_strctx->std_spec_ops);
+ core_free_common_sequence_resource(core_strctx, seqres_int);
+
+ if (core_strctx->std_spec_ops->free_sequence_resource)
+ core_strctx->std_spec_ops->free_sequence_resource(core_strctx, seqres_int);
+
+ kfree(seqres_int);
+}
+#endif
+
+/*
+ * @Function core_stream_resource_deprecate
+ */
+static int core_stream_resource_deprecate(struct core_stream_context *core_str_ctx)
+{
+ struct vdecdd_pict_resint *picres_int;
+ int ret;
+
+ /* Free all "old" picture resources since these should now be unused. */
+ picres_int = lst_first(&core_str_ctx->old_pict_res_list);
+ while (picres_int) {
+ if (picres_int->ref_cnt != 0) {
+			pr_warn("[USERSID=0x%08X] Internal resource should be unused since it has been deprecated before\n",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id);
+
+ picres_int = lst_next(picres_int);
+ } else {
+ struct vdecdd_pict_resint *picres_int_to_remove = picres_int;
+
+ picres_int = lst_next(picres_int);
+
+ lst_remove(&core_str_ctx->old_pict_res_list, picres_int_to_remove);
+ ret = core_free_picture_resource(core_str_ctx, picres_int_to_remove);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ }
+
+ /* Move all "active" picture resources to the "old" list if they are still in use. */
+ picres_int = lst_removehead(&core_str_ctx->pict_res_list);
+ while (picres_int) {
+ /* Remove picture resource from the list. */
+ ret = resource_list_remove(&core_str_ctx->aux_pict_res_list, picres_int);
+
+		/*
+		 * IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE is a valid return code:
+		 * e.g. during reconfiguration we clear the sPictBufferList list
+		 * and then try to remove the buffers again from the same (now
+		 * empty) list through core UNMAP_BUF messages.
+		 */
+ if (ret != IMG_SUCCESS && ret != IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE) {
+			pr_err("[USERSID=0x%08X] Failed to remove picture resource\n",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id);
+ return ret;
+ }
+ /*
+ * If the active resource is not being used, free now.
+ * Otherwise add to the old list to be freed later.
+ */
+ if (picres_int->ref_cnt == 0) {
+ ret = core_free_picture_resource(core_str_ctx, picres_int);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ } else {
+ lst_add(&core_str_ctx->old_pict_res_list, picres_int);
+ }
+ picres_int = lst_removehead(&core_str_ctx->pict_res_list);
+ }
+
+ /* Reset the resource configuration. */
+ memset(&core_str_ctx->pict_resinfo, 0, sizeof(core_str_ctx->pict_resinfo));
+
+#ifdef SEQ_RES_NEEDED
+ {
+ struct vdecdd_seq_resint *seqres_int;
+
+ /* Free all "old" sequence resources since these should now be unused. */
+ seqres_int = lst_first(&core_str_ctx->old_seq_res_list);
+ while (seqres_int) {
+ if (seqres_int->ref_count != 0) {
+				pr_warn("[USERSID=0x%08X] Internal sequence resource should be unused since it has been deprecated before\n",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id);
+ seqres_int = lst_next(seqres_int);
+ } else {
+ struct vdecdd_seq_resint *seqres_int_to_remove = seqres_int;
+
+ seqres_int = lst_next(seqres_int);
+
+ lst_remove(&core_str_ctx->old_seq_res_list, seqres_int_to_remove);
+ core_free_sequence_resource(core_str_ctx, seqres_int_to_remove);
+ }
+ }
+
+ /* Move all "active" sequence resources to the "old"
+ * list if they are still in use.
+ */
+ seqres_int = lst_removehead(&core_str_ctx->seq_res_list);
+ while (seqres_int) {
+ /*
+ * If the active resource is not being used, free now.
+ * Otherwise add to the old list to be freed later.
+ */
+			if (seqres_int->ref_count == 0)
+				core_free_sequence_resource(core_str_ctx, seqres_int);
+			else
+				lst_add(&core_str_ctx->old_seq_res_list, seqres_int);
+
+ seqres_int = lst_removehead(&core_str_ctx->seq_res_list);
+ }
+
+ /* Reset the resource configuration. */
+ memset(&core_str_ctx->seq_resinfo, 0, sizeof(core_str_ctx->seq_resinfo));
+ }
+#endif
+ return IMG_SUCCESS;
+}
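+
+/*
+ * Resource lifetime note: resources that are still referenced when
+ * deprecated migrate to the "old" lists and are freed only once their
+ * reference counts drop to zero, either on a later deprecation pass or
+ * in core_stream_resource_destroy() at stream teardown.
+ */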
+
+/*
+ * @Function core_stream_resource_destroy
+ */
+static int core_stream_resource_destroy(struct core_stream_context *core_str_ctx)
+{
+ struct vdecdd_pict_resint *picres_int;
+ int ret;
+
+ /* Remove any "active" picture resources allocated for this stream. */
+ picres_int = lst_removehead(&core_str_ctx->pict_res_list);
+ while (picres_int) {
+ ret = core_free_picture_resource(core_str_ctx, picres_int);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ picres_int = lst_removehead(&core_str_ctx->pict_res_list);
+ }
+
+ /* Remove any "old" picture resources allocated for this stream. */
+ picres_int = lst_removehead(&core_str_ctx->old_pict_res_list);
+ while (picres_int) {
+ ret = core_free_picture_resource(core_str_ctx, picres_int);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ picres_int = lst_removehead(&core_str_ctx->old_pict_res_list);
+ }
+
+ /* Reset the resource configuration. */
+ memset(&core_str_ctx->pict_resinfo, 0, sizeof(core_str_ctx->pict_resinfo));
+
+#ifdef SEQ_RES_NEEDED
+ {
+ struct vdecdd_seq_resint *seqres_int;
+
+ /* Remove any "active" sequence resources allocated for this stream. */
+ seqres_int = lst_removehead(&core_str_ctx->seq_res_list);
+ while (seqres_int) {
+ core_free_sequence_resource(core_str_ctx, seqres_int);
+ seqres_int = lst_removehead(&core_str_ctx->seq_res_list);
+ }
+
+ /* Remove any "old" sequence resources allocated for this stream. */
+ seqres_int = lst_removehead(&core_str_ctx->old_seq_res_list);
+ while (seqres_int) {
+ core_free_sequence_resource(core_str_ctx, seqres_int);
+ seqres_int = lst_removehead(&core_str_ctx->old_seq_res_list);
+ }
+
+ /* Reset the resource configuration. */
+ memset(&core_str_ctx->seq_resinfo, 0, sizeof(core_str_ctx->seq_resinfo));
+ }
+#endif
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_fn_free_stream_unit
+ */
+static int core_fn_free_stream_unit(struct vdecdd_str_unit *str_unit, void *param)
+{
+ struct core_stream_context *core_str_ctx = (struct core_stream_context *)param;
+ unsigned int ret = IMG_SUCCESS;
+
+ /* Attach picture resources where required. */
+ if (str_unit->str_unit_type == VDECDD_STRUNIT_PICTURE_START)
+ /*
+ * Do not force attachment because the resources can be
+ * unattached yet, e.g. in case of not yet processed picture
+ * units
+ */
+ ret = core_picture_attach_resources(core_str_ctx, str_unit, FALSE);
+
+ str_unit->decode = FALSE;
+
+ return ret;
+}
+
+/*
+ * @Function core_fn_free_stream
+ */
+static void core_fn_free_stream(void *param)
+{
+ int ret;
+ struct vdecdd_ddstr_ctx *dd_str_context;
+ struct vdecdd_dddev_context *dd_dev_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(param);
+
+ core_str_ctx = (struct core_stream_context *)param;
+
+ dd_str_context = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_context);
+ if (!dd_str_context)
+ return;
+
+ dd_dev_ctx = dd_str_context->dd_dev_context;
+ VDEC_ASSERT(dd_dev_ctx);
+
+ if (!lst_empty(&core_str_ctx->str_unit_list)) {
+ /*
+ * Try and empty the list. Since this function is tearing down the core stream,
+ * test result using assert and continue to tidy-up as much as possible.
+ */
+ ret = resource_list_empty(&core_str_ctx->str_unit_list, FALSE,
+ (resource_pfn_freeitem)core_fn_free_stream_unit,
+ core_str_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ if (!lst_empty(&core_str_ctx->pict_buf_list)) {
+ /*
+ * Try and empty the list. Since this function is tearing down the core stream,
+ * test result using assert and continue to tidy-up as much as possible.
+ */
+ ret = resource_list_empty(&core_str_ctx->pict_buf_list, TRUE, NULL, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ if (!lst_empty(&core_str_ctx->aux_pict_res_list)) {
+ /*
+ * Try and empty the list. Since this function is tearing down the core stream,
+ * test result using assert and continue to tidy-up as much as possible.
+ */
+ ret = resource_list_empty(&core_str_ctx->aux_pict_res_list, TRUE, NULL, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ if (!lst_empty(&core_str_ctx->seq_hdr_list)) {
+ /*
+ * Try and empty the list. Since this function is tearing down the core stream,
+ * test result using assert and continue to tidy-up as much as possible.
+ */
+ ret = resource_list_empty(&core_str_ctx->seq_hdr_list, FALSE, NULL, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ /* Destroy stream in the Decoder. */
+ if (dd_str_context->dec_ctx) {
+ ret = decoder_stream_destroy(dd_str_context->dec_ctx, FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ dd_str_context->dec_ctx = NULL;
+ }
+
+ core_stream_resource_destroy(core_str_ctx);
+
+ /* Destroy the MMU context for this stream. */
+ if (dd_str_context->mmu_str_handle) {
+ ret = mmu_stream_destroy(dd_str_context->mmu_str_handle);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ dd_str_context->mmu_str_handle = NULL;
+ }
+
+ /* Destroy the stream resources. */
+ if (dd_str_context->res_buck_handle) {
+ rman_destroy_bucket(dd_str_context->res_buck_handle);
+ dd_str_context->res_buck_handle = NULL;
+ }
+
+	/* Free the device stream context. */
+	kfree(dd_str_context);
+
+	/* Free the core stream context. */
+	kfree(core_str_ctx);
+}
+
+/*
+ * @Function core_is_unsupported
+ */
+static unsigned char core_is_unsupported(struct vdec_unsupp_flags *unsupp_flags)
+{
+ unsigned char unsupported = FALSE;
+
+ if (unsupp_flags->str_cfg || unsupp_flags->seq_hdr ||
+ unsupp_flags->pict_hdr || unsupp_flags->str_opcfg ||
+ unsupp_flags->op_bufcfg)
+ unsupported = TRUE;
+
+ return unsupported;
+}
+
+int core_stream_create(void *vxd_dec_ctx_arg,
+ const struct vdec_str_configdata *str_cfg_data,
+ unsigned int *res_str_id)
+{
+ int ret;
+ struct vdecdd_ddstr_ctx *dd_str_context;
+ struct vdecdd_supp_check supp_check;
+ struct vdecdd_dddev_context *dd_dev_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(str_cfg_data);
+ VDEC_ASSERT(res_str_id);
+
+ VDEC_ASSERT(global_core_ctx);
+ dd_dev_ctx = global_core_ctx->dev_ctx;
+
+ VDEC_ASSERT(dd_dev_ctx);
+ if (!dd_dev_ctx)
+ return IMG_ERROR_NOT_INITIALISED;
+
+ /* Allocate Core Stream Context */
+ core_str_ctx = kzalloc(sizeof(*core_str_ctx), GFP_KERNEL);
+ if (!core_str_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ core_str_ctx->core_ctx = global_core_ctx;
+ core_str_ctx->vxd_dec_context = (struct vxd_dec_ctx *)vxd_dec_ctx_arg;
+ /* register callback for firmware response */
+ core_str_ctx->vxd_dec_context->cb = (decode_cb)core_fw_response_cb;
+
+ lst_init(&core_str_ctx->pict_buf_list);
+ lst_init(&core_str_ctx->pict_res_list);
+ lst_init(&core_str_ctx->old_pict_res_list);
+ lst_init(&core_str_ctx->aux_pict_res_list);
+ lst_init(&core_str_ctx->seq_hdr_list);
+ lst_init(&core_str_ctx->str_unit_list);
+
+#ifdef SEQ_RES_NEEDED
+ lst_init(&core_str_ctx->seq_res_list);
+ lst_init(&core_str_ctx->old_seq_res_list);
+#endif
+
+ /* Allocate device stream context.. */
+ dd_str_context = kzalloc(sizeof(*dd_str_context), GFP_KERNEL);
+ VDEC_ASSERT(dd_str_context);
+ if (!dd_str_context) {
+ kfree(core_str_ctx);
+ core_str_ctx = NULL;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ dd_str_context->dd_dev_context = dd_dev_ctx;
+ core_str_ctx->dd_str_ctx = dd_str_context;
+
+ /* Check stream configuration. */
+ memset(&supp_check, 0x0, sizeof(supp_check));
+ ret = core_check_decoder_support(dd_dev_ctx, str_cfg_data, NULL, NULL, NULL, &supp_check);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (core_is_unsupported(&supp_check.unsupp_flags)) {
+ ret = IMG_ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+
+ /* Create a bucket for the resources.. */
+ ret = rman_create_bucket(&dd_str_context->res_buck_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Register the stream as a device resource.. */
+ ret = rman_register_resource(dd_dev_ctx->res_buck_handle,
+ VDECDD_STREAM_TYPE_ID,
+ core_fn_free_stream, core_str_ctx,
+ &dd_str_context->res_handle,
+ &dd_str_context->res_str_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Create unique Stream Id */
+ dd_str_context->km_str_id = core_str_ctx->vxd_dec_context->stream.id;
+
+ /*
+ * Create stream in the Decoder.
+ * NOTE: this must take place first since it creates the MMU context.
+ */
+ ret = decoder_stream_create(dd_dev_ctx->dec_context, *str_cfg_data,
+ dd_str_context->km_str_id,
+ &dd_str_context->mmu_str_handle,
+ core_str_ctx->vxd_dec_context,
+ core_str_ctx, &dd_str_context->dec_ctx,
+ (void *)core_stream_processed_cb,
+ (void *)core_decoder_queries);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Setup stream context.. */
+ dd_str_context->str_config_data = *str_cfg_data;
+ dd_str_context->dd_str_state = VDECDD_STRSTATE_STOPPED;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[SID=0x%08X] New stream created [USERSID=0x%08X]",
+ dd_str_context->res_str_id, str_cfg_data->user_str_id);
+#endif
+
+ *res_str_id = dd_str_context->res_str_id;
+	if (str_cfg_data->vid_std > 0 && str_cfg_data->vid_std < VDEC_STD_MAX) {
+		core_str_ctx->std_spec_ops = &std_specific_ops[str_cfg_data->vid_std - 1];
+	} else {
+		pr_err("%s: Invalid parameters\n", __func__);
+		ret = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+ lst_add(&global_core_ctx->core_str_ctx, core_str_ctx);
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+
+error:
+ if (dd_str_context->res_handle)
+ rman_free_resource(dd_str_context->res_handle);
+ else
+ core_fn_free_stream(core_str_ctx);
+
+ return ret;
+}
+
+static int
+core_get_resource_availability(struct core_stream_context *core_str_ctx)
+{
+ unsigned int avail = ~0;
+
+ if (resource_list_getnumavail(&core_str_ctx->pict_buf_list) == 0)
+ avail &= ~CORE_AVAIL_PICTBUF;
+
+ if (resource_list_getnumavail(&core_str_ctx->aux_pict_res_list) == 0)
+ avail &= ~CORE_AVAIL_PICTRES;
+
+ if (global_avail_slots == 0)
+ avail &= ~CORE_AVAIL_CORE;
+
+ return avail;
+}
+
+static int
+core_stream_set_pictbuf_config(struct vdecdd_ddstr_ctx *dd_str_ctx,
+ struct vdec_pict_bufconfig *pictbuf_cfg)
+{
+ int ret;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(dd_str_ctx);
+ VDEC_ASSERT(pictbuf_cfg);
+
+ /*
+ * If there are no buffers mapped or the configuration is not set
+ * (only done when reconfiguring output) then calculate the output
+ * picture buffer layout.
+ */
+ if (dd_str_ctx->map_buf_info.num_buf == 0 ||
+ dd_str_ctx->disp_pict_buf.buf_config.buf_size == 0) {
+ struct vdecdd_supp_check supp_check;
+ struct vdecdd_ddpict_buf disp_pictbuf;
+
+ memset(&disp_pictbuf, 0, sizeof(disp_pictbuf));
+
+ disp_pictbuf.buf_config = *pictbuf_cfg;
+
+ /*
+ * Ensure that the external picture buffer information
+ * is compatible with the hardware and convert to internal
+ * driver representation.
+ */
+ ret = vdecddutils_convert_buffer_config(&dd_str_ctx->str_config_data,
+ &disp_pictbuf.buf_config,
+ &disp_pictbuf.rend_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /*
+ * Provide the current state for validation against the new
+ * buffer configuration.
+ */
+ memset(&supp_check, 0, sizeof(supp_check));
+ supp_check.disp_pictbuf = &disp_pictbuf;
+
+ if (dd_str_ctx->comseq_hdr_info.max_frame_size.width)
+ supp_check.comseq_hdrinfo = &dd_str_ctx->comseq_hdr_info;
+
+ if (dd_str_ctx->str_op_configured)
+ supp_check.op_cfg = &dd_str_ctx->opconfig;
+
+ ret = core_check_decoder_support(dd_str_ctx->dd_dev_context,
+ &dd_str_ctx->str_config_data,
+ &dd_str_ctx->prev_comseq_hdr_info,
+ &dd_str_ctx->prev_pict_hdr_info,
+ &dd_str_ctx->map_buf_info,
+ &supp_check);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (core_is_unsupported(&supp_check.unsupp_flags)) {
+ ret = IMG_ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+
+ dd_str_ctx->disp_pict_buf = disp_pictbuf;
+ } else {
+ /*
+ * Check configuration of buffer matches that for stream
+ * including any picture buffers that are already mapped.
+ */
+ if (memcmp(pictbuf_cfg, &dd_str_ctx->disp_pict_buf.buf_config,
+ sizeof(*pictbuf_cfg))) {
+ /*
+ * Configuration of output buffer doesn't match the
+ * rest.
+ */
+			pr_err("[SID=0x%08X] All output buffers must have the same properties.\n",
+ dd_str_ctx->res_str_id);
+ ret = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+ }
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+
+error:
+ return ret;
+}
+
+int
+core_stream_set_output_config(unsigned int res_str_id,
+ struct vdec_str_opconfig *str_opcfg,
+ struct vdec_pict_bufconfig *pict_bufcfg_handle)
+{
+ struct vdecdd_supp_check supp_check;
+ struct vdec_pict_bufconfig pict_buf_cfg;
+ struct vdec_pict_rendinfo disp_pict_rend_info;
+ int ret;
+
+ struct vdecdd_ddstr_ctx *dd_str_context;
+ struct core_stream_context *core_str_ctx;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+
+ /* Get access to stream context */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_context = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_context);
+ VDEC_ASSERT(str_opcfg);
+
+ memset(&supp_check, 0, sizeof(supp_check));
+ if (core_str_ctx->new_seq)
+ supp_check.comseq_hdrinfo = &dd_str_context->comseq_hdr_info;
+ else
+ supp_check.comseq_hdrinfo = NULL;
+
+ supp_check.op_cfg = str_opcfg;
+
+ /*
+ * Validate stream output configuration against display
+ * buffer properties if no new picture buffer configuration
+ * is provided.
+ */
+ if (!pict_bufcfg_handle) {
+ VDEC_ASSERT(dd_str_context->disp_pict_buf.rend_info.rendered_size);
+ supp_check.disp_pictbuf = &dd_str_context->disp_pict_buf;
+ }
+
+ /* Validate output configuration. */
+ ret = core_check_decoder_support(dd_str_context->dd_dev_context,
+ &dd_str_context->str_config_data,
+ &dd_str_context->prev_comseq_hdr_info,
+ &dd_str_context->prev_pict_hdr_info,
+ &dd_str_context->map_buf_info,
+ &supp_check);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+		return ret;
+
+ if (core_is_unsupported(&supp_check.unsupp_flags))
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ /* Update the stream output configuration. */
+ dd_str_context->opconfig = *str_opcfg;
+
+ /* Mark output as configured. */
+ dd_str_context->str_op_configured = TRUE;
+
+ if (pict_bufcfg_handle) {
+ /*
+ * Clear/invalidate the latest picture buffer configuration
+ * since it is easier to reuse the set function to calculate
+ * for this new output configuration than to determine
+ * compatibility. Keep a copy beforehand just in case the new
+ * configuration is invalid.
+ */
+ if (dd_str_context->disp_pict_buf.rend_info.rendered_size != 0) {
+ pict_buf_cfg = dd_str_context->disp_pict_buf.buf_config;
+ disp_pict_rend_info = dd_str_context->disp_pict_buf.rend_info;
+
+ memset(&dd_str_context->disp_pict_buf.buf_config, 0,
+ sizeof(dd_str_context->disp_pict_buf.buf_config));
+ memset(&dd_str_context->disp_pict_buf.rend_info, 0,
+ sizeof(dd_str_context->disp_pict_buf.rend_info));
+ }
+
+ /*
+ * Recalculate the picture buffer internal layout from the
+		 * external configuration. These settings provided by the
+ * allocator should be adhered to since the display process
+ * will expect the decoder to use them.
+ * If the configuration is invalid we need to leave the
+ * decoder state as it was before.
+ */
+ ret = core_stream_set_pictbuf_config(dd_str_context, pict_bufcfg_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS && dd_str_context->disp_pict_buf.rend_info.rendered_size
+ != 0) {
+ /* Restore old picture buffer configuration */
+ dd_str_context->disp_pict_buf.buf_config =
+ pict_buf_cfg;
+ dd_str_context->disp_pict_buf.rend_info =
+ disp_pict_rend_info;
+ return ret;
+ }
+ } else if (core_is_unsupported(&supp_check.unsupp_flags)) {
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ /* Return success.. */
+ return ret;
+}
+
+/*
+ * @Function core_stream_play
+ */
+int core_stream_play(unsigned int res_str_id)
+{
+ int ret;
+ struct vdecdd_ddstr_ctx *dd_str_context;
+ struct core_stream_context *core_str_ctx;
+ /* Picture buffer layout to use for decoding. */
+ struct vdecdd_ddpict_buf *disp_pict_buf;
+ struct vdec_str_opconfig *op_cfg;
+ struct vdecdd_supp_check supp_check;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_context = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_context);
+
+ /* Ensure we are stopped. */
+ VDEC_ASSERT(dd_str_context->dd_str_state == VDECDD_STRSTATE_STOPPED);
+
+ /* Set "playing". */
+ dd_str_context->dd_str_state = VDECDD_STRSTATE_PLAYING;
+
+ /* set that is it not yet in closed GOP */
+ core_str_ctx->no_prev_refs_used = TRUE;
+
+ disp_pict_buf = dd_str_context->disp_pict_buf.rend_info.rendered_size ?
+ &dd_str_context->disp_pict_buf : NULL;
+ op_cfg = dd_str_context->str_op_configured ?
+ &dd_str_context->opconfig : NULL;
+
+ if (disp_pict_buf && op_cfg) {
+ VDEC_ASSERT(!disp_pict_buf->pict_buf);
+
+ if (memcmp(&core_str_ctx->op_cfg, op_cfg,
+ sizeof(core_str_ctx->op_cfg)) ||
+ memcmp(&core_str_ctx->disp_pict_buf, disp_pict_buf,
+ sizeof(core_str_ctx->disp_pict_buf)))
+ core_str_ctx->new_op_cfg = TRUE;
+
+ core_str_ctx->disp_pict_buf = *disp_pict_buf;
+ core_str_ctx->op_cfg = *op_cfg;
+
+ core_str_ctx->opcfg_set = TRUE;
+ } else {
+ core_str_ctx->opcfg_set = FALSE;
+ /* Must not be decoding without output configuration */
+ VDEC_ASSERT(0);
+ }
+
+ memset(&supp_check, 0, sizeof(supp_check));
+
+ if (vdec_size_nz(core_str_ctx->comseq_hdr_info.max_frame_size))
+ supp_check.comseq_hdrinfo = &core_str_ctx->comseq_hdr_info;
+
+ if (core_str_ctx->opcfg_set) {
+ supp_check.op_cfg = &core_str_ctx->op_cfg;
+ supp_check.disp_pictbuf = &core_str_ctx->disp_pict_buf;
+ }
+ supp_check.non_cfg_req = TRUE;
+ ret = core_check_decoder_support(dd_str_context->dd_dev_context,
+ &dd_str_context->str_config_data,
+ &dd_str_context->prev_comseq_hdr_info,
+ &dd_str_context->prev_pict_hdr_info,
+ &dd_str_context->map_buf_info,
+ &supp_check);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_deinitialise
+ */
+int core_deinitialise(void)
+{
+ struct vdecdd_dddev_context *dd_dev_ctx;
+ int ret;
+
+ dd_dev_ctx = global_core_ctx->dev_ctx;
+ VDEC_ASSERT(dd_dev_ctx);
+ if (!dd_dev_ctx)
+ return IMG_ERROR_NOT_INITIALISED;
+
+ ret = decoder_deinitialise(dd_dev_ctx->dec_context);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+
+ /* Free context resources.. */
+ rman_destroy_bucket(dd_dev_ctx->res_buck_handle);
+
+ rman_deinitialise();
+
+ kfree(dd_dev_ctx);
+
+ global_core_ctx->dev_ctx = NULL;
+
+ kfree(global_core_ctx);
+ global_core_ctx = NULL;
+
+ is_core_initialized = FALSE;
+
+	pr_debug("Core deinitialised successfully\n");
+ return IMG_SUCCESS;
+}
+
+static int core_get_mb_num(unsigned int width, unsigned int height)
+{
+ /*
+ * Calculate the number of MBs needed for current video
+ * sequence settings.
+ */
+ unsigned int width_mb = ALIGN(width, VDEC_MB_DIMENSION) / VDEC_MB_DIMENSION;
+ unsigned int height_mb = ALIGN(height, 2 * VDEC_MB_DIMENSION) / VDEC_MB_DIMENSION;
+
+ return width_mb * height_mb;
+}
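+
+/*
+ * Worked example (illustrative, assuming VDEC_MB_DIMENSION is 16): for
+ * a 1920x1088 frame, width_mb = 1920 / 16 = 120 and
+ * height_mb = ALIGN(1088, 32) / 16 = 68, giving 8160 MBs; with the H264
+ * entry of mbparam_allocinfo (0x80 bytes per MB) that is a
+ * 1044480-byte MB parameters buffer before device alignment.
+ */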
+
+static int core_common_bufs_getsize(struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ struct vdec_pict_size *max_pict_size,
+ struct core_pict_bufsize_info *size_info,
+ struct core_seq_resinfo *seq_res_info,
+ unsigned char *res_needed)
+{
+ enum vdec_vid_std vid_std = core_str_ctx->dd_str_ctx->str_config_data.vid_std;
+ unsigned int std_idx = vid_std - 1;
+ unsigned int mb_num = 0;
+
+	if (vid_std >= VDEC_STD_MAX)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ /* Reset the MB parameters buffer size. */
+ size_info->mbparams_bufsize = 0;
+
+ if (mbparam_allocinfo[std_idx].alloc_mbparam_bufs) {
+ *res_needed = TRUE;
+
+ /*
+ * Calculate the number of MBs needed for current video
+ * sequence settings.
+ */
+ mb_num = core_get_mb_num(max_pict_size->width, max_pict_size->height);
+
+ /* Calculate the final number of MBs needed. */
+ mb_num += mbparam_allocinfo[std_idx].overalloc_mbnum;
+
+ /* Calculate the MB params buffer size. */
+ size_info->mbparams_bufsize = mb_num * mbparam_allocinfo[std_idx].mbparam_size;
+
+ /* Adjust the buffer size for MSVDX. */
+ vdecddutils_buf_vxd_adjust_size(&size_info->mbparams_bufsize);
+
+ if (comseq_hdrinfo->separate_chroma_planes)
+ size_info->mbparams_bufsize *= 3;
+ }
+
+ return IMG_SUCCESS;
+}
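For scale, a worked example of the MB-parameter sizing just computed (illustrative values: the 64-byte entry size matches the "(0x40)" hint later in this file; the 16-MB overallocation is an assumption):

    /*
     * Illustrative sizing: for the 8160 MBs of a 1920x1080 frame, with
     * mbparam_size == 64 and overalloc_mbnum == 16 (assumed):
     *
     *   mbparams_bufsize = (8160 + 16) * 64 = 523264 bytes (~511 KiB)
     *
     * before the MSVDX adjustment, and three times that when the stream
     * uses separate chroma planes.
     */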
+
+/*
+ * @Function core_pict_res_getinfo
+ */
+static int
+core_pict_res_getinfo(struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ const struct vdec_str_opconfig *op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pictbuf,
+ struct core_pict_resinfo *pict_resinfo,
+ struct core_seq_resinfo *seq_resinfo)
+{
+ struct vdec_pict_size coded_pict_size;
+ struct dec_ctx *decctx;
+ unsigned char res_needed = FALSE;
+ int ret;
+
+ /* Reset the picture resource info. */
+ memset(pict_resinfo, 0, sizeof(*pict_resinfo));
+
+ coded_pict_size = comseq_hdrinfo->max_frame_size;
+
+ VDEC_ASSERT(core_str_ctx->std_spec_ops);
+ if (core_str_ctx->std_spec_ops->bufs_get_size)
+ core_str_ctx->std_spec_ops->bufs_get_size(core_str_ctx, comseq_hdrinfo,
+ &coded_pict_size,
+ &pict_resinfo->size_info, seq_resinfo, &res_needed);
+
+ /* If any picture resources are needed... */
+ if (res_needed) {
+ /* Get the number of resources required. */
+ ret = vdecddutils_get_minrequired_numpicts
+ (&core_str_ctx->dd_str_ctx->str_config_data,
+ comseq_hdrinfo, op_cfg,
+ &pict_resinfo->pict_res_num);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decctx = (struct dec_ctx *)global_core_ctx->dev_ctx->dec_context;
+
+ if (core_str_ctx->dd_str_ctx->str_config_data.vid_std == VDEC_STD_HEVC)
+ pict_resinfo->pict_res_num += decctx->dev_cfg->num_slots_per_pipe - 1;
+ else
+ pict_resinfo->pict_res_num +=
+ decctx->num_pipes * decctx->dev_cfg->num_slots_per_pipe - 1;
+ }
+
+ return IMG_SUCCESS;
+}
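To make the slot arithmetic above concrete, a sketch with hypothetical device-configuration values:

    /*
     * Hypothetical config: num_pipes == 2, num_slots_per_pipe == 2, and a
     * minimum of 4 pictures required:
     *
     *   HEVC:      pict_res_num = 4 + (2 - 1)     = 5  (per-pipe slots)
     *   otherwise: pict_res_num = 4 + (2 * 2 - 1) = 7  (all pipes' slots)
     */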
+
+static int core_alloc_resbuf(struct vdecdd_ddbuf_mapinfo **buf_handle,
+ unsigned int size, void *mmu_handle,
+ struct vxdio_mempool mem_pool)
+{
+ int ret;
+ struct vdecdd_ddbuf_mapinfo *buf;
+
+ *buf_handle = kzalloc(sizeof(**buf_handle), GFP_KERNEL);
+ buf = *buf_handle;
+ VDEC_ASSERT(buf);
+ if (buf) {
+ buf->mmuheap_id = MMU_HEAP_STREAM_BUFFERS;
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(mmu_handle, buf->mmuheap_id,
+ mem_pool.mem_heap_id,
+ mem_pool.mem_attrib, size,
+ DEV_MMU_PAGE_SIZE,
+ &buf->ddbuf_info);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ } else {
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ }
+ return ret;
+}
+
+#ifdef SEQ_RES_NEEDED
+static int core_alloc_common_sequence_buffers(struct core_stream_context *core_str_ctx,
+ struct vdecdd_seq_resint *seqres_int,
+ struct vxdio_mempool mem_pool,
+ struct core_seq_resinfo *seqres_info,
+ struct core_pict_resinfo *pictres_info,
+ const struct vdec_str_opconfig *op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pict_buf)
+{
+ int ret = IMG_SUCCESS;
+#ifdef ERROR_CONCEALMENT
+ enum vdec_vid_std vid_std = core_str_ctx->dd_str_ctx->str_config_data.vid_std;
+ unsigned int std_idx = vid_std - 1;
+ struct vidio_ddbufinfo *err_buf_info;
+
+ /* Allocate error concealment pattern frame for current sequence */
+ if (err_recovery_frame_info[std_idx].enabled) {
+ struct vdec_pict_bufconfig buf_config;
+ unsigned int size;
+
+ buf_config = disp_pict_buf->buf_config;
+ size = buf_config.coded_width * buf_config.coded_height;
+
+ if (err_recovery_frame_info[std_idx].max_size > size) {
+ seqres_int->err_pict_buf = kzalloc(sizeof(*seqres_int->err_pict_buf),
+ GFP_KERNEL);
+ VDEC_ASSERT(seqres_int->err_pict_buf);
+ if (!seqres_int->err_pict_buf)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ seqres_int->err_pict_buf->mmuheap_id = MMU_HEAP_STREAM_BUFFERS;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("===== %s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(core_str_ctx->dd_str_ctx->mmu_str_handle,
+ seqres_int->err_pict_buf->mmuheap_id,
+ mem_pool.mem_heap_id,
+ (enum sys_emem_attrib)(mem_pool.mem_attrib |
+ SYS_MEMATTRIB_CPU_WRITE),
+ buf_config.buf_size,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &seqres_int->err_pict_buf->ddbuf_info);
+ if (ret != IMG_SUCCESS)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* make grey pattern - luma & chroma at mid-rail */
+ err_buf_info = &seqres_int->err_pict_buf->ddbuf_info;
+ if (op_cfg->pixel_info.mem_pkg == PIXEL_BIT10_MP) {
+ unsigned int *out = (unsigned int *)err_buf_info->cpu_virt;
+ unsigned int i;
+
+ for (i = 0; i < err_buf_info->buf_size / sizeof(unsigned int); i++)
+ /* See PIXEL_BIT10_MP layout definition */
+ out[i] = 0x20080200;
+ } else {
+ /* Note: Setting 0x80 also gives grey pattern
+ * for 10bit upacked MSB format.
+ */
+ memset(err_buf_info->cpu_virt, 0x80, err_buf_info->buf_size);
+ }
+ }
+ }
+#endif
+ return ret;
+}
+#endif
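The 0x20080200 constant used for the 10-bit grey pattern above packs three mid-rail samples into one word. A minimal sketch, assuming the PIXEL_BIT10_MP layout is three 10-bit samples per 32-bit word starting at the LSB (top two bits unused):

    #include <stdio.h>

    int main(void)
    {
            unsigned int mid = 0x200; /* 512: mid-rail for a 10-bit sample */
            unsigned int word = (mid << 20) | (mid << 10) | mid;

            printf("0x%08X\n", word); /* prints 0x20080200 */
            return 0;
    }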
+
+/*
+ * @Function core_do_resource_realloc
+ */
+static unsigned char core_do_resource_realloc(struct core_stream_context *core_str_ctx,
+ struct core_pict_resinfo *pictres_info,
+ struct core_seq_resinfo *seqres_info)
+{
+ VDEC_ASSERT(core_str_ctx->std_spec_ops);
+ /* If the buffer sizes are sufficient and only a greater number of resources is needed... */
+ if (core_str_ctx->pict_resinfo.size_info.mbparams_bufsize >=
+ pictres_info->size_info.mbparams_bufsize &&
+ (core_str_ctx->std_spec_ops->is_stream_resource_suitable ?
+ core_str_ctx->std_spec_ops->is_stream_resource_suitable(pictres_info,
+ &core_str_ctx->pict_resinfo,
+ seqres_info, &core_str_ctx->seq_resinfo) : TRUE) &&
+ core_str_ctx->pict_resinfo.pict_res_num < pictres_info->pict_res_num)
+ /* ...full internal resource reallocation is not required. */
+ return FALSE;
+
+ /* Otherwise request full internal resource reallocation. */
+ return TRUE;
+}
+
+/*
+ * @Function core_is_stream_resource_suitable
+ */
+static unsigned char core_is_stream_resource_suitable
+ (struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ const struct vdec_str_opconfig *op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pict_buf,
+ struct core_pict_resinfo *pictres_info,
+ struct core_seq_resinfo *seqres_info_ptr)
+{
+ int ret;
+ struct core_pict_resinfo aux_pictes_info;
+ struct core_pict_resinfo *aux_pictes_info_ptr;
+ struct core_seq_resinfo seqres_info;
+
+ /* If resource info is needed externally, just use it. Otherwise use internal structure. */
+ if (pictres_info)
+ aux_pictes_info_ptr = pictres_info;
+ else
+ aux_pictes_info_ptr = &aux_pictes_info;
+
+ if (!seqres_info_ptr)
+ seqres_info_ptr = &seqres_info;
+
+ /* Get the resource info for current settings. */
+ ret = core_pict_res_getinfo(core_str_ctx, comseq_hdrinfo, op_cfg, disp_pict_buf,
+ aux_pictes_info_ptr, seqres_info_ptr);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return FALSE;
+
+ VDEC_ASSERT(core_str_ctx->std_spec_ops);
+ if (core_str_ctx->std_spec_ops->is_stream_resource_suitable) {
+ if (!core_str_ctx->std_spec_ops->is_stream_resource_suitable
+ (aux_pictes_info_ptr,
+ &core_str_ctx->pict_resinfo,
+ seqres_info_ptr, &core_str_ctx->seq_resinfo))
+ return FALSE;
+ }
+
+ /* Check the number of picture resources required against the current number. */
+ if (aux_pictes_info_ptr->pict_res_num > core_str_ctx->pict_resinfo.pict_res_num)
+ return FALSE;
+
+ return TRUE;
+}
+
+static int core_alloc_common_pict_buffers(struct core_stream_context *core_str_ctx,
+ struct vdecdd_pict_resint *pictres_int,
+ struct vxdio_mempool mem_pool,
+ struct core_pict_resinfo *pictres_info)
+{
+ int ret = IMG_SUCCESS;
+
+ /* If MB params buffers are needed... */
+ if (pictres_info->size_info.mbparams_bufsize > 0)
+ /* Allocate the MB parameters buffer info structure. */
+ ret = core_alloc_resbuf(&pictres_int->mb_param_buf,
+ pictres_info->size_info.mbparams_bufsize,
+ core_str_ctx->dd_str_ctx->mmu_str_handle,
+ mem_pool);
+
+ return ret;
+}
+
+/*
+ * @Function core_stream_resource_create
+ */
+static int core_stream_resource_create(struct core_stream_context *core_str_ctx,
+ unsigned char closed_gop, unsigned int mem_heap_id,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ const struct vdec_str_opconfig *op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pict_buf)
+{
+ struct vdecdd_pict_resint *pictres_int = NULL;
+ int ret = IMG_SUCCESS;
+ unsigned int i, start_cnt = 0;
+ struct core_pict_resinfo pictres_info;
+ struct vdecdd_seq_resint *seqres_int = NULL;
+ struct core_seq_resinfo seqres_info;
+ struct vxdio_mempool mem_pool;
+
+ mem_pool.mem_heap_id = mem_heap_id;
+ mem_pool.mem_attrib = (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED
+ | SYS_MEMATTRIB_WRITECOMBINE | SYS_MEMATTRIB_INTERNAL);
+
+#ifdef SEQ_RES_NEEDED
+ seqres_int = lst_first(&core_str_ctx->seq_res_list);
+#endif
+ /*
+ * Clear the reconstructed picture buffer layout if the previous
+ * references are no longer used. Only under these circumstances
+ * should the bitstream resolution change.
+ */
+ if (closed_gop) {
+ memset(&core_str_ctx->recon_pictbuf.rend_info, 0,
+ sizeof(core_str_ctx->recon_pictbuf.rend_info));
+ memset(&core_str_ctx->coded_pict_size, 0, sizeof(core_str_ctx->coded_pict_size));
+ } else {
+ if (vdec_size_ne(core_str_ctx->coded_pict_size, comseq_hdrinfo->max_frame_size)) {
+ VDEC_ASSERT(FALSE);
+ pr_err("Coded picture size changed within the closed GOP (i.e. mismatched references)");
+ }
+ }
+
+ /* If current buffers are not suitable for specified VSH/Output config... */
+ if (!core_is_stream_resource_suitable(core_str_ctx, comseq_hdrinfo,
+ op_cfg, disp_pict_buf, &pictres_info,
+ &seqres_info)) {
+ /* If full internal resource reallocation is needed... */
+ if (core_do_resource_realloc(core_str_ctx, &pictres_info, &seqres_info)) {
+ /*
+ * Mark all the active resources as deprecated and
+ * free-up where no longer used.
+ */
+ core_stream_resource_deprecate(core_str_ctx);
+ } else {
+ /* Use current buffer size settings. */
+ pictres_info.size_info = core_str_ctx->pict_resinfo.size_info;
+ seqres_info = core_str_ctx->seq_resinfo;
+
+ /* Set start counter to only allocate the number of
+ * resources that are missing.
+ */
+ start_cnt = core_str_ctx->pict_resinfo.pict_res_num;
+ }
+
+#ifdef SEQ_RES_NEEDED
+ /* allocate sequence resources */
+ {
+ seqres_int = kzalloc(sizeof(*seqres_int), GFP_KERNEL);
+ VDEC_ASSERT(seqres_int);
+ if (!seqres_int)
+ goto err_out_of_memory;
+
+ lst_add(&core_str_ctx->seq_res_list, seqres_int);
+ /* Allocate sequence buffers common for all standards. */
+ ret = core_alloc_common_sequence_buffers
+ (core_str_ctx, seqres_int, mem_pool,
+ &seqres_info,
+ &pictres_info, op_cfg, disp_pict_buf);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+
+ VDEC_ASSERT(core_str_ctx->std_spec_ops);
+ if (core_str_ctx->std_spec_ops->alloc_sequence_buffers) {
+ ret = core_str_ctx->std_spec_ops->alloc_sequence_buffers
+ (core_str_ctx, seqres_int,
+ mem_pool, &seqres_info);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+ }
+ }
+#endif
+ /* Allocate resources for current settings. */
+ for (i = start_cnt; i < pictres_info.pict_res_num; i++) {
+ /* Allocate the picture resources structure. */
+ pictres_int = kzalloc(sizeof(*pictres_int), GFP_KERNEL);
+ VDEC_ASSERT(pictres_int);
+ if (!pictres_int)
+ goto err_out_of_memory;
+
+ /* Allocate picture buffers common for all standards. */
+ ret = core_alloc_common_pict_buffers(core_str_ctx, pictres_int,
+ mem_pool, &pictres_info);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+
+ /* Allocate standard specific picture buffers. */
+ VDEC_ASSERT(core_str_ctx->std_spec_ops);
+ if (core_str_ctx->std_spec_ops->alloc_picture_buffers) {
+ ret = core_str_ctx->std_spec_ops->alloc_picture_buffers
+ (core_str_ctx, pictres_int,
+ mem_pool, &pictres_info);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+ }
+
+ /* attach sequence resources */
+#ifdef SEQ_RES_NEEDED
+ resource_item_use(&seqres_int->ref_count);
+ pictres_int->seq_resint = seqres_int;
+#endif
+ lst_add(&core_str_ctx->pict_res_list, pictres_int);
+ core_str_ctx->pict_resinfo.pict_res_num++;
+ }
+ }
+
+ /*
+ * When demand for picture resources reduces (in quantity) the extra buffers
+ * are still retained. Preserve the existing count in case the demand increases
+ * again, at which time these residual buffers won't need to be reallocated.
+ */
+ pictres_info.pict_res_num = core_str_ctx->pict_resinfo.pict_res_num;
+
+ /* Store the current resource config. */
+ core_str_ctx->pict_resinfo = pictres_info;
+ core_str_ctx->seq_resinfo = seqres_info;
+
+ pictres_int = lst_first(&core_str_ctx->pict_res_list);
+ while (pictres_int) {
+ /*
+ * Increment the reference count to indicate that this resource is also
+ * held by plant until it is added to the Scheduler list. If the resource has
+ * not just been created it might already be in circulation.
+ */
+ resource_item_use(&pictres_int->ref_cnt);
+#ifdef SEQ_RES_NEEDED
+ /* attach sequence resources */
+ resource_item_use(&seqres_int->ref_count);
+ pictres_int->seq_resint = seqres_int;
+#endif
+ /* Add the internal picture resources to the list. */
+ ret = resource_list_add_img(&core_str_ctx->aux_pict_res_list,
+ pictres_int, 0, &pictres_int->ref_cnt);
+
+ pictres_int = lst_next(pictres_int);
+ }
+
+ /*
+ * Set the reconstructed buffer properties if they
+ * may have been changed.
+ */
+ if (core_str_ctx->recon_pictbuf.rend_info.rendered_size == 0) {
+ core_str_ctx->recon_pictbuf.rend_info =
+ disp_pict_buf->rend_info;
+ core_str_ctx->recon_pictbuf.buf_config =
+ disp_pict_buf->buf_config;
+ core_str_ctx->coded_pict_size = comseq_hdrinfo->max_frame_size;
+ } else {
+ if (memcmp(&disp_pict_buf->rend_info,
+ &core_str_ctx->recon_pictbuf.rend_info,
+ sizeof(core_str_ctx->recon_pictbuf.rend_info))) {
+ /*
+ * Reconstructed picture buffer information has changed
+ * during a closed GOP.
+ */
+ VDEC_ASSERT
+ ("Reconstructed picture buffer information cannot change within a GOP"
+ == NULL);
+ pr_err("Reconstructed picture buffer information cannot change within a GOP.");
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+ }
+
+ return IMG_SUCCESS;
+
+ /* Handle out of memory errors. */
+err_out_of_memory:
+ /* Free resources being currently allocated. */
+ if (pictres_int) {
+ core_free_common_picture_resource(core_str_ctx, pictres_int);
+ if (core_str_ctx->std_spec_ops->free_picture_resource)
+ core_str_ctx->std_spec_ops->free_picture_resource(core_str_ctx,
+ pictres_int);
+
+ kfree(pictres_int);
+ }
+
+#ifdef SEQ_RES_NEEDED
+ if (seqres_int) {
+ core_free_common_sequence_resource(core_str_ctx, seqres_int);
+
+ if (core_str_ctx->std_spec_ops->free_sequence_resource)
+ core_str_ctx->std_spec_ops->free_sequence_resource(core_str_ctx,
+ seqres_int);
+
+ VDEC_ASSERT(lst_last(&core_str_ctx->seq_res_list) == seqres_int);
+ lst_remove(&core_str_ctx->seq_res_list, seqres_int);
+ kfree(seqres_int);
+ }
+#endif
+
+ /* Free all the other resources. */
+ core_stream_resource_destroy(core_str_ctx);
+
+ pr_err("[USERSID=0x%08X] Core not able to allocate stream resources due to lack of memory",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id);
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+}
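The bookkeeping above leans on a use/return ownership count. A bare-bones sketch of the idiom (the names mirror the driver's resource_item_use()/resource_item_return(); this implementation is illustrative, not the actual resource manager):

    /* Illustrative only; the real helpers live in the resource manager. */
    struct res_item {
            int ref_cnt; /* current holders: lists, scheduler, client */
    };

    static void item_use(struct res_item *item)
    {
            item->ref_cnt++; /* a new holder now references the item */
    }

    static int item_return(struct res_item *item)
    {
            /* Non-zero when the last holder lets go and the item may be recycled. */
            return --item->ref_cnt == 0;
    }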
+
+static int
+core_reconfigure_recon_pictbufs(struct core_stream_context *core_str_ctx,
+ unsigned char no_references)
+{
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ int ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+ VDEC_ASSERT(dd_str_ctx->str_op_configured);
+
+ /* Re-configure the internal picture buffers now that none are held. */
+ ret = core_stream_resource_create(core_str_ctx, no_references,
+ dd_str_ctx->dd_dev_context->internal_heap_id,
+ &dd_str_ctx->comseq_hdr_info,
+ &dd_str_ctx->opconfig,
+ &dd_str_ctx->disp_pict_buf);
+ return ret;
+}
+
+/*
+ * @Function core_picture_prepare
+ */
+static int core_picture_prepare(struct core_stream_context *core_str_ctx,
+ struct vdecdd_str_unit *str_unit)
+{
+ int ret = IMG_SUCCESS;
+ struct vdecdd_picture *pict_local = NULL;
+ unsigned int avail = 0;
+ unsigned char need_pict_res;
+
+ /*
+ * For normal decode, set up the picture data.
+ * Preallocate the picture structure.
+ */
+ pict_local = kzalloc(sizeof(*pict_local), GFP_KERNEL);
+ if (!pict_local)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Determine whether the picture can be decoded. */
+ ret = decoder_get_load(core_str_ctx->dd_str_ctx->dec_ctx, &global_avail_slots);
+ if (ret != IMG_SUCCESS) {
+ pr_err("No resources avaialable to decode this picture");
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto unwind;
+ }
+
+ /*
+ * Load and availability is cached in stream context simply
+ * for status reporting.
+ */
+ avail = core_get_resource_availability(core_str_ctx);
+
+ if ((avail & CORE_AVAIL_CORE) == 0) {
+ /* Return straight away if the core is not available */
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto unwind;
+ }
+
+ if (core_str_ctx->new_op_cfg || core_str_ctx->new_seq) {
+ /*
+ * Reconstructed buffers should be checked for reconfiguration
+ * under these conditions:
+ * 1. New output configuration,
+ * 2. New sequence.
+ * Core can decide to reset the reconstructed buffer properties
+ * if there are no previous reference pictures used
+ * (i.e. at a closed GOP). This code must go here because we
+ * may not stop when new sequence is found or references become
+ * unused.
+ */
+ ret = core_reconfigure_recon_pictbufs(core_str_ctx,
+ core_str_ctx->no_prev_refs_used);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto unwind;
+ }
+
+ /* Update the display information for this picture. */
+ ret = vdecddutils_get_display_region(&str_unit->pict_hdr_info->coded_frame_size,
+ &str_unit->pict_hdr_info->disp_info.enc_disp_region,
+ &str_unit->pict_hdr_info->disp_info.disp_region);
+
+ if (ret != IMG_SUCCESS)
+ goto unwind;
+
+ /* Clear internal state */
+ core_str_ctx->new_seq = FALSE;
+ core_str_ctx->new_op_cfg = FALSE;
+ core_str_ctx->no_prev_refs_used = FALSE;
+
+ /*
+ * Recalculate this since we might have just created
+ * internal resources.
+ */
+ core_str_ctx->res_avail = core_get_resource_availability(core_str_ctx);
+
+ /*
+ * If picture resources were needed for this stream, the picture
+ * resource list would not be empty.
+ */
+ need_pict_res = !lst_empty(&core_str_ctx->aux_pict_res_list);
+ /* If there are resources available */
+ if ((core_str_ctx->res_avail & CORE_AVAIL_PICTBUF) &&
+ (!need_pict_res || (core_str_ctx->res_avail & CORE_AVAIL_PICTRES))) {
+ /* Pick internal picture resources. */
+ if (need_pict_res) {
+ pict_local->pict_res_int =
+ resource_list_get_avail(&core_str_ctx->aux_pict_res_list);
+
+ VDEC_ASSERT(pict_local->pict_res_int);
+ if (!pict_local->pict_res_int) {
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto unwind;
+ }
+ }
+
+ /* Pick the client image buffer. */
+ pict_local->disp_pict_buf.pict_buf =
+ resource_list_get_avail(&core_str_ctx->pict_buf_list);
+ VDEC_ASSERT(pict_local->disp_pict_buf.pict_buf);
+ if (!pict_local->disp_pict_buf.pict_buf) {
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto unwind;
+ }
+ } else {
+ /* Need resources to process picture start. */
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ goto unwind;
+ }
+
+ /* Ensure that the buffer contains layout information. */
+ pict_local->disp_pict_buf.rend_info = core_str_ctx->disp_pict_buf.rend_info;
+ pict_local->disp_pict_buf.buf_config = core_str_ctx->disp_pict_buf.buf_config;
+ pict_local->op_config = core_str_ctx->op_cfg;
+ pict_local->last_pict_in_seq = str_unit->last_pict_in_seq;
+
+ str_unit->dd_pict_data = pict_local;
+
+ /* Indicate that all necessary resources are now available. */
+ if (core_str_ctx->res_avail != ~0) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("LAST AVAIL: 0x%08X\n", core_str_ctx->res_avail);
+#endif
+ core_str_ctx->res_avail = ~0;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ /* dump decoder internal resource addresses */
+ if (pict_local->pict_res_int) {
+ if (pict_local->pict_res_int->mb_param_buf) {
+ pr_info("[USERSID=0x%08X] MB parameter buffer device virtual address: 0x%08X",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id,
+ pict_local->pict_res_int->mb_param_buf->ddbuf_info.dev_virt);
+ }
+
+ if (core_str_ctx->comseq_hdr_info.separate_chroma_planes) {
+ pr_info("[USERSID=0x%08X] Display picture virtual address: LUMA 0x%08X, CHROMA 0x%08X, CHROMA2 0x%08X",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id,
+ pict_local->disp_pict_buf.pict_buf->ddbuf_info.dev_virt,
+ pict_local->disp_pict_buf.pict_buf->ddbuf_info.dev_virt +
+ pict_local->disp_pict_buf.rend_info.plane_info
+ [VDEC_PLANE_VIDEO_U].offset,
+ pict_local->disp_pict_buf.pict_buf->ddbuf_info.dev_virt +
+ pict_local->disp_pict_buf.rend_info.plane_info
+ [VDEC_PLANE_VIDEO_V].offset);
+ } else {
+ pr_info("[USERSID=0x%08X] Display picture virtual address: LUMA 0x%08X, CHROMA 0x%08X",
+ core_str_ctx->dd_str_ctx->str_config_data.user_str_id,
+ pict_local->disp_pict_buf.pict_buf->ddbuf_info.dev_virt,
+ pict_local->disp_pict_buf.pict_buf->ddbuf_info.dev_virt +
+ pict_local->disp_pict_buf.rend_info.plane_info
+ [VDEC_PLANE_VIDEO_UV].offset);
+ }
+ }
+#endif
+
+ ret = core_picture_attach_resources(core_str_ctx, str_unit, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto unwind;
+
+ return IMG_SUCCESS;
+
+unwind:
+ if (pict_local->pict_res_int) {
+ resource_item_return(&pict_local->pict_res_int->ref_cnt);
+ pict_local->pict_res_int = NULL;
+ }
+ if (pict_local->disp_pict_buf.pict_buf) {
+ resource_item_return(&pict_local->disp_pict_buf.pict_buf->ddbuf_info.ref_count);
+ pict_local->disp_pict_buf.pict_buf = NULL;
+ }
+ kfree(pict_local);
+ return ret;
+}
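core_picture_prepare() treats availability as a plain bitmask, with ~0 meaning all resources are present. A sketch of the gating logic (the flag values here are hypothetical; only the CORE_AVAIL_* names come from the code above):

    /* Hypothetical flag values; the real definitions live elsewhere in core.c. */
    #define CORE_AVAIL_CORE    (1U << 0)
    #define CORE_AVAIL_PICTBUF (1U << 1)
    #define CORE_AVAIL_PICTRES (1U << 2)

    static int can_prepare_picture(unsigned int res_avail, int need_pict_res)
    {
            if (!(res_avail & CORE_AVAIL_PICTBUF))
                    return 0; /* no client picture buffer available */

            /* Internal picture resources only gate streams that use them. */
            return !need_pict_res || (res_avail & CORE_AVAIL_PICTRES);
    }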
+
+/*
+ * @Function core_validate_new_sequence
+ */
+static int core_validate_new_sequence(struct core_stream_context *core_str_ctx,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo)
+{
+ int ret;
+ struct vdecdd_supp_check supp_check;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ unsigned int num_req_bufs_prev, num_req_bufs_cur;
+ struct vdecdd_mapbuf_info mapbuf_info;
+
+ memset(&supp_check, 0, sizeof(supp_check));
+
+ /*
+ * Omit the picture header from this setup since we can't
+ * validate it here.
+ */
+ supp_check.comseq_hdrinfo = comseq_hdrinfo;
+
+ if (core_str_ctx->opcfg_set) {
+ supp_check.op_cfg = &core_str_ctx->op_cfg;
+ supp_check.disp_pictbuf = &core_str_ctx->disp_pict_buf;
+
+ ret = vdecddutils_get_minrequired_numpicts
+ (&core_str_ctx->dd_str_ctx->str_config_data,
+ &core_str_ctx->comseq_hdr_info,
+ &core_str_ctx->op_cfg,
+ &num_req_bufs_prev);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ ret = vdecddutils_get_minrequired_numpicts
+ (&core_str_ctx->dd_str_ctx->str_config_data,
+ comseq_hdrinfo,
+ &core_str_ctx->op_cfg,
+ &num_req_bufs_cur);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Check if the output configuration is compatible with new VSH. */
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+ mapbuf_info = dd_str_ctx->map_buf_info;
+
+ /* Check the compatibility of the bitstream data and configuration */
+ supp_check.non_cfg_req = TRUE;
+ ret = core_check_decoder_support(dd_str_ctx->dd_dev_context,
+ &dd_str_ctx->str_config_data,
+ &dd_str_ctx->prev_comseq_hdr_info,
+ &dd_str_ctx->prev_pict_hdr_info,
+ &mapbuf_info, &supp_check);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ core_str_ctx->new_seq = TRUE;
+
+ return IMG_SUCCESS;
+}
+
+static int
+core_validate_new_picture(struct core_stream_context *core_str_ctx,
+ const struct bspp_pict_hdr_info *pict_hdrinfo,
+ unsigned int *features)
+{
+ int ret;
+ struct vdecdd_supp_check supp_check;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct vdecdd_mapbuf_info mapbuf_info;
+
+ memset(&supp_check, 0, sizeof(supp_check));
+ supp_check.comseq_hdrinfo = &core_str_ctx->comseq_hdr_info;
+ supp_check.pict_hdrinfo = pict_hdrinfo;
+
+ /*
+ * Sequence header parameters cannot become invalid during a
+ * sequence. However, the output configuration may signal
+ * something that changes compatibility on a closed GOP within a
+ * sequence (e.g. the resolution may significantly decrease in a
+ * GOP and scaling wouldn't be supported). This resolution shift
+ * would not be signalled in the sequence header (since that
+ * holds the maximum) but is only found now, when validating the
+ * first picture in the GOP.
+ */
+ if (core_str_ctx->opcfg_set)
+ supp_check.op_cfg = &core_str_ctx->op_cfg;
+
+ /*
+ * Check if the new picture is compatible with the
+ * current driver state.
+ */
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+ mapbuf_info = dd_str_ctx->map_buf_info;
+
+ /* Check the compatibility of the bitstream data and configuration */
+ supp_check.non_cfg_req = TRUE;
+ ret = core_check_decoder_support(dd_str_ctx->dd_dev_context,
+ &dd_str_ctx->str_config_data,
+ &dd_str_ctx->prev_comseq_hdr_info,
+ &dd_str_ctx->prev_pict_hdr_info,
+ &mapbuf_info, &supp_check);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ if (supp_check.unsupp_flags.str_opcfg || supp_check.unsupp_flags.pict_hdr)
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ /* Pass back the features determined during the support check. */
+ *features = supp_check.features;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_stream_submit_unit
+ */
+int core_stream_submit_unit(unsigned int res_str_id, struct vdecdd_str_unit *str_unit)
+{
+ int ret;
+ unsigned char process_str_unit = TRUE;
+
+ struct vdecdd_ddstr_ctx *dd_str_context;
+ struct core_stream_context *core_str_ctx;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+ VDEC_ASSERT(str_unit);
+
+ if (res_str_id == 0 || !str_unit) {
+ pr_err("Invalid params passed to %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(core_str_ctx);
+ dd_str_context = core_str_ctx->dd_str_ctx;
+ VDEC_ASSERT(dd_str_context);
+
+ ret = resource_list_add_img(&core_str_ctx->str_unit_list, str_unit, 0, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+
+ pr_debug("%s stream unit type = %d\n", __func__, str_unit->str_unit_type);
+ switch (str_unit->str_unit_type) {
+ case VDECDD_STRUNIT_SEQUENCE_START:
+ if (str_unit->seq_hdr_info) {
+ /* Add sequence header to cache. */
+ ret =
+ resource_list_replace(&core_str_ctx->seq_hdr_list,
+ str_unit->seq_hdr_info,
+ str_unit->seq_hdr_info->sequ_hdr_id,
+ &str_unit->seq_hdr_info->ref_count,
+ NULL, NULL);
+
+ if (ret != IMG_SUCCESS)
+ pr_err("[USERSID=0x%08X] Failed to replace resource",
+ res_str_id);
+ } else {
+ /* ...or take from cache. */
+ str_unit->seq_hdr_info =
+ resource_list_getbyid(&core_str_ctx->seq_hdr_list,
+ str_unit->seq_hdr_id);
+ }
+
+ VDEC_ASSERT(str_unit->seq_hdr_info);
+ if (!str_unit->seq_hdr_info) {
+ pr_err("Sequence header information not available for current picture");
+ break;
+ }
+ /*
+ * Check that this latest sequence header information is
+ * compatible with the current state and, if there are no
+ * errors, store it as current.
+ */
+ core_str_ctx->comseq_hdr_info = str_unit->seq_hdr_info->com_sequ_hdr_info;
+
+ ret = core_validate_new_sequence(core_str_ctx,
+ &str_unit->seq_hdr_info->com_sequ_hdr_info);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_context->prev_comseq_hdr_info =
+ dd_str_context->comseq_hdr_info;
+ dd_str_context->comseq_hdr_info =
+ str_unit->seq_hdr_info->com_sequ_hdr_info;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[SID=0x%08X] VSH: Maximum Frame Resolution [%dx%d]",
+ dd_str_context->res_str_id,
+ dd_str_context->comseq_hdr_info.max_frame_size.width,
+ dd_str_context->comseq_hdr_info.max_frame_size.height);
+#endif
+
+ break;
+
+ case VDECDD_STRUNIT_PICTURE_START:
+ /*
+ * Check that the picture configuration is compatible
+ * with the current state.
+ */
+ ret = core_validate_new_picture(core_str_ctx,
+ str_unit->pict_hdr_info,
+ &str_unit->features);
+ if (ret == IMG_ERROR_NOT_SUPPORTED) {
+ /*
+ * Do not process stream unit since there is
+ * something unsupported.
+ */
+ process_str_unit = FALSE;
+ break;
+ }
+
+ /* Prepare picture for decoding. */
+ ret = core_picture_prepare(core_str_ctx, str_unit);
+ if (ret == IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE ||
+ ret == IMG_ERROR_NOT_SUPPORTED) {
+ /*
+ * Do not process stream unit since there is
+ * something unsupported or resources are not
+ * available.
+ */
+ process_str_unit = FALSE;
+ }
+ break;
+
+ default:
+ /*
+ * Sequence/picture headers should only be attached to
+ * corresponding units.
+ */
+ VDEC_ASSERT(!str_unit->seq_hdr_info);
+ VDEC_ASSERT(!str_unit->pict_hdr_info);
+ break;
+ }
+
+ if (process_str_unit) {
+ /* Submit stream unit to the decoder for processing. */
+ str_unit->decode = TRUE;
+ ret = decoder_stream_process_unit(dd_str_context->dec_ctx,
+ str_unit);
+ } else {
+ ret = IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ return ret;
+}
+
+/*
+ * @Function core_stream_fill_pictbuf
+ */
+int core_stream_fill_pictbuf(unsigned int buf_map_id)
+{
+ int ret;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /* Get access to map info context.. */
+ ret = rman_get_resource(buf_map_id, VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = ddbuf_map_info->ddstr_context;
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(dd_str_ctx->res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Check buffer type. */
+ VDEC_ASSERT(ddbuf_map_info->buf_type == VDEC_BUFTYPE_PICTURE);
+
+ /* Add the image buffer to the list */
+ ret = resource_list_add_img(&core_str_ctx->pict_buf_list, ddbuf_map_info,
+ 0, &ddbuf_map_info->ddbuf_info.ref_count);
+
+ return ret;
+}
+
+/*
+ * @Function core_fn_free_mapped
+ */
+static void core_fn_free_mapped(void *param)
+{
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info =
+ (struct vdecdd_ddbuf_mapinfo *)param;
+
+ /* Validate input arguments */
+ VDEC_ASSERT(param);
+
+ /* Do not free the MMU mapping. It is handled by talmmu code. */
+ kfree(ddbuf_map_info);
+}
+
+/*
+ * @Function core_stream_map_buf
+ */
+int core_stream_map_buf(unsigned int res_str_id, enum vdec_buf_type buf_type,
+ struct vdec_buf_info *buf_info, unsigned int *buf_map_id)
+{
+ int ret;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+
+ /*
+ * Stream based messages without a device context
+ * must have a stream ID.
+ */
+ VDEC_ASSERT(res_str_id);
+ VDEC_ASSERT(buf_type < VDEC_BUFTYPE_MAX);
+ VDEC_ASSERT(buf_info);
+ VDEC_ASSERT(buf_map_id);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_ctx);
+
+ /* Allocate an active stream unit.. */
+ ddbuf_map_info = kzalloc(sizeof(*ddbuf_map_info), GFP_KERNEL);
+ VDEC_ASSERT(ddbuf_map_info);
+
+ if (!ddbuf_map_info) {
+ pr_err("[SID=0x%08X] Failed to allocate memory for DD buffer map information",
+ dd_str_ctx->res_str_id);
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Save the stream context etc. */
+ ddbuf_map_info->ddstr_context = dd_str_ctx;
+ ddbuf_map_info->buf_type = buf_type;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d vdec2plus: vxd map buff id %d", __func__, __LINE__,
+ buf_info->buf_id);
+#endif
+ ddbuf_map_info->buf_id = buf_info->buf_id;
+
+ /* Register the allocation as a stream resource.. */
+ ret = rman_register_resource(dd_str_ctx->res_buck_handle,
+ VDECDD_BUFMAP_TYPE_ID,
+ core_fn_free_mapped,
+ ddbuf_map_info,
+ &ddbuf_map_info->res_handle,
+ buf_map_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ ddbuf_map_info->buf_map_id = *buf_map_id;
+
+ if (buf_type == VDEC_BUFTYPE_PICTURE) {
+ if (dd_str_ctx->map_buf_info.num_buf == 0) {
+ dd_str_ctx->map_buf_info.buf_size = buf_info->buf_size;
+ dd_str_ctx->map_buf_info.byte_interleave =
+ buf_info->pictbuf_cfg.byte_interleave;
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[SID=0x%08X] Mapped Buffer size: %d (bytes)",
+ dd_str_ctx->res_str_id, buf_info->buf_size);
+#endif
+ } else {
+ /*
+ * Same byte interleaved setting should be used.
+ * Convert to actual bools by comparing with zero.
+ */
+ if (buf_info->pictbuf_cfg.byte_interleave !=
+ dd_str_ctx->map_buf_info.byte_interleave) {
+ pr_err("[SID=0x%08X] Buffer cannot be mapped since its byte interleave value (%s) is not the same as buffers already mapped (%s)",
+ dd_str_ctx->res_str_id,
+ buf_info->pictbuf_cfg.byte_interleave ?
+ "ON" : "OFF",
+ dd_str_ctx->map_buf_info.byte_interleave ?
+ "ON" : "OFF");
+ ret = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+ }
+
+ /* Configure the buffer.. */
+ ret = core_stream_set_pictbuf_config(dd_str_ctx, &buf_info->pictbuf_cfg);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+ }
+
+ /* Map heap from VDEC to MMU. */
+ switch (buf_type) {
+ case VDEC_BUFTYPE_BITSTREAM:
+ ddbuf_map_info->mmuheap_id = MMU_HEAP_BITSTREAM_BUFFERS;
+ break;
+
+ case VDEC_BUFTYPE_PICTURE:
+ mmu_get_heap(buf_info->pictbuf_cfg.stride[VDEC_PLANE_VIDEO_Y],
+ &ddbuf_map_info->mmuheap_id);
+ break;
+
+ default:
+ VDEC_ASSERT(FALSE);
+ }
+
+ /* Map this buffer into the MMU. */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("----- %s:%d calling MMU_StreamMapExt", __func__, __LINE__);
+#endif
+ ret = mmu_stream_map_ext(dd_str_ctx->mmu_str_handle,
+ (enum mmu_eheap_id)ddbuf_map_info->mmuheap_id,
+ ddbuf_map_info->buf_id,
+ buf_info->buf_size, DEV_MMU_PAGE_SIZE,
+ buf_info->mem_attrib,
+ buf_info->cpu_linear_addr,
+ &ddbuf_map_info->ddbuf_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (buf_type == VDEC_BUFTYPE_PICTURE)
+ dd_str_ctx->map_buf_info.num_buf++;
+
+ /*
+ * Initialise the reference count to indicate that the client
+ * still holds the buffer.
+ */
+ ddbuf_map_info->ddbuf_info.ref_count = 1;
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+
+error:
+ if (ddbuf_map_info) {
+ if (ddbuf_map_info->res_handle)
+ rman_free_resource(ddbuf_map_info->res_handle);
+ else
+ kfree(ddbuf_map_info);
+ }
+
+ return ret;
+}
+
+/*
+ * @Function core_stream_map_buf_sg
+ */
+int core_stream_map_buf_sg(unsigned int res_str_id, enum vdec_buf_type buf_type,
+ struct vdec_buf_info *buf_info,
+ void *sgt, unsigned int *buf_map_id)
+{
+ int ret;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+
+ /*
+ * The resource stream ID cannot be zero; proceeding with a zero
+ * ID would break the code, so return IMG_ERROR_INVALID_ID.
+ */
+ if (res_str_id == 0)
+ return IMG_ERROR_INVALID_ID;
+
+ VDEC_ASSERT(buf_type < VDEC_BUFTYPE_MAX);
+ VDEC_ASSERT(buf_info);
+ VDEC_ASSERT(buf_map_id);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_ctx);
+
+ /* Allocate an active stream unit.. */
+ ddbuf_map_info = kzalloc(sizeof(*ddbuf_map_info), GFP_KERNEL);
+ VDEC_ASSERT(ddbuf_map_info);
+
+ if (!ddbuf_map_info) {
+ pr_err("[SID=0x%08X] Failed to allocate memory for DD buffer map information",
+ dd_str_ctx->res_str_id);
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Save the stream context etc. */
+ ddbuf_map_info->ddstr_context = dd_str_ctx;
+ ddbuf_map_info->buf_type = buf_type;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d vdec2plus: vxd map buff id %d", __func__, __LINE__,
+ buf_info->buf_id);
+#endif
+ ddbuf_map_info->buf_id = buf_info->buf_id;
+
+ /* Register the allocation as a stream resource.. */
+ ret = rman_register_resource(dd_str_ctx->res_buck_handle,
+ VDECDD_BUFMAP_TYPE_ID,
+ core_fn_free_mapped, ddbuf_map_info,
+ &ddbuf_map_info->res_handle, buf_map_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ ddbuf_map_info->buf_map_id = *buf_map_id;
+
+ if (buf_type == VDEC_BUFTYPE_PICTURE) {
+ if (dd_str_ctx->map_buf_info.num_buf == 0) {
+ dd_str_ctx->map_buf_info.buf_size = buf_info->buf_size;
+
+ dd_str_ctx->map_buf_info.byte_interleave =
+ buf_info->pictbuf_cfg.byte_interleave;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[SID=0x%08X] Mapped Buffer size: %d (bytes)",
+ dd_str_ctx->res_str_id, buf_info->buf_size);
+#endif
+ } else {
+ /*
+ * Same byte interleaved setting should be used.
+ * Convert to actual bools by comparing with zero.
+ */
+ if (buf_info->pictbuf_cfg.byte_interleave !=
+ dd_str_ctx->map_buf_info.byte_interleave) {
+ pr_err("[SID=0x%08X] Buffer cannot be mapped since its byte interleave value (%s) is not the same as buffers already mapped (%s)",
+ dd_str_ctx->res_str_id,
+ buf_info->pictbuf_cfg.byte_interleave ?
+ "ON" : "OFF",
+ dd_str_ctx->map_buf_info.byte_interleave ?
+ "ON" : "OFF");
+ ret = IMG_ERROR_INVALID_PARAMETERS;
+ goto error;
+ }
+ }
+
+ /* Configure the buffer.. */
+ ret = core_stream_set_pictbuf_config(dd_str_ctx, &buf_info->pictbuf_cfg);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+ }
+
+ /* Map heap from VDEC to MMU. */
+ switch (buf_type) {
+ case VDEC_BUFTYPE_BITSTREAM:
+ ddbuf_map_info->mmuheap_id = MMU_HEAP_BITSTREAM_BUFFERS;
+ break;
+
+ case VDEC_BUFTYPE_PICTURE:
+ mmu_get_heap(buf_info->pictbuf_cfg.stride[VDEC_PLANE_VIDEO_Y],
+ &ddbuf_map_info->mmuheap_id);
+ break;
+
+ default:
+ VDEC_ASSERT(FALSE);
+ }
+
+ /* Map this buffer into the MMU. */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("----- %s:%d calling MMU_StreamMapExt_sg", __func__, __LINE__);
+#endif
+ ret =
+ mmu_stream_map_ext_sg(dd_str_ctx->mmu_str_handle,
+ (enum mmu_eheap_id)ddbuf_map_info->mmuheap_id,
+ sgt, buf_info->buf_size, DEV_MMU_PAGE_SIZE,
+ buf_info->mem_attrib, buf_info->cpu_linear_addr,
+ &ddbuf_map_info->ddbuf_info,
+ &ddbuf_map_info->buf_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (buf_type == VDEC_BUFTYPE_PICTURE)
+ dd_str_ctx->map_buf_info.num_buf++;
+
+ /*
+ * Initialise the reference count to indicate that the client
+ * still holds the buffer.
+ */
+ ddbuf_map_info->ddbuf_info.ref_count = 1;
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+
+error:
+ if (ddbuf_map_info->res_handle)
+ rman_free_resource(ddbuf_map_info->res_handle);
+ else
+ kfree(ddbuf_map_info);
+
+ return ret;
+}
+
+/*
+ * @Function core_stream_unmap_buf
+ */
+int core_stream_unmap_buf(unsigned int buf_map_id)
+{
+ int ret;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /* Get access to map info context.. */
+ ret = rman_get_resource(buf_map_id, VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = ddbuf_map_info->ddstr_context;
+ VDEC_ASSERT(dd_str_ctx);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(dd_str_ctx->res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(core_str_ctx);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("UNMAP: PM [0x%p] --> VM [0x%08X - 0x%08X] (%d bytes)",
+ ddbuf_map_info->ddbuf_info.cpu_virt,
+ ddbuf_map_info->ddbuf_info.dev_virt,
+ ddbuf_map_info->ddbuf_info.dev_virt +
+ ddbuf_map_info->ddbuf_info.buf_size,
+ ddbuf_map_info->ddbuf_info.buf_size);
+#endif
+
+ /* Buffer should only be held by the client. */
+ VDEC_ASSERT(ddbuf_map_info->ddbuf_info.ref_count == 1);
+ if (ddbuf_map_info->ddbuf_info.ref_count != 1)
+ return IMG_ERROR_MEMORY_IN_USE;
+
+ ddbuf_map_info->ddbuf_info.ref_count = 0;
+ if (ddbuf_map_info->buf_type == VDEC_BUFTYPE_PICTURE) {
+ /* Remove this picture buffer from pictbuf list */
+ ret = resource_list_remove(&core_str_ctx->pict_buf_list, ddbuf_map_info);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS || ret == IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE);
+ if (ret != IMG_SUCCESS && ret != IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE)
+ return ret;
+
+ ddbuf_map_info->ddstr_context->map_buf_info.num_buf--;
+
+ /* Clear some state if there are no more mapped buffers. */
+ if (dd_str_ctx->map_buf_info.num_buf == 0) {
+ dd_str_ctx->map_buf_info.buf_size = 0;
+ dd_str_ctx->map_buf_info.byte_interleave = FALSE;
+ }
+ }
+
+ /* Unmap this buffer from the MMU. */
+ ret = mmu_free_mem(dd_str_ctx->mmu_str_handle, &ddbuf_map_info->ddbuf_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Free buffer map info. */
+ rman_free_resource(ddbuf_map_info->res_handle);
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_stream_unmap_buf_sg
+ */
+int core_stream_unmap_buf_sg(unsigned int buf_map_id)
+{
+ int ret;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+
+ /* Get access to map info context.. */
+ ret = rman_get_resource(buf_map_id, VDECDD_BUFMAP_TYPE_ID, (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = ddbuf_map_info->ddstr_context;
+ VDEC_ASSERT(dd_str_ctx);
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(dd_str_ctx->res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(core_str_ctx);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("UNMAP: PM [0x%p] --> VM [0x%08X - 0x%08X] (%d bytes)",
+ ddbuf_map_info->ddbuf_info.cpu_virt,
+ ddbuf_map_info->ddbuf_info.dev_virt,
+ ddbuf_map_info->ddbuf_info.dev_virt +
+ ddbuf_map_info->ddbuf_info.buf_size,
+ ddbuf_map_info->ddbuf_info.buf_size);
+#endif
+
+ /* Buffer should only be held by the client. */
+ VDEC_ASSERT(ddbuf_map_info->ddbuf_info.ref_count == 1);
+ if (ddbuf_map_info->ddbuf_info.ref_count != 1)
+ return IMG_ERROR_MEMORY_IN_USE;
+
+ ddbuf_map_info->ddbuf_info.ref_count = 0;
+
+ if (ddbuf_map_info->buf_type == VDEC_BUFTYPE_PICTURE) {
+ /* Remove this picture buffer from pictbuf list */
+ ret = resource_list_remove(&core_str_ctx->pict_buf_list, ddbuf_map_info);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS || ret == IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE);
+ if (ret != IMG_SUCCESS && ret != IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE)
+ return ret;
+
+ ddbuf_map_info->ddstr_context->map_buf_info.num_buf--;
+
+ /*
+ * Clear some state if there are no more
+ * mapped buffers.
+ */
+ if (dd_str_ctx->map_buf_info.num_buf == 0) {
+ dd_str_ctx->map_buf_info.buf_size = 0;
+ dd_str_ctx->map_buf_info.byte_interleave = FALSE;
+ }
+ }
+
+ /* Unmap this buffer from the MMU. */
+ ret = mmu_free_mem_sg(dd_str_ctx->mmu_str_handle, &ddbuf_map_info->ddbuf_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Free buffer map info. */
+ rman_free_resource(ddbuf_map_info->res_handle);
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_stream_flush
+ */
+int core_stream_flush(unsigned int res_str_id, unsigned char discard_refs)
+{
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+ struct core_stream_context *core_str_ctx;
+ int ret;
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID,
+ (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_ctx);
+ VDEC_ASSERT(dd_str_ctx->dd_str_state == VDECDD_STRSTATE_STOPPED);
+
+ /*
+ * If an unsupported sequence is found, we need an additional
+ * check for the DPB flush condition.
+ */
+ if (!dd_str_ctx->comseq_hdr_info.not_dpb_flush) {
+ ret = decoder_stream_flush(dd_str_ctx->dec_ctx, discard_refs);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_stream_release_bufs
+ */
+int core_stream_release_bufs(unsigned int res_str_id, enum vdec_buf_type buf_type)
+{
+ int ret;
+ struct core_stream_context *core_str_ctx;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_ctx);
+ VDEC_ASSERT(buf_type < VDEC_BUFTYPE_MAX);
+
+ switch (buf_type) {
+ case VDEC_BUFTYPE_PICTURE:
+ {
+ /* Empty all the decoded picture related buffer lists. */
+ ret = resource_list_empty(&core_str_ctx->pict_buf_list, TRUE, NULL, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ break;
+ }
+
+ case VDEC_BUFTYPE_BITSTREAM:
+ {
+ /* Empty the stream unit queue. */
+ ret = resource_list_empty(&core_str_ctx->str_unit_list, FALSE,
+ (resource_pfn_freeitem)core_fn_free_stream_unit,
+ core_str_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ break;
+ }
+
+ case VDEC_BUFTYPE_ALL:
+ {
+ /* Empty all the decoded picture related buffer lists. */
+ ret = resource_list_empty(&core_str_ctx->pict_buf_list, TRUE, NULL, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+
+ /* Empty the stream unit queue. */
+ ret = resource_list_empty(&core_str_ctx->str_unit_list, FALSE,
+ (resource_pfn_freeitem)core_fn_free_stream_unit,
+ core_str_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ break;
+ }
+
+ default:
+ {
+ ret = IMG_ERROR_INVALID_PARAMETERS;
+ VDEC_ASSERT(FALSE);
+ break;
+ }
+ }
+
+ if (buf_type == VDEC_BUFTYPE_PICTURE || buf_type == VDEC_BUFTYPE_ALL) {
+ ret = decoder_stream_release_buffers(dd_str_ctx->dec_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_stream_get_status
+ */
+int core_stream_get_status(unsigned int res_str_id,
+ struct vdecdd_decstr_status *str_st)
+{
+ int ret;
+ struct core_stream_context *core_str_ctx;
+ struct vdecdd_ddstr_ctx *dd_str_ctx;
+
+ /* Get access to stream context.. */
+ ret = rman_get_resource(res_str_id, VDECDD_STREAM_TYPE_ID, (void **)&core_str_ctx, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dd_str_ctx = core_str_ctx->dd_str_ctx;
+
+ VDEC_ASSERT(dd_str_ctx);
+ VDEC_ASSERT(str_st);
+
+ ret = decoder_stream_get_status(dd_str_ctx->dec_ctx, str_st);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Return success.. */
+ return IMG_SUCCESS;
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function core_free_hevc_picture_resource
+ */
+static int core_free_hevc_picture_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pic_res_int)
+{
+ int ret = IMG_SUCCESS;
+
+ ret = core_free_resbuf(&pic_res_int->genc_fragment_buf,
+ core_strctx->dd_str_ctx->mmu_str_handle);
+ if (ret != IMG_SUCCESS)
+ pr_err("MMU_Free for Genc Fragment buffer failed with error %u", ret);
+
+ return ret;
+}
+
+/*
+ * @Function core_free_hevc_sequence_resource
+ */
+static int core_free_hevc_sequence_resource(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seq_res_int)
+{
+ unsigned int i;
+ int local_result = IMG_SUCCESS;
+ int ret = IMG_SUCCESS;
+
+ for (i = 0; i < GENC_BUFF_COUNT; ++i) {
+ local_result = core_free_resbuf(&seq_res_int->genc_buffers[i],
+ core_strctx->dd_str_ctx->mmu_str_handle);
+ if (local_result != IMG_SUCCESS) {
+ ret = local_result;
+ pr_warn("MMU_Free for GENC buffer %u failed with error %u", i,
+ local_result);
+ }
+ }
+
+ local_result = core_free_resbuf(&seq_res_int->intra_buffer,
+ core_strctx->dd_str_ctx->mmu_str_handle);
+ if (local_result != IMG_SUCCESS) {
+ ret = local_result;
+ pr_warn("MMU_Free for GENC buffer %u failed with error %u", i, local_result);
+ }
+
+ local_result = core_free_resbuf(&seq_res_int->aux_buffer,
+ core_strctx->dd_str_ctx->mmu_str_handle);
+ if (local_result != IMG_SUCCESS) {
+ ret = local_result;
+ pr_warn("MMU_Free for GENC buffer %u failed with error %u", i, local_result);
+ }
+
+ return ret;
+}
+
+/*
+ * @Function core_hevc_bufs_get_size
+ */
+static int core_hevc_bufs_get_size(struct core_stream_context *core_strctx,
+ const struct vdec_comsequ_hdrinfo *seqhdr_info,
+ struct vdec_pict_size *max_pict_size,
+ struct core_pict_bufsize_info *size_info,
+ struct core_seq_resinfo *seqres_info,
+ unsigned char *resource_needed)
+{
+ enum vdec_vid_std vid_std = core_strctx->dd_str_ctx->str_config_data.vid_std;
+ unsigned int std_idx = vid_std - 1;
+
+ static const unsigned short max_slice_segments_list
+ [HEVC_LEVEL_MAJOR_NUM][HEVC_LEVEL_MINOR_NUM] = {
+ /* level: 1.0 1.1 1.2 */
+ { 16, 0, 0, },
+ /* level: 2.0 2.1 2.2 */
+ { 16, 20, 0, },
+ /* level: 3.0 3.1 3.2 */
+ { 30, 40, 0, },
+ /* level: 4.0 4.1 4.2 */
+ { 75, 75, 0, },
+ /* level: 5.0 5.1 5.2 */
+ { 200, 200, 200, },
+ /* level: 6.0 6.1 6.2 */
+ { 600, 600, 600, }
+ };
+
+ static const unsigned char max_tile_cols_list
+ [HEVC_LEVEL_MAJOR_NUM][HEVC_LEVEL_MINOR_NUM] = {
+ /* level: 1.0 1.1 1.2 */
+ { 1, 0, 0, },
+ /* level: 2.0 2.1 2.2 */
+ { 1, 1, 0, },
+ /* level: 3.0 3.1 3.2 */
+ { 2, 3, 0, },
+ /* level: 4.0 4.1 4.2 */
+ { 5, 5, 0, },
+ /* level: 5.0 5.1 5.2 */
+ { 10, 10, 10, },
+ /* level: 6.0 6.1 6.2 */
+ { 20, 20, 20, }
+ };
+
+ /* TRM 3.11.11 */
+ static const unsigned int total_sample_per_mb[PIXEL_FORMAT_444 + 1] = {
+ 256, 384, 384, 512, 768};
+
+ static const unsigned int HEVC_LEVEL_IDC_MIN = 30;
+ static const unsigned int HEVC_LEVEL_IDC_MAX = 186;
+ static const unsigned int GENC_ALIGNMENT = 0x1000;
+ static const unsigned int mb_size = 16;
+ static const unsigned int max_mb_rows_in_ctu = 4;
+ static const unsigned int bytes_per_fragment_pointer = 16;
+
+ const unsigned int max_tile_height_in_mbs =
+ seqhdr_info->max_frame_size.height / mb_size;
+
+ signed char level_maj = seqhdr_info->codec_level / 30;
+ signed char level_min = (seqhdr_info->codec_level % 30) / 3;
+
+ /*
+ * If we are somehow able to deliver more information here (CTU size,
+ * number of tile columns/rows) then memory usage could be reduced
+ */
+ const struct pixel_pixinfo *pix_info = &seqhdr_info->pixel_info;
+ const unsigned int bit_depth = pix_info->bitdepth_y >= pix_info->bitdepth_c ?
+ pix_info->bitdepth_y : pix_info->bitdepth_c;
+ unsigned short max_slice_segments;
+ unsigned char max_tile_cols;
+ unsigned int raw_byte_per_mb;
+ unsigned int *genc_fragment_bufsize;
+ unsigned int *genc_buf_size;
+
+ /* Reset the MB parameters buffer size. */
+ size_info->mbparams_bufsize = 0;
+ *resource_needed = TRUE;
+
+ if (mbparam_allocinfo[std_idx].alloc_mbparam_bufs) {
+ /* shall be == 64 (0x40) */
+ const unsigned int align = mbparam_allocinfo[std_idx].mbparam_size;
+ const unsigned int dpb_width = (max_pict_size->width + align * 2 - 1) / align * 2;
+ const unsigned int pic_height = (max_pict_size->height + align - 1) / align;
+ const unsigned int pic_width = (max_pict_size->width + align - 1) / align;
+
+ /* calculating for worst case: max frame size, B-frame */
+ size_info->mbparams_bufsize = (align * 2) * pic_width * pic_height +
+ align * dpb_width * pic_height;
+
+ /* Adjust the buffer size for MSVDX. */
+ vdecddutils_buf_vxd_adjust_size(&size_info->mbparams_bufsize);
+ }
+
+ if (seqhdr_info->codec_level > HEVC_LEVEL_IDC_MAX ||
+ seqhdr_info->codec_level < HEVC_LEVEL_IDC_MIN) {
+ level_maj = 6;
+ level_min = 2;
+ }
+
+ if (level_maj > 0 && level_maj <= HEVC_LEVEL_MAJOR_NUM &&
+ level_min >= 0 && level_min < HEVC_LEVEL_MINOR_NUM) {
+ max_slice_segments = max_slice_segments_list[level_maj - 1][level_min];
+ max_tile_cols = max_tile_cols_list[level_maj - 1][level_min];
+ } else {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ raw_byte_per_mb = total_sample_per_mb[pix_info->chroma_fmt_idc] *
+ VDEC_ALIGN_SIZE(bit_depth, 8, unsigned int, int) / 8;
+
+ genc_fragment_bufsize = &size_info->hevc_bufsize_pict.genc_fragment_bufsize;
+ genc_buf_size = &seqres_info->hevc_bufsize_seqres.genc_bufsize;
+
+ *genc_fragment_bufsize = bytes_per_fragment_pointer * (seqhdr_info->max_frame_size.height /
+ mb_size * max_tile_cols + max_slice_segments - 1) * max_mb_rows_in_ctu;
+
+ /*
+ * The GencBufferSize formula is taken from the TRM and was found by
+ * the HW and CSIM teams for sensible streams, i.e. where
+ * size_of_stream < size_of_output_YUV. In the video stream database
+ * it is possible to find pathological Argon streams that do not meet
+ * this requirement, e.g. #58417, #58419, #58421, #58423. To make
+ * stream #58417 run, the formula below would have to change from
+ * (2 * 384) * ... to (3 * 384) * ... This solution is applied by DEVA.
+ */
+ *genc_buf_size = 2 * raw_byte_per_mb * seqhdr_info->max_frame_size.width /
+ mb_size * max_tile_height_in_mbs / 4;
+
+ *genc_buf_size = VDEC_ALIGN_SIZE(*genc_buf_size, GENC_ALIGNMENT,
+ unsigned int, unsigned int);
+ *genc_fragment_bufsize = VDEC_ALIGN_SIZE(*genc_fragment_bufsize, GENC_ALIGNMENT,
+ unsigned int, unsigned int);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Sizes for GENC in HEVC: 0x%X (frag), 0x%X (x4)",
+ *genc_fragment_bufsize,
+ *genc_buf_size);
+#endif
+
+ seqres_info->hevc_bufsize_seqres.intra_bufsize = 4 * seqhdr_info->max_frame_size.width;
+ if (seqhdr_info->pixel_info.mem_pkg != PIXEL_BIT8_MP)
+ seqres_info->hevc_bufsize_seqres.intra_bufsize *= 2;
+
+ seqres_info->hevc_bufsize_seqres.aux_bufsize = (512 * 1024);
+
+ return IMG_SUCCESS;
+}
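To make the level arithmetic above concrete, a worked example using the standard HEVC encoding of general_level_idc = 30 x level:

    /*
     * HEVC level 5.1 is signalled as codec_level == 153:
     *
     *   level_maj = 153 / 30       = 5
     *   level_min = (153 % 30) / 3 = 1
     *
     * Indexing the tables above with [5 - 1][1] then yields
     * max_slice_segments == 200 and max_tile_cols == 10.
     */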
+
+/*
+ * @Function core_is_hevc_stream_resource_suitable
+ */
+static unsigned char
+core_is_hevc_stream_resource_suitable(struct core_pict_resinfo *pict_res_info,
+ struct core_pict_resinfo *old_pict_res_info,
+ struct core_seq_resinfo *seq_res_info,
+ struct core_seq_resinfo *old_seq_res_info)
+{
+ return (seq_res_info->hevc_bufsize_seqres.genc_bufsize <=
+ old_seq_res_info->hevc_bufsize_seqres.genc_bufsize &&
+ seq_res_info->hevc_bufsize_seqres.intra_bufsize <=
+ old_seq_res_info->hevc_bufsize_seqres.intra_bufsize &&
+ seq_res_info->hevc_bufsize_seqres.aux_bufsize <=
+ old_seq_res_info->hevc_bufsize_seqres.aux_bufsize &&
+ pict_res_info->size_info.hevc_bufsize_pict.genc_fragment_bufsize <=
+ old_pict_res_info->size_info.hevc_bufsize_pict.genc_fragment_bufsize);
+}
+
+/*
+ * @Function core_alloc_hevc_specific_seq_buffers
+ */
+static int
+core_alloc_hevc_specific_seq_buffers(struct core_stream_context *core_strctx,
+ struct vdecdd_seq_resint *seqres_int,
+ struct vxdio_mempool mempool,
+ struct core_seq_resinfo *seqres_info)
+{
+ unsigned int i;
+ int ret = IMG_SUCCESS;
+
+ /* Allocate GENC buffers */
+ for (i = 0; i < GENC_BUFF_COUNT; ++i) {
+ /* Allocate the GENC buffer info structure. */
+ ret = core_alloc_resbuf(&seqres_int->genc_buffers[i],
+ seqres_info->hevc_bufsize_seqres.genc_bufsize,
+ core_strctx->dd_str_ctx->mmu_str_handle,
+ mempool);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ seqres_int->genc_buf_id = ++core_strctx->std_spec_context.hevc_ctx.genc_id_gen;
+
+ /* Allocate the intra buffer info structure. */
+ ret = core_alloc_resbuf(&seqres_int->intra_buffer,
+ seqres_info->hevc_bufsize_seqres.intra_bufsize,
+ core_strctx->dd_str_ctx->mmu_str_handle,
+ mempool);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Allocate the aux buffer info structure. */
+ ret = core_alloc_resbuf(&seqres_int->aux_buffer,
+ seqres_info->hevc_bufsize_seqres.aux_bufsize,
+ core_strctx->dd_str_ctx->mmu_str_handle,
+ mempool);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function core_alloc_hevc_specific_pict_buffers
+ */
+static int
+core_alloc_hevc_specific_pict_buffers(struct core_stream_context *core_strctx,
+ struct vdecdd_pict_resint *pict_res_int,
+ struct vxdio_mempool mempool,
+ struct core_pict_resinfo *pict_res_info)
+{
+ int ret;
+
+ /* Allocate the GENC fragment buffer. */
+ ret = core_alloc_resbuf(&pict_res_int->genc_fragment_buf,
+ pict_res_info->size_info.hevc_bufsize_pict.genc_fragment_bufsize,
+ core_strctx->dd_str_ctx->mmu_str_handle,
+ mempool);
+
+ return ret;
+}
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/core.h b/drivers/media/platform/vxe-vxd/decoder/core.h
new file mode 100644
index 000000000000..23a2ec835a15
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/core.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder CORE and V4L2 Node Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef __CORE_H__
+#define __CORE_H__
+
+#include <linux/types.h>
+#include "decoder.h"
+
+int core_initialise(void *dev_handle, unsigned int internal_heap_id,
+ void *cb);
+
+/**
+ * core_deinitialise - deinitialise the decoder core
+ *
+ * Return: IMG_SUCCESS on success, or an error code otherwise.
+ */
+int core_deinitialise(void);
+
+int core_supported_features(struct vdec_features *features);
+
+int core_stream_create(void *vxd_dec_ctx_arg,
+ const struct vdec_str_configdata *str_cfgdata,
+ unsigned int *res_str_id);
+
+int core_stream_destroy(unsigned int res_str_id);
+
+int core_stream_play(unsigned int res_str_id);
+
+int core_stream_stop(unsigned int res_str_id);
+
+int core_stream_map_buf(unsigned int res_str_id, enum vdec_buf_type buf_type,
+ struct vdec_buf_info *buf_info, unsigned int *buf_map_id);
+
+int core_stream_map_buf_sg(unsigned int res_str_id,
+ enum vdec_buf_type buf_type,
+ struct vdec_buf_info *buf_info,
+ void *sgt, unsigned int *buf_map_id);
+
+int core_stream_unmap_buf(unsigned int buf_map_id);
+
+int core_stream_unmap_buf_sg(unsigned int buf_map_id);
+
+int core_stream_submit_unit(unsigned int res_str_id,
+ struct vdecdd_str_unit *str_unit);
+
+int core_stream_fill_pictbuf(unsigned int buf_map_id);
+
+/* This function must be called before stream play. */
+int core_stream_set_output_config(unsigned int res_str_id,
+ struct vdec_str_opconfig *str_opcfg,
+ struct vdec_pict_bufconfig *pict_bufcg);
+
+int core_stream_flush(unsigned int res_str_id, unsigned char discard_refs);
+
+int core_stream_release_bufs(unsigned int res_str_id,
+ enum vdec_buf_type buf_type);
+
+int core_stream_get_status(unsigned int res_str_id,
+ struct vdecdd_decstr_status *str_status);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/dec_resources.c b/drivers/media/platform/vxe-vxd/decoder/dec_resources.c
new file mode 100644
index 000000000000..e993a45eb540
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/dec_resources.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Decoder resource allocation and tracking function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "decoder.h"
+#include "dec_resources.h"
+#include "hw_control.h"
+#include "h264fw_data.h"
+#include "h264_idx.h"
+#include "h264_vlc.h"
+#include "img_mem.h"
+#include "pool_api.h"
+#include "vdecdd_utils.h"
+#include "vdec_mmu_wrapper.h"
+#include "vid_buf.h"
+#include "vxd_mmu_defs.h"
+
+#define DECODER_END_BYTES_SIZE 40
+
+#define BATCH_MSG_BUFFER_SIZE (8 * 4096)
+#define INTRA_BUF_SIZE (1024 * 32)
+#define AUX_LINE_BUFFER_SIZE (512 * 1024)
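+/* i.e. a 32 KiB batch message buffer, a 32 KiB intra buffer and a 512 KiB aux line buffer */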
+
+static void decres_pack_vlc_tables(unsigned short *packed,
+ unsigned short *unpacked,
+ unsigned short size)
+{
+ unsigned short i, j;
+
+ for (i = 0; i < size; i++) {
+ j = i * 3;
+ /*
+ * opcode 14:12
+ * width 11:9
+ * symbol 8:0
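+		 *
+		 * e.g. an entry (opcode=2, width=3, symbol=0x1A) packs to
+		 * (2 << 12) | (3 << 9) | 0x1A = 0x261A
+		 * (illustrative values, not from a real table).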
+ */
+ packed[i] = 0 | ((unpacked[j]) << 12) |
+ ((unpacked[j + 1]) << 9) | (unpacked[j + 2]);
+ }
+}
+
+struct dec_vlctable {
+ void *data;
+ unsigned int num_entries;
+ void *index_table;
+ unsigned int num_tables;
+};
+
+/*
+ * Union of the firmware parser header structures. Dec_resources uses the
+ * size of the largest member to allocate the header buffer.
+ */
+union decres_fw_hdrs {
+ struct h264fw_header_data h264_header;
+};
+
+/*
+ * This array contains the size of each resource allocation.
+ * @brief Resource Allocation Sizes
+ * NOTE: This should be kept in step with enum dec_res_type.
+ */
+static const unsigned int res_size[DECODER_RESTYPE_MAX] = {
+ sizeof(struct vdecfw_transaction),
+ sizeof(union decres_fw_hdrs),
+ BATCH_MSG_BUFFER_SIZE,
+#ifdef HAS_HEVC
+ MEM_TO_REG_BUF_SIZE + SLICE_PARAMS_BUF_SIZE + ABOVE_PARAMS_BUF_SIZE,
+#endif
+};
+
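+/*
+ * Byte pattern copied into the start-code device buffer: the Annex-B
+ * start-code prefix (00 00 01) followed by a zero byte.
+ */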
+static const unsigned char start_code[] = {
+ 0x00, 0x00, 0x01, 0x00,
+};
+
+static void decres_get_vlc_data(struct dec_vlctable *vlc_table,
+ enum vdec_vid_std vid_std)
+{
+ switch (vid_std) {
+ case VDEC_STD_H264:
+ vlc_table->data = h264_vlc_table_data;
+ vlc_table->num_entries = h264_vlc_table_size;
+ vlc_table->index_table = h264_vlc_index_data;
+ vlc_table->num_tables = h264_vlc_index_size;
+ break;
+
+ default:
+ memset(vlc_table, 0x0, sizeof(*vlc_table));
+ break;
+ }
+}
+
+static void decres_fnbuf_info_destructor(void *param, void *cb_handle)
+{
+ struct vidio_ddbufinfo *dd_bufinfo = (struct vidio_ddbufinfo *)param;
+ int ret;
+ void *mmu_handle = cb_handle;
+
+ VDEC_ASSERT(dd_bufinfo);
+
+ ret = mmu_free_mem(mmu_handle, dd_bufinfo);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+
+ kfree(dd_bufinfo);
+ dd_bufinfo = NULL;
+}
+
+int dec_res_picture_detach(void **res_ctx, struct dec_decpict *dec_pict)
+{
+ struct dec_res_ctx *local_res_ctx;
+
+ VDEC_ASSERT(res_ctx);
+ VDEC_ASSERT(res_ctx && *res_ctx);
+ VDEC_ASSERT(dec_pict);
+ VDEC_ASSERT(dec_pict && dec_pict->transaction_info);
+
+ if (!res_ctx || !(*res_ctx) || !dec_pict ||
+ !dec_pict->transaction_info) {
+ pr_err("Invalid parameters\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ local_res_ctx = (struct dec_res_ctx *)*res_ctx;
+
+ /* return transaction buffer */
+ lst_add(&local_res_ctx->pool_data_list[DECODER_RESTYPE_TRANSACTION],
+ dec_pict->transaction_info);
+ pool_resfree(dec_pict->transaction_info->res);
+
+ /* return picture header information buffer */
+ lst_add(&local_res_ctx->pool_data_list[DECODER_RESTYPE_HDR],
+ dec_pict->hdr_info);
+ pool_resfree(dec_pict->hdr_info->res);
+
+ /* return batch message buffer */
+ lst_add(&local_res_ctx->pool_data_list[DECODER_RESTYPE_BATCH_MSG],
+ dec_pict->batch_msginfo);
+ pool_resfree(dec_pict->batch_msginfo->res);
+
+#ifdef HAS_HEVC
+ if (dec_pict->pvdec_info) {
+ lst_add(&local_res_ctx->pool_data_list[DECODER_RESTYPE_PVDEC_BUF],
+ dec_pict->pvdec_info);
+ pool_resfree(dec_pict->pvdec_info->res);
+ }
+#endif
+
+ return IMG_SUCCESS;
+}
+
+static int decres_get_resource(struct dec_res_ctx *res_ctx,
+ enum dec_res_type res_type,
+ struct res_resinfo **res_info,
+ unsigned char fill_zeros)
+{
+ struct res_resinfo *local_res_info = NULL;
+ unsigned int ret = IMG_SUCCESS;
+
+ VDEC_ASSERT(res_ctx);
+ VDEC_ASSERT(res_info);
+
+ local_res_info = lst_removehead(&res_ctx->pool_data_list[res_type]);
+ VDEC_ASSERT(local_res_info);
+ if (local_res_info) {
+ VDEC_ASSERT(local_res_info->ddbuf_info);
+ if (local_res_info->ddbuf_info) {
+ ret = pool_resalloc(res_ctx->res_pool[res_type], local_res_info->res);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS) {
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ return ret;
+ }
+
+ if (fill_zeros)
+ memset(local_res_info->ddbuf_info->cpu_virt, 0,
+ local_res_info->ddbuf_info->buf_size);
+
+ *res_info = local_res_info;
+ } else {
+ ret = IMG_ERROR_FATAL;
+ return ret;
+ }
+ } else {
+ ret = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ return ret;
+ }
+
+ return ret;
+}
+
+int dec_res_picture_attach(void **res_ctx, enum vdec_vid_std vid_std,
+ struct dec_decpict *dec_pict)
+{
+ struct dec_res_ctx *local_res_ctx;
+ int ret;
+
+ VDEC_ASSERT(res_ctx);
+ VDEC_ASSERT(res_ctx && *res_ctx);
+ VDEC_ASSERT(dec_pict);
+ if (!res_ctx || !(*res_ctx) || !dec_pict) {
+ pr_err("Invalid parameters");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ local_res_ctx = (struct dec_res_ctx *)*res_ctx;
+
+ /* Obtain transaction buffer. */
+ ret = decres_get_resource(local_res_ctx, DECODER_RESTYPE_TRANSACTION,
+ &dec_pict->transaction_info, TRUE);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Obtain picture header information buffer */
+ ret = decres_get_resource(local_res_ctx, DECODER_RESTYPE_HDR,
+ &dec_pict->hdr_info, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+#ifdef HAS_HEVC
+ /* Obtain HEVC buffer */
+ if (vid_std == VDEC_STD_HEVC) {
+ ret = decres_get_resource(local_res_ctx, DECODER_RESTYPE_PVDEC_BUF,
+ &dec_pict->pvdec_info, TRUE);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+#endif
+ /* Obtain picture batch message buffer */
+ ret = decres_get_resource(local_res_ctx, DECODER_RESTYPE_BATCH_MSG,
+ &dec_pict->batch_msginfo, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dec_pict->intra_bufinfo = &local_res_ctx->intra_bufinfo;
+ dec_pict->auxline_bufinfo = &local_res_ctx->auxline_bufinfo;
+ dec_pict->vlc_tables_bufinfo =
+ &local_res_ctx->vlc_tables_bufinfo[vid_std];
+ dec_pict->vlc_idx_tables_bufinfo =
+ &local_res_ctx->vlc_idxtables_bufinfo[vid_std];
+ dec_pict->start_code_bufinfo = &local_res_ctx->start_code_bufinfo;
+
+ return IMG_SUCCESS;
+}
+
+int dec_res_create(void *mmu_handle, struct vxd_coreprops *core_props,
+ unsigned int num_dec_slots,
+ unsigned int mem_heap_id, void **resources)
+{
+ struct dec_res_ctx *local_res_ctx;
+ int ret;
+ unsigned int i = 0;
+ struct dec_vlctable vlc_table;
+ enum sys_emem_attrib mem_attrib;
+
+ VDEC_ASSERT(core_props);
+ VDEC_ASSERT(resources);
+ if (!core_props || !resources) {
+ pr_err("Invalid parameters");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ mem_attrib = (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE);
+ mem_attrib |= (enum sys_emem_attrib)SYS_MEMATTRIB_INTERNAL;
+
+ local_res_ctx = kzalloc(sizeof(*local_res_ctx), GFP_KERNEL);
+ VDEC_ASSERT(local_res_ctx);
+ if (!local_res_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Allocate Intra buffer. */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d call MMU_StreamMalloc", __func__, __LINE__);
+#endif
+
+ ret = mmu_stream_alloc(mmu_handle, MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+ mem_attrib,
+ core_props->num_pixel_pipes *
+ INTRA_BUF_SIZE * 3,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &local_res_ctx->intra_bufinfo);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Allocate aux line buffer. */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d call MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(mmu_handle, MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+ mem_attrib,
+ AUX_LINE_BUFFER_SIZE * 3 *
+ core_props->num_pixel_pipes,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &local_res_ctx->auxline_bufinfo);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Allocate standard-specific buffers. */
+ for (i = VDEC_STD_UNDEFINED + 1; i < VDEC_STD_MAX; i++) {
+ decres_get_vlc_data(&vlc_table, (enum vdec_vid_std)i);
+
+ if (vlc_table.num_tables > 0) {
+ /*
+			 * Size of the VLC index table in bytes. It has to be
+			 * aligned to 4 so that the transfer to the MTX
+			 * succeeds (the VLC index data is copied to the
+			 * local RAM of the MTX).
+ */
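+			/*
+			 * e.g. 37 index tables would need
+			 * ALIGN(37 * 3 * 2, 4) = 224 bytes
+			 * (illustrative count only).
+			 */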
+ unsigned int vlc_idxtable_sz =
+ ALIGN((sizeof(unsigned short) * vlc_table.num_tables * 3), 4);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info(" %s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+
+ ret = mmu_stream_alloc(mmu_handle,
+ MMU_HEAP_STREAM_BUFFERS,
+ mem_heap_id, (enum sys_emem_attrib)(mem_attrib |
+ SYS_MEMATTRIB_CORE_READ_ONLY |
+ SYS_MEMATTRIB_CPU_WRITE),
+ sizeof(unsigned short) * vlc_table.num_entries,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &local_res_ctx->vlc_tables_bufinfo[i]);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (vlc_table.data)
+				decres_pack_vlc_tables(local_res_ctx->vlc_tables_bufinfo[i].cpu_virt,
+						       vlc_table.data,
+						       vlc_table.num_entries);
+
+ /* VLC index table */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc",
+ __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(mmu_handle,
+ MMU_HEAP_STREAM_BUFFERS,
+ mem_heap_id, (enum sys_emem_attrib)(mem_attrib |
+ SYS_MEMATTRIB_CORE_READ_ONLY |
+ SYS_MEMATTRIB_CPU_WRITE),
+ vlc_idxtable_sz,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &local_res_ctx->vlc_idxtables_bufinfo[i]);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (vlc_table.index_table)
+ memcpy(local_res_ctx->vlc_idxtables_bufinfo[i].cpu_virt,
+ vlc_table.index_table,
+ local_res_ctx->vlc_idxtables_bufinfo[i].buf_size);
+ }
+ }
+
+ /* Start code */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(mmu_handle, MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+ (enum sys_emem_attrib)(mem_attrib |
+ SYS_MEMATTRIB_CORE_READ_ONLY |
+ SYS_MEMATTRIB_CPU_WRITE),
+ sizeof(start_code),
+ DEV_MMU_PAGE_ALIGNMENT,
+ &local_res_ctx->start_code_bufinfo);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ memcpy(local_res_ctx->start_code_bufinfo.cpu_virt, start_code, sizeof(start_code));
+
+ for (i = 0; i < DECODER_RESTYPE_MAX; i++) {
+ unsigned int j;
+
+ ret = pool_api_create(&local_res_ctx->res_pool[i]);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ lst_init(&local_res_ctx->pool_data_list[i]);
+
+ for (j = 0; j < num_dec_slots; j++) {
+ struct res_resinfo *local_res_info;
+
+ local_res_info = kzalloc(sizeof(*local_res_info), GFP_KERNEL);
+
+ VDEC_ASSERT(local_res_info);
+ if (!local_res_info) {
+ pr_err("Failed to allocate memory\n");
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_local_res_info_alloc;
+ }
+
+ local_res_info->ddbuf_info = kzalloc(sizeof(*local_res_info->ddbuf_info),
+ GFP_KERNEL);
+ VDEC_ASSERT(local_res_info->ddbuf_info);
+ if (!local_res_info->ddbuf_info) {
+ pr_err("Failed to allocate memory for resource buffer information structure");
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_local_dd_buf_alloc;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(mmu_handle, MMU_HEAP_STREAM_BUFFERS,
+ mem_heap_id, (enum sys_emem_attrib)(mem_attrib |
+ SYS_MEMATTRIB_CPU_READ |
+ SYS_MEMATTRIB_CPU_WRITE),
+ res_size[i],
+ DEV_MMU_PAGE_ALIGNMENT,
+ local_res_info->ddbuf_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_local_res_alloc;
+
+ /* Register with the buffer pool */
+ ret = pool_resreg(local_res_ctx->res_pool[i],
+ decres_fnbuf_info_destructor,
+ local_res_info->ddbuf_info,
+ sizeof(*local_res_info->ddbuf_info),
+ FALSE, NULL,
+ &local_res_info->res, mmu_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_local_res_register;
+
+ lst_add(&local_res_ctx->pool_data_list[i],
+ local_res_info);
+ continue;
+
+/* Roll back in case of local errors. */
+error_local_res_register:
+			mmu_free_mem(mmu_handle, local_res_info->ddbuf_info);
+error_local_res_alloc:
+			kfree(local_res_info->ddbuf_info);
+error_local_dd_buf_alloc:
+			kfree(local_res_info);
+error_local_res_info_alloc:
+			goto error;
+ }
+ }
+
+ *resources = (void *)local_res_ctx;
+
+ return IMG_SUCCESS;
+
+/* Roll back in case of errors. */
+error:
+	dec_res_destroy(mmu_handle, (void *)local_res_ctx);
+
+ return ret;
+}
+
+/*
+ * @Function dec_res_destroy
+ */
+int dec_res_destroy(void *mmudev_handle, void *res_ctx)
+{
+ int ret = IMG_SUCCESS;
+ int ret1 = IMG_SUCCESS;
+ unsigned int i = 0;
+ struct res_resinfo *local_res_info;
+ struct res_resinfo *next_res_info;
+
+ struct dec_res_ctx *local_res_ctx = (struct dec_res_ctx *)res_ctx;
+
+ if (!local_res_ctx) {
+ pr_err("Invalid parameters");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (local_res_ctx->intra_bufinfo.hndl_memory) {
+ ret1 = mmu_free_mem(mmudev_handle, &local_res_ctx->intra_bufinfo);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ }
+
+ if (local_res_ctx->auxline_bufinfo.hndl_memory) {
+ ret1 = mmu_free_mem(mmudev_handle, &local_res_ctx->auxline_bufinfo);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ }
+
+ for (i = 0; i < VDEC_STD_MAX; i++) {
+ if (local_res_ctx->vlc_tables_bufinfo[i].hndl_memory) {
+ ret1 = mmu_free_mem(mmudev_handle, &local_res_ctx->vlc_tables_bufinfo[i]);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ }
+
+ if (local_res_ctx->vlc_idxtables_bufinfo[i].hndl_memory) {
+ ret1 = mmu_free_mem(mmudev_handle,
+ &local_res_ctx->vlc_idxtables_bufinfo[i]);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ }
+ }
+
+ if (local_res_ctx->start_code_bufinfo.hndl_memory) {
+ ret1 = mmu_free_mem(mmudev_handle, &local_res_ctx->start_code_bufinfo);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ }
+
+ for (i = 0; i < DECODER_RESTYPE_MAX; i++) {
+ if (local_res_ctx->res_pool[i]) {
+ local_res_info =
+ lst_first(&local_res_ctx->pool_data_list[i]);
+ while (local_res_info) {
+ next_res_info = lst_next(local_res_info);
+ lst_remove(&local_res_ctx->pool_data_list[i], local_res_info);
+ ret1 = pool_resdestroy(local_res_info->res, TRUE);
+ VDEC_ASSERT(ret1 == IMG_SUCCESS);
+ if (ret1 != IMG_SUCCESS)
+ ret = ret1;
+ kfree(local_res_info);
+ local_res_info = next_res_info;
+ }
+ pool_destroy(local_res_ctx->res_pool[i]);
+ }
+ }
+
+ kfree(local_res_ctx);
+ return ret;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/dec_resources.h b/drivers/media/platform/vxe-vxd/decoder/dec_resources.h
new file mode 100644
index 000000000000..d068ca57d147
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/dec_resources.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder resource allocation and destroy Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _DEC_RESOURCES_H_
+#define _DEC_RESOURCES_H_
+
+#include "decoder.h"
+#include "lst.h"
+
+/*
+ * This structure contains the core resources.
+ * @brief Decoder Core Resources
+ */
+struct dec_res_ctx {
+ struct vidio_ddbufinfo intra_bufinfo;
+ struct vidio_ddbufinfo auxline_bufinfo;
+ struct vidio_ddbufinfo start_code_bufinfo;
+ struct vidio_ddbufinfo vlc_tables_bufinfo[VDEC_STD_MAX];
+ struct vidio_ddbufinfo vlc_idxtables_bufinfo[VDEC_STD_MAX];
+ void *res_pool[DECODER_RESTYPE_MAX];
+ struct lst_t pool_data_list[DECODER_RESTYPE_MAX];
+};
+
+int dec_res_picture_detach(void **res_ctx, struct dec_decpict *dec_pict);
+
+int dec_res_picture_attach(void **res_ctx, enum vdec_vid_std vid_std,
+ struct dec_decpict *dec_pict);
+
+int dec_res_create(void *mmudev_handle,
+ struct vxd_coreprops *core_props, unsigned int num_dec_slots,
+ unsigned int mem_heap_id, void **resources);
+
+int dec_res_destroy(void *mmudev_handle, void *res_ctx);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/decoder.c b/drivers/media/platform/vxe-vxd/decoder/decoder.c
new file mode 100644
index 000000000000..4791d12f8fd2
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/decoder.c
@@ -0,0 +1,4621 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Decoder Component function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include "decoder.h"
+#include "dec_resources.h"
+#include "dq.h"
+#include "hw_control.h"
+#include "h264fw_data.h"
+#include "idgen_api.h"
+#include "img_errors.h"
+#ifdef HAS_JPEG
+#include "jpegfw_data.h"
+#endif
+#include "lst.h"
+#include "pool_api.h"
+#include "resource.h"
+#include "translation_api.h"
+#include "vdecdd_utils.h"
+#include "vdec_mmu_wrapper.h"
+#include "vxd_dec.h"
+
+#define CORE_NUM_DECODE_SLOTS 2
+
+#define MAX_PLATFORM_SUPPORTED_HEIGHT 65536
+#define MAX_PLATFORM_SUPPORTED_WIDTH 65536
+
+#define MAX_CONCURRENT_STREAMS 16
+
+/* Maximum number of unique picture ids within stream. */
+#define DECODER_MAX_PICT_ID GET_STREAM_PICTURE_ID(((1ULL << 32) - 1ULL))
+
+/* Maximum number of concurrent pictures within stream. */
+#define DECODER_MAX_CONCURRENT_PICT 0x100
+
+static inline unsigned int get_next_picture_id(unsigned int cur_pict_id)
+{
+	return (cur_pict_id == FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_PICTURE_ID) ?
+		1 : cur_pict_id + 1);
+}
+
+static inline unsigned int get_prev_picture_id(unsigned int cur_pict_id)
+{
+	return (cur_pict_id == 1 ?
+		FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_PICTURE_ID) :
+		cur_pict_id - 1);
+}
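+
+/*
+ * Picture IDs wrap within the 16-bit stream-picture-ID field: assuming
+ * FWIF_BIT_MASK(n) expands to ((1 << (n)) - 1), IDs run 1..0xFFFF and then
+ * wrap back to 1, so picture ID 0 is never produced.
+ */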
+
+#define H264_SGM_BUFFER_BYTES_PER_MB 1
+#define H264_SGM_MAX_MBS 3600
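+/*
+ * 3600 MBs at one byte each covers frames up to 1280x720
+ * (80 x 45 = 3600 macroblocks).
+ */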
+
+#define CONTEXT_BUFF_SIZE (72)
+
+/*
+ * Number of bits in transaction ID used to represent
+ * picture number in stream.
+ */
+#define FWIF_NUMBITS_STREAM_PICTURE_ID 16
+/*
+ * Number of bits in transaction ID used to represent
+ * picture number in core.
+ */
+#define FWIF_NUMBITS_CORE_PICTURE_ID 4
+/*
+ * Number of bits in transaction ID used to represent
+ * stream id.
+ */
+#define FWIF_NUMBITS_STREAM_ID 8
+/* Number of bits in transaction ID used to represent core id. */
+#define FWIF_NUMBITS_CORE_ID 4
+
+/* Offset in transaction ID to picture number in stream. */
+#define FWIF_OFFSET_STREAM_PICTURE_ID 0
+/* Offset in transaction ID to picture number in core. */
+#define FWIF_OFFSET_CORE_PICTURE_ID (FWIF_OFFSET_STREAM_PICTURE_ID + \
+ FWIF_NUMBITS_STREAM_PICTURE_ID)
+/* Offset in transaction ID to stream id. */
+#define FWIF_OFFSET_STREAM_ID (FWIF_OFFSET_CORE_PICTURE_ID + \
+ FWIF_NUMBITS_CORE_PICTURE_ID)
+/* Offset in transaction ID to core id. */
+#define FWIF_OFFSET_CORE_ID (FWIF_OFFSET_STREAM_ID + \
+ FWIF_NUMBITS_STREAM_ID)
+
+#define CREATE_TRANSACTION_ID(core_id, stream_id, core_pic, stream_pic) \
+ (SET_CORE_ID((core_id)) | SET_STREAM_ID((stream_id)) | \
+ SET_CORE_PICTURE_ID((core_pic)) | SET_STREAM_PICTURE_ID((stream_pic)))
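+
+/*
+ * Resulting 32-bit transaction ID layout, assuming the SET_* helpers shift
+ * by the FWIF_OFFSET_* values above:
+ *   [31:28] core id | [27:20] stream id | [19:16] core picture id |
+ *   [15:0] stream picture id
+ * e.g. CREATE_TRANSACTION_ID(0, 0x12, 3, 0x42) == 0x01230042
+ * (illustrative values only).
+ */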
+
+static inline struct dec_core_ctx *decoder_str_ctx_to_core_ctx(struct dec_str_ctx *decstrctx)
+{
+	if (decstrctx && decstrctx->decctx)
+		return decstrctx->decctx->dec_core_ctx;
+
+	return NULL;
+}
+
+static const struct vdecdd_dd_devconfig def_dev_cfg = {
+	.num_slots_per_pipe = CORE_NUM_DECODE_SLOTS,
+};
+
+/*
+ * This array defines the names of the VDEC standards.
+ * Must be kept in sync with enum vdec_vid_std.
+ * @brief Names of the VDEC standards
+ */
+static const char * const vid_std_names[] = {
+ "VDEC_STD_UNDEFINED",
+ "VDEC_STD_MPEG2",
+ "VDEC_STD_MPEG4",
+ "VDEC_STD_H263",
+ "VDEC_STD_H264",
+ "VDEC_STD_VC1",
+ "VDEC_STD_AVS",
+ "VDEC_STD_REAL",
+ "VDEC_STD_JPEG",
+ "VDEC_STD_VP6",
+ "VDEC_STD_VP8",
+ "VDEC_STD_SORENSON",
+ "VDEC_STD_HEVC"
+};
+
+#ifdef ERROR_RECOVERY_SIMULATION
+extern int fw_error_value;
+#endif
+
+/*
+ * @Function decoder_set_device_config
+ */
+static int decoder_set_device_config(const struct vdecdd_dd_devconfig **dd_dev_config)
+{
+ struct vdecdd_dd_devconfig *local_dev_cfg;
+
+ VDEC_ASSERT(dd_dev_config);
+
+ /* Allocate device configuration structure */
+ local_dev_cfg = kzalloc(sizeof(*local_dev_cfg), GFP_KERNEL);
+ VDEC_ASSERT(local_dev_cfg);
+ if (!local_dev_cfg)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Set the default device configuration */
+ *local_dev_cfg = def_dev_cfg;
+
+ *dd_dev_config = local_dev_cfg;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_set_feature_flags
+ * @Description
+ * This function sets the feature flags from the core properties.
+ * @Input core_props : Pointer to core properties.
+ * @Input core_feat_flags : Pointer to the core feature flags word.
+ * @Input pipe_feat_flags : Pointer to the per-pipe feature flags words.
+ */
+static void decoder_set_feature_flags(struct vxd_coreprops *core_props,
+ unsigned int *core_feat_flags,
+ unsigned int *pipe_feat_flags)
+{
+ unsigned char pipe_minus_one;
+
+ VDEC_ASSERT(core_props);
+ VDEC_ASSERT(core_feat_flags);
+ VDEC_ASSERT(pipe_feat_flags);
+
+ for (pipe_minus_one = 0; pipe_minus_one < core_props->num_pixel_pipes;
+ pipe_minus_one++) {
+ *core_feat_flags |= pipe_feat_flags[pipe_minus_one] |=
+ core_props->h264[pipe_minus_one] ?
+ VDECDD_COREFEATURE_H264 : 0;
+#ifdef HAS_JPEG
+ *core_feat_flags |= pipe_feat_flags[pipe_minus_one] |=
+ core_props->jpeg[pipe_minus_one] ?
+ VDECDD_COREFEATURE_JPEG : 0;
+#endif
+#ifdef HAS_HEVC
+ *core_feat_flags |= pipe_feat_flags[pipe_minus_one] |=
+ core_props->hevc[pipe_minus_one] ?
+ VDECDD_COREFEATURE_HEVC : 0;
+#endif
+ }
+}
+
+/*
+ * @Function decoder_stream_get_context
+ * @Description
+ * This function returns the stream context structure for the given
+ * stream handle.
+ * @Return struct dec_str_ctx : This function returns a pointer
+ * to the stream
+ * context structure or NULL if not found.
+ */
+static struct dec_str_ctx *decoder_stream_get_context(void *dec_str_context)
+{
+ return (struct dec_str_ctx *)dec_str_context;
+}
+
+/*
+ * @Function decoder_core_enumerate
+ * @Description
+ * This function enumerates a decoder core and returns its handle.
+ * Usage: before calls to other DECODE_Core or DECODE_Stream functions.
+ * @Input dec_context : Pointer to Decoder context.
+ * @Input dev_cfg : Device configuration.
+ * @Return This function returns either IMG_SUCCESS or an
+ * error code.
+ */
+static int decoder_core_enumerate(struct dec_ctx *dec_context,
+ const struct vdecdd_dd_devconfig *dev_cfg,
+ unsigned int *num_pipes)
+{
+ struct dec_core_ctx *dec_core_ctx_local;
+ unsigned int ret;
+	unsigned int ptd_align;
+
+ /* Create the core. */
+ dec_core_ctx_local = kzalloc(sizeof(*dec_core_ctx_local), GFP_KERNEL);
+ if (!dec_core_ctx_local)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ dec_core_ctx_local->dec_ctx = (struct dec_ctx *)dec_context;
+
+ /* Initialise the hwctrl block here */
+ ret = hwctrl_initialise(dec_core_ctx_local, dec_context->user_data,
+ dev_cfg, &dec_core_ctx_local->core_props,
+ &dec_core_ctx_local->hw_ctx);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ decoder_set_feature_flags(&dec_core_ctx_local->core_props,
+ &dec_core_ctx_local->core_features,
+ dec_core_ctx_local->pipe_features);
+
+ /* Perform device setup for master core. */
+ if (num_pipes)
+ *num_pipes = dec_core_ctx_local->core_props.num_pixel_pipes;
+
+ /* DEVA PVDEC FW requires PTD to be 64k aligned. */
+ ptd_align = 0x10000;
+
+ /* Create a device MMU context. */
+ ret = mmu_device_create(dec_core_ctx_local->core_props.mmu_type,
+ ptd_align, &dec_context->mmu_dev_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ dec_core_ctx_local->enumerated = TRUE;
+
+ dec_context->dec_core_ctx = dec_core_ctx_local;
+
+ return IMG_SUCCESS;
+
+error:
+ if (dec_core_ctx_local) {
+ unsigned int deinit_result = IMG_SUCCESS;
+
+ /* Destroy a device MMU context. */
+ if (dec_context->mmu_dev_handle) {
+ deinit_result =
+ mmu_device_destroy(dec_context->mmu_dev_handle);
+ VDEC_ASSERT(deinit_result == IMG_SUCCESS);
+ if (deinit_result != IMG_SUCCESS)
+ pr_warn("MMU_DeviceDestroy() failed to tidy-up after error");
+ }
+
+ kfree(dec_core_ctx_local);
+ dec_core_ctx_local = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * @Function decoder_initialise
+ */
+int decoder_initialise(void *user_init_data, unsigned int int_heap_id,
+ struct vdecdd_dd_devconfig *dd_device_config,
+ unsigned int *num_pipes,
+ void **dec_ctx_handle)
+{
+ struct dec_ctx *dec_context = (struct dec_ctx *)*dec_ctx_handle;
+ int ret;
+
+ if (!dec_context) {
+ dec_context = kzalloc(sizeof(*dec_context), GFP_KERNEL);
+ VDEC_ASSERT(dec_context);
+ if (!dec_context)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ *dec_ctx_handle = dec_context;
+ }
+
+ /* Determine which standards are supported. */
+	memset(dec_context->sup_stds, 0x0, sizeof(dec_context->sup_stds));
+
+ dec_context->sup_stds[VDEC_STD_H264] = TRUE;
+#ifdef HAS_HEVC
+ dec_context->sup_stds[VDEC_STD_HEVC] = TRUE;
+#endif
+ if (!dec_context->inited) {
+ /* Check and store input parameters. */
+ dec_context->user_data = user_init_data;
+ dec_context->dev_handle =
+ ((struct vdecdd_dddev_context *)user_init_data)->dev_handle;
+
+ /* Initialise the context lists. */
+ lst_init(&dec_context->str_list);
+
+ /* Make sure POOL API is initialised */
+ ret = pool_init();
+ if (ret != IMG_SUCCESS)
+ goto pool_init_error;
+
+ ret = decoder_set_device_config(&dec_context->dev_cfg);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ dec_context->internal_heap_id = int_heap_id;
+
+ /* Enumerate the master core. */
+ ret = decoder_core_enumerate(dec_context, dec_context->dev_cfg,
+ &dec_context->num_pipes);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ if (dd_device_config)
+ *dd_device_config = *dec_context->dev_cfg;
+
+ if (num_pipes)
+ *num_pipes = dec_context->num_pipes;
+
+ dec_context->inited = TRUE;
+ }
+
+ return IMG_SUCCESS;
+
+error:
+ pool_deinit();
+
+pool_init_error:
+ if (dec_context->dev_cfg) {
+ kfree((void *)dec_context->dev_cfg);
+ dec_context->dev_cfg = NULL;
+ }
+
+ kfree(*dec_ctx_handle);
+ *dec_ctx_handle = NULL;
+
+ return ret;
+}
+
+/*
+ * @Function decoder_supported_features
+ */
+int decoder_supported_features(void *dec_ctx, struct vdec_features *features)
+{
+ struct dec_ctx *dec_context = (struct dec_ctx *)dec_ctx;
+ struct dec_core_ctx *dec_core_ctx_local;
+
+ /* Check input parameters. */
+ VDEC_ASSERT(dec_context);
+ VDEC_ASSERT(features);
+ if (!dec_context || !features) {
+ pr_err("Invalid parameters!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Ensure that Decoder component is initialised. */
+ VDEC_ASSERT(dec_context->inited);
+
+ /* Loop over all cores checking for support. */
+ dec_core_ctx_local = dec_context->dec_core_ctx;
+ VDEC_ASSERT(dec_core_ctx_local);
+
+ /*
+ * Determine whether the required core attribute
+ * is present to support requested feature
+ */
+ features->h264 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_H264) ? TRUE : FALSE;
+ features->mpeg2 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_MPEG2) ? TRUE : FALSE;
+ features->mpeg4 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_MPEG4) ? TRUE : FALSE;
+ features->vc1 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_VC1) ? TRUE : FALSE;
+ features->avs |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_AVS) ? TRUE : FALSE;
+ features->real |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_REAL) ? TRUE : FALSE;
+ features->jpeg |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_JPEG) ? TRUE : FALSE;
+ features->vp6 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_VP6) ? TRUE : FALSE;
+ features->vp8 |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_VP8) ? TRUE : FALSE;
+ features->hd |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_HD_DECODE) ? TRUE : FALSE;
+ features->rotation |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_ROTATION) ? TRUE : FALSE;
+ features->scaling |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_SCALING) ? TRUE : FALSE;
+ features->hevc |= (dec_core_ctx_local->core_features &
+ VDECDD_COREFEATURE_HEVC) ? TRUE : FALSE;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_get_status
+ */
+int decoder_stream_get_status(void *dec_str_ctx_handle,
+ struct vdecdd_decstr_status *dec_str_st)
+{
+ struct dec_str_ctx *dec_str_ctx;
+ struct dec_decoded_pict *decoded_pict;
+ struct dec_core_ctx *dec_core_ctx;
+ unsigned int item;
+
+ VDEC_ASSERT(dec_str_st);
+ if (!dec_str_st) {
+ pr_err("Invalid decoder streams status pointer!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ dec_str_ctx = decoder_stream_get_context(dec_str_ctx_handle);
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx) {
+ pr_err("Invalid decoder stream context handle!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Obtain the state of each core. */
+ dec_core_ctx = decoder_str_ctx_to_core_ctx(dec_str_ctx);
+ VDEC_ASSERT(dec_core_ctx);
+
+ /*
+ * Obtain the display and release list of first unprocessed
+ * picture in decoded list
+ */
+ dec_str_ctx->dec_str_st.display_pics = 0;
+ dec_str_ctx->dec_str_st.release_pics = 0;
+ decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ while (decoded_pict) {
+ /* if this is the first unprocessed picture */
+ if (!decoded_pict->processed) {
+ unsigned int idx;
+ struct vdecfw_buffer_control *buf_ctrl;
+
+ VDEC_ASSERT(decoded_pict->pict_ref_res);
+ buf_ctrl =
+ (struct vdecfw_buffer_control *)decoded_pict->pict_ref_res->fw_ctrlbuf.cpu_virt;
+ VDEC_ASSERT(buf_ctrl);
+
+ /* Get display pictures */
+ idx = decoded_pict->disp_idx;
+ item = dec_str_ctx->dec_str_st.display_pics;
+ while (idx < buf_ctrl->display_list_length &&
+ item < VDECFW_MAX_NUM_PICTURES) {
+ dec_str_ctx->dec_str_st.next_display_items[item] =
+ buf_ctrl->display_list[idx];
+ dec_str_ctx->dec_str_st.next_display_item_parent[item] =
+ decoded_pict->transaction_id;
+ idx++;
+ item++;
+ }
+ dec_str_ctx->dec_str_st.display_pics = item;
+
+ /* Get release pictures */
+ idx = decoded_pict->rel_idx;
+ item = dec_str_ctx->dec_str_st.release_pics;
+ while (idx < buf_ctrl->release_list_length &&
+ item < VDECFW_MAX_NUM_PICTURES) {
+ dec_str_ctx->dec_str_st.next_release_items[item] =
+ buf_ctrl->release_list[idx];
+ dec_str_ctx->dec_str_st.next_release_item_parent[item] =
+ decoded_pict->transaction_id;
+ idx++;
+ item++;
+ }
+ dec_str_ctx->dec_str_st.release_pics = item;
+ break;
+ }
+
+ if (decoded_pict != dq_last(&dec_str_ctx->str_decd_pict_list))
+ decoded_pict = dq_next(decoded_pict);
+ else
+ decoded_pict = NULL;
+ }
+
+ /* Get list of held decoded pictures */
+ item = 0;
+ decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ while (decoded_pict) {
+ dec_str_ctx->dec_str_st.decoded_picts[item] =
+ decoded_pict->transaction_id;
+ item++;
+
+ if (decoded_pict != dq_last(&dec_str_ctx->str_decd_pict_list))
+ decoded_pict = dq_next(decoded_pict);
+ else
+ decoded_pict = NULL;
+ }
+
+ VDEC_ASSERT(item == dec_str_ctx->dec_str_st.num_pict_decoded);
+ *dec_str_st = dec_str_ctx->dec_str_st;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_deinitialise
+ */
+int decoder_deinitialise(void *dec_ctx)
+{
+ struct dec_ctx *dec_context = (struct dec_ctx *)dec_ctx;
+ int ret;
+	/* Used to check that all core decode queues are empty */
+	struct dec_decpict *dec_pict;
+
+ if (dec_context && dec_context->inited) {
+ struct dec_core_ctx *dec_core_ctx_local =
+ dec_context->dec_core_ctx;
+
+ if (!dec_core_ctx_local) {
+ pr_warn("%s %d NULL Decoder context passed", __func__, __LINE__);
+ VDEC_ASSERT(0);
+ return -EFAULT;
+ }
+
+ /* Stream list should be empty. */
+ if (!lst_empty(&dec_context->str_list))
+ pr_warn("%s %d stream list should be empty", __func__, __LINE__);
+
+ /*
+ * All cores should now be idle since there are no
+ * connections/streams.
+ */
+ ret = hwctrl_peekheadpiclist(dec_core_ctx_local->hw_ctx, &dec_pict);
+ VDEC_ASSERT(ret != IMG_SUCCESS);
+
+ /* Destroy a device MMU context. */
+ ret = mmu_device_destroy(dec_context->mmu_dev_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Remove and free core context structure */
+ dec_core_ctx_local = dec_context->dec_core_ctx;
+ VDEC_ASSERT(dec_core_ctx_local);
+
+ hwctrl_deinitialise(dec_core_ctx_local->hw_ctx);
+
+ kfree(dec_core_ctx_local);
+ dec_core_ctx_local = NULL;
+
+ VDEC_ASSERT(dec_context->dev_cfg);
+ if (dec_context->dev_cfg)
+ kfree((void *)dec_context->dev_cfg);
+
+ dec_context->user_data = NULL;
+
+ pool_deinit();
+
+ dec_context->inited = FALSE;
+
+ kfree(dec_context);
+ } else {
+ pr_err("Decoder has not been initialised so cannot be de-initialised");
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ pr_debug("Decoder deinitialise successfully\n");
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_picture_destroy
+ * @Description
+ * Free the picture container and optionally release image buffer back to
+ * client.
+ * Default is to decrement the reference count held by this picture.
+ */
+static int decoder_picture_destroy(struct dec_str_ctx *dec_str_ctx,
+ unsigned int pict_id,
+ unsigned char release_image)
+{
+ struct vdecdd_picture *picture;
+ int ret;
+
+ VDEC_ASSERT(dec_str_ctx);
+
+ ret = idgen_gethandle(dec_str_ctx->pict_idgen, pict_id, (void **)&picture);
+ if (ret == IMG_SUCCESS) {
+ VDEC_ASSERT(picture);
+ ret = idgen_freeid(dec_str_ctx->pict_idgen, pict_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ if (picture->dec_pict_info) {
+ /* Destroy the picture */
+ kfree(picture->dec_pict_info);
+ picture->dec_pict_info = NULL;
+ }
+
+ /* Return unused picture and internal resources */
+ if (picture->disp_pict_buf.pict_buf) {
+ if (release_image)
+ resource_item_release
+ (&picture->disp_pict_buf.pict_buf->ddbuf_info.ref_count);
+ else
+ resource_item_return
+ (&picture->disp_pict_buf.pict_buf->ddbuf_info.ref_count);
+
+ memset(&picture->disp_pict_buf, 0, sizeof(picture->disp_pict_buf));
+ }
+
+ if (picture->pict_res_int) {
+ resource_item_return(&picture->pict_res_int->ref_cnt);
+ picture->pict_res_int = NULL;
+ }
+
+ kfree(picture);
+ picture = NULL;
+ } else {
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ return ret;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_decoded_picture_destroy
+ */
+static int
+decoder_decoded_picture_destroy(struct dec_str_ctx *dec_str_ctx,
+ struct dec_decoded_pict *decoded_pict,
+ unsigned char release_image)
+{
+ int ret;
+
+ VDEC_ASSERT(dec_str_ctx);
+ VDEC_ASSERT(decoded_pict);
+
+ if (decoded_pict->pict) {
+ VDEC_ASSERT(decoded_pict->pict->pict_id ==
+ GET_STREAM_PICTURE_ID(decoded_pict->transaction_id));
+
+ ret = decoder_picture_destroy(dec_str_ctx, decoded_pict->pict->pict_id,
+ release_image);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decoded_pict->pict = NULL;
+ }
+
+ dq_remove(decoded_pict);
+ dec_str_ctx->dec_str_st.num_pict_decoded--;
+
+ resource_item_return(&decoded_pict->pict_ref_res->ref_cnt);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] COMPLETE",
+ GET_STREAM_ID(decoded_pict->transaction_id),
+ decoded_pict->transaction_id);
+#endif
+
+ kfree(decoded_pict->first_fld_fwmsg);
+ decoded_pict->first_fld_fwmsg = NULL;
+
+ kfree(decoded_pict->second_fld_fwmsg);
+ decoded_pict->second_fld_fwmsg = NULL;
+
+ kfree(decoded_pict);
+ decoded_pict = NULL;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_decode_resource_destroy
+ */
+static int decoder_stream_decode_resource_destroy(void *item, void *free_cb_param)
+{
+ struct dec_pictdec_res *pict_dec_res = item;
+ struct dec_str_ctx *dec_str_ctx_local =
+ (struct dec_str_ctx *)free_cb_param;
+ int ret;
+
+ VDEC_ASSERT(pict_dec_res);
+ VDEC_ASSERT(resource_item_isavailable(&pict_dec_res->ref_cnt));
+
+ /* Free memory (device-based) to store fw contexts for stream. */
+ ret = mmu_free_mem(dec_str_ctx_local->mmu_str_handle, &pict_dec_res->fw_ctx_buf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ if (pict_dec_res->h264_sgm_buf.hndl_memory) {
+ /* Free memory (device-based) to store SGM. */
+ ret = mmu_free_mem(dec_str_ctx_local->mmu_str_handle, &pict_dec_res->h264_sgm_buf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ kfree(pict_dec_res);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_release_buffers
+ */
+int decoder_stream_release_buffers(void *dec_str_ctx_handle)
+{
+ struct dec_str_ctx *dec_str_ctx;
+ struct dec_decoded_pict *decoded_pict;
+ int ret;
+
+ dec_str_ctx = decoder_stream_get_context(dec_str_ctx_handle);
+
+ /* Decoding queue should be empty since we are stopped */
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx) {
+ pr_err("Invalid decoder stream context handle!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ VDEC_ASSERT(lst_empty(&dec_str_ctx->pend_strunit_list));
+
+ /* Destroy all pictures in the decoded list */
+ decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ while (decoded_pict) {
+ ret = decoder_decoded_picture_destroy(dec_str_ctx, decoded_pict, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ }
+
+	/* Only needed if the output buffers were used for reference. */
+ if (dec_str_ctx->last_be_pict_dec_res) {
+ /*
+ * Clear the firmware context so that reference pictures
+ * are no longer referred to.
+ */
+ memset(dec_str_ctx->last_be_pict_dec_res->fw_ctx_buf.cpu_virt, 0,
+ dec_str_ctx->last_be_pict_dec_res->fw_ctx_buf.buf_size);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_reference_resource_destroy
+ */
+static int decoder_stream_reference_resource_destroy(void *item, void *free_cb_param)
+{
+ struct dec_pictref_res *pict_ref_res = item;
+ struct dec_str_ctx *dec_str_ctx_local =
+ (struct dec_str_ctx *)free_cb_param;
+ int ret;
+
+ VDEC_ASSERT(pict_ref_res);
+ VDEC_ASSERT(resource_item_isavailable(&pict_ref_res->ref_cnt));
+
+ /* Free memory (device-based) to store fw contexts for stream */
+ ret = mmu_free_mem(dec_str_ctx_local->mmu_str_handle, &pict_ref_res->fw_ctrlbuf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ kfree(pict_ref_res);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_destroy
+ */
+int decoder_stream_destroy(void *dec_str_context, unsigned char abort)
+{
+ struct dec_str_ctx *dec_str_ctx_local;
+ struct dec_str_unit *dec_str_unit_local;
+ struct dec_decoded_pict *decoded_pict_local;
+ unsigned int i;
+ int ret;
+ unsigned int pict_id;
+ void **res_handle_local;
+
+	/* Required for freeing the segments of each decode picture */
+ struct dec_decpict_seg *dec_pict_seg_local;
+ struct dec_ctx *dec_context;
+ struct dec_core_ctx *dec_core_ctx_local;
+
+ /* Get the Decoder stream context. */
+ dec_str_ctx_local = decoder_stream_get_context(dec_str_context);
+
+ VDEC_ASSERT(dec_str_ctx_local);
+ if (!dec_str_ctx_local) {
+ pr_err("Invalid decoder stream context handle!");
+ return FALSE;
+ }
+ VDEC_ASSERT(dec_str_ctx_local->decctx);
+
+ /* Decrement the stream count */
+ dec_str_ctx_local->decctx->str_cnt--;
+
+ /*
+ * Ensure that there are no pictures for this stream outstanding
+ * on any decoder cores.
+	 * This assert should not be removed: it is important to catch this
+	 * case if it ever happens. In practice it is seen many times with
+	 * application timeouts.
+ */
+ if (!abort)
+ VDEC_ASSERT(lst_empty(&dec_str_ctx_local->pend_strunit_list));
+
+ /*
+ * At this point all resources for the stream are guaranteed to
+ * not be used and no further hardware interrupts will be received.
+ */
+
+ /* Destroy all stream units submitted for processing. */
+ dec_str_unit_local =
+ lst_removehead(&dec_str_ctx_local->pend_strunit_list);
+ while (dec_str_unit_local) {
+ /* If the unit was submitted for decoding (picture)... */
+ if (dec_str_unit_local->dec_pict) {
+ /*
+ * Explicitly remove picture from core decode queue
+ * and destroy.
+ */
+ struct dec_core_ctx *dec_core_ctx_local =
+ decoder_str_ctx_to_core_ctx(dec_str_ctx_local);
+ VDEC_ASSERT(dec_core_ctx_local);
+
+ res_handle_local = &dec_str_ctx_local->resources;
+
+ if (!dec_core_ctx_local) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ hwctrl_removefrom_piclist(dec_core_ctx_local->hw_ctx,
+ dec_str_unit_local->dec_pict);
+
+ /* Free decoder picture */
+ kfree(dec_str_unit_local->dec_pict->first_fld_fwmsg);
+ dec_str_unit_local->dec_pict->first_fld_fwmsg = NULL;
+
+ kfree(dec_str_unit_local->dec_pict->second_fld_fwmsg);
+ dec_str_unit_local->dec_pict->second_fld_fwmsg = NULL;
+
+ dec_res_picture_detach(res_handle_local, dec_str_unit_local->dec_pict);
+
+ /* Free all the segments of the picture */
+ dec_pict_seg_local =
+ lst_removehead(&dec_str_unit_local->dec_pict->dec_pict_seg_list);
+ while (dec_pict_seg_local) {
+ if (dec_pict_seg_local->internal_seg) {
+ VDEC_ASSERT(dec_pict_seg_local->bstr_seg);
+ kfree(dec_pict_seg_local->bstr_seg);
+ dec_pict_seg_local->bstr_seg = NULL;
+ }
+
+ kfree(dec_pict_seg_local);
+ dec_pict_seg_local = NULL;
+
+ dec_pict_seg_local =
+ lst_removehead
+ (&dec_str_unit_local->dec_pict->dec_pict_seg_list);
+ }
+
+ VDEC_ASSERT(dec_str_unit_local->dec_pict->dec_str_ctx == dec_str_ctx_local);
+
+ dec_str_ctx_local->dec_str_st.num_pict_decoding--;
+ pict_id =
+ GET_STREAM_PICTURE_ID(dec_str_unit_local->dec_pict->transaction_id);
+
+ ret = decoder_picture_destroy(dec_str_ctx_local, pict_id, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ kfree(dec_str_unit_local->dec_pict);
+ dec_str_unit_local->dec_pict = NULL;
+ }
+
+ /* Free the picture container */
+ kfree(dec_str_unit_local);
+ dec_str_unit_local = NULL;
+
+ dec_str_unit_local = lst_removehead(&dec_str_ctx_local->pend_strunit_list);
+ }
+
+ /* Destroy all pictures in the decoded list */
+ decoded_pict_local = dq_first(&dec_str_ctx_local->str_decd_pict_list);
+ while (decoded_pict_local) {
+ ret = decoder_decoded_picture_destroy(dec_str_ctx_local,
+ decoded_pict_local,
+ TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decoded_pict_local = dq_first(&dec_str_ctx_local->str_decd_pict_list);
+ }
+
+ /* Ensure all picture queues are empty */
+ VDEC_ASSERT(lst_empty(&dec_str_ctx_local->pend_strunit_list));
+ VDEC_ASSERT(dq_empty(&dec_str_ctx_local->str_decd_pict_list));
+
+ /* Free memory to store stream context buffer. */
+ ret = mmu_free_mem(dec_str_ctx_local->mmu_str_handle,
+ &dec_str_ctx_local->pvdec_fw_ctx_buf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Release any fw contexts held by stream. */
+ if (dec_str_ctx_local->prev_fe_pict_dec_res)
+ resource_item_return(&dec_str_ctx_local->prev_fe_pict_dec_res->ref_cnt);
+
+ if (dec_str_ctx_local->cur_fe_pict_dec_res)
+ resource_item_return(&dec_str_ctx_local->cur_fe_pict_dec_res->ref_cnt);
+
+ if (dec_str_ctx_local->last_be_pict_dec_res)
+ resource_item_return(&dec_str_ctx_local->last_be_pict_dec_res->ref_cnt);
+
+ /*
+ * Remove the device resources used for decoding and the two
+ * added to hold the last on front and back-end for stream.
+ */
+ for (i = 0; i < dec_str_ctx_local->num_dec_res + 2; i++) {
+ ret = resource_list_empty(&dec_str_ctx_local->dec_res_lst, FALSE,
+ decoder_stream_decode_resource_destroy,
+ dec_str_ctx_local);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ VDEC_ASSERT(lst_empty(&dec_str_ctx_local->dec_res_lst));
+
+ /* Remove all stream decode resources. */
+ ret = resource_list_empty(&dec_str_ctx_local->ref_res_lst, FALSE,
+ decoder_stream_reference_resource_destroy,
+ dec_str_ctx_local);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ VDEC_ASSERT(lst_empty(&dec_str_ctx_local->ref_res_lst));
+
+ idgen_destroycontext(dec_str_ctx_local->pict_idgen);
+
+ dec_context = dec_str_ctx_local->decctx;
+ dec_core_ctx_local = decoder_str_ctx_to_core_ctx(dec_str_ctx_local);
+
+ VDEC_ASSERT(dec_context);
+ VDEC_ASSERT(dec_core_ctx_local);
+
+ res_handle_local = &dec_str_ctx_local->resources;
+
+ if (*res_handle_local) {
+ ret = dec_res_destroy(dec_str_ctx_local->mmu_str_handle, *res_handle_local);
+ if (ret != IMG_SUCCESS)
+ pr_warn("resourceS_Destroy() failed to tidy-up after error");
+
+ *res_handle_local = NULL;
+ }
+
+ lst_remove(&dec_str_ctx_local->decctx->str_list, dec_str_ctx_local);
+
+ kfree(dec_str_ctx_local);
+ dec_str_ctx_local = NULL;
+
+ pr_debug("%s successfully", __func__);
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_init_avail_slots
+ */
+static int decoder_init_avail_slots(struct dec_str_ctx *dec_str_context)
+{
+ VDEC_ASSERT(dec_str_context);
+
+ switch (dec_str_context->config.vid_std) {
+ case VDEC_STD_H264:
+ /*
+ * only first pipe can be master when decoding H264 in
+ * multipipe mode (FW restriction)
+ */
+ dec_str_context->avail_slots =
+ dec_str_context->decctx->dev_cfg->num_slots_per_pipe *
+ dec_str_context->decctx->num_pipes;
+ break;
+
+ default:
+ /* all pipes by default */
+ dec_str_context->avail_slots =
+ dec_str_context->decctx->dev_cfg->num_slots_per_pipe;
+ break;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_decode_resource_create
+ */
+static int decoder_stream_decode_resource_create(struct dec_str_ctx *dec_str_context)
+{
+ struct dec_pictdec_res *pict_dec_res;
+ int ret;
+ unsigned int mem_heap_id;
+ enum sys_emem_attrib mem_attribs;
+
+ unsigned char fw_ctx_buf = FALSE;
+
+ /* Validate input arguments */
+ if (!dec_str_context || !dec_str_context->decctx ||
+ !dec_str_context->decctx->dev_cfg) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ mem_heap_id = dec_str_context->decctx->internal_heap_id;
+ mem_attribs = (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE);
+ mem_attribs |= (enum sys_emem_attrib)SYS_MEMATTRIB_INTERNAL;
+
+ /* Allocate the firmware context buffer info structure. */
+ pict_dec_res = kzalloc(sizeof(*pict_dec_res), GFP_KERNEL);
+ VDEC_ASSERT(pict_dec_res);
+ if (!pict_dec_res)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * Allocate the firmware context buffer to contain
+ * data required for subsequent picture.
+ */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+
+ ret = mmu_stream_alloc(dec_str_context->mmu_str_handle,
+ MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+ (enum sys_emem_attrib)(mem_attribs | SYS_MEMATTRIB_CPU_READ |
+ SYS_MEMATTRIB_CPU_WRITE),
+ sizeof(union dec_fw_contexts),
+ DEV_MMU_PAGE_ALIGNMENT,
+ &pict_dec_res->fw_ctx_buf);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+
+ fw_ctx_buf = TRUE;
+
+ /*
+ * Clear the context data in preparation for first time
+ * use by the firmware.
+ */
+ memset(pict_dec_res->fw_ctx_buf.cpu_virt, 0, pict_dec_res->fw_ctx_buf.buf_size);
+
+ switch (dec_str_context->config.vid_std) {
+ case VDEC_STD_H264:
+ /* Allocate the SGM buffer */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+		ret = mmu_stream_alloc(dec_str_context->mmu_str_handle,
+				       MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+				       (enum sys_emem_attrib)(mem_attribs |
+							      SYS_MEMATTRIB_CPU_WRITE),
+				       H264_SGM_BUFFER_BYTES_PER_MB * H264_SGM_MAX_MBS,
+				       DEV_MMU_PAGE_ALIGNMENT,
+				       &pict_dec_res->h264_sgm_buf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+
+ /* Clear the SGM data. */
+ memset(pict_dec_res->h264_sgm_buf.cpu_virt, 0, pict_dec_res->h264_sgm_buf.buf_size);
+ break;
+
+ default:
+ break;
+ }
+
+ pict_dec_res->ref_cnt = 1;
+
+ ret = resource_list_add_img(&dec_str_context->dec_res_lst, pict_dec_res, 0,
+ &pict_dec_res->ref_cnt);
+
+ if (ret != IMG_SUCCESS) {
+ pr_warn("[USERSID=0x%08X] Failed to add resource",
+ dec_str_context->config.user_str_id);
+ }
+
+ return IMG_SUCCESS;
+
+err_out_of_memory:
+ if (pict_dec_res) {
+ if (fw_ctx_buf)
+ mmu_free_mem(dec_str_context->mmu_str_handle, &pict_dec_res->fw_ctx_buf);
+
+ kfree(pict_dec_res);
+ pict_dec_res = NULL;
+ }
+
+ pr_err("[USERSID=0x%08X] Failed to allocate device memory for stream decode resources",
+ dec_str_context->config.user_str_id);
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+}
+
+/*
+ * @Function decoder_stream_create
+ */
+int decoder_stream_create(void *dec_ctx_arg,
+ struct vdec_str_configdata str_cfg,
+ unsigned int km_str_id, void **mmu_str_handle,
+ void *vxd_dec_ctx, void *str_user_int_data,
+ void **dec_str_ctx_arg, void *decoder_cb,
+ void *query_cb)
+{
+ struct dec_ctx *dec_context = (struct dec_ctx *)dec_ctx_arg;
+ struct dec_str_ctx *dec_str_ctx = NULL;
+ unsigned int i;
+ int ret;
+ unsigned int mem_heap_id;
+ enum sys_emem_attrib mem_attribs;
+ struct dec_core_ctx *dec_core_ctx_local;
+
+ /* Check input parameters. */
+ VDEC_ASSERT(dec_ctx_arg);
+ if (!dec_ctx_arg) {
+ pr_err("Invalid parameters!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (dec_context->str_cnt >= MAX_CONCURRENT_STREAMS) {
+ pr_err("Device has too many concurrent streams. Number of Concurrent streams allowed: %d.",
+ MAX_CONCURRENT_STREAMS);
+ return IMG_ERROR_DEVICE_UNAVAILABLE;
+ }
+
+ mem_heap_id = dec_context->internal_heap_id;
+ mem_attribs = (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE);
+ mem_attribs |= (enum sys_emem_attrib)SYS_MEMATTRIB_INTERNAL;
+
+ /* Allocate Decoder Stream Context */
+ dec_str_ctx = kzalloc(sizeof(*dec_str_ctx), GFP_KERNEL);
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Increment the stream counter */
+ dec_context->str_cnt++;
+
+	/*
+	 * The context structure was zero-initialised by kzalloc. Any
+	 * non-zero default values should be set at this point.
+	 */
+ dec_str_ctx->config = str_cfg;
+ dec_str_ctx->vxd_dec_ctx = vxd_dec_ctx;
+ dec_str_ctx->usr_int_data = str_user_int_data;
+ dec_str_ctx->decctx = dec_context;
+
+ decoder_init_avail_slots(dec_str_ctx);
+
+ dec_str_ctx->next_dec_pict_id = 1;
+ dec_str_ctx->next_pict_id_expected = 1;
+
+ dec_str_ctx->km_str_id = km_str_id;
+ VDEC_ASSERT(dec_str_ctx->km_str_id > 0);
+
+ lst_init(&dec_str_ctx->pend_strunit_list);
+ dq_init(&dec_str_ctx->str_decd_pict_list);
+ lst_init(&dec_str_ctx->ref_res_lst);
+ lst_init(&dec_str_ctx->dec_res_lst);
+
+ ret = idgen_createcontext(DECODER_MAX_PICT_ID + 1,
+ DECODER_MAX_CONCURRENT_PICT,
+ TRUE,
+ &dec_str_ctx->pict_idgen);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Create an MMU context for this stream. */
+ ret = mmu_stream_create(dec_context->mmu_dev_handle,
+ dec_str_ctx->km_str_id,
+ dec_str_ctx->vxd_dec_ctx,
+ &dec_str_ctx->mmu_str_handle);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ dec_core_ctx_local = dec_context->dec_core_ctx;
+
+ VDEC_ASSERT(dec_core_ctx_local);
+
+ /* Create core resources */
+ ret = dec_res_create(dec_str_ctx->mmu_str_handle,
+ &dec_core_ctx_local->core_props,
+ dec_context->dev_cfg->num_slots_per_pipe *
+ dec_context->num_pipes,
+ dec_context->internal_heap_id,
+ &dec_str_ctx->resources);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Allocate the PVDEC firmware context buffer */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(dec_str_ctx->mmu_str_handle, MMU_HEAP_STREAM_BUFFERS,
+ mem_heap_id,
+ (enum sys_emem_attrib)(mem_attribs | SYS_MEMATTRIB_CPU_WRITE),
+ CONTEXT_BUFF_SIZE,
+ DEV_MMU_PAGE_ALIGNMENT,
+ &dec_str_ctx->pvdec_fw_ctx_buf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /*
+ * Clear the context data in preparation for
+ * first time use by the firmware.
+ */
+ memset(dec_str_ctx->pvdec_fw_ctx_buf.cpu_virt, 0, dec_str_ctx->pvdec_fw_ctx_buf.buf_size);
+
+ /*
+ * Create enough device resources to hold last context on
+ * front and back-end for stream.
+ */
+ dec_str_ctx->num_dec_res =
+ dec_str_ctx->decctx->dev_cfg->num_slots_per_pipe *
+ dec_str_ctx->decctx->num_pipes;
+ for (i = 0; i < dec_str_ctx->num_dec_res + 2; i++) {
+ ret = decoder_stream_decode_resource_create(dec_str_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+ }
+
+ dec_str_ctx->str_processed_cb = (strunit_processed_cb)decoder_cb;
+
+ dec_str_ctx->core_query_cb = (core_gen_cb)query_cb;
+
+ lst_add(&dec_context->str_list, dec_str_ctx);
+
+ *dec_str_ctx_arg = (void *)dec_str_ctx;
+ *mmu_str_handle = dec_str_ctx->mmu_str_handle;
+
+ return IMG_SUCCESS;
+
+ /* Roll back in case of errors. */
+error:
+ decoder_stream_destroy((void *)dec_str_ctx, FALSE);
+
+ return ret;
+}
+
+/*
+ * @Function decoder_get_decoded_pict
+ */
+static struct dec_decoded_pict *decoder_get_decoded_pict(unsigned int transaction_id,
+ struct dq_linkage_t *dq_list)
+{
+ struct dec_decoded_pict *decoded_pict;
+ void *item = NULL;
+
+ VDEC_ASSERT(dq_list);
+
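+	/*
+	 * Walk the queue manually: iteration does not terminate with NULL,
+	 * so the loop compares against dq_last() to detect the end of the
+	 * list.
+	 */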
+ decoded_pict = dq_first(dq_list);
+ while (decoded_pict) {
+ if (decoded_pict->transaction_id == transaction_id) {
+ item = decoded_pict;
+ break;
+ }
+
+ if (decoded_pict != dq_last(dq_list))
+ decoded_pict = dq_next(decoded_pict);
+ else
+ decoded_pict = NULL;
+ }
+
+ return item;
+}
+
+/*
+ * @Function decoder_get_decoded_pict_of_stream
+ */
+static struct dec_decoded_pict *decoder_get_decoded_pict_of_stream(unsigned int pict_id,
+ struct dq_linkage_t *dq_list)
+{
+ struct dec_decoded_pict *decoded_pict;
+ void *item = NULL;
+
+ VDEC_ASSERT(dq_list);
+
+ decoded_pict = dq_first(dq_list);
+ while (decoded_pict) {
+ if (GET_STREAM_PICTURE_ID(decoded_pict->transaction_id) == pict_id) {
+ item = decoded_pict;
+ break;
+ }
+
+ if (decoded_pict != dq_last(dq_list))
+ decoded_pict = dq_next(decoded_pict);
+ else
+ decoded_pict = NULL;
+ }
+ return item;
+}
+
+/*
+ * @Function decoder_get_next_decpict_contiguous
+ */
+static struct
+dec_decoded_pict *decoder_get_next_decpict_contiguous(struct dec_decoded_pict *decoded_pict,
+ unsigned int next_dec_pict_id,
+ struct dq_linkage_t *str_decoded_pict_list)
+{
+ struct dec_decoded_pict *next_dec_pict = NULL;
+ struct dec_decoded_pict *result_dec_pict = NULL;
+
+ VDEC_ASSERT(str_decoded_pict_list);
+ if (!str_decoded_pict_list) {
+ pr_err("Invalid parameter");
+ return NULL;
+ }
+
+ if (decoded_pict) {
+ if (decoded_pict != dq_last(str_decoded_pict_list)) {
+ next_dec_pict = dq_next(decoded_pict);
+ if (!next_dec_pict) {
+ VDEC_ASSERT(0);
+ return NULL;
+ }
+
+ if (next_dec_pict->pict) {
+ /*
+ * If we have no holes in the decoded list
+ * (i.e. next decoded picture is next in
+ * bitstream decode order).
+ */
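+ /*
+ * HAS_X_REACHED_Y presumably compares modulo
+ * 2^FWIF_NUMBITS_STREAM_PICTURE_ID so that picture ID
+ * wrap-around is handled.
+ */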
+ if (HAS_X_REACHED_Y(next_dec_pict_id, next_dec_pict->pict->pict_id,
+ 1 << FWIF_NUMBITS_STREAM_PICTURE_ID,
+ unsigned int)) {
+ result_dec_pict = next_dec_pict;
+ }
+ }
+ }
+ }
+
+ return result_dec_pict;
+}
+
+/*
+ * @Function decoder_next_picture
+ * @Description
+ * Returns the next unprocessed picture or NULL if the next picture is not
+ * next in bitstream decode order or there are no more decoded pictures in
+ * the list.
+ *
+ * @Input cur_decoded_pict : Pointer to current decoded picture.
+ *
+ * @Input next_dec_pict_id : Picture ID of next picture in decode order.
+ *
+ * @Input str_decoded_pict_list : Pointer to decoded picture list.
+ *
+ * @Return struct dec_decoded_pict * : Pointer to next decoded picture to
+ * process.
+ */
+static struct dec_decoded_pict *decoder_next_picture(struct dec_decoded_pict *cur_decoded_pict,
+ unsigned int next_dec_pict_id,
+ struct dq_linkage_t *str_decoded_pict_list)
+{
+ struct dec_decoded_pict *ret = NULL;
+
+ VDEC_ASSERT(str_decoded_pict_list);
+ if (!str_decoded_pict_list)
+ return NULL;
+
+ if (!cur_decoded_pict)
+ cur_decoded_pict = dq_first(str_decoded_pict_list);
+
+ if (cur_decoded_pict && !cur_decoded_pict->process_failed) {
+ /* Search for picture ID greater than picture in list */
+ do {
+ if (!cur_decoded_pict->processed) {
+ /*
+ * Return the current one because it has not
+ * been processed
+ */
+ ret = cur_decoded_pict;
+ break;
+ }
+ /*
+ * Obtain a pointer to the next picture in bitstream
+ * decode order.
+ */
+ cur_decoded_pict = decoder_get_next_decpict_contiguous
+ (cur_decoded_pict,
+ next_dec_pict_id,
+ str_decoded_pict_list);
+ } while (cur_decoded_pict &&
+ !cur_decoded_pict->process_failed);
+ }
+ return ret;
+}
+
+/*
+ * @Function decoder_picture_display
+ */
+static int decoder_picture_display(struct dec_str_ctx *dec_str_ctx,
+ unsigned int pict_id, unsigned char last)
+{
+ struct vdecdd_picture *picture;
+ int ret;
+ static unsigned int display_num;
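+ /* Running count of displayed pictures; used only by the debug trace below. */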
+
+ VDEC_ASSERT(dec_str_ctx);
+
+ ret = idgen_gethandle(dec_str_ctx->pict_idgen, pict_id, (void **)&picture);
+ if (ret == IMG_SUCCESS) {
+ struct vdecdd_ddbuf_mapinfo *pict_buf;
+
+ /* validate pointers */
+ if (!picture || !picture->dec_pict_info) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ pict_buf = picture->disp_pict_buf.pict_buf;
+ VDEC_ASSERT(pict_buf);
+
+ /*
+ * Indicate whether there are more pictures
+ * coming for display.
+ */
+ picture->dec_pict_info->last_in_seq = last;
+
+ /* Set decode order id */
+ picture->dec_pict_info->decode_id = pict_id;
+
+ /* Return the picture to the client for display */
+ dec_str_ctx->dec_str_st.total_pict_displayed++;
+ resource_item_use(&pict_buf->ddbuf_info.ref_count);
+ display_num++;
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] DISPLAY(%d): PIC_ID[%d]",
+ dec_str_ctx->config.user_str_id, display_num, pict_id);
+#endif
+
+ VDEC_ASSERT(dec_str_ctx->decctx);
+ ret = dec_str_ctx->str_processed_cb(dec_str_ctx->usr_int_data,
+ VXD_CB_PICT_DISPLAY,
+ picture);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /*
+ * All handles will be freed after actually
+ * displaying the picture.
+ * Reset them to NULL here to avoid any confusion.
+ */
+ memset(&picture->dec_pict_sup_data, 0, sizeof(picture->dec_pict_sup_data));
+ } else {
+ pr_warn("[USERSID=0x%08X] ERROR: DISPLAY PICTURE HAS AN EXPIRED ID",
+ dec_str_ctx->config.user_str_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ return IMG_SUCCESS;
+}
+
+#ifdef ERROR_CONCEALMENT
+/*
+ * @Function decoder_get_pict_processing_info
+ */
+static unsigned char decoder_get_pict_processing_info(struct dec_core_ctx *dec_corectx,
+ struct dec_str_ctx *dec_strctx,
+ struct bspp_pict_hdr_info *pict_hdr_info,
+ struct dec_decoded_pict *decoded_pict,
+ struct dec_decpict *dec_pict,
+ unsigned int *pict_last_mb)
+{
+ int ret = IMG_SUCCESS;
+ unsigned char pipe_minus1;
+ struct hwctrl_state last_state;
+ unsigned int width_in_mb;
+ unsigned int height_in_mb;
+ unsigned int i;
+
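+ /*
+ * If the picture hit a DWR or MMU fault, reconstruct how far the
+ * hardware progressed from the cached core state; otherwise the
+ * firmware message already carries the required attributes.
+ */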
+ memset(&last_state, 0, sizeof(last_state));
+
+ VDEC_ASSERT(pict_hdr_info);
+ width_in_mb = (pict_hdr_info->coded_frame_size.width +
+ (VDEC_MB_DIMENSION - 1)) / VDEC_MB_DIMENSION;
+ height_in_mb = (pict_hdr_info->coded_frame_size.height +
+ (VDEC_MB_DIMENSION - 1)) / VDEC_MB_DIMENSION;
+
+ VDEC_ASSERT(pict_last_mb);
+ *pict_last_mb = width_in_mb * height_in_mb;
+ VDEC_ASSERT(decoded_pict);
+
+ if (decoded_pict->first_fld_fwmsg->pict_attrs.pict_attrs.dwrfired ||
+ decoded_pict->second_fld_fwmsg->pict_attrs.pict_attrs.dwrfired ||
+ decoded_pict->first_fld_fwmsg->pict_attrs.pict_attrs.mmufault ||
+ decoded_pict->second_fld_fwmsg->pict_attrs.pict_attrs.mmufault) {
+ struct dec_pict_attrs *pict_attrs = &decoded_pict->first_fld_fwmsg->pict_attrs;
+ unsigned char be_found = FALSE;
+ unsigned int mbs_dropped = 0;
+ unsigned int mbs_recovered = 0;
+ unsigned int no_be_wdt = 0;
+ unsigned int max_y = 0;
+ unsigned int row_drop = 0;
+
+ VDEC_ASSERT(dec_corectx);
+ /*
+ * Obtain the last available core status,
+ * cached before the clocks were switched off.
+ */
+ ret = hwctrl_getcore_cached_status(dec_corectx->hw_ctx, &last_state);
+ if (ret != IMG_SUCCESS)
+ return FALSE;
+
+ /* Try to determine pipe where the last picture was decoded on (BE) */
+ for (pipe_minus1 = 0; pipe_minus1 < VDEC_MAX_PIXEL_PIPES; pipe_minus1++) {
+ for (i = VDECFW_CHECKPOINT_BE_END; i >= VDECFW_CHECKPOINT_BE_START; i--) {
+ struct vxd_pipestate *pipe_state =
+ &last_state.core_state.fw_state.pipe_state[pipe_minus1];
+
+ if (!pipe_state->is_pipe_present)
+ continue;
+
+ if (pipe_state->acheck_point[i] == decoded_pict->transaction_id) {
+ row_drop += width_in_mb - pipe_state->be_mb.x;
+ if (pipe_state->be_mb.y > max_y)
+ max_y = pipe_state->be_mb.y;
+
+ if (pipe_state->be_mbs_dropped > mbs_dropped)
+ mbs_dropped = pipe_state->be_mbs_dropped;
+
+ if (pipe_state->be_mbs_recovered > mbs_recovered)
+ mbs_recovered = pipe_state->be_mbs_recovered;
+
+ no_be_wdt += pipe_state->be_errored_slices;
+ be_found = TRUE;
+ }
+ }
+ if (be_found)
+ /* No need to check FE as we already have an info from BE */
+ continue;
+
+ /* If not found, we are probably stuck on the FE */
+ for (i = VDECFW_CHECKPOINT_FE_END; i >= VDECFW_CHECKPOINT_FE_START; i--) {
+ struct vxd_pipestate *pipe_state =
+ &last_state.core_state.fw_state.pipe_state[pipe_minus1];
+
+ if (!pipe_state->is_pipe_present)
+ continue;
+
+ if (pipe_state->acheck_point[i] == decoded_pict->transaction_id) {
+ /* Mark all MBs as dropped */
+ pict_attrs->mbs_dropped = *pict_last_mb;
+ pict_attrs->mbs_recovered = 0;
+ return TRUE;
+ }
+ }
+ }
+
+ if (be_found) {
+ /* Calculate last macroblock number processed on BE */
+ unsigned int num_mb_processed = (max_y * width_in_mb) - row_drop;
+
+ /* Sanity check, as HW may signal MbYX position
+ * beyond picture for corrupted streams
+ */
+ if (num_mb_processed > (*pict_last_mb))
+ num_mb_processed = (*pict_last_mb); /* trim */
+
+ if (((*pict_last_mb) - num_mb_processed) > mbs_dropped)
+ mbs_dropped = (*pict_last_mb) - num_mb_processed;
+
+ pict_attrs->mbs_dropped = mbs_dropped;
+ pict_attrs->mbs_recovered = num_mb_processed;
+ pict_attrs->no_be_wdt = no_be_wdt;
+ return TRUE;
+ }
+ return FALSE;
+ }
+ /* Picture was decoded without DWR, so we already have the required info */
+ return TRUE;
+}
+#endif
+
+/*
+ * @Function decoder_picture_release
+ */
+static int decoder_picture_release(struct dec_str_ctx *dec_str_ctx,
+ unsigned int pict_id,
+ unsigned char displayed,
+ unsigned char merged)
+{
+ struct vdecdd_picture *picture;
+ int ret;
+
+ /* validate input arguments */
+ if (!dec_str_ctx) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ ret = idgen_gethandle(dec_str_ctx->pict_idgen, pict_id, (void **)&picture);
+ if (ret == IMG_SUCCESS) {
+ if (!picture || !picture->dec_pict_info) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ /* Set decode order id */
+ picture->dec_pict_info->decode_id = pict_id;
+
+ VDEC_ASSERT(dec_str_ctx->decctx);
+
+ pr_debug("Decoder picture released pict_id = %d\n", pict_id);
+ ret = dec_str_ctx->str_processed_cb(dec_str_ctx->usr_int_data,
+ VXD_CB_PICT_RELEASE,
+ picture);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /*
+ * All handles will be freed after actually displaying
+ * the picture. Reset them to NULL here to avoid any
+ * confusion.
+ */
+ memset(&picture->dec_pict_sup_data, 0, sizeof(picture->dec_pict_sup_data));
+ } else {
+ pr_err("[USERSID=0x%08X] ERROR: RELEASE PICTURE HAS AN EXPIRED ID",
+ dec_str_ctx->config.user_str_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_flush_process_dpb_h264
+ */
+static int
+decoder_stream_flush_process_dpb_h264(struct dec_str_ctx *dec_str_ctx,
+ struct dec_decoded_pict *decoded_pict,
+ unsigned char discard_refs)
+{
+ int ret;
+
+ struct h264fw_context_data *ctx_data =
+ (struct h264fw_context_data *)dec_str_ctx->last_be_pict_dec_res->fw_ctx_buf.cpu_virt;
+ unsigned char found = TRUE;
+ unsigned int i;
+ int min_cnt;
+ int min_cnt_idx;
+ unsigned int num_display_pics = 0;
+ unsigned int num_pics_displayed = 0;
+ struct dec_decoded_pict *display_pict = NULL;
+ unsigned int last_disp_pict_tid;
+ unsigned int pict_id;
+
+ /* Determine how many display pictures reside in the DPB */
+ if (ctx_data->dpb_size > H264FW_MAX_DPB_SIZE || ctx_data->dpb_size <= 0) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] Incorrect DPB size: %d",
+ dec_str_ctx->config.user_str_id, ctx_data->dpb_size);
+#endif
+ ctx_data->dpb_size = H264FW_MAX_DPB_SIZE;
+ }
+ for (i = 0; i < ctx_data->dpb_size; i++) {
+ if (ctx_data->dpb[i].transaction_id &&
+ ctx_data->dpb[i].needed_for_output)
+ num_display_pics++;
+ }
+
+ last_disp_pict_tid = ctx_data->last_displayed_pic_data[0].transaction_id;
+ /* Check for picture stuck outside the dpb */
+ if (last_disp_pict_tid) {
+ VDEC_ASSERT(last_disp_pict_tid != 0xffffffff);
+
+ display_pict = decoder_get_decoded_pict(last_disp_pict_tid,
+ &dec_str_ctx->str_decd_pict_list);
+
+ if (display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info) {
+ if (FLAG_IS_SET(ctx_data->prev_display_flags,
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED)) {
+ if (!FLAG_IS_SET(ctx_data->prev_display_flags,
+ VDECFW_BUFFLAG_DISPLAY_SINGLE_FIELD))
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_PAIR;
+ else
+ display_pict->pict->dec_pict_info->buf_type =
+ FLAG_IS_SET
+ (ctx_data->prev_display_flags,
+ VDECFW_BUFFLAG_DISPLAY_BOTTOM_FIELD) ?
+ IMG_BUFFERTYPE_FIELD_BOTTOM :
+ IMG_BUFFERTYPE_FIELD_TOP;
+ } else {
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_FRAME;
+ }
+ } else {
+ VDEC_ASSERT(display_pict);
+ VDEC_ASSERT(display_pict && display_pict->pict);
+ VDEC_ASSERT(display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info);
+ }
+
+ if (display_pict && !display_pict->displayed) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] DISPLAY",
+ dec_str_ctx->config.user_str_id,
+ last_disp_pict_tid);
+#endif
+ display_pict->displayed = TRUE;
+ ret = decoder_picture_display
+ (dec_str_ctx, GET_STREAM_PICTURE_ID(last_disp_pict_tid),
+ TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ }
+
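+ /*
+ * Drain the DPB in output order: on each pass pick the entry with the
+ * smallest top field order count, display it if still needed for
+ * output and, when discarding references, release and destroy the
+ * associated decoded picture.
+ */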
+ while (found) {
+ min_cnt = 0x7fffffff; /* INT_MAX */
+ min_cnt_idx = -1;
+ found = FALSE;
+
+ /* Loop over the DPB to find the first in order */
+ for (i = 0; i < ctx_data->dpb_size; i++) {
+ if (ctx_data->dpb[i].transaction_id &&
+ (ctx_data->dpb[i].needed_for_output ||
+ discard_refs)) {
+ if (ctx_data->dpb[i].top_field_order_count <
+ min_cnt) {
+ min_cnt =
+ ctx_data->dpb[i].top_field_order_count;
+ min_cnt_idx = i;
+ found = TRUE;
+ }
+ }
+ }
+
+ if (found) {
+ unsigned int umin_cnt_tid = ctx_data->dpb[min_cnt_idx].transaction_id;
+
+ if (ctx_data->dpb[min_cnt_idx].needed_for_output) {
+ VDEC_ASSERT(umin_cnt_tid != 0xffffffff);
+ display_pict =
+ decoder_get_decoded_pict(umin_cnt_tid,
+ &dec_str_ctx->str_decd_pict_list);
+
+ if ((display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info) &&
+ FLAG_IS_SET(ctx_data->dpb[min_cnt_idx].display_flags,
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED)) {
+ if (!FLAG_IS_SET(ctx_data->dpb[min_cnt_idx].display_flags,
+ VDECFW_BUFFLAG_DISPLAY_SINGLE_FIELD))
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_PAIR;
+ else
+ display_pict->pict->dec_pict_info->buf_type =
+ FLAG_IS_SET
+ (ctx_data->dpb
+ [min_cnt_idx].display_flags,
+ VDECFW_BUFFLAG_DISPLAY_BOTTOM_FIELD)
+ ?
+ IMG_BUFFERTYPE_FIELD_BOTTOM :
+ IMG_BUFFERTYPE_FIELD_TOP;
+ display_pict->pict->dec_pict_info->view_id =
+ ctx_data->dpb[min_cnt_idx].view_id;
+ } else if ((display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info) &&
+ (!FLAG_IS_SET(ctx_data->dpb[min_cnt_idx].display_flags,
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED))){
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_FRAME;
+ display_pict->pict->dec_pict_info->view_id =
+ ctx_data->dpb[min_cnt_idx].view_id;
+ } else {
+ VDEC_ASSERT(display_pict);
+ VDEC_ASSERT(display_pict && display_pict->pict);
+ VDEC_ASSERT(display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info);
+ }
+
+ if (display_pict && !display_pict->displayed) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] DISPLAY",
+ dec_str_ctx->config.user_str_id,
+ umin_cnt_tid);
+#endif
+ display_pict->displayed = TRUE;
+ num_pics_displayed++;
+ ret = decoder_picture_display
+ (dec_str_ctx,
+ GET_STREAM_PICTURE_ID(umin_cnt_tid),
+ num_pics_displayed == num_display_pics ?
+ TRUE : FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ ctx_data->dpb[min_cnt_idx].needed_for_output = FALSE;
+ }
+
+ if (discard_refs) {
+ decoded_pict =
+ decoder_get_decoded_pict(umin_cnt_tid,
+ &dec_str_ctx->str_decd_pict_list);
+ if (decoded_pict) {
+ /* Signal releasing this picture to upper layers. */
+ pict_id =
+ GET_STREAM_PICTURE_ID(decoded_pict->transaction_id);
+ decoder_picture_release(dec_str_ctx,
+ pict_id,
+ decoded_pict->displayed,
+ decoded_pict->merged);
+ /* Destroy the decoded picture. */
+ ret = decoder_decoded_picture_destroy(dec_str_ctx,
+ decoded_pict, FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ ctx_data->dpb[min_cnt_idx].transaction_id = 0;
+ }
+ }
+ }
+
+ VDEC_ASSERT(num_pics_displayed == num_display_pics);
+
+ return IMG_SUCCESS;
+}
+
+#ifdef HAS_HEVC
+/*
+ * decoder_StreamFlushProcessDPB_HEVC
+ */
+static int decoder_stream_flush_process_dpb_hevc(struct dec_str_ctx *decstr_ctx,
+ struct dec_decoded_pict *decoded_pict,
+ unsigned char discard_refs)
+{
+ int result;
+ struct hevcfw_ctx_data *ctx =
+ (struct hevcfw_ctx_data *)decstr_ctx->last_be_pict_dec_res->fw_ctx_buf.cpu_virt;
+ struct hevcfw_decoded_picture_buffer *dpb;
+ unsigned char found = TRUE;
+ unsigned char idx;
+ int min_poc_val;
+ signed char dpb_idx;
+ unsigned char num_display_pics = 0;
+ unsigned char num_pics_displayed = 0;
+ struct dec_decoded_pict *display_pict = NULL;
+
+ /*
+ * Examine the DPB from the firmware context in order
+ * to display or release any outstanding pictures.
+ */
+ dpb = &ctx->dpb;
+
+ /* Determine how many display pictures reside in the DPB. */
+ for (idx = 0; idx < HEVCFW_MAX_DPB_SIZE; ++idx) {
+ struct hevcfw_picture_in_dpb *dpb_pic = &dpb->pictures[idx];
+
+ if (dpb_pic->valid && dpb_pic->needed_for_output)
+ ++num_display_pics;
+ }
+
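+ /* Drain the DPB in POC order: smallest pic_order_cnt_val first. */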
+ while (found) {
+ struct hevcfw_picture_in_dpb *dpb_pic;
+
+ min_poc_val = 0x7fffffff;
+ dpb_idx = HEVCFW_DPB_IDX_INVALID;
+ found = FALSE;
+
+ /* Loop over the DPB to find the first in order */
+ for (idx = 0; idx < HEVCFW_MAX_DPB_SIZE; ++idx) {
+ dpb_pic = &dpb->pictures[idx];
+ if (dpb_pic->valid && (dpb_pic->needed_for_output || discard_refs)) {
+ if (dpb_pic->picture.pic_order_cnt_val < min_poc_val) {
+ min_poc_val = dpb_pic->picture.pic_order_cnt_val;
+ dpb_idx = idx;
+ found = TRUE;
+ }
+ }
+ }
+
+ if (!found)
+ break;
+
+ dpb_pic = &dpb->pictures[dpb_idx];
+
+ if (dpb_pic->needed_for_output) {
+ unsigned int str_pic_id = GET_STREAM_PICTURE_ID
+ (dpb_pic->picture.transaction_id);
+
+ VDEC_ASSERT(dpb_pic->picture.transaction_id != 0xffffffff);
+ display_pict = decoder_get_decoded_pict(dpb_pic->picture.transaction_id,
+ &decstr_ctx->str_decd_pict_list);
+
+ if (display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info) {
+ display_pict->pict->dec_pict_info->buf_type = IMG_BUFFERTYPE_FRAME;
+ } else {
+ VDEC_ASSERT(display_pict);
+ VDEC_ASSERT(display_pict && display_pict->pict);
+ VDEC_ASSERT(display_pict && display_pict->pict &&
+ display_pict->pict->dec_pict_info);
+
+ dpb_pic->valid = FALSE;
+ continue;
+ }
+
+ if (!display_pict->displayed) {
+ display_pict->displayed = TRUE;
+ ++num_pics_displayed;
+ result = decoder_picture_display(decstr_ctx, str_pic_id,
+ num_pics_displayed ==
+ num_display_pics);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+ }
+ dpb_pic->needed_for_output = FALSE;
+ }
+
+ if (discard_refs) {
+ decoded_pict = decoder_get_decoded_pict(dpb_pic->picture.transaction_id,
+ &decstr_ctx->str_decd_pict_list);
+
+ if (decoded_pict) {
+ /* Signal releasing this picture to upper layers. */
+ decoder_picture_release(decstr_ctx,
+ GET_STREAM_PICTURE_ID
+ (decoded_pict->transaction_id),
+ decoded_pict->displayed,
+ decoded_pict->merged);
+ /* Destroy the decoded picture. */
+ result = decoder_decoded_picture_destroy(decstr_ctx, decoded_pict,
+ FALSE);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+ }
+ dpb_pic->valid = FALSE;
+ }
+ }
+
+ VDEC_ASSERT(num_pics_displayed == num_display_pics);
+
+ return IMG_SUCCESS;
+}
+#endif
+
+/*
+ * @Function decoder_stream_flush_process_dpb
+ * @Description
+ * Process DPB fetched from firmware, display and release relevant pictures.
+ */
+static int decoder_stream_flush_process_dpb(struct dec_str_ctx *dec_str_ctx,
+ struct dec_decoded_pict *decoded_pict,
+ unsigned char discard_refs)
+{
+ int ret = 0;
+ /* Get oldest reference to display. */
+ decoded_pict = dq_last(&dec_str_ctx->str_decd_pict_list);
+ if (decoded_pict) {
+ switch (dec_str_ctx->config.vid_std) {
+ case VDEC_STD_H264:
+ ret = decoder_stream_flush_process_dpb_h264(dec_str_ctx, decoded_pict,
+ discard_refs);
+
+ break;
+#ifdef HAS_HEVC
+ case VDEC_STD_HEVC:
+ ret = decoder_stream_flush_process_dpb_hevc(dec_str_ctx,
+ decoded_pict, discard_refs);
+ break;
+#endif
+
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int decoder_stream_flush(void *dec_str_ctx_arg, unsigned char discard_refs)
+{
+ struct dec_str_ctx *dec_str_ctx;
+ struct dec_str_unit *dec_str_unit;
+ struct dec_decoded_pict *decoded_pict;
+ int ret = 0;
+
+ dec_str_ctx = decoder_stream_get_context(dec_str_ctx_arg);
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx) {
+ pr_err("Invalid decoder stream context handle!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /*
+ * Since the stream should be stopped before flushing,
+ * there should be no pictures in the stream list.
+ */
+ dec_str_unit = lst_first(&dec_str_ctx->pend_strunit_list);
+ while (dec_str_unit) {
+ VDEC_ASSERT(dec_str_unit->str_unit->str_unit_type !=
+ VDECDD_STRUNIT_PICTURE_START);
+ dec_str_unit = lst_next(dec_str_unit);
+ }
+
+ decoded_pict = dq_last(&dec_str_ctx->str_decd_pict_list);
+
+ ret = decoder_stream_flush_process_dpb(dec_str_ctx, decoded_pict,
+ discard_refs);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
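+ /*
+ * With references discarded, anything left on the decoded picture
+ * list is orphaned: release each buffer back to the caller and
+ * destroy the bookkeeping entry.
+ */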
+ if (discard_refs) {
+ while (!dq_empty(&dec_str_ctx->str_decd_pict_list)) {
+ struct dec_decoded_pict *non_decoded_pict =
+ dq_first(&dec_str_ctx->str_decd_pict_list);
+
+ if (!non_decoded_pict) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] Decoded picture list contains item ID:0x%08x when DPB is empty",
+ dec_str_ctx->config.user_str_id,
+ non_decoded_pict->transaction_id);
+#endif
+
+ /* release the buffers back to vxd_decoder */
+ decoder_picture_release(dec_str_ctx, GET_STREAM_PICTURE_ID
+ (non_decoded_pict->transaction_id), FALSE,
+ FALSE);
+
+ ret = decoder_decoded_picture_destroy(dec_str_ctx, non_decoded_pict, FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+ VDEC_ASSERT(dq_empty(&dec_str_ctx->str_decd_pict_list));
+
+ if (dec_str_ctx->last_be_pict_dec_res) {
+ /*
+ * Clear the firmware context so that reference
+ * pictures are no longer referred to.
+ */
+ memset(dec_str_ctx->last_be_pict_dec_res->fw_ctx_buf.cpu_virt, 0,
+ dec_str_ctx->last_be_pict_dec_res->fw_ctx_buf.buf_size);
+ }
+ }
+
+ pr_debug("Decoder stream flushed successfully\n");
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_stream_prepare_ctx
+ */
+int decoder_stream_prepare_ctx(void *dec_str_ctx_arg, unsigned char flush_dpb)
+{
+ struct dec_str_ctx *dec_str_ctx =
+ decoder_stream_get_context(dec_str_ctx_arg);
+ int ret;
+
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] Preparing stream context after seek",
+ dec_str_ctx->config.user_str_id,
+ dec_str_ctx->last_fe_transaction_id);
+#endif
+
+ if (flush_dpb) {
+ ret = decoder_stream_flush(dec_str_ctx, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Reset front-end temporary pointers */
+ if (dec_str_ctx->prev_fe_pict_dec_res) {
+ resource_item_return(&dec_str_ctx->prev_fe_pict_dec_res->ref_cnt);
+ dec_str_ctx->prev_fe_pict_dec_res = NULL;
+ }
+ if (dec_str_ctx->cur_fe_pict_dec_res) {
+ resource_item_return(&dec_str_ctx->cur_fe_pict_dec_res->ref_cnt);
+ dec_str_ctx->cur_fe_pict_dec_res = NULL;
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_get_load
+ */
+int decoder_get_load(void *dec_str_ctx_arg, unsigned int *avail_slots)
+{
+ struct dec_str_ctx *dec_str_ctx =
+ decoder_stream_get_context(dec_str_ctx_arg);
+ struct dec_core_ctx *dec_core_ctx_local = NULL;
+
+ /* Check input parameters. */
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx || !avail_slots) {
+ pr_err("Invalid parameters!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ dec_core_ctx_local = decoder_str_ctx_to_core_ctx(dec_str_ctx);
+ if (!dec_core_ctx_local) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ if (dec_core_ctx_local->busy)
+ *avail_slots = 0;
+ else
+ *avail_slots = dec_str_ctx->avail_slots;
+
+ return IMG_SUCCESS;
+}
+
+static int decoder_check_ref_errors(struct dec_str_ctx *dec_str_ctx,
+ struct vdecfw_buffer_control *buf_ctrl,
+ struct vdecdd_picture *picture)
+{
+ struct dec_decoded_pict *ref_pict;
+ unsigned int i;
+
+ if (!dec_str_ctx) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!buf_ctrl || !picture) {
+ pr_err("[USERSID=0x%08X] Invalid parameters for checking reference lists.",
+ dec_str_ctx->config.user_str_id);
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
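+ /*
+ * Scan the reference list: if any reference picture was itself
+ * decoded with errors, mark this picture as decoded from a corrupted
+ * reference.
+ */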
+ for (i = 0; i < VDECFW_MAX_NUM_PICTURES && buf_ctrl->ref_list[i];
+ i++) {
+ ref_pict = decoder_get_decoded_pict_of_stream
+ (GET_STREAM_PICTURE_ID(buf_ctrl->ref_list[i]),
+ &dec_str_ctx->str_decd_pict_list);
+ if (ref_pict && ref_pict->pict && ref_pict->pict->dec_pict_info &&
+ ref_pict->pict->dec_pict_info->err_flags) {
+ picture->dec_pict_info->err_flags |=
+ VDEC_ERROR_CORRUPTED_REFERENCE;
+ pr_warn("Picture decoded using corrupted reference: 0x%08X 0x%08X",
+ ref_pict->transaction_id,
+ ref_pict->pict->dec_pict_info->err_flags);
+ }
+ }
+
+ return IMG_SUCCESS;
+}
+
+static void decoder_clean_bitstr_segments(struct lst_t *decpict_seglist)
+{
+ struct dec_decpict_seg *dec_pict_seg;
+
+ while ((dec_pict_seg = lst_removehead(decpict_seglist)) != NULL) {
+ if (dec_pict_seg->internal_seg) {
+ VDEC_ASSERT(dec_pict_seg->bstr_seg);
+ kfree(dec_pict_seg->bstr_seg);
+ dec_pict_seg->bstr_seg = NULL;
+ }
+ kfree(dec_pict_seg);
+ }
+}
+
+static int decoder_wrap_bitstr_segments(struct lst_t *bitstr_seglist,
+ struct lst_t *decpict_seglist,
+ unsigned int user_str_id)
+{
+ /* Required for attaching segments to the decode picture */
+ struct bspp_bitstr_seg *bit_str_seg;
+ struct dec_decpict_seg *dec_pict_seg;
+
+ VDEC_ASSERT(bitstr_seglist);
+ VDEC_ASSERT(decpict_seglist);
+
+ /* Add the segments to the Decode Picture */
+ bit_str_seg = lst_first(bitstr_seglist);
+ while (bit_str_seg) {
+ dec_pict_seg = kzalloc(sizeof(*dec_pict_seg), GFP_KERNEL);
+ VDEC_ASSERT(dec_pict_seg);
+ if (!dec_pict_seg)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ dec_pict_seg->bstr_seg = bit_str_seg;
+ dec_pict_seg->internal_seg = FALSE;
+ lst_add(decpict_seglist, dec_pict_seg);
+
+ bit_str_seg = lst_next(bit_str_seg);
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_picture_decode
+ */
+static int decoder_picture_decode(struct dec_str_ctx *dec_str_ctx,
+ struct vdecdd_str_unit *str_unit,
+ struct dec_decpict **dec_pict_ptr)
+{
+ struct vdecdd_picture *picture;
+ struct dec_core_ctx *dec_core_ctx;
+ struct dec_decpict *dec_pict;
+ int ret = IMG_SUCCESS;
+ struct decoder_regsoffsets regs_offsets;
+
+ /* Validate input arguments */
+ if (!dec_str_ctx || !str_unit || !str_unit->pict_hdr_info || !dec_pict_ptr) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ picture = (struct vdecdd_picture *)str_unit->dd_pict_data;
+ dec_core_ctx = decoder_str_ctx_to_core_ctx(dec_str_ctx);
+
+ if (!picture || !dec_core_ctx) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Ensure this is a new picture */
+ VDEC_ASSERT(!dec_str_ctx->cur_pict);
+ VDEC_ASSERT(str_unit->str_unit_type == VDECDD_STRUNIT_PICTURE_START);
+
+ dec_core_ctx->cum_pics++;
+
+ /* Allocate a unique id to the picture */
+ ret = idgen_allocid(dec_str_ctx->pict_idgen, picture, &picture->pict_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Allocate the decoded picture information structure. */
+ picture->dec_pict_info = kzalloc(sizeof(*picture->dec_pict_info), GFP_KERNEL);
+ VDEC_ASSERT(picture->dec_pict_info);
+ if (!picture->dec_pict_info)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Extract decoded information from the stream unit */
+ picture->dec_pict_info->err_flags = str_unit->err_flags;
+ picture->dec_pict_info->first_fld_tag_container.pict_tag_param =
+ (unsigned long)(str_unit->str_unit_tag);
+ picture->dec_pict_info->op_config = picture->op_config;
+ picture->dec_pict_info->rend_info = picture->disp_pict_buf.rend_info;
+ picture->dec_pict_info->disp_info = str_unit->pict_hdr_info->disp_info;
+
+ /* Extract aux picture information from the stream unit */
+ picture->dec_pict_aux_info.seq_hdr_id =
+ str_unit->seq_hdr_info->sequ_hdr_id;
+ picture->dec_pict_aux_info.pps_id =
+ str_unit->pict_hdr_info->pict_aux_data.id;
+ picture->dec_pict_aux_info.second_pps_id =
+ str_unit->pict_hdr_info->second_pict_aux_data.id;
+
+ /* Create a new decoder picture container. */
+ dec_pict = kzalloc(sizeof(*dec_pict), GFP_KERNEL);
+ VDEC_ASSERT(dec_pict);
+ if (!dec_pict) {
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_dec_pict;
+ }
+
+ /* Attach decoder/picture context information. */
+ dec_pict->dec_str_ctx = dec_str_ctx;
+
+ /*
+ * Construct the transaction Id.
+ * This consists of stream and core number in addition to picture
+ * number in stream and a 4-bit value representing the picture number
+ * in core.
+ */
+ dec_pict->transaction_id =
+ CREATE_TRANSACTION_ID(0, dec_str_ctx->km_str_id, dec_core_ctx->cum_pics,
+ picture->pict_id);
+
+ /* Add picture to core decode list */
+ dec_str_ctx->dec_str_st.num_pict_decoding++;
+
+ /* Fake a FW message to process when decoded. */
+ dec_pict->first_fld_fwmsg = kzalloc(sizeof(*dec_pict->first_fld_fwmsg), GFP_KERNEL);
+ VDEC_ASSERT(dec_pict->first_fld_fwmsg);
+ if (!dec_pict->first_fld_fwmsg) {
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_fw_msg;
+ }
+
+ dec_pict->second_fld_fwmsg =
+ kzalloc(sizeof(*dec_pict->second_fld_fwmsg), GFP_KERNEL);
+ VDEC_ASSERT(dec_pict->second_fld_fwmsg);
+ if (!dec_pict->second_fld_fwmsg) {
+ ret = IMG_ERROR_OUT_OF_MEMORY;
+ goto error_fw_msg;
+ }
+
+ /* Add the segments to the Decode Picture */
+ ret = decoder_wrap_bitstr_segments(&str_unit->bstr_seg_list,
+ &dec_pict->dec_pict_seg_list,
+ dec_str_ctx->config.user_str_id);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_segments;
+
+ /*
+ * Shuffle the current and previous context buffers:
+ * hold a reference to the last context on the FE.
+ */
+ if (dec_str_ctx->prev_fe_pict_dec_res) {
+ /* Return previous last FW context. */
+ resource_item_return(&dec_str_ctx->prev_fe_pict_dec_res->ref_cnt);
+
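+ /*
+ * If dropping the reference made the bundle free, re-insert it so it
+ * returns to the available pool (a reading of the remove/add pair
+ * below, not a documented contract).
+ */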
+ if (resource_item_isavailable(&dec_str_ctx->prev_fe_pict_dec_res->ref_cnt)) {
+ resource_list_remove(&dec_str_ctx->dec_res_lst,
+ dec_str_ctx->prev_fe_pict_dec_res);
+
+ resource_list_add_img(&dec_str_ctx->dec_res_lst,
+ dec_str_ctx->prev_fe_pict_dec_res, 0,
+ &dec_str_ctx->prev_fe_pict_dec_res->ref_cnt);
+ }
+ }
+
+ dec_str_ctx->prev_fe_pict_dec_res = dec_str_ctx->cur_fe_pict_dec_res;
+ dec_pict->prev_pict_dec_res = dec_str_ctx->prev_fe_pict_dec_res;
+
+ /* Get a new stream decode resource bundle for current picture. */
+ dec_pict->cur_pict_dec_res = resource_list_get_avail(&dec_str_ctx->dec_res_lst);
+ VDEC_ASSERT(dec_pict->cur_pict_dec_res);
+ if (!dec_pict->cur_pict_dec_res) {
+ ret = IMG_ERROR_UNEXPECTED_STATE;
+ goto error_dec_res;
+ }
+
+ if (dec_str_ctx->config.vid_std == VDEC_STD_H264) {
+ /* Copy any SGM for current picture. */
+ if (str_unit->pict_hdr_info->pict_sgm_data.id !=
+ BSPP_INVALID) {
+ VDEC_ASSERT(str_unit->pict_hdr_info->pict_sgm_data.size <=
+ dec_pict->cur_pict_dec_res->h264_sgm_buf.buf_size);
+ /* Updated in translation_api */
+ memcpy(dec_pict->cur_pict_dec_res->h264_sgm_buf.cpu_virt,
+ str_unit->pict_hdr_info->pict_sgm_data.pic_data,
+ str_unit->pict_hdr_info->pict_sgm_data.size);
+ }
+ }
+
+ dec_pict->cur_pict_dec_res->transaction_id = dec_pict->transaction_id;
+ dec_str_ctx->cur_fe_pict_dec_res = dec_pict->cur_pict_dec_res;
+ resource_item_use(&dec_str_ctx->cur_fe_pict_dec_res->ref_cnt);
+
+ /* Get a new control buffer */
+ dec_pict->pict_ref_res =
+ resource_list_get_avail(&dec_str_ctx->ref_res_lst);
+ VDEC_ASSERT(dec_pict->pict_ref_res);
+ if (!dec_pict->pict_ref_res) {
+ ret = IMG_ERROR_UNEXPECTED_STATE;
+ goto error_ref_res;
+ }
+
+ VDEC_ASSERT(dec_str_ctx->decctx);
+ VDEC_ASSERT(dec_str_ctx->decctx->dev_cfg);
+
+ dec_pict->str_pvdec_fw_ctxbuf = &dec_str_ctx->pvdec_fw_ctx_buf;
+ dec_pict->pict_hdr_info = str_unit->pict_hdr_info;
+
+ /* Obtain (core) resources for the picture */
+ ret = dec_res_picture_attach(&dec_str_ctx->resources,
+ dec_str_ctx->config.vid_std, dec_pict);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_res_attach;
+
+ /* Clear fw context data for re-use */
+ memset(dec_pict->cur_pict_dec_res->fw_ctx_buf.cpu_virt, 0,
+ dec_pict->cur_pict_dec_res->fw_ctx_buf.buf_size);
+
+ /*
+ * Clear the control data in case the picture is discarded before
+ * being prepared by firmware.
+ */
+ memset(dec_pict->pict_ref_res->fw_ctrlbuf.cpu_virt, 0,
+ dec_pict->pict_ref_res->fw_ctrlbuf.buf_size);
+
+ ret = hwctrl_getregsoffset(dec_core_ctx->hw_ctx, &regs_offsets);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_other;
+
+ ret = translation_ctrl_alloc_prepare(&dec_str_ctx->config, str_unit,
+ dec_pict,
+ &dec_core_ctx->core_props,
+ &regs_offsets);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_other;
+
+ ret = hwctrl_picture_submitbatch(dec_core_ctx->hw_ctx, dec_pict,
+ dec_str_ctx->vxd_dec_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error_other;
+
+ VDEC_ASSERT(dec_str_ctx->avail_slots > 0);
+ dec_str_ctx->avail_slots--;
+
+ VDEC_ASSERT(!dec_core_ctx->busy);
+ dec_core_ctx->busy = TRUE;
+ /* Store this transaction ID in stream context */
+ dec_str_ctx->last_fe_transaction_id = dec_pict->transaction_id;
+ dec_str_ctx->cur_pict = (struct dec_decpict *)dec_pict;
+
+ dec_str_ctx->dec_str_st.features = str_unit->features;
+
+ if (str_unit->eop)
+ dec_pict->eop_found = TRUE;
+
+ *dec_pict_ptr = dec_pict;
+
+ return IMG_SUCCESS;
+
+ /* Roll back in case of errors. */
+error_other:
+ dec_res_picture_detach(&dec_str_ctx->resources, dec_pict);
+error_res_attach:
+error_ref_res:
+error_dec_res:
+error_segments:
+ decoder_clean_bitstr_segments(&dec_pict->dec_pict_seg_list);
+ kfree(dec_pict->first_fld_fwmsg);
+ kfree(dec_pict->second_fld_fwmsg);
+error_fw_msg:
+ kfree(dec_pict);
+error_dec_pict:
+ kfree(picture->dec_pict_info);
+
+ return ret;
+}
+
+/*
+ * @Function decoder_stream_reference_resource_create
+ */
+static int
+decoder_stream_reference_resource_create(struct dec_str_ctx *dec_str_ctx)
+{
+ struct dec_pictref_res *pict_ref_res;
+ int ret;
+ unsigned int mem_heap_id;
+ enum sys_emem_attrib mem_attribs;
+
+ if (!dec_str_ctx || !dec_str_ctx->decctx) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ mem_heap_id = dec_str_ctx->decctx->internal_heap_id;
+ mem_attribs = (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE);
+ mem_attribs |= (enum sys_emem_attrib)SYS_MEMATTRIB_INTERNAL;
+
+ /* Allocate the firmware context buffer info structure. */
+ pict_ref_res = kzalloc(sizeof(*pict_ref_res), GFP_KERNEL);
+ VDEC_ASSERT(pict_ref_res);
+ if (!pict_ref_res)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /*
+ * Allocate the firmware context buffer to contain data required
+ * for the subsequent picture.
+ */
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s:%d calling MMU_StreamMalloc", __func__, __LINE__);
+#endif
+ ret = mmu_stream_alloc(dec_str_ctx->mmu_str_handle, MMU_HEAP_STREAM_BUFFERS, mem_heap_id,
+ (enum sys_emem_attrib)(mem_attribs | SYS_MEMATTRIB_CPU_READ |
+ SYS_MEMATTRIB_CPU_WRITE),
+ sizeof(struct vdecfw_buffer_control),
+ DEV_MMU_PAGE_ALIGNMENT,
+ &pict_ref_res->fw_ctrlbuf);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto err_out_of_memory;
+
+ /*
+ * Clear the context data in preparation for first time use by
+ * the firmware.
+ */
+ memset(pict_ref_res->fw_ctrlbuf.cpu_virt, 0, pict_ref_res->fw_ctrlbuf.buf_size);
+
+ pict_ref_res->ref_cnt = 1;
+
+ ret = resource_list_add_img(&dec_str_ctx->ref_res_lst, pict_ref_res, 0,
+ &pict_ref_res->ref_cnt);
+ if (ret != IMG_SUCCESS) {
+ pr_err("[USERSID=0x%08X] Failed to add resource", dec_str_ctx->config.user_str_id);
+ return ret;
+ }
+
+ return IMG_SUCCESS;
+
+err_out_of_memory:
+
+ kfree(pict_ref_res);
+ pict_ref_res = NULL;
+
+ pr_err("[USERSID=0x%08X] Failed to allocate device memory for stream reference resources",
+ dec_str_ctx->config.user_str_id);
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+}
+
+/*
+ * @Function decoder_picture_finalize
+ */
+static int decoder_picture_finalize(struct dec_str_ctx *dec_str_ctx,
+ struct vdecdd_str_unit *str_unit)
+{
+ struct dec_decpict *dec_pict;
+ struct dec_core_ctx *dec_core_ctx = NULL;
+
+ VDEC_ASSERT(dec_str_ctx);
+
+ dec_pict = dec_str_ctx->cur_pict;
+ if (!dec_pict) {
+ pr_err("[USERSID=0x%08X] Unable to get the current picture from Decoder context",
+ dec_str_ctx->config.user_str_id);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ dec_core_ctx = decoder_str_ctx_to_core_ctx(dec_str_ctx);
+
+ if (!dec_core_ctx || !dec_core_ctx->busy) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ dec_core_ctx->busy = FALSE;
+
+ /* Picture data are now complete, nullify pointer */
+ dec_str_ctx->cur_pict = NULL;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_submit_fragment
+ */
+static int decoder_submit_fragment(struct dec_str_ctx *dec_str_context,
+ struct vdecdd_str_unit *str_unit,
+ unsigned char eop)
+{
+ struct dec_core_ctx *dec_core_context = NULL;
+ struct lst_t dec_fragment_seg_list;
+ struct dec_decpict_seg *dec_pict_seg;
+ struct dec_pict_fragment *pict_fragment;
+ int ret = IMG_SUCCESS;
+
+ if (!dec_str_context) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ if (!dec_str_context->cur_pict) {
+ pr_err("[USERSID=0x%08X] Unable to get the current picture from Decoder context",
+ dec_str_context->config.user_str_id);
+ VDEC_ASSERT(0);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ dec_core_context = decoder_str_ctx_to_core_ctx(dec_str_context);
+ if (!dec_core_context) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_GENERIC_FAILURE;
+ }
+
+ pict_fragment = kzalloc(sizeof(*pict_fragment), GFP_KERNEL);
+ VDEC_ASSERT(pict_fragment);
+ if (!pict_fragment)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ lst_init(&dec_fragment_seg_list);
+
+ /* Add the segments to the temporary list */
+ ret = decoder_wrap_bitstr_segments(&str_unit->bstr_seg_list,
+ &dec_fragment_seg_list,
+ dec_str_context->config.user_str_id);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /* Prepare the ctrl alloc for the fragment */
+ ret = translation_fragment_prepare(dec_str_context->cur_pict,
+ &dec_fragment_seg_list, eop,
+ pict_fragment);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ /*
+ * Move segments of the fragment from the temporary list to the picture
+ * segment list
+ */
+ dec_pict_seg = lst_removehead(&dec_fragment_seg_list);
+ while (dec_pict_seg) {
+ lst_add(&dec_str_context->cur_pict->dec_pict_seg_list,
+ dec_pict_seg);
+ dec_pict_seg = lst_removehead(&dec_fragment_seg_list);
+ }
+
+ /* Submit fragment */
+ ret = hwctrl_picture_submit_fragment(dec_core_context->hw_ctx,
+ pict_fragment,
+ dec_str_context->cur_pict,
+ dec_str_context->vxd_dec_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ goto error;
+
+ lst_add(&dec_str_context->cur_pict->fragment_list, pict_fragment);
+
+ if (eop)
+ dec_str_context->cur_pict->eop_found = TRUE;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] FRAGMENT",
+ dec_str_context->config.user_str_id,
+ dec_str_context->last_fe_transaction_id);
+#endif
+
+ return IMG_SUCCESS;
+error:
+ kfree(pict_fragment);
+
+ return ret;
+}
+
+/*
+ * @Function decoder_stream_process_unit
+ */
+int decoder_stream_process_unit(void *dec_str_ctx_arg,
+ struct vdecdd_str_unit *str_unit)
+{
+ struct dec_str_ctx *dec_str_ctx =
+ decoder_stream_get_context(dec_str_ctx_arg);
+
+ struct dec_str_unit *dec_str_unit;
+ struct dec_decpict *dec_pict = NULL;
+ unsigned char processed = FALSE;
+ int ret;
+
+ VDEC_ASSERT(dec_str_ctx);
+ VDEC_ASSERT(str_unit);
+
+ if (!dec_str_ctx || !str_unit) {
+ pr_err("Invalid decoder stream context handle!\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ pr_debug("%s: stream unit type = %d\n",
+ __func__, str_unit->str_unit_type);
+ /* Process the stream unit */
+ switch (str_unit->str_unit_type) {
+ case VDECDD_STRUNIT_SEQUENCE_END:
+ case VDECDD_STRUNIT_ANONYMOUS:
+ case VDECDD_STRUNIT_CLOSED_GOP:
+ case VDECDD_STRUNIT_PICTURE_PORTENT:
+ case VDECDD_STRUNIT_FENCE:
+ /* Nothing more to do so mark the stream unit as processed */
+ processed = TRUE;
+ break;
+
+ case VDECDD_STRUNIT_STOP:
+ if (dec_str_ctx->cur_pict && !dec_str_ctx->cur_pict->eop_found) {
+ ret = decoder_submit_fragment(dec_str_ctx, str_unit, TRUE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ ret = decoder_picture_finalize(dec_str_ctx, str_unit);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] FORCED END",
+ dec_str_ctx->config.user_str_id,
+ dec_str_ctx->last_fe_transaction_id);
+#endif
+ }
+
+ processed = TRUE;
+ break;
+
+ case VDECDD_STRUNIT_SEQUENCE_START:
+ {
+ unsigned int max_num_active_pict = 0;
+
+ VDEC_ASSERT(str_unit->seq_hdr_info);
+ /*
+ * Determine how many decoded pictures can be held for
+ * reference in the decoder for this stream.
+ */
+ ret = vdecddutils_ref_pict_get_maxnum(&dec_str_ctx->config,
+ &str_unit->seq_hdr_info->com_sequ_hdr_info,
+ &max_num_active_pict);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Double for field coding */
+ max_num_active_pict *= 2;
+
+ /*
+ * Ensure that there are enough resource to have pictures
+ * filling all slots on all cores.
+ */
+ max_num_active_pict +=
+ dec_str_ctx->decctx->dev_cfg->num_slots_per_pipe *
+ dec_str_ctx->decctx->num_pipes;
+
+ /* Increase decoder stream resources if necessary. */
+ while (dec_str_ctx->num_ref_res < max_num_active_pict) {
+ ret = decoder_stream_reference_resource_create(dec_str_ctx);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dec_str_ctx->num_ref_res++;
+ }
+
+ /* Nothing more to do so mark the stream unit as processed */
+ processed = TRUE;
+ break;
+ }
+
+ case VDECDD_STRUNIT_PICTURE_START:
+ if (str_unit->decode) {
+ /* Prepare and submit picture to decode. */
+ ret = decoder_picture_decode(dec_str_ctx, str_unit, &dec_pict);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] START",
+ dec_str_ctx->config.user_str_id,
+ dec_str_ctx->last_fe_transaction_id);
+#endif
+ } else {
+ processed = TRUE;
+ }
+ break;
+
+ case VDECDD_STRUNIT_PICTURE_END:
+ if (str_unit->decode) {
+ ret = decoder_picture_finalize(dec_str_ctx, str_unit);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] END",
+ dec_str_ctx->config.user_str_id,
+ dec_str_ctx->last_fe_transaction_id);
+#endif
+ } else {
+ processed = TRUE;
+ }
+ break;
+
+ default:
+ VDEC_ASSERT(FALSE);
+ break;
+ }
+
+ /*
+ * If this or any preceding stream unit(s) could not be
+ * completely processed, add this unit to the queue.
+ */
+ if (!processed) {
+ /* Add unit to stream decode list */
+ dec_str_unit = kzalloc(sizeof(*dec_str_unit), GFP_KERNEL);
+ VDEC_ASSERT(dec_str_unit);
+ if (!dec_str_unit)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ dec_str_unit->str_unit = str_unit;
+
+ /* Make PICTURE_START the owner of dec_pict */
+ if (dec_pict) {
+ VDEC_ASSERT(str_unit->str_unit_type == VDECDD_STRUNIT_PICTURE_START);
+ dec_str_unit->dec_pict = dec_pict;
+ }
+
+ lst_add(&dec_str_ctx->pend_strunit_list, dec_str_unit);
+ } else {
+ /*
+ * If there is nothing being decoded for this stream,
+ * immediately handle the unit (non-picture so doesn't need
+ * decoding). Report that this unit has been processed.
+ */
+ VDEC_ASSERT(dec_str_ctx->decctx);
+ ret = dec_str_ctx->str_processed_cb(dec_str_ctx->usr_int_data,
+ VXD_CB_STRUNIT_PROCESSED,
+ str_unit);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ return IMG_SUCCESS;
+}
+
+static int
+decoder_get_required_core_features(const struct vdec_str_configdata *str_cfg,
+ const struct vdec_str_opconfig *op_cfg,
+ unsigned int *features)
+{
+ unsigned int features_local = 0;
+
+ VDEC_ASSERT(str_cfg);
+ VDEC_ASSERT(features);
+
+ /* Check Video Standard. */
+ switch (str_cfg->vid_std) {
+ case VDEC_STD_H264:
+ features_local = VDECDD_COREFEATURE_H264;
+ break;
+#ifdef HAS_JPEG
+ case VDEC_STD_JPEG:
+ features_local = VDECDD_COREFEATURE_JPEG;
+ break;
+#endif
+#ifdef HAS_HEVC
+ case VDEC_STD_HEVC:
+ features_local = VDECDD_COREFEATURE_HEVC;
+ break;
+#endif
+ default:
+ VDEC_ASSERT(FALSE);
+ break;
+ }
+
+ *features = features_local;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function decoder_is_supported_by_atleast_onepipe
+ */
+static unsigned char decoder_is_supported_by_atleast_onepipe(unsigned char *features,
+ unsigned int num_pipes)
+{
+ unsigned int i;
+
+ VDEC_ASSERT(features);
+ VDEC_ASSERT(num_pipes <= VDEC_MAX_PIXEL_PIPES);
+
+ for (i = 0; i < num_pipes; i++) {
+ if (features[i])
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * @Function decoder_check_support
+ */
+int decoder_check_support(void *dec_ctx_arg,
+ const struct vdec_str_configdata *str_cfg,
+ const struct vdec_str_opconfig *str_op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pict_buf,
+ const struct vdec_pict_rendinfo *req_pict_rendinfo,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ const struct bspp_pict_hdr_info *pict_hdrinfo,
+ const struct vdec_comsequ_hdrinfo *prev_comseq_hdrinfo,
+ const struct bspp_pict_hdr_info *prev_pict_hdrinfo,
+ unsigned char non_cfg_req,
+ struct vdec_unsupp_flags *unsupported,
+ unsigned int *features)
+{
+ struct dec_ctx *dec_ctx = (struct dec_ctx *)dec_ctx_arg;
+ struct dec_core_ctx *dec_core_ctx;
+ struct vxd_coreprops *core_props;
+ const struct vdec_pict_rendinfo *disp_pict_rendinfo = NULL;
+ int ret = IMG_ERROR_NOT_SUPPORTED;
+
+ /* Ensure input parameters are valid. */
+ VDEC_ASSERT(dec_ctx_arg);
+ VDEC_ASSERT(str_cfg);
+ VDEC_ASSERT(unsupported);
+
+ if (!dec_ctx_arg || !str_cfg || !unsupported) {
+ pr_err("Invalid parameters!");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (disp_pict_buf)
+ disp_pict_rendinfo = &disp_pict_buf->rend_info;
+
+ /*
+ * For now, validate compatibility between the supplied
+ * configuration/state and the master core only (assumed to have a
+ * superset of features). Some features may not be present on the
+ * slave cores, which might cause poor utilisation of the hardware.
+ */
+ memset(unsupported, 0, sizeof(*unsupported));
+
+ dec_core_ctx = dec_ctx->dec_core_ctx;
+ VDEC_ASSERT(dec_core_ctx);
+
+ core_props = &dec_core_ctx->core_props;
+ VDEC_ASSERT(core_props);
+
+ /* Check that the video standard is supported */
+ switch (str_cfg->vid_std) {
+ case VDEC_STD_H264:
+ if (!decoder_is_supported_by_atleast_onepipe(core_props->h264,
+ core_props->num_pixel_pipes)) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: VIDEO STANDARD (H.264)",
+ str_cfg->user_str_id);
+ unsupported->str_cfg |=
+ VDECDD_UNSUPPORTED_STRCONFIG_STD;
+ }
+
+ if (comseq_hdrinfo &&
+ (comseq_hdrinfo->codec_profile == H264_PROFILE_MVC_HIGH ||
+ comseq_hdrinfo->codec_profile == H264_PROFILE_MVC_STEREO) &&
+ comseq_hdrinfo->num_views > VDEC_H264_MVC_MAX_VIEWS) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[SW]: NUMBER OF VIEWS",
+ str_cfg->user_str_id);
+ unsupported->seq_hdr |= VDECDD_UNSUPPORTED_SEQUHDR_NUM_OF_VIEWS;
+ }
+ break;
+#ifdef HAS_HEVC
+ case VDEC_STD_HEVC:
+ if (!decoder_is_supported_by_atleast_onepipe(core_props->hevc,
+ core_props->num_pixel_pipes)) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: VIDEO STANDARD (HEVC)",
+ str_cfg->user_str_id);
+ unsupported->str_cfg |= VDECDD_UNSUPPORTED_STRCONFIG_STD;
+ }
+ if (pict_hdrinfo && pict_hdrinfo->hevc_pict_hdr_info.range_ext_present &&
+ ((pict_hdrinfo->hevc_pict_hdr_info.is_full_range_ext &&
+ !decoder_is_supported_by_atleast_onepipe(core_props->hevc_range_ext,
+ core_props->num_pixel_pipes)) ||
+ (!pict_hdrinfo->hevc_pict_hdr_info.is_full_range_ext &&
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_format ==
+ PIXEL_FORMAT_420))) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: HEVC RANGE EXTENSIONS",
+ str_cfg->user_str_id);
+ unsupported->pict_hdr |= VDECDD_UNSUPPORTED_PICTHDR_HEVC_RANGE_EXT;
+ }
+ break;
+#endif
+#ifdef HAS_JPEG
+ case VDEC_STD_JPEG:
+ if (!decoder_is_supported_by_atleast_onepipe(core_props->jpeg,
+ core_props->num_pixel_pipes)) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: VIDEO STANDARD (JPEG)",
+ str_cfg->user_str_id);
+ unsupported->str_cfg |=
+ VDECDD_UNSUPPORTED_STRCONFIG_STD;
+ }
+ break;
+#endif
+ default:
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: VIDEO STANDARD (UNKNOWN)",
+ str_cfg->user_str_id);
+ unsupported->str_cfg |=
+ VDECDD_UNSUPPORTED_STRCONFIG_STD;
+ break;
+ }
+
+ if (str_op_cfg) {
+ /*
+ * Ensure that each display feature is supported by the
+ * hardware.
+ */
+ if (comseq_hdrinfo) {
+ /* Validate display pixel format */
+ if (non_cfg_req && prev_comseq_hdrinfo &&
+ vdec_size_nz(prev_comseq_hdrinfo->frame_size) &&
+ prev_comseq_hdrinfo->pixel_info.chroma_fmt_idc ==
+ str_op_cfg->pixel_info.chroma_fmt_idc &&
+ comseq_hdrinfo->pixel_info.chroma_fmt_idc !=
+ prev_comseq_hdrinfo->pixel_info.chroma_fmt_idc) {
+ /*
+ * If this is a non-configuration request and
+ * it looks like a new sequence with
+ * sub-sampling change, just indicate output
+ * format mismatch without any error messages.
+ */
+ unsupported->str_opcfg |= VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ } else {
+ switch (str_op_cfg->pixel_info.chroma_fmt_idc) {
+ case PIXEL_FORMAT_420:
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc ==
+ PIXEL_FORMAT_MONO) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: TRANSFORM PIXEL FORMAT FROM 400 TO 420",
+ str_cfg->user_str_id);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ }
+ break;
+
+ case PIXEL_FORMAT_422:
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc ==
+ PIXEL_FORMAT_420 &&
+ str_op_cfg->pixel_info.num_planes > 1) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: REQUESTED NUMBER OF PLANES FOR 422 UPSAMPLING",
+ str_cfg->user_str_id);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ } else if (comseq_hdrinfo->pixel_info.chroma_fmt_idc ==
+ PIXEL_FORMAT_MONO) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: TRANSFORM PIXEL FORMAT FROM 400 TO 422",
+ str_cfg->user_str_id);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (str_op_cfg->pixel_info.bitdepth_y >
+ core_props->vidstd_props[str_cfg->vid_std].max_luma_bitdepth ||
+ str_op_cfg->pixel_info.bitdepth_y < 8 ||
+ str_op_cfg->pixel_info.bitdepth_y == 9) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: DISPLAY PICTURE LUMA BIT DEPTH %d [RANGE: 8->%d for %s]",
+ str_cfg->user_str_id,
+ str_op_cfg->pixel_info.bitdepth_y,
+ core_props->vidstd_props[str_cfg->vid_std].max_luma_bitdepth,
+ vid_std_names[str_cfg->vid_std]);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ }
+
+ if (str_op_cfg->pixel_info.chroma_fmt_idc !=
+ PIXEL_FORMAT_MONO &&
+ (str_op_cfg->pixel_info.bitdepth_c >
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_bitdepth ||
+ str_op_cfg->pixel_info.bitdepth_c < 8 ||
+ str_op_cfg->pixel_info.bitdepth_c == 9)) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: DISPLAY PICTURE CHROMA BIT DEPTH %d [RANGE: 8->%d for %s]",
+ str_cfg->user_str_id,
+ str_op_cfg->pixel_info.bitdepth_c,
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_bitdepth,
+ vid_std_names[str_cfg->vid_std]);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT;
+ }
+
+#ifdef HAS_JPEG
+ /* Validate display configuration against existing stream configuration.*/
+ if (str_cfg->vid_std == VDEC_STD_JPEG) {
+ if (str_op_cfg->force_oold) {
+ pr_err("[USERSID=0x%08X] UNSUPPORTED[HW]: OOLD WITH JPEG\n",
+ str_cfg->user_str_id);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_X_WITH_JPEG;
+ }
+ }
+#endif
+ }
+
+ if (disp_pict_rendinfo) {
+ unsigned int stride_alignment = VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT;
+
+ if (req_pict_rendinfo) {
+ /*
+ * Picture size declared in buffer must be at least as
+ * large as that required by bitstream/output config.
+ */
+ if (!vdec_size_ge(disp_pict_rendinfo->rend_pict_size,
+ req_pict_rendinfo->rend_pict_size)) {
+ pr_warn("[USERSID=0x%08X] Picture size of output picture buffer [%d x %d] is not large enough for sequence [%d x %d]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->rend_pict_size.width,
+ disp_pict_rendinfo->rend_pict_size.height,
+ req_pict_rendinfo->rend_pict_size.width,
+ req_pict_rendinfo->rend_pict_size.height);
+ unsupported->str_opcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_PICTURE_SIZE;
+ }
+
+ /*
+ * Size of each plane must be at least as large
+ * as that required.
+ */
+ if (disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].size <
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].size) {
+ pr_warn("[USERSID=0x%08X] Y plane of output picture buffer [%d bytes] is not large enough for bitstream/config [%d bytes]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].size,
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].size);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_Y_SIZE;
+ }
+
+ /*
+ * Stride of each plane must be at least as large as that
+ * required.
+ */
+ if (disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride <
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride) {
+ pr_warn("[USERSID=0x%08X] Y stride of output picture buffer [%d bytes] is not large enough for bitstream/config [%d bytes]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride,
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_Y_STRIDE;
+ }
+
+ /*
+ * Size of each plane must be at least
+ * as large as that required.
+ */
+ if (disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].size <
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].size) {
+ pr_warn("[USERSID=0x%08X] UV plane of output picture buffer [%d bytes] is not large enough for bitstream/config [%d bytes]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].size,
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].size);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_UV_SIZE;
+ }
+
+ /*
+ * Stride of each plane must be at least
+ * as large as that required.
+ */
+ if (disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride <
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride) {
+ pr_warn("[USERSID=0x%08X] UV stride of output picture buffer [%d bytes] is not large enough for bitstream/config [%d bytes]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride,
+ req_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_UV_STRIDE;
+ }
+
+ if ((req_pict_rendinfo->stride_alignment &
+ (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT - 1)) != 0) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: STRIDE ALIGNMENT [%d] must be a multiple of %d bytes",
+ str_cfg->user_str_id,
+ req_pict_rendinfo->stride_alignment,
+ VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_64BYTE_STRIDE;
+ }
+
+ if (req_pict_rendinfo->stride_alignment > 0)
+ stride_alignment = req_pict_rendinfo->stride_alignment;
+ }
+
+ if ((disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride %
+ stride_alignment) != 0) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: Y STRIDE [%d] must be a multiple of %d bytes",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_Y].stride,
+ stride_alignment);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_64BYTE_STRIDE;
+ }
+
+ if ((disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride %
+ stride_alignment) != 0) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: UV STRIDE [%d] must be a multiple of %d bytes",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_UV].stride,
+ stride_alignment);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_64BYTE_STRIDE;
+ }
+
+ if ((disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_V].stride %
+ stride_alignment) != 0) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: V STRIDE [%d] must be a multiple of %d bytes",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->plane_info[VDEC_PLANE_VIDEO_V].stride,
+ stride_alignment);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_64BYTE_STRIDE;
+ }
+
+ if (req_pict_rendinfo) {
+ if (str_op_cfg) {
+ if (str_cfg->vid_std != VDEC_STD_JPEG) {
+ /*
+ * V plane only required when chroma is
+ * separated.
+ */
+ if (str_op_cfg->pixel_info.num_planes <= 2)
+ VDEC_ASSERT(req_pict_rendinfo->plane_info
+ [VDEC_PLANE_VIDEO_V].size == 0);
+
+ /* Alpha planes should not be required. */
+ if (str_op_cfg->pixel_info.num_planes <= 3)
+ VDEC_ASSERT(req_pict_rendinfo->plane_info
+ [VDEC_PLANE_VIDEO_A].size == 0);
+ }
+ }
+
+ /* Size of buffer must be at least as large as that required. */
+ if (disp_pict_rendinfo->rendered_size <
+ req_pict_rendinfo->rendered_size) {
+ pr_warn("[USERSID=0x%08X] Output picture buffer [%d bytes] is not large enough for bitstream/config [%d bytes]",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->rendered_size,
+ req_pict_rendinfo->rendered_size);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_BUFFER_SIZE;
+ }
+ }
+
+ if (str_op_cfg) {
+ if (comseq_hdrinfo) {
+ if (vdec_size_lt(disp_pict_rendinfo->rend_pict_size,
+ comseq_hdrinfo->max_frame_size)) {
+ pr_warn("[USERSID=0x%08X] Buffers [%d x %d] must be large enough to contain the maximum frame size [%d x %d] when not scaling",
+ str_cfg->user_str_id,
+ disp_pict_rendinfo->rend_pict_size.width,
+ disp_pict_rendinfo->rend_pict_size.height,
+ comseq_hdrinfo->max_frame_size.width,
+ comseq_hdrinfo->max_frame_size.height);
+ unsupported->op_bufcfg |=
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_PICTURE_SIZE;
+ }
+ }
+ }
+ }
+
+ if (comseq_hdrinfo) {
+ unsigned int max_width =
+ vdec_size_min(core_props->vidstd_props[str_cfg->vid_std].max_width,
+ MAX_PLATFORM_SUPPORTED_WIDTH);
+
+ unsigned int max_height =
+ vdec_size_min(core_props->vidstd_props[str_cfg->vid_std].max_height,
+ MAX_PLATFORM_SUPPORTED_HEIGHT);
+
+ if (comseq_hdrinfo->max_frame_size.width > max_width ||
+ comseq_hdrinfo->max_frame_size.height > max_height) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: FRAME WIDTH %dpx or HEIGHT %dpx are over maximum allowed value [%d, %d]",
+ str_cfg->user_str_id,
+ comseq_hdrinfo->max_frame_size.width,
+ comseq_hdrinfo->max_frame_size.height,
+ max_width, max_height);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_SIZE;
+ }
+
+ if (comseq_hdrinfo->pixel_info.bitdepth_y >
+ core_props->vidstd_props[str_cfg->vid_std].max_luma_bitdepth ||
+ comseq_hdrinfo->pixel_info.bitdepth_y < 8 ||
+ comseq_hdrinfo->pixel_info.bitdepth_y == 9) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE LUMA BIT DEPTH %d [RANGE: 8->%d for %s]",
+ str_cfg->user_str_id,
+ comseq_hdrinfo->pixel_info.bitdepth_y,
+ core_props->vidstd_props[str_cfg->vid_std].max_luma_bitdepth,
+ vid_std_names[str_cfg->vid_std]);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXFORMAT_BIT_DEPTH;
+ }
+
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc !=
+ PIXEL_FORMAT_MONO &&
+ (comseq_hdrinfo->pixel_info.bitdepth_c >
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_bitdepth ||
+ comseq_hdrinfo->pixel_info.bitdepth_c < 8 ||
+ comseq_hdrinfo->pixel_info.bitdepth_c == 9)) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE CHROMA BIT DEPTH %d [RANGE: 8->%d for %s]",
+ str_cfg->user_str_id,
+ comseq_hdrinfo->pixel_info.bitdepth_c,
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_bitdepth,
+ vid_std_names[str_cfg->vid_std]);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXFORMAT_BIT_DEPTH;
+ }
+
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc !=
+ PIXEL_FORMAT_MONO &&
+ comseq_hdrinfo->pixel_info.bitdepth_y !=
+ comseq_hdrinfo->pixel_info.bitdepth_c) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE MIXED BIT DEPTH [%d vs %d]",
+ str_cfg->user_str_id,
+ comseq_hdrinfo->pixel_info.bitdepth_y,
+ comseq_hdrinfo->pixel_info.bitdepth_c);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXFORMAT_BIT_DEPTH;
+ }
+
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc >
+ core_props->vidstd_props[str_cfg->vid_std].max_chroma_format) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PIXEL FORMAT IDC %s [for %s]",
+ str_cfg->user_str_id,
+ comseq_hdrinfo->pixel_info.chroma_fmt_idc <
+ ARRAY_SIZE
+ (pix_fmt_idc_names) ? (unsigned char *)
+ pix_fmt_idc_names[comseq_hdrinfo->pixel_info.chroma_fmt_idc] :
+ (unsigned char *)"Invalid",
+ vid_std_names[str_cfg->vid_std]);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXEL_FORMAT;
+ }
+
+ if (comseq_hdrinfo->pixel_info.chroma_fmt_idc ==
+ PIXEL_FORMAT_INVALID) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[SW]: UNKNOWN CODED PIXEL FORMAT",
+ str_cfg->user_str_id);
+ unsupported->seq_hdr |=
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXEL_FORMAT;
+ }
+ }
+
+ if (pict_hdrinfo && comseq_hdrinfo) {
+ unsigned int coded_cmd_width;
+ unsigned int coded_cmd_height;
+ unsigned int min_width = core_props->vidstd_props[str_cfg->vid_std].min_width;
+ unsigned int min_height =
+ ALIGN(core_props->vidstd_props[str_cfg->vid_std].min_height,
+ (pict_hdrinfo->field) ?
+ 2 * VDEC_MB_DIMENSION : VDEC_MB_DIMENSION);
+ unsigned int pict_size_in_mbs;
+ unsigned int max_height = core_props->vidstd_props[str_cfg->vid_std].max_height;
+ unsigned int max_width = core_props->vidstd_props[str_cfg->vid_std].max_width;
+ unsigned int max_mbs = core_props->vidstd_props[str_cfg->vid_std].max_macroblocks;
+
+#ifdef HAS_JPEG
+ /* For JPEG, max picture size of four plane images is 16k*16k. */
+ if (str_cfg->vid_std == VDEC_STD_JPEG) {
+ if (comseq_hdrinfo->pixel_info.num_planes >= 4) {
+ max_width = (max_width > 16 * 1024) ? 16 * 1024 : max_width;
+ max_height = (max_height > 16 * 1024) ? 16 * 1024 : max_height;
+ }
+ }
+#endif
+
+ coded_cmd_width =
+ ALIGN(pict_hdrinfo->coded_frame_size.width, VDEC_MB_DIMENSION);
+ coded_cmd_height =
+ ALIGN(pict_hdrinfo->coded_frame_size.height,
+ pict_hdrinfo->field ?
+ 2 * VDEC_MB_DIMENSION : VDEC_MB_DIMENSION);
+
+ pict_size_in_mbs = (coded_cmd_width * coded_cmd_height) /
+ (VDEC_MB_DIMENSION * VDEC_MB_DIMENSION);
+
+ if ((str_cfg->vid_std == VDEC_STD_H264 &&
+ max_mbs && pict_size_in_mbs > max_mbs) ||
+ coded_cmd_width > max_width ||
+ coded_cmd_height > max_height) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE SIZE %d x %d [MAX: %d x %d or %d MBs]",
+ str_cfg->user_str_id,
+ coded_cmd_width, coded_cmd_height,
+ max_width, max_height, max_mbs);
+ unsupported->pict_hdr |= VDECDD_UNSUPPORTED_PICTHDR_RESOLUTION;
+ }
+
+ if (pict_hdrinfo->coded_frame_size.width < min_width ||
+ pict_hdrinfo->coded_frame_size.height < min_height) {
+#ifdef USE_STRICT_MIN_PIC_SIZE_CHECK
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE SIZE %d x %d [MIN: %d x %d]",
+ str_cfg->user_str_id,
+ pict_hdrinfo->coded_frame_size.width,
+ pict_hdrinfo->coded_frame_size.height,
+ min_width, min_height);
+ unsupported->pict_hdr |= VDECDD_UNSUPPORTED_PICTHDR_RESOLUTION;
+#else /* ndef USE_STRICT_MIN_PIC_SIZE_CHECK */
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: CODED PICTURE SIZE %d x %d [MIN: %d x %d]",
+ str_cfg->user_str_id,
+ pict_hdrinfo->coded_frame_size.width,
+ pict_hdrinfo->coded_frame_size.height,
+ min_width, min_height);
+#endif /* ndef USE_STRICT_MIN_PIC_SIZE_CHECK */
+ }
+
+ if (pict_hdrinfo->pict_sgm_data.id !=
+ BSPP_INVALID && pict_hdrinfo->coded_frame_size.width > 1280) {
+ pr_warn("[USERSID=0x%08X] UNSUPPORTED[HW]: SGM & coded frame width > 1280",
+ str_cfg->user_str_id);
+ unsupported->pict_hdr |=
+ VDECDD_UNSUPPORTED_PICTHDR_OVERSIZED_SGM;
+ }
+
+ if (pict_hdrinfo->discontinuous_mbs)
+ pr_info("Stream has Discontinuous Macroblocks");
+
+ decoder_get_required_core_features(str_cfg, str_op_cfg, features);
+ }
+
+ if (unsupported->str_cfg == 0 && unsupported->str_opcfg == 0 &&
+ unsupported->op_bufcfg == 0 && unsupported->pict_hdr == 0)
+ ret = IMG_SUCCESS;
+
+ return ret;
+}
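The stride validation above combines two tests: the stream-requested alignment must itself be a multiple of the hardware default, and every plane stride must then be a multiple of whichever alignment is in force. A minimal standalone sketch of that pattern follows; the helper name check_plane_stride and the DEFAULT_ALIGN value are illustrative stand-ins, not part of this driver.

#define DEFAULT_ALIGN 64	/* assumed stand-in for VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT */

/*
 * Returns nonzero when a plane stride satisfies both the hardware
 * default alignment and any larger alignment requested by the stream.
 */
static int check_plane_stride(unsigned int stride, unsigned int req_align)
{
	unsigned int align = DEFAULT_ALIGN;

	/* A requested alignment must itself be a multiple of the default. */
	if (req_align & (DEFAULT_ALIGN - 1))
		return 0;

	if (req_align > 0)
		align = req_align;

	return (stride % align) == 0;
}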
+
+/*
+ * @Function decoder_picture_decoded
+ */
+static int decoder_picture_decoded(struct dec_str_ctx *dec_str_ctx,
+ struct dec_core_ctx *dec_core_ctx,
+ struct vdecdd_picture *picture,
+ struct dec_decpict *dec_pict,
+ struct bspp_pict_hdr_info *pict_hdrinfo,
+ struct vdecdd_str_unit *str_unit)
+{
+ struct dec_fwmsg *first_fld_fwmsg;
+ struct dec_fwmsg *second_fld_fwmsg;
+ struct dec_pictref_res *pict_ref_res;
+ unsigned int transaction_id;
+ struct dec_decoded_pict *decoded_pict;
+ struct dec_decoded_pict *next_decoded_pict;
+ struct vdecdd_ddbuf_mapinfo *pict_buf;
+ struct dec_decoded_pict *prev_decoded_pict;
+ struct vdecfw_buffer_control *buf_control;
+ struct vdec_comsequ_hdrinfo *comseq_hdrinfo;
+ unsigned int res_limit = 0;
+ unsigned int dec_pict_num = 0;
+ unsigned int req_pict_num = 0;
+ struct dec_decoded_pict *aux_decoded_pict;
+ struct dec_decoded_pict *displayed_decoded_pict = NULL;
+ int ret;
+ unsigned int pict_id;
+ struct vdec_pict_tag_container *fld_tag_container;
+#ifdef ERROR_CONCEALMENT
+ unsigned int first_field_err_level = 0;
+ unsigned int second_field_err_level = 0;
+ unsigned int pict_last_mb = 0;
+#endif
+ struct vxd_dec_ctx *ctx;
+ unsigned int error_flag = 0;
+
+ VDEC_ASSERT(dec_str_ctx);
+ VDEC_ASSERT(str_unit);
+ VDEC_ASSERT(dec_pict);
+
+ first_fld_fwmsg = dec_pict->first_fld_fwmsg;
+ second_fld_fwmsg = dec_pict->second_fld_fwmsg;
+ pict_ref_res = dec_pict->pict_ref_res;
+ transaction_id = dec_pict->transaction_id;
+
+ VDEC_ASSERT(picture);
+ pict_buf = picture->disp_pict_buf.pict_buf;
+ VDEC_ASSERT(pict_buf);
+ comseq_hdrinfo = &pict_buf->ddstr_context->comseq_hdr_info;
+
+ /* Create a container for decoded picture. */
+ decoded_pict = kzalloc(sizeof(*decoded_pict), GFP_KERNEL);
+ VDEC_ASSERT(decoded_pict);
+ if (!decoded_pict)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ decoded_pict->pict = picture;
+ decoded_pict->first_fld_fwmsg = first_fld_fwmsg;
+ decoded_pict->second_fld_fwmsg = second_fld_fwmsg;
+ decoded_pict->pict_ref_res = pict_ref_res;
+ decoded_pict->transaction_id = transaction_id;
+
+ /* Populate the decoded picture information structure. */
+ picture->dec_pict_info->pict_state = VDEC_PICT_STATE_DECODED;
+
+ memcpy(&picture->dec_pict_info->first_fld_tag_container.pict_hwcrc,
+ &first_fld_fwmsg->pict_hwcrc,
+ sizeof(picture->dec_pict_info->first_fld_tag_container.pict_hwcrc));
+
+ memcpy(&picture->dec_pict_info->second_fld_tag_container.pict_hwcrc,
+ &second_fld_fwmsg->pict_hwcrc,
+ sizeof(picture->dec_pict_info->second_fld_tag_container.pict_hwcrc));
+
+ buf_control =
+ (struct vdecfw_buffer_control *)decoded_pict->pict_ref_res->fw_ctrlbuf.cpu_virt;
+ if (buf_control->second_field_of_pair) {
+ /* Search for the first field and fill the second_fld_tag_container */
+ unsigned int prev_dec_pict_id =
+ get_prev_picture_id(GET_STREAM_PICTURE_ID(decoded_pict->transaction_id));
+ prev_decoded_pict =
+ decoder_get_decoded_pict_of_stream(prev_dec_pict_id,
+ &dec_str_ctx->str_decd_pict_list);
+
+ if (prev_decoded_pict) {
+ memcpy(&picture->dec_pict_info->second_fld_tag_container.pict_hwcrc,
+ &prev_decoded_pict->first_fld_fwmsg->pict_hwcrc,
+ sizeof
+ (picture->dec_pict_info->second_fld_tag_container.pict_hwcrc));
+ } else {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Failed to find decoded picture to attach second_fld_tag_container",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ }
+ prev_decoded_pict = NULL;
+ }
+
+ /* Report any issues in decoding */
+ if (decoded_pict->pict->dec_pict_info->err_flags)
+ pr_warn("[USERSID=0x%08X] [PID=0x%08X] BSPP reported errors [flags: 0x%08X]",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->pict->pict_id,
+ decoded_pict->pict->dec_pict_info->err_flags);
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_ENTDECERROR)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_ENTDECERROR))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Front-end HW processing terminated prematurely due to an error.",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_FEHW_DECODE;
+ }
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_SRERROR)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_SRERROR))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] HW Shift Register access returned an error during FEHW parsing.",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_SR_ERROR;
+ }
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_HWWDT)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_FEERROR_HWWDT))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Front-end HW processing timed-out.",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_FEHW_TIMEOUT;
+ }
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MISSING_REFERENCES)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MISSING_REFERENCES))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] There are missing references for the current frame. May have corruption",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ /*
+ * This is not a serious error; indicate to the host app that the
+ * frame should be dropped, as it may be corrupted.
+ */
+ picture->dec_pict_info->err_flags |=
+ VDEC_ERROR_MISSING_REFERENCES;
+ }
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MMCO_ERROR)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MMCO_ERROR))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] MMCO error accured when processing the current frame. May have corruption",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+
+ /*
+ * This is not a serious error; indicate to the host app that the
+ * frame should be dropped, as it may be corrupted.
+ */
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_MMCO;
+ }
+
+ if ((decoded_pict->first_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MBS_DROPPED_ERROR)) ||
+ (decoded_pict->second_fld_fwmsg->pict_attrs.fe_err &
+ FLAG_MASK(VDECFW_MSGFLAG_DECODED_MBS_DROPPED_ERROR))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Some macroblocks were dropped when processing the current frame. May have corruption",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+
+ /*
+ * This is not a serious error; indicate to the host app that the
+ * frame should be dropped, as it may be corrupted.
+ */
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_MBS_DROPPED;
+ }
+
+ if (decoded_pict->first_fld_fwmsg->pict_attrs.no_be_wdt > 0) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Back-end HW processing timed-out. Aborted slices %d",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id,
+ decoded_pict->first_fld_fwmsg->pict_attrs.no_be_wdt);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_BEHW_TIMEOUT;
+ }
+
+ if (decoded_pict->second_fld_fwmsg->pict_attrs.no_be_wdt > 0) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Back-end HW processing timed-out. Aborted slices %d",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id,
+ decoded_pict->second_fld_fwmsg->pict_attrs.no_be_wdt);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_BEHW_TIMEOUT;
+ }
+
+#ifdef ERROR_CONCEALMENT
+ /* Estimate error level in percentage */
+ if (decoder_get_pict_processing_info(dec_core_ctx, dec_str_ctx, pict_hdrinfo,
+ decoded_pict, dec_pict, &pict_last_mb) == TRUE) {
+ if (pict_last_mb) {
+ first_field_err_level = 100 - ((100 * (pict_last_mb -
+ decoded_pict->first_fld_fwmsg->pict_attrs.mbs_dropped +
+ decoded_pict->first_fld_fwmsg->pict_attrs.mbs_recovered)) /
+ pict_last_mb);
+
+ second_field_err_level = 100 - ((100 * (pict_last_mb -
+ decoded_pict->second_fld_fwmsg->pict_attrs.mbs_dropped +
+ decoded_pict->second_fld_fwmsg->pict_attrs.mbs_recovered)) /
+ pict_last_mb);
+ }
+
+ /* does not work properly with discontinuous mbs */
+ if (!pict_hdrinfo->discontinuous_mbs)
+ picture->dec_pict_info->err_level = first_field_err_level >
+ second_field_err_level ?
+ first_field_err_level : second_field_err_level;
+
+ VDEC_ASSERT(picture->dec_pict_info->err_level <= 100);
+ if (picture->dec_pict_info->err_level)
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Picture error level: %d(%%)",
+ dec_str_ctx->config.user_str_id, decoded_pict->transaction_id,
+ picture->dec_pict_info->err_level);
+ }
+#endif
+
+ if (decoded_pict->first_fld_fwmsg->pict_attrs.pict_attrs.dwrfired ||
+ decoded_pict->second_fld_fwmsg->pict_attrs.pict_attrs.dwrfired) {
+ pr_warn("[USERSID=0x%08X] VXD Device Reset (Lockup).",
+ dec_str_ctx->config.user_str_id);
+ picture->dec_pict_info->err_flags |=
+ VDEC_ERROR_SERVICE_TIMER_EXPIRY;
+ }
+
+ if (decoded_pict->first_fld_fwmsg->pict_attrs.pict_attrs.mmufault ||
+ decoded_pict->second_fld_fwmsg->pict_attrs.pict_attrs.mmufault) {
+ pr_warn("[USERSID=0x%08X] VXD Device Reset (MMU fault).",
+ dec_str_ctx->config.user_str_id);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_MMU_FAULT;
+ }
+
+ if (decoded_pict->first_fld_fwmsg->pict_attrs.pict_attrs.deverror ||
+ decoded_pict->second_fld_fwmsg->pict_attrs.pict_attrs.deverror) {
+ pr_warn("[USERSID=0x%08X] VXD Device Error (e.g. firmware load failed).",
+ dec_str_ctx->config.user_str_id);
+ picture->dec_pict_info->err_flags |= VDEC_ERROR_DEVICE;
+ }
+
+ /*
+ * Record the decoder error flags so that they can drive error
+ * recovery below.
+ */
+ error_flag = picture->dec_pict_info->err_flags;
+ /*
+ * Loop over references, for each one find the related picture
+ * on the decPictList, and propagate errors if needed
+ */
+ ret =
+ decoder_check_ref_errors(dec_str_ctx, (struct vdecfw_buffer_control *)
+ decoded_pict->pict_ref_res->fw_ctrlbuf.cpu_virt,
+ picture);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+
+ if (dec_str_ctx->config.vid_std == VDEC_STD_H264) {
+ /* Attach the supplementary data to the decoded picture. */
+ picture->dec_pict_sup_data.raw_vui_data =
+ pict_hdrinfo->h264_pict_hdr_info.raw_vui_data;
+ pict_hdrinfo->h264_pict_hdr_info.raw_vui_data = NULL;
+
+ picture->dec_pict_sup_data.raw_sei_list_first_fld =
+ pict_hdrinfo->h264_pict_hdr_info.raw_sei_data_list_first_field;
+ pict_hdrinfo->h264_pict_hdr_info.raw_sei_data_list_first_field = NULL;
+
+ picture->dec_pict_sup_data.raw_sei_list_second_fld =
+ pict_hdrinfo->h264_pict_hdr_info.raw_sei_data_list_second_field;
+ pict_hdrinfo->h264_pict_hdr_info.raw_sei_data_list_second_field = NULL;
+
+ picture->dec_pict_sup_data.h264_pict_supl_data.nal_ref_idc =
+ pict_hdrinfo->h264_pict_hdr_info.nal_ref_idc;
+
+ picture->dec_pict_sup_data.h264_pict_supl_data.frame_num =
+ pict_hdrinfo->h264_pict_hdr_info.frame_num;
+ }
+
+#ifdef HAS_HEVC
+ if (dec_str_ctx->config.vid_std == VDEC_STD_HEVC) {
+ /* Attach the supplementary data to the decoded picture. */
+ picture->dec_pict_sup_data.raw_vui_data =
+ pict_hdrinfo->hevc_pict_hdr_info.raw_vui_data;
+
+ pict_hdrinfo->hevc_pict_hdr_info.raw_vui_data = NULL;
+
+ picture->dec_pict_sup_data.raw_sei_list_first_fld =
+ pict_hdrinfo->hevc_pict_hdr_info.raw_sei_datalist_firstfield;
+
+ pict_hdrinfo->hevc_pict_hdr_info.raw_sei_datalist_firstfield = NULL;
+
+ picture->dec_pict_sup_data.raw_sei_list_second_fld =
+ pict_hdrinfo->hevc_pict_hdr_info.raw_sei_datalist_secondfield;
+
+ pict_hdrinfo->hevc_pict_hdr_info.raw_sei_datalist_secondfield = NULL;
+
+ picture->dec_pict_sup_data.hevc_pict_supl_data.pic_order_cnt =
+ buf_control->hevc_data.pic_order_count;
+ }
+#endif
+
+ if (!((buf_control->dec_pict_type == IMG_BUFFERTYPE_PAIR &&
+ VDECFW_PICMGMT_FIELD_CODED_PICTURE_EXECUTED(buf_control->picmgmt_flags)) ||
+ FLAG_IS_SET(buf_control->picmgmt_flags, VDECFW_PICMGMTFLAG_PICTURE_EXECUTED))) {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Picture management was not executed for this picture; forcing display.",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ decoded_pict->force_display = TRUE;
+ }
+
+ dec_str_ctx->dec_str_st.total_pict_finished++;
+
+ /*
+ * Use next_pict_id_expected for this check. next_dec_pict_id could
+ * differ from the expected value at this point because we failed to
+ * process a picture the last time this function ran; that is still
+ * an error (unless doing multi-core) but not the one reported here.
+ */
+ if (picture->pict_id != dec_str_ctx->next_pict_id_expected) {
+ pr_warn("[USERSID=0x%08X] ERROR: MISSING DECODED PICTURE (%d)",
+ dec_str_ctx->config.user_str_id,
+ dec_str_ctx->next_dec_pict_id);
+ }
+
+ dec_str_ctx->next_dec_pict_id =
+ get_next_picture_id(GET_STREAM_PICTURE_ID(decoded_pict->transaction_id));
+ dec_str_ctx->next_pict_id_expected = dec_str_ctx->next_dec_pict_id;
+
+ /* Add the picture itself to the decoded list */
+ next_decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ while (next_decoded_pict &&
+ !HAS_X_REACHED_Y(GET_STREAM_PICTURE_ID(next_decoded_pict->transaction_id),
+ picture->pict_id,
+ 1 << FWIF_NUMBITS_STREAM_PICTURE_ID, unsigned int)) {
+ if (next_decoded_pict !=
+ dq_last(&dec_str_ctx->str_decd_pict_list))
+ next_decoded_pict = dq_next(next_decoded_pict);
+ else
+ next_decoded_pict = NULL;
+ }
+
+ if (next_decoded_pict)
+ dq_addbefore(next_decoded_pict, decoded_pict);
+ else
+ dq_addtail(&dec_str_ctx->str_decd_pict_list, decoded_pict);
+
+ dec_str_ctx->dec_str_st.num_pict_decoded++;
+
+ pr_debug("%s : number of picture decoded = %d\n"
+ , __func__, dec_str_ctx->dec_str_st.num_pict_decoded);
+ /* Process the decoded pictures in the encoded order */
+ decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ VDEC_ASSERT(decoded_pict);
+ if (!decoded_pict)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ ret = dec_str_ctx->str_processed_cb((void *)dec_str_ctx->usr_int_data,
+ VXD_CB_PICT_DECODED, (void *)picture);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ /*
+ * Loop over the unprocessed pictures until we fail to process one
+ * or have processed them all
+ */
+ for (next_decoded_pict = decoder_next_picture(decoded_pict,
+ dec_str_ctx->next_dec_pict_id,
+ &dec_str_ctx->str_decd_pict_list);
+ next_decoded_pict;
+ next_decoded_pict = decoder_next_picture(decoded_pict,
+ dec_str_ctx->next_dec_pict_id,
+ &dec_str_ctx->str_decd_pict_list)) {
+ unsigned int i = 0;
+ struct dec_decoded_pict *display_pict = NULL;
+ struct dec_decoded_pict *release_pict = NULL;
+ unsigned char last_to_display_for_seq = FALSE;
+
+ /*
+ * Track the current picture in decoded_pict so that its
+ * process_failed flag can be cleared before returning
+ */
+ decoded_pict = next_decoded_pict;
+ if (!decoded_pict->force_display) {
+ struct vdecfw_buffer_control *buf_ctrl = NULL;
+
+ buf_ctrl = (struct vdecfw_buffer_control *)
+ decoded_pict->pict_ref_res->fw_ctrlbuf.cpu_virt;
+
+ if (buf_ctrl->real_data.width && buf_ctrl->real_data.height) {
+ /*
+ * Firmware sets image size as it is in
+ * bitstream.
+ */
+ picture->dec_pict_info->disp_info.disp_region.width =
+ buf_ctrl->real_data.width;
+ picture->dec_pict_info->disp_info.disp_region.height =
+ buf_ctrl->real_data.height;
+ picture->dec_pict_info->disp_info.disp_region.top_offset = 0;
+ picture->dec_pict_info->disp_info.disp_region.left_offset = 0;
+
+ picture->dec_pict_info->rend_info.rend_pict_size.width =
+ picture->dec_pict_info->disp_info.disp_region.width;
+ picture->dec_pict_info->rend_info.rend_pict_size.height =
+ picture->dec_pict_info->disp_info.disp_region.height;
+
+ /*
+ * Update the encoded size with the values coded in the
+ * bitstream, so the golden image can be loaded
+ * correctly
+ */
+ picture->dec_pict_info->disp_info.enc_disp_region.width =
+ buf_ctrl->real_data.width;
+ picture->dec_pict_info->disp_info.enc_disp_region.height =
+ buf_ctrl->real_data.height;
+ }
+
+ decoded_pict->pict->dec_pict_info->timestamp =
+ buf_ctrl->real_data.timestamp;
+ decoded_pict->pict->dec_pict_info->disp_info.top_fld_first =
+ buf_ctrl->top_field_first;
+
+ decoded_pict->pict->dec_pict_info->id_for_hwcrc_chk =
+ GET_STREAM_PICTURE_ID(decoded_pict->transaction_id) - 1;
+ decoded_pict->pict->dec_pict_info->id_for_hwcrc_chk +=
+ dec_str_ctx->dec_str_st.flds_as_frm_decodes;
+
+ if (buf_ctrl->dec_pict_type == IMG_BUFFERTYPE_PAIR &&
+ !buf_ctrl->second_field_of_pair)
+ dec_str_ctx->dec_str_st.flds_as_frm_decodes++;
+
+ if (buf_ctrl->second_field_of_pair) {
+ /*
+ * The second field of a pair is always the
+ * complementary type to the eFirstPictTagType of
+ * the previous picture
+ */
+ unsigned int prev_dec_pict_id =
+ get_prev_picture_id(GET_STREAM_PICTURE_ID(decoded_pict->transaction_id));
+
+ prev_decoded_pict =
+ decoder_get_decoded_pict_of_stream
+ (prev_dec_pict_id,
+ &dec_str_ctx->str_decd_pict_list);
+ if (prev_decoded_pict) {
+ fld_tag_container =
+ &prev_decoded_pict->pict->dec_pict_info->second_fld_tag_container;
+ fld_tag_container->pict_tag_param =
+ decoded_pict->pict->dec_pict_info->first_fld_tag_container.pict_tag_param;
+
+ /*
+ * Copy the first field info in the
+ * proper place
+ */
+ memcpy(&fld_tag_container->pict_hwcrc,
+ &first_fld_fwmsg->pict_hwcrc,
+ sizeof(fld_tag_container->pict_hwcrc));
+
+ /*
+ * Attach the raw SEI data list for a
+ * second field to a picture.
+ */
+ prev_decoded_pict->pict->dec_pict_sup_data.raw_sei_list_second_fld =
+ decoded_pict->pict->dec_pict_sup_data.raw_sei_list_first_fld;
+
+ prev_decoded_pict->pict->dec_pict_info->disp_info.top_fld_first =
+ buf_ctrl->top_field_first;
+
+ /* Mark this picture as merged fields. */
+ prev_decoded_pict->pict->dec_pict_sup_data.merged_flds =
+ TRUE;
+ /* Mark the picture that was merged to the previous one. */
+ decoded_pict->merged = TRUE;
+ } else {
+ pr_warn("[USERSID=0x%08X] [TID 0x%08X] Failed to find decoded picture to attach tag",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+ }
+ } else {
+ /*
+ * A picture that is not the second field of a
+ * pair correlates its tag to its type by
+ * setting eFirstPictTagType in the
+ * following way
+ */
+ decoded_pict->pict->dec_pict_info->first_fld_tag_container.pict_type
+ =
+ buf_ctrl->dec_pict_type;
+ memcpy(&picture->dec_pict_info->first_fld_tag_container.pict_hwcrc,
+ &first_fld_fwmsg->pict_hwcrc,
+ sizeof
+ (picture->dec_pict_info->first_fld_tag_container.pict_hwcrc));
+ }
+
+ /*
+ * Update the id of the next picture to process. It
+ * must always be updated (even if we fail to process)
+ * because it is passed both to decoder_next_picture
+ * (and to DECODER_NextDecPictContiguous inside it)
+ * and to the corner case check below
+ */
+ dec_str_ctx->next_dec_pict_id =
+ get_next_picture_id(GET_STREAM_PICTURE_ID
+ (decoded_pict->transaction_id));
+ /*
+ * Display all the pictures in the list that have been
+ * decoded and signalled by the fw to be displayed
+ */
+ for (i = decoded_pict->disp_idx;
+ i < buf_ctrl->display_list_length &&
+ !decoded_pict->process_failed;
+ i++, decoded_pict->disp_idx++) {
+ /*
+ * Display picture if it has been decoded
+ * (i.e. in decoded list).
+ */
+ display_pict = decoder_get_decoded_pict
+ (buf_ctrl->display_list[i],
+ &dec_str_ctx->str_decd_pict_list);
+ if (display_pict) {
+ if (FLAG_IS_SET(buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED) &&
+ (!FLAG_IS_SET
+ (buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_SINGLE_FIELD))) {
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_PAIR;
+ if (FLAG_IS_SET
+ (buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_INTERLACED_FIELDS))
+ display_pict->pict->dec_pict_info->interlaced_flds =
+ TRUE;
+ } else if (FLAG_IS_SET
+ (buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED) &&
+ FLAG_IS_SET
+ (buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_SINGLE_FIELD)) {
+ display_pict->pict->dec_pict_info->buf_type =
+ FLAG_IS_SET
+ (buf_ctrl->display_flags[i],
+ VDECFW_BUFFLAG_DISPLAY_BOTTOM_FIELD) ?
+ IMG_BUFFERTYPE_FIELD_BOTTOM :
+ IMG_BUFFERTYPE_FIELD_TOP;
+ } else {
+ display_pict->pict->dec_pict_info->buf_type =
+ IMG_BUFFERTYPE_FRAME;
+ }
+
+ display_pict->pict->dec_pict_info->view_id =
+ buf_ctrl->display_view_ids[i];
+
+ /*
+ * When no reference pictures are left to
+ * display and this is the last display
+ * picture in response to the last decoded
+ * picture, flag it as the last to display
+ * for the sequence.
+ */
+ if (decoded_pict->pict->last_pict_in_seq &&
+ i == (buf_ctrl->display_list_length - 1))
+ last_to_display_for_seq = TRUE;
+
+ if (!display_pict->displayed) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] DISPLAY",
+ dec_str_ctx->config.user_str_id,
+ buf_ctrl->display_list[i]);
+#endif
+ display_pict->displayed = TRUE;
+ pict_id = GET_STREAM_PICTURE_ID
+ (buf_ctrl->display_list[i]);
+
+ ret = decoder_picture_display
+ (dec_str_ctx, pict_id,
+ last_to_display_for_seq);
+ }
+ } else {
+ /*
+ * In a single-core scenario we should
+ * never get here.
+ */
+ pr_warn("[USERSID=0x%08X] Failed to find decoded picture [TID = 0x%08X] to send for display",
+ dec_str_ctx->config.user_str_id,
+ buf_ctrl->display_list[i]);
+ }
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Release all unused pictures (firmware request) */
+ for (i = decoded_pict->rel_idx;
+ i < buf_ctrl->release_list_length &&
+ !decoded_pict->process_failed;
+ i++, decoded_pict->rel_idx++) {
+ release_pict = decoder_get_decoded_pict
+ (buf_ctrl->release_list[i],
+ &dec_str_ctx->str_decd_pict_list);
+ if (release_pict) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] RELEASE( ): PIC_ID[%d]",
+ dec_str_ctx->config.user_str_id,
+ release_pict->pict->pict_id);
+#endif
+ /*
+ * Signal releasing this picture to upper
+ * layers.
+ */
+ decoder_picture_release(dec_str_ctx,
+ GET_STREAM_PICTURE_ID
+ (buf_ctrl->release_list[i]),
+ release_pict->displayed,
+ release_pict->merged);
+ if (release_pict->processed) {
+ /*
+ * If the decoded picture has been
+ * processed, destroy now.
+ */
+ ret = decoder_decoded_picture_destroy(dec_str_ctx,
+ release_pict,
+ FALSE);
+ } else {
+ /*
+ * If the decoded picture is not
+ * processed just destroy the
+ * containing picture.
+ */
+ pict_id = GET_STREAM_PICTURE_ID
+ (buf_ctrl->release_list[i]);
+ ret = decoder_picture_destroy(dec_str_ctx,
+ pict_id, FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ release_pict->pict = NULL;
+ }
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ } else {
+ /*
+ * In a single-core scenario we should never
+ * get here.
+ */
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] Failed to find decoded picture [TID = 0x%08X] to release",
+ dec_str_ctx->config.user_str_id,
+ buf_ctrl->release_list[i]);
+#endif
+ }
+ }
+ } else {
+ /* Always display the picture if we have no hardware */
+ if (!decoded_pict->displayed) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] DISPLAY",
+ dec_str_ctx->config.user_str_id,
+ decoded_pict->transaction_id);
+#endif
+ decoded_pict->displayed = TRUE;
+ ret = decoder_picture_display
+ (dec_str_ctx,
+ decoded_pict->pict->pict_id,
+ decoded_pict->pict->last_pict_in_seq);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /* Always release the picture if we have no hardware */
+ ret = decoder_picture_destroy(dec_str_ctx,
+ decoded_pict->pict->pict_id,
+ FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decoded_pict->pict = NULL;
+ }
+
+ /* If we have processed the current picture */
+ if (!decoded_pict->process_failed) {
+ decoded_pict->processed = TRUE;
+
+ /*
+ * If the current picture has been released then
+ * remove the container from the decoded list
+ */
+ if (!decoded_pict->pict) {
+ /*
+ * Only destroy the decoded picture once it is processed
+ * and the fw has instructed to release the picture.
+ */
+ ret = decoder_decoded_picture_destroy(dec_str_ctx,
+ decoded_pict, FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ decoded_pict = NULL;
+ } /* end if (decoded_pict->pict == NULL) */
+ } /* end if (!decoded_pict->process_failed) */
+ } /* end for */
+
+ /*
+ * Always clear the process_failed flag to ensure that this picture
+ * will be processed on the next function call
+ */
+ if (decoded_pict)
+ decoded_pict->process_failed = FALSE;
+
+ /*
+ * Go through the list of decoded pictures and check whether any
+ * pictures are still waiting to be displayed because of picture
+ * management errors.
+ * First get the minimum required number of picture buffers.
+ */
+ vdecddutils_ref_pict_get_maxnum(&dec_str_ctx->config,
+ comseq_hdrinfo, &req_pict_num);
+ req_pict_num += comseq_hdrinfo->interlaced_frames ? 2 : 1;
+
+ ret = dec_str_ctx->core_query_cb(dec_str_ctx->usr_int_data,
+ DECODER_CORE_GET_RES_LIMIT,
+ &res_limit);
+
+ /* Start the procedure only if enough resources are available. */
+ if (res_limit >= req_pict_num) {
+ /* Allow for one picture buffer for display. */
+ res_limit--;
+
+ /*
+ * Count the number of decoded pictures that were not
+ * displayed yet.
+ */
+ aux_decoded_pict = dq_first(&dec_str_ctx->str_decd_pict_list);
+ while (aux_decoded_pict) {
+ if (aux_decoded_pict->pict) {
+ dec_pict_num++;
+ if (!displayed_decoded_pict)
+ displayed_decoded_pict =
+ aux_decoded_pict;
+ }
+ if (aux_decoded_pict !=
+ dq_last(&dec_str_ctx->str_decd_pict_list))
+ aux_decoded_pict = dq_next(aux_decoded_pict);
+ else
+ aux_decoded_pict = NULL;
+ }
+ }
+
+ /* If there is at least one undisplayed picture... */
+ if (displayed_decoded_pict) {
+ /*
+ * While the number of undisplayed decoded pictures exceeds
+ * the maximum number of pictures that VDEC is allowed to
+ * hold...
+ */
+ while (dec_pict_num > res_limit) {
+ pr_warn("[USERSID=0x%08X] Number of outstanding decoded pictures exceeded number of available pictures buffers.",
+ dec_str_ctx->config.user_str_id);
+
+ if (!displayed_decoded_pict) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+ /* Find the picture with the least picture id. */
+ aux_decoded_pict = dq_next(displayed_decoded_pict);
+ while (aux_decoded_pict) {
+ if (aux_decoded_pict !=
+ dq_last(&dec_str_ctx->str_decd_pict_list)) {
+ if (aux_decoded_pict->pict &&
+ aux_decoded_pict->pict->pict_id <
+ displayed_decoded_pict->pict->pict_id)
+ displayed_decoded_pict = aux_decoded_pict;
+
+ aux_decoded_pict = dq_next(aux_decoded_pict);
+ } else {
+ if (aux_decoded_pict->pict &&
+ aux_decoded_pict->pict->pict_id <
+ displayed_decoded_pict->pict->pict_id)
+ displayed_decoded_pict = aux_decoded_pict;
+
+ aux_decoded_pict = NULL;
+ }
+ }
+
+ /* Display and release the picture with the least picture id. */
+ if (!displayed_decoded_pict->displayed) {
+ pr_warn("[USERSID=0x%08X] [TID=0x%08X] DISPLAY FORCED",
+ dec_str_ctx->config.user_str_id,
+ displayed_decoded_pict->transaction_id);
+ displayed_decoded_pict->displayed = TRUE;
+ ret = decoder_picture_display
+ (dec_str_ctx,
+ displayed_decoded_pict->pict->pict_id,
+ displayed_decoded_pict->pict->last_pict_in_seq);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ ret = decoder_picture_destroy(dec_str_ctx,
+ displayed_decoded_pict->pict->pict_id,
+ FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ displayed_decoded_pict->pict = NULL;
+ displayed_decoded_pict->processed = TRUE;
+
+ ret = decoder_decoded_picture_destroy(dec_str_ctx, displayed_decoded_pict,
+ FALSE);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ displayed_decoded_pict = NULL;
+
+ /*
+ * Decrease the number of not displayed decoded
+ * pictures.
+ */
+ dec_pict_num--;
+ }
+ }
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ /*
+ * This part of the code should execute only when the
+ * DEBUG_FW_ERR_RECOVERY flag is enabled. It reads the error flag
+ * attribute from user space to inject fake errors for testing the
+ * firmware error recovery.
+ */
+ if (fw_error_value != VDEC_ERROR_MAX) {
+ error_flag |= (1 << fw_error_value);
+ /* Reset it back to VDEC_ERROR_MAX. */
+ fw_error_value = VDEC_ERROR_MAX;
+ }
+#endif
+
+ /*
+ * Whenever the error flag is set, handle the error case by
+ * forwarding the error to the stream-processed callback.
+ */
+ if (error_flag) {
+ pr_err("%s : %d err_flags: 0x%x\n", __func__, __LINE__, error_flag);
+ ret = dec_str_ctx->str_processed_cb((void *)dec_str_ctx->usr_int_data,
+ VXD_CB_ERROR_FATAL, &error_flag);
+ }
+ /*
+ * Check for EOS on the bitstream and propagate it to the picture
+ * buffer
+ */
+ ctx = dec_str_ctx->vxd_dec_ctx;
+ ctx->num_decoding--;
+ if (ctx->eos) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("EOS reached\n");
+#endif
+ ret = dec_str_ctx->str_processed_cb((void *)dec_str_ctx->usr_int_data,
+ VXD_CB_STR_END, NULL);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ }
+
+ return ret;
+}
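For reference, the error-level estimate in the ERROR_CONCEALMENT block above expresses the macroblocks that were neither kept nor recovered as a percentage of the whole picture. A hedged restatement of that arithmetic as a standalone helper (the name err_level_percent is illustrative, and a guard is added for the degenerate cases):

/*
 * Estimate the error level of one field in percent: macroblocks that
 * were dropped and not recovered, as a share of the whole picture.
 */
static unsigned int err_level_percent(unsigned int total_mbs,
				      unsigned int dropped,
				      unsigned int recovered)
{
	unsigned int kept = total_mbs - dropped + recovered;

	/* Guard against division by zero and unsigned wrap-around. */
	if (!total_mbs || kept > total_mbs)
		return 0;

	return 100 - ((100 * kept) / total_mbs);
}

With pict_last_mb as the total, this mirrors the first_field_err_level and second_field_err_level computations; the larger of the two field values is then reported, except for streams with discontinuous macroblocks.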
+
+/*
+ * @Function decoder_service_firmware_response
+ */
+int decoder_service_firmware_response(void *dec_str_ctx_arg, unsigned int *msg,
+ unsigned int msg_size, unsigned int msg_flags)
+{
+ int ret = IMG_SUCCESS;
+ struct dec_decpict *dec_pict = NULL;
+ unsigned char head_of_queue = TRUE;
+ struct dec_str_ctx *dec_str_ctx;
+ struct dec_str_unit *dec_str_unit;
+ unsigned char pict_start = FALSE;
+ enum vdecdd_str_unit_type str_unit_type;
+ struct vdecdd_picture *picture;
+ struct decoder_pict_fragment *pict_fragment;
+ struct dec_str_ctx *dec_strctx;
+ struct dec_core_ctx *dec_core_ctx;
+
+ /* validate input arguments */
+ if (!dec_str_ctx_arg || !msg) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ dec_strctx = decoder_stream_get_context(dec_str_ctx_arg);
+
+ dec_core_ctx = decoder_str_ctx_to_core_ctx(dec_strctx);
+
+ if (!dec_core_ctx) {
+ pr_err("%s: dec_core_ctx is NULL\n", __func__);
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ pr_debug("%s : process firmware response\n", __func__);
+ ret = hwctrl_process_msg(dec_core_ctx->hw_ctx, msg_flags, msg, &dec_pict);
+ VDEC_ASSERT(ret == IMG_SUCCESS || ret == IMG_ERROR_FATAL);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ if (!dec_pict || (dec_pict->state != DECODER_PICTURE_STATE_DECODED &&
+ dec_pict->state != DECODER_PICTURE_STATE_TO_DISCARD))
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ /*
+ * Try and locate the stream context in the list of active
+ * streams.
+ */
+ VDEC_ASSERT(dec_core_ctx->dec_ctx);
+ dec_str_ctx = lst_first(&dec_core_ctx->dec_ctx->str_list);
+ if (!dec_str_ctx) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ while (dec_str_ctx) {
+ if (dec_str_ctx == dec_pict->dec_str_ctx)
+ break;
+
+ dec_str_ctx = lst_next(dec_str_ctx);
+ }
+
+ /*
+ * If the stream is not in the list of active streams then
+ * it must have been destroyed.
+ * This interrupt should be ignored.
+ */
+ if (dec_str_ctx != dec_pict->dec_str_ctx)
+ return IMG_SUCCESS;
+
+ /*
+ * Retrieve the picture from the head of the core decode queue
+ * primarily to obtain the correct stream context.
+ */
+ hwctrl_removefrom_piclist(dec_core_ctx->hw_ctx, dec_pict);
+
+ if (!dec_str_ctx) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+ dec_str_ctx->avail_slots++;
+ VDEC_ASSERT(dec_str_ctx->avail_slots > 0);
+
+ /*
+ * Store the stream context of the picture that has been
+ * decoded.
+ */
+ dec_str_ctx = dec_pict->dec_str_ctx;
+ VDEC_ASSERT(dec_str_ctx);
+
+ if (!dec_str_ctx)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ /*
+ * The picture was discarded before the EOP unit;
+ * recover the decoder to a valid state
+ */
+ if (!dec_pict->eop_found) {
+ VDEC_ASSERT(dec_pict == dec_str_ctx->cur_pict);
+
+ dec_core_ctx->busy = FALSE;
+ dec_str_ctx->cur_pict = NULL;
+ }
+
+ /*
+ * Peek the first stream unit and validate against core
+ * queue to ensure that this really is the next picture
+ * for the stream.
+ */
+ dec_str_unit = lst_first(&dec_str_ctx->pend_strunit_list);
+ if (dec_str_unit) {
+ if (dec_str_unit->dec_pict != dec_pict) {
+ head_of_queue = FALSE;
+
+ /*
+ * For pictures to be decoded
+ * out-of-order there must be
+ * more than one decoder core.
+ */
+ VDEC_ASSERT(dec_str_ctx->decctx->num_pipes > 1);
+ while (dec_str_unit) {
+ dec_str_unit = lst_next(dec_str_unit);
+ if (dec_str_unit && dec_str_unit->dec_pict == dec_pict)
+ break;
+ }
+ }
+ VDEC_ASSERT(dec_str_unit);
+ if (!dec_str_unit)
+ return IMG_ERROR_FATAL;
+
+ VDEC_ASSERT(dec_str_unit->dec_pict == dec_pict);
+ VDEC_ASSERT(dec_str_unit->str_unit->str_unit_type ==
+ VDECDD_STRUNIT_PICTURE_START);
+ }
+
+ /*
+ * Process all units from the pending stream list until
+ * the next picture start.
+ */
+ while (dec_str_unit && !pict_start) {
+ /*
+ * Actually remove the unit now from the
+ * pending stream list.
+ */
+ lst_remove(&dec_str_ctx->pend_strunit_list, dec_str_unit);
+ if (!dec_str_unit->str_unit || !dec_pict)
+ break;
+
+ str_unit_type = dec_str_unit->str_unit->str_unit_type;
+
+ if (str_unit_type != VDECDD_STRUNIT_PICTURE_START)
+ break;
+
+ dec_str_ctx = dec_pict->dec_str_ctx;
+
+ dec_str_ctx->dec_str_st.num_pict_decoding--;
+ dec_str_ctx->dec_str_st.total_pict_decoded++;
+
+ ret = idgen_gethandle(dec_str_ctx->pict_idgen,
+ GET_STREAM_PICTURE_ID(dec_str_unit->dec_pict->transaction_id),
+ (void **)&picture);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS || !picture) {
+ pr_err("[USERSID=0x%08X] Failed to find picture from ID",
+ dec_str_ctx->config.user_str_id);
+ return IMG_ERROR_INVALID_ID;
+ }
+
+ VDEC_ASSERT(picture == dec_str_unit->str_unit->dd_pict_data);
+
+ /* Hold a reference to the last context on the BE */
+ if (dec_str_ctx->last_be_pict_dec_res && HAS_X_PASSED_Y
+ (picture->pict_id,
+ GET_STREAM_PICTURE_ID(dec_str_ctx->last_be_pict_dec_res->transaction_id),
+ 1 << FWIF_NUMBITS_STREAM_PICTURE_ID, unsigned int)) {
+ /* Return previous last FW context. */
+ resource_item_return(&dec_str_ctx->last_be_pict_dec_res->ref_cnt);
+
+ if (resource_item_isavailable(&dec_str_ctx->last_be_pict_dec_res->ref_cnt
+ )) {
+ resource_list_remove(&dec_str_ctx->dec_res_lst,
+ dec_str_ctx->last_be_pict_dec_res);
+ resource_list_add_img(&dec_str_ctx->dec_res_lst,
+ dec_str_ctx->last_be_pict_dec_res, 0,
+ &dec_str_ctx->last_be_pict_dec_res->ref_cnt);
+ }
+ }
+ if (!dec_str_ctx->last_be_pict_dec_res ||
+ (dec_str_ctx->last_be_pict_dec_res && HAS_X_PASSED_Y
+ (picture->pict_id,
+ GET_STREAM_PICTURE_ID(dec_str_ctx->last_be_pict_dec_res->transaction_id),
+ 1 << FWIF_NUMBITS_STREAM_PICTURE_ID, unsigned int))) {
+ /* Hold onto last FW context. */
+ dec_str_ctx->last_be_pict_dec_res = dec_pict->cur_pict_dec_res;
+ resource_item_use(&dec_str_ctx->last_be_pict_dec_res->ref_cnt);
+ }
+ resource_item_return(&dec_pict->cur_pict_dec_res->ref_cnt);
+
+ if (resource_item_isavailable(&dec_pict->cur_pict_dec_res->ref_cnt)) {
+ resource_list_remove(&dec_str_ctx->dec_res_lst,
+ dec_pict->cur_pict_dec_res);
+ resource_list_add_img(&dec_str_ctx->dec_res_lst,
+ dec_pict->cur_pict_dec_res, 0,
+ &dec_pict->cur_pict_dec_res->ref_cnt);
+ }
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("[USERSID=0x%08X] [TID=0x%08X] DECODED",
+ dec_str_ctx->config.user_str_id,
+ dec_pict->transaction_id);
+#endif
+
+ ret = decoder_picture_decoded(dec_str_ctx, dec_core_ctx,
+ picture, dec_pict,
+ dec_pict->pict_hdr_info,
+ dec_str_unit->str_unit);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ dec_res_picture_detach(&dec_str_ctx->resources, dec_pict);
+
+ /* Free the segments from the decode picture */
+ decoder_clean_bitstr_segments(&dec_pict->dec_pict_seg_list);
+
+ pict_fragment = lst_removehead(&dec_pict->fragment_list);
+ while (pict_fragment) {
+ kfree(pict_fragment);
+ pict_fragment =
+ lst_removehead(&dec_pict->fragment_list);
+ }
+
+ pict_start = (!head_of_queue) ? TRUE : FALSE;
+
+ ret = dec_str_ctx->str_processed_cb(dec_str_ctx->usr_int_data,
+ VXD_CB_STRUNIT_PROCESSED,
+ dec_str_unit->str_unit);
+
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS) {
+ /* Free decoder picture */
+ kfree(dec_pict);
+ dec_pict = NULL;
+ return ret;
+ }
+
+ /* Destroy the Decoder stream unit wrapper */
+ kfree(dec_str_unit);
+
+ /* Peek at the next stream unit */
+ dec_str_unit = lst_first(&dec_str_ctx->pend_strunit_list);
+ if (dec_str_unit)
+ pict_start = (dec_str_unit->str_unit->str_unit_type ==
+ VDECDD_STRUNIT_PICTURE_START &&
+ dec_str_unit->dec_pict != dec_pict);
+
+ /* Free decoder picture */
+ kfree(dec_pict);
+ dec_pict = NULL;
+ }
+
+ kfree(dec_str_unit);
+ return ret;
+}
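The dispatch path above repeatedly peeks the head of the pending stream-unit list and, when the completed picture is not at the head (only possible with more than one pipe), walks forward to find it. A condensed sketch of just that search, reusing the lst_* helpers already used in this file and guarding against walking off the end of the list (the function name find_pend_strunit is illustrative):

static struct dec_str_unit *find_pend_strunit(struct lst_t *pend_list,
					      struct dec_decpict *dec_pict)
{
	struct dec_str_unit *unit = lst_first(pend_list);

	/* Head of queue is the common, in-order completion case. */
	if (unit && unit->dec_pict == dec_pict)
		return unit;

	/* Out-of-order completion: scan the remainder of the list. */
	while (unit && unit->dec_pict != dec_pict)
		unit = lst_next(unit);

	return unit;	/* NULL when the picture is not pending */
}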
+
+/*
+ * @Function decoder_is_stream_idle
+ */
+unsigned char decoder_is_stream_idle(void *dec_str_ctx_handle)
+{
+ struct dec_str_ctx *dec_str_ctx;
+
+ dec_str_ctx = decoder_stream_get_context(dec_str_ctx_handle);
+ VDEC_ASSERT(dec_str_ctx);
+ if (!dec_str_ctx) {
+ pr_err("Invalid decoder stream context handle!");
+ return FALSE;
+ }
+
+ return lst_empty(&dec_str_ctx->pend_strunit_list);
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/decoder.h b/drivers/media/platform/vxe-vxd/decoder/decoder.h
new file mode 100644
index 000000000000..a6595fa785e4
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/decoder.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder Component header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef __DECODER_H__
+#define __DECODER_H__
+
+#include "bspp.h"
+#include "dq.h"
+#ifdef HAS_JPEG
+#include "jpegfw_data.h"
+#endif
+#include "lst.h"
+#include "vdecdd_defs.h"
+#include "vdec_defs.h"
+#include "vid_buf.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+#include "hevcfw_data.h"
+
+#define MAX_CONCURRENT_STREAMS 16
+
+enum dec_pict_states {
+ DECODER_PICTURE_STATE_TO_DECODE = 0,
+ DECODER_PICTURE_STATE_DECODED,
+ DECODER_PICTURE_STATE_TO_DISCARD,
+ DECODER_PICTURE_STATE_MAX,
+ DECODER_PICTURE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum dec_res_type {
+ DECODER_RESTYPE_TRANSACTION = 0,
+ DECODER_RESTYPE_HDR,
+ DECODER_RESTYPE_BATCH_MSG,
+#ifdef HAS_HEVC
+ DECODER_RESTYPE_PVDEC_BUF,
+#endif
+ DECODER_RESTYPE_MAX,
+ DECODER_RESTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum dec_core_query_type {
+ DECODER_CORE_GET_RES_LIMIT = 0,
+ DECODER_CORE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * @Function ref_pic_get_maximum
+ * @Description
+ * This is the prototype for functions calculating the maximum number
+ * of reference pictures required per video standard.
+ *
+ * @Input comseq_hdr_info : A pointer to the common VSH information
+ * structure.
+ *
+ * @Output max_ref_pict_num : A pointer used to return the maximum number
+ * of reference frames required.
+ *
+ * @Return int : This function returns either IMG_SUCCESS or
+ * an error code.
+ */
+typedef int (*ref_pic_get_maximum)(const struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ unsigned int *max_ref_pict_num);
+
+typedef int (*strunit_processed_cb)(void *handle, int cb_type, void *item);
+
+typedef int (*core_gen_cb)(void *handle, int query, void *item);
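As a usage illustration, a client supplies both callback types when creating a stream (they are passed as decoder_cb and query_cb to decoder_stream_create, declared below). The stubs here are hypothetical sketches, including the assumed resource-limit value:

/* Illustrative stream-unit-processed callback. */
static int my_strunit_processed_cb(void *handle, int cb_type, void *item)
{
	/* Dispatch on cb_type, e.g. VXD_CB_PICT_DECODED or VXD_CB_STR_END. */
	return IMG_SUCCESS;
}

/* Illustrative core query callback. */
static int my_core_query_cb(void *handle, int query, void *item)
{
	if (query == DECODER_CORE_GET_RES_LIMIT)
		*(unsigned int *)item = 8;	/* assumed picture-buffer limit */
	return IMG_SUCCESS;
}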
+
+struct dec_ctx;
+
+/*
+ * This structure contains the core context.
+ * @brief Decoder Core Context
+ */
+struct dec_core_ctx {
+ void **link; /* to be part of single linked list */
+ struct dec_ctx *dec_ctx;
+ unsigned char enumerated;
+ unsigned char master;
+ unsigned char configured;
+ unsigned int core_features;
+ unsigned int pipe_features[VDEC_MAX_PIXEL_PIPES];
+ struct vxd_coreprops core_props;
+ void *resources;
+ void *hw_ctx;
+ unsigned int cum_pics;
+ unsigned char busy;
+};
+
+struct dec_ctx {
+ unsigned char inited;
+ void *user_data;
+ const struct vdecdd_dd_devconfig *dev_cfg;
+ unsigned int num_pipes;
+ struct dec_core_ctx *dec_core_ctx;
+ struct lst_t str_list;
+ void *mmu_dev_handle;
+ void *dev_handle;
+ struct vidio_ddbufinfo ptd_buf_info;
+ unsigned char sup_stds[VDEC_STD_MAX];
+ unsigned int internal_heap_id;
+ unsigned int str_cnt;
+};
+
+/*
+ * This structure contains the device decode resource (used for decoding and
+ * held for subsequent decoding).
+ * @brief Decoder Device Resource
+ */
+struct dec_pictdec_res {
+ void **link; /* to be part of single linked list */
+ unsigned int transaction_id;
+ struct vidio_ddbufinfo fw_ctx_buf;
+ struct vidio_ddbufinfo h264_sgm_buf;
+ unsigned int ref_cnt;
+};
+
+struct dec_decpict;
+
+/*
+ *
+ * This structure contains the stream context.
+ * @brief Decoder Stream Context
+ */
+struct dec_str_ctx {
+ void **link; /* to be part of single linked list */
+ int km_str_id;
+ struct vdec_str_configdata config;
+ struct dec_ctx *decctx;
+ void *vxd_dec_ctx;
+ void *usr_int_data;
+ void *mmu_str_handle;
+ void *pict_idgen;
+ struct lst_t pend_strunit_list;
+ struct dq_linkage_t str_decd_pict_list;
+ unsigned int num_ref_res;
+ struct lst_t ref_res_lst;
+ unsigned int num_dec_res;
+ struct lst_t dec_res_lst;
+ unsigned int avail_pipes;
+ unsigned int avail_slots;
+ struct vdecdd_decstr_status dec_str_st;
+ struct vidio_ddbufinfo pvdec_fw_ctx_buf;
+ unsigned int last_fe_transaction_id;
+ unsigned int next_dec_pict_id;
+ unsigned int next_pict_id_expected;
+ struct dec_pictdec_res *cur_fe_pict_dec_res;
+ struct dec_pictdec_res *prev_fe_pict_dec_res;
+ struct dec_pictdec_res *last_be_pict_dec_res;
+ struct dec_decpict *cur_pict;
+ void *resources;
+ strunit_processed_cb str_processed_cb;
+ core_gen_cb core_query_cb;
+};
+
+/*
+ * Resource Structure for DECODER_sDdResourceInfo to be used with pools
+ */
+struct res_resinfo {
+ void **link; /* to be part of single linked list */
+ void *res;
+ struct vidio_ddbufinfo *ddbuf_info;
+};
+
+struct vdecdd_ddstr_ctx;
+
+/*
+ * This structure contains the Decoded attributes
+ * @brief Decoded attributes
+ */
+struct dec_pict_attrs {
+ unsigned char first_fld_rcvd;
+ unsigned int fe_err;
+ unsigned int no_be_wdt;
+ unsigned int mbs_dropped;
+ unsigned int mbs_recovered;
+ struct vxd_pict_attrs pict_attrs;
+};
+
+/*
+ * This union contains firmware contexts. Used to allocate buffers for firmware
+ * context.
+ */
+union dec_fw_contexts {
+ struct h264fw_context_data h264_context;
+#ifdef HAS_JPEG
+ struct jpegfw_context_data jpeg_context;
+#endif
+#ifdef HAS_HEVC
+ struct hevcfw_ctx_data hevc_context;
+#endif
+};
+
+/*
+ * for debug
+ */
+struct dec_fwmsg {
+ void **link;
+ struct dec_pict_attrs pict_attrs;
+ struct vdec_pict_hwcrc pict_hwcrc;
+};
+
+/*
+ * This structure contains the stream decode resource (persistent for
+ * longer than decoding).
+ * @brief Decoder Stream Resource
+ */
+struct dec_pictref_res {
+ void **link; /* to be part of single linked list */
+ struct vidio_ddbufinfo fw_ctrlbuf;
+ unsigned int ref_cnt;
+};
+
+/*
+ * This structure defines the decode picture.
+ * @brief Decoder Picture
+ */
+struct dec_decpict {
+ void **link;
+ unsigned int transaction_id;
+ void *dec_str_ctx;
+ unsigned char twopass;
+ unsigned char first_fld_rcvd;
+ struct res_resinfo *transaction_info;
+ struct res_resinfo *hdr_info;
+#ifdef HAS_HEVC
+ struct res_resinfo *pvdec_info;
+ unsigned int temporal_out_addr;
+#endif
+ struct vdecdd_ddpict_buf *recon_pict;
+ struct vdecdd_ddpict_buf *alt_pict;
+ struct res_resinfo *batch_msginfo;
+ struct vidio_ddbufinfo *intra_bufinfo;
+ struct vidio_ddbufinfo *auxline_bufinfo;
+ struct vidio_ddbufinfo *vlc_tables_bufinfo;
+ struct vidio_ddbufinfo *vlc_idx_tables_bufinfo;
+ struct vidio_ddbufinfo *start_code_bufinfo;
+ struct dec_fwmsg *first_fld_fwmsg;
+ struct dec_fwmsg *second_fld_fwmsg;
+ struct bspp_pict_hdr_info *pict_hdr_info;
+ struct dec_pictdec_res *cur_pict_dec_res;
+ struct dec_pictdec_res *prev_pict_dec_res;
+ struct dec_pictref_res *pict_ref_res;
+ struct lst_t dec_pict_seg_list;
+ struct lst_t fragment_list;
+ unsigned char eop_found;
+ unsigned int operating_op;
+ unsigned short genc_id;
+ struct vdecdd_ddbuf_mapinfo **genc_bufs;
+ struct vdecdd_ddbuf_mapinfo *genc_fragment_buf;
+ unsigned int ctrl_alloc_bytes;
+ unsigned int ctrl_alloc_offset;
+ enum dec_pict_states state;
+ struct vidio_ddbufinfo *str_pvdec_fw_ctxbuf;
+};
+
+/*
+ * This structure wraps a stream unit together with its decode picture.
+ * @brief Decoder Stream Unit
+ */
+struct dec_str_unit {
+ void **link; /* to be part of single linked list */
+ struct dec_decpict *dec_pict;
+ struct vdecdd_str_unit *str_unit;
+};
+
+/*
+ * This structure defines the decoded picture.
+ * @brief Decoded Picture
+ */
+struct dec_decoded_pict {
+ struct dq_linkage_t link; /* to be part of double linked list */
+ unsigned int transaction_id;
+ unsigned char processed;
+ unsigned char process_failed;
+ unsigned char force_display;
+ unsigned char displayed;
+ unsigned char merged;
+ unsigned int disp_idx;
+ unsigned int rel_idx;
+ struct vdecdd_picture *pict;
+ struct dec_fwmsg *first_fld_fwmsg;
+ struct dec_fwmsg *second_fld_fwmsg;
+ struct dec_pictref_res *pict_ref_res;
+};
+
+struct dec_pict_fragment {
+ void **link; /* to be part of single linked list */
+ /* Control allocation size in bytes */
+ unsigned int ctrl_alloc_bytes;
+ /* Control allocation offset in bytes */
+ unsigned int ctrl_alloc_offset;
+};
+
+/*
+ * This structure contains the pointer to the picture segment.
+ * All the segments could be added to the list in struct dec_decpict,
+ * but because list items cannot belong to more than one list, this
+ * wrapper is used; it is added to the dec_pict_seg_list inside
+ * struct dec_decpict.
+ * @brief Decoder Picture Segment
+ */
+struct dec_decpict_seg {
+ void **link; /* to be part of single linked list */
+ struct bspp_bitstr_seg *bstr_seg;
+ unsigned char internal_seg;
+};
+
+struct decoder_regsoffsets {
+ unsigned int vdmc_cmd_offset;
+ unsigned int vec_offset;
+ unsigned int entropy_offset;
+ unsigned int vec_be_regs_offset;
+ unsigned int vdec_be_codec_regs_offset;
+};
+
+int decoder_initialise(void *init_usr_data, unsigned int internal_heap_id,
+ struct vdecdd_dd_devconfig *dd_devcfg, unsigned int *num_pipes,
+ void **dec_ctx);
+
+int decoder_deinitialise(void *dec_ctx);
+
+int decoder_supported_features(void *dec_ctx, struct vdec_features *features);
+
+int decoder_stream_destroy(void *dec_str_ctx, unsigned char abort);
+
+int decoder_stream_create(void *dec_ctx, struct vdec_str_configdata str_cfg,
+ unsigned int kmstr_id, void **mmu_str_handle,
+ void *vxd_dec_ctx, void *str_usr_int_data,
+ void **dec_str_ctx, void *decoder_cb, void *query_cb);
+
+int decoder_stream_prepare_ctx(void *dec_str_ctx, unsigned char flush_dpb);
+
+int decoder_stream_process_unit(void *dec_str_ctx,
+ struct vdecdd_str_unit *str_unit);
+
+int decoder_get_load(void *dec_str_ctx, unsigned int *avail_slots);
+
+int
+decoder_check_support(void *dec_ctx,
+ const struct vdec_str_configdata *str_cfg,
+ const struct vdec_str_opconfig *op_cfg,
+ const struct vdecdd_ddpict_buf *disp_pictbuf,
+ const struct vdec_pict_rendinfo *req_pict_rendinfo,
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ const struct bspp_pict_hdr_info *pict_hdrinfo,
+ const struct vdec_comsequ_hdrinfo *prev_comseq_hdrinfo,
+ const struct bspp_pict_hdr_info *prev_pict_hdrinfo,
+ unsigned char non_cfg_req, struct vdec_unsupp_flags *unsupported,
+ unsigned int *features);
+
+unsigned char decoder_is_stream_idle(void *dec_str_ctx);
+
+int decoder_stream_flush(void *dec_str_ctx, unsigned char discard_refs);
+
+int decoder_stream_release_buffers(void *dec_str_ctx);
+
+int decoder_stream_get_status(void *dec_str_ctx,
+ struct vdecdd_decstr_status *dec_str_st);
+
+int decoder_service_firmware_response(void *dec_str_ctx_arg, unsigned int *msg,
+ unsigned int msg_size, unsigned int msg_flags);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/fw_interface.h b/drivers/media/platform/vxe-vxd/decoder/fw_interface.h
new file mode 100644
index 000000000000..6da3d835b950
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/fw_interface.h
@@ -0,0 +1,818 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder firmware interface
+ * This file contains the host/firmware messaging interface definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef FW_INTERFACE_H_
+#define FW_INTERFACE_H_
+
+/* TODO: this macro is always defined for now; revisit whether it should be conditional */
+#define VDEC_USE_PVDEC_COMPATIBILITY 1
+
+#define MSG_TYPE_PADDING (0x00)
+/* Start of parser specific Host->MTX messages */
+#define MSG_TYPE_START_PSR_HOSTMTX_MSG (0x80)
+/* Start of parser specific MTX->Host message */
+#define MSG_TYPE_START_PSR_MTXHOST_MSG (0xC0)
+
+enum {
+ FW_DEVA_INIT = MSG_TYPE_START_PSR_HOSTMTX_MSG,
+ FW_DEVA_DECODE_FE,
+ FW_DEVA_RES_0,
+ FW_DEVA_RES_1,
+ FW_DEVA_DECODE_BE,
+ FW_DEVA_HOST_BE_OPP,
+ FW_DEVA_DEBLOCK,
+ FW_DEVA_INTRA_OOLD,
+ FW_DEVA_ENDFRAME,
+
+ FW_DEVA_PARSE,
+ FW_DEVA_PARSE_FRAGMENT,
+ FW_DEVA_BEGINFRAME,
+
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+#ifdef VDEC_USE_PVDEC_SEC
+ FWBSP_INIT,
+ FWBSP_PARSE_BITSTREAM,
+ FWDEC_DECODE,
+#endif /* VDEC_USE_PVDEC_SEC */
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+
+ /* Sent by the firmware on the MTX to the host. */
+ FW_DEVA_COMPLETED = MSG_TYPE_START_PSR_MTXHOST_MSG,
+#ifndef VDEC_USE_PVDEC_COMPATIBILITY
+ FW_DEVA_RES_2,
+ FW_DEVA_RES_3,
+ FW_DEVA_RES_4,
+ FW_DEVA_RES_5,
+
+ FW_DEVA_RES_6,
+ FW_DEVA_CONTIGUITY_WARNING,
+ FW_DEVA_PANIC,
+ FW_DEVA_RES_7,
+ FW_DEVA_RES_8,
+#else /* ndef VDEC_USE_PVDEC_COMPATIBILITY */
+ FW_DEVA_PANIC,
+ FW_ASSERT,
+ FW_PERF,
+ /* An empty completion message sent by new vxd driver */
+ FW_VXD_EMPTY_COMPL,
+ FW_DEC_REQ_RECEIVED,
+ FW_SO,
+#ifdef VDEC_USE_PVDEC_SEC
+ FWBSP_NEW_SEQ,
+ FWBSP_NEW_PIC,
+ FWBSP_BUF_EMPTY,
+ FWBSP_ERROR,
+ FWDEC_COMPLETED,
+#endif /* VDEC_USE_PVDEC_SEC */
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+ FW_DEVA_SIGNATURES_LEGACY = 0xD0,
+ FW_DEVA_SIGNATURES_HEVC = 0xE0,
+ FW_DEVA_SIGNATURES_FORCE32BITS = 0x7FFFFFFFU
+};
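+
+/*
+ * Illustrative helper (a sketch, not part of this interface): the range
+ * bases above can be used to classify a message ID by direction, e.g.
+ *
+ *   static inline int msg_is_from_mtx(unsigned int msg_type)
+ *   {
+ *       return msg_type >= MSG_TYPE_START_PSR_MTXHOST_MSG;
+ *   }
+ */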
+
+/* Defines the Host/Firmware communication area */
+#ifndef VDEC_USE_PVDEC_COMPATIBILITY
+#define COMMS_HEADER_SIZE (0x34)
+#else /* def VDEC_USE_PVDEC_COMPATIBILITY */
+#define COMMS_HEADER_SIZE (0x40)
+#endif /* def VDEC_USE_PVDEC_COMPATIBILITY */
+/* dwords */
+#define PVDEC_COM_RAM_FW_STATUS_OFFSET 0x00
+#define PVDEC_COM_RAM_TASK_STATUS_OFFSET 0x04
+#define PVDEC_COM_RAM_FW_ID_OFFSET 0x08
+#define PVDEC_COM_RAM_FW_MTXPC_OFFSET 0x0c
+#define PVDEC_COM_RAM_MSG_COUNTER_OFFSET 0x10
+#define PVDEC_COM_RAM_SIGNATURE_OFFSET 0x14
+#define PVDEC_COM_RAM_TO_HOST_BUF_SIZE_AND_OFFSET_OFFSET 0x18
+#define PVDEC_COM_RAM_TO_HOST_RD_INDEX_OFFSET 0x1c
+#define PVDEC_COM_RAM_TO_HOST_WRT_INDEX_OFFSET 0x20
+#define PVDEC_COM_RAM_TO_MTX_BUF_SIZE_AND_OFFSET_OFFSET 0x24
+#define PVDEC_COM_RAM_TO_MTX_RD_INDEX_OFFSET 0x28
+#define PVDEC_COM_RAM_FLAGS_OFFSET 0x2c
+#define PVDEC_COM_RAM_TO_MTX_WRT_INDEX_OFFSET 0x30
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+#define PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_OFFSET 0x34
+#define PVDEC_COM_RAM_FW_MMU_REPORT_OFFSET 0x38
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+/* fields */
+#define PVDEC_COM_RAM_TO_HOST_BUF_SIZE_AND_OFFSET_SIZE_MASK 0xFFFF
+#define PVDEC_COM_RAM_TO_HOST_BUF_SIZE_AND_OFFSET_SIZE_SHIFT 0
+#define PVDEC_COM_RAM_TO_HOST_BUF_SIZE_AND_OFFSET_OFFSET_MASK 0xFFFF0000
+#define PVDEC_COM_RAM_TO_HOST_BUF_SIZE_AND_OFFSET_OFFSET_SHIFT 16
+
+#define PVDEC_COM_RAM_TO_MTX_BUF_SIZE_AND_OFFSET_SIZE_MASK 0xFFFF
+#define PVDEC_COM_RAM_TO_MTX_BUF_SIZE_AND_OFFSET_SIZE_SHIFT 0
+#define PVDEC_COM_RAM_TO_MTX_BUF_SIZE_AND_OFFSET_OFFSET_MASK 0xFFFF0000
+#define PVDEC_COM_RAM_TO_MTX_BUF_SIZE_AND_OFFSET_OFFSET_SHIFT 16
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+#define PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_SIZE_MASK 0xFFFF
+#define PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_SIZE_SHIFT 0
+#define PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_OFFSET_MASK 0xFFFF0000
+#define PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_OFFSET_SHIFT 16
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+#define PVDEC_COM_RAM_BUF_GET_SIZE(_reg_, _name_) \
+ (((_reg_) & PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_SIZE_MASK) >> \
+ PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_SIZE_SHIFT)
+#define PVDEC_COM_RAM_BUF_GET_OFFSET(_reg_, _name_) \
+ (((_reg_) & \
+ PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_OFFSET_MASK) >> \
+ PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_OFFSET_SHIFT)
+#define PVDEC_COM_RAM_BUF_SET_SIZE_AND_OFFSET(_name_, _size_, _offset_) \
+ ((((_size_) << \
+ PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_SIZE_SHIFT) \
+ & PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_SIZE_MASK) | \
+ (((_offset_) << \
+ PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_OFFSET_SHIFT) \
+ & PVDEC_COM_RAM_ ## _name_ ## _BUF_SIZE_AND_OFFSET_OFFSET_MASK))
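+
+/*
+ * Illustrative round trip for the accessors above (hypothetical size and
+ * offset values, sketch only):
+ *
+ *   reg = PVDEC_COM_RAM_BUF_SET_SIZE_AND_OFFSET(TO_HOST, 0x200, 0x40);
+ *   PVDEC_COM_RAM_BUF_GET_SIZE(reg, TO_HOST)   -> 0x200
+ *   PVDEC_COM_RAM_BUF_GET_OFFSET(reg, TO_HOST) -> 0x40
+ */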
+/* values */
+/* Firmware ready signature value */
+ #define FW_READY_SIGNATURE (0xA5A5A5A5)
+
+/* Firmware status values */
+ #define FW_STATUS_BUSY 0
+ #define FW_STATUS_IDLE 1
+ #define FW_STATUS_PANIC 2
+ #define FW_STATUS_ASSERT 3
+ #define FW_STATUS_GAMEOVER 4
+ #define FW_STATUS_FEWATCHDOG 5
+ #define FW_STATUS_EPWATCHDOG 6
+ #define FW_STATUS_BEWATCHDOG 7
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+ #define FW_STATUS_SO 8
+ #define FW_STATUS_INIT 0xF
+#endif
+
+/* Decode Message Flags */
+ #define FW_DEVA_RENDER_IS_FIRST_SLICE (0x00000001)
+/* This is H264 Mbaff - required for state store */
+ #define FW_DEVA_FORCE_RECON_WRITE_DISABLE (0x00000002)
+ #define FW_DEVA_RENDER_IS_LAST_SLICE (0x00000004)
+/* Prevents insertion of end of picture or flush at VEC EOS */
+ #define FW_DEVA_DECODE_DISABLE_EOF_DETECTION (0x00000008)
+
+ #define FW_DEVA_CONTEXT_BUFFER_INVALID (0x00000010)
+ #define FW_DEVA_FORCE_ALT_OUTPUT (0x00000020)
+ #define FW_SECURE_STREAM (0x00000040)
+ #define FW_LOW_LATENCY (0x00000080)
+
+ #define FW_DEVA_CONTIGUITY_DETECTION (0x00000100)
+ #define FW_DEVA_FORCE_INIT_CMDS (0x00000200)
+ #define FW_DEVA_DEBLOCK_ENABLE (0x00000400)
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+ #define FW_VDEC_SEND_SIGNATURES (0x00000800)
+#else
+/* (0x00000800) */
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+
+ #define FW_DEVA_FORCE_AUX_LINE_BUF_DISABLE (0x00001000)
+/*
+ * Causes no response message to be sent and no interrupt to be
+ * generated on successful completion
+ */
+ #define FW_DEVA_RENDER_NO_RESPONSE_MSG (0x00002000)
+/*
+ * Causes an interrupt if a response message is generated
+ * on successful completion
+ */
+ #define FW_DEVA_RENDER_HOST_INT (0x00004000)
+/* Report contiguity errors to host */
+ #define FW_DEVA_CONTIGUITY_REPORTING (0x00008000)
+
+ #define FW_DEVA_VC1_SKIPPED_PICTURE (0x00010000)
+ #define FW_INTERNAL_RENDER_SWITCH (0x00020000)
+ #define FW_DEVA_UNSUPPORTED (0x00040000)
+ #define DEBLOCKING_FORCED_OFF (0x00080000)
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+ #define FW_VDEC_CMD_PENDING (0x00100000)
+#else
+/* (0x00100000) */
+#endif
+/* Only for debug */
+ #define DETECTED_RENDEC_FULL (0x00200000)
+/* Only for debug */
+ #define DETECTED_RENDEC_EMPTY (0x00400000)
+ #define FW_ONE_PASS_PARSE (0x00800000)
+
+ #define FW_DEVA_EARLY_COMPLETE (0x01000000)
+ #define FW_DEVA_FE_EP_SIGNATURES_READY (0x02000000)
+ #define FW_VEC_EOS (0x04000000)
+/* hardware has reported an error relating to this command */
+ #define FW_DEVA_ERROR_DETECTED_ENT (0x08000000)
+
+ #define FW_DEVA_ERROR_DETECTED_PIX (0x10000000)
+ #define FW_DEVA_MP_SYNC (0x20000000)
+ #define MORE_THAN_ONE_MB (0x40000000)
+ #define REATTEMPT_SINGLEPIPE (0x80000000)
+/* end of message flags */
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+/* VDEC Decode Message Flags */
+/*
+ * H.264/H.265 are to be configured in SIZE_DELIMITED mode rather than SCP mode.
+ */
+#define FW_VDEC_NAL_SIZE_DELIM (0x00000001)
+/* Indicates if MMU cache shall be flushed. */
+#define FW_VDEC_MMU_FLUSH_CACHE (0x00000002)
+/* end of message flags */
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+
+/* FW flags */
+/* TODO: temporary, for HW testing */
+ #define FWFLAG_DISABLE_VDEB_PRELOAD (0x00000001)
+ #define FWFLAG_BIG_TO_HOST_BUFFER (0x00000002)
+/* FS is the default regardless of this flag */
+ #define FWFLAG_FORCE_FS_FLOW (0x00000004)
+ #define FWFLAG_DISABLE_WATCHDOG_TIMERS (0x00000008)
+
+ #define FWFLAG_DISABLE_AEH (0x00000020)
+ #define FWFLAG_DISABLE_AUTONOMOUS_RESET (0x00000040)
+ #define FWFLAG_NON_ACCUMULATING_HWSIGS (0x00000080)
+
+ #define FWFLAG_DISABLE_2PASS_DEBLOCK (0x00000100)
+ #define FWFLAG_NO_INT_ON_TOHOST_FULL (0x00000200)
+ #define FWFLAG_RETURN_VDEB_CR (0x00000800)
+
+ #define FWFLAG_DISABLE_AUTOCLOCKGATING (0x00001000)
+ #define FWFLAG_DISABLE_IDLE_GPIO (0x00002000)
+ #define FWFLAG_XPL (0x00004000)
+ #define FWFLAG_INFINITE_MTX_TIMEOUT (0x00008000)
+
+ #define FWFLAG_DECOUPLE_BE_FE (0x00010000)
+ #define FWFLAG_ENABLE_SECURITY (0x00080000)
+
+ #define FWFLAG_ENABLE_CONCEALMENT (0x00100000)
+/* Not currently supported */
+/* #define FWFLAG_PREEMPT (0x00200000) */
+/* NA in FS */
+ #define FWFLAG_FORCE_FLUSHING (0x00400000)
+/* NA in FS */
+ #define FWFLAG_DISABLE_GENC_FLUSHING (0x00800000)
+
+ #define FWFLAG_DISABLE_COREWDT_TIMERS (0x01000000)
+ #define FWFLAG_DISABLE_RENDEC_AUTOFLUSH (0x02000000)
+ #define FWFLAG_FORCE_STRICT_SINGLEPIPE (0x04000000)
+ #define FWFLAG_CONSISTENT_MULTIPIPE_FLOW (0x08000000)
+
+ #define FWFLAG_DISABLE_IDLE_FAST_EVAL (0x10000000)
+ #define FWFLAG_FAKE_COMPLETION (0x20000000)
+ #define FWFLAG_MAN_PP_CLK (0x40000000)
+ #define FWFLAG_STACK_CHK (0x80000000)
+
+/* end of FW flags */
+
+#ifdef FW_STACK_USAGE_TRACKING
+/* FW task identifiers */
+enum task_id {
+ TASK_ID_RX = 0,
+ TASK_ID_TX,
+ TASK_ID_EP1,
+ TASK_ID_FE1,
+ TASK_ID_FE2,
+ TASK_ID_FE3,
+ TASK_ID_BE1,
+ TASK_ID_BE2,
+ TASK_ID_BE3,
+ TASK_ID_PARSER,
+ TASK_ID_MAX,
+ TASK_ID_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* FW task stack info utility macros */
+#define TASK_STACK_SIZE_MASK 0xFFFF
+#define TASK_STACK_SIZE_SHIFT 0
+#define TASK_STACK_USED_MASK 0xFFFF0000
+#define TASK_STACK_USED_SHIFT 16
+#define TASK_STACK_SET_INFO(_task_id_, _stack_info_, _size_, _used_) \
+ (_stack_info_[_task_id_] = \
+ ((_size_) << TASK_STACK_SIZE_SHIFT) | \
+ ((_used_) << TASK_STACK_USED_SHIFT))
+#define TASK_STACK_GET_SIZE(_task_id_, _stack_info_) \
+ ((_stack_info_[_task_id_] & TASK_STACK_SIZE_MASK) >> \
+ TASK_STACK_SIZE_SHIFT)
+#define TASK_STACK_GET_USED(_task_id_, _stack_info_) \
+ ((_stack_info_[_task_id_] & TASK_STACK_USED_MASK) >> \
+ TASK_STACK_USED_SHIFT)
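+
+/*
+ * Illustrative round trip (hypothetical sizes, sketch only):
+ *
+ *   unsigned int stack_info[TASK_ID_MAX];
+ *
+ *   TASK_STACK_SET_INFO(TASK_ID_PARSER, stack_info, 1024, 512);
+ *   TASK_STACK_GET_SIZE(TASK_ID_PARSER, stack_info) -> 1024
+ *   TASK_STACK_GET_USED(TASK_ID_PARSER, stack_info) -> 512
+ */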
+#endif /* FW_STACK_USAGE_TRACKING */
+
+/* Control Allocation */
+#define CMD_MASK (0xF0000000)
+
+/* Ctrl Allocation Header */
+#define CMD_CTRL_ALLOC_HEADER (0x90000000)
+
+struct ctrl_alloc_header {
+ unsigned int cmd_additional_params;
+ unsigned int slice_params;
+ union {
+ unsigned int vp8_probability_data;
+ unsigned int h264_pipeintra_buffersize;
+ };
+ unsigned int chroma_strides;
+ unsigned int slice_first_mb_yx;
+ unsigned int pic_last_mb_yx;
+ /* VC1 only : Store Range Map flags in bottom bits of [0] */
+ unsigned int alt_output_addr[2];
+ unsigned int alt_output_flags;
+ /* H264 Only : Extended Operating Mode */
+ unsigned int ext_opmode;
+};
+
+#define CMD_CTRL_ALLOC_HEADER_DWSIZE \
+ (sizeof(struct ctrl_alloc_header) / sizeof(unsigned int))
+
+/* Additional Parameter flags */
+#define VC1_PARSEHDR_MASK (0x00000001)
+#define VC1_SKIPPIC_MASK (0x00000002)
+
+#define VP6_BUFFOFFSET_MASK (0x0000ffff)
+#define VP6_MULTISTREAM_MASK (0x01000000)
+#define VP6_FRAMETYPE_MASK (0x02000000)
+
+#define VP8_BUFFOFFSET_MASK (0x00ffffff)
+#define VP8_PARTITIONSCOUNT_MASK (0x0f000000)
+#define VP8_PARTITIONSCOUNT_SHIFT (24)
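+
+/*
+ * Illustrative extraction (sketch; cmd_additional_params is the first
+ * word of struct ctrl_alloc_header above):
+ *
+ *   vp8_partitions = (cmd_additional_params & VP8_PARTITIONSCOUNT_MASK) >>
+ *                    VP8_PARTITIONSCOUNT_SHIFT;
+ */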
+
+/* Nop Command */
+#define CMD_NOP (0x00000000)
+#define CMD_NOP_DWSIZE (1)
+
+/* Register Block */
+#define CMD_REGISTER_BLOCK (0x10000000)
+#define CMD_REGISTER_BLOCK_PATCHING_REQUIRED (0x01000000)
+#define CMD_REGISTER_BLOCK_FLAG_PRELOAD (0x04000000)
+#define CMD_REGISTER_BLOCK_FLAG_VLC_DATA (0x08000000)
+
+/* Rendec Command */
+#define CMD_RENDEC_BLOCK (0x50000000)
+#define CMD_RENDEC_BLOCK_FLAG_MASK (0x0F000000)
+#define CMD_RENDEC_FORCE (0x08000000)
+#define CMD_RENDEC_PATCHING_REQUIRED (0x01000000)
+#define CMD_RENDEC_WORD_COUNT_MASK (0x00ff0000)
+#define CMD_RENDEC_WORD_COUNT_SHIFT (16)
+#define CMD_RENDEC_ADDRESS_MASK (0x0000ffff)
+#define CMD_RENDEC_ADDRESS_SHIFT (0)
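+
+/*
+ * Illustrative decode of a rendec command word (hypothetical value
+ * 0x51020100, sketch only):
+ *
+ *   (cmd & CMD_MASK)                     == CMD_RENDEC_BLOCK
+ *   (cmd & CMD_RENDEC_PATCHING_REQUIRED) != 0
+ *   (cmd & CMD_RENDEC_WORD_COUNT_MASK) >> CMD_RENDEC_WORD_COUNT_SHIFT == 2
+ *   (cmd & CMD_RENDEC_ADDRESS_MASK)      == 0x0100
+ */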
+
+#ifndef VDEC_USE_PVDEC_SEC
+/* Deblock */
+#define CMD_DEBLOCK (0x70000000)
+#define CMD_DEBLOCK_TYPE_STD (0x00000000)
+#define CMD_DEBLOCK_TYPE_OOLD (0x00000001)
+#define CMD_DEBLOCK_TYPE_SKIP (0x00000002)
+/* End Of Frame */
+#define CMD_DEBLOCK_TYPE_EF (0x00000003)
+
+struct deblock_cmd {
+ unsigned int cmd; /* 0x70000000 */
+ unsigned int source_mb_data;
+ unsigned int address_a[2];
+};
+
+#define CMD_DEBLOCK_DWSIZE (sizeof(struct deblock_cmd) / sizeof(unsigned int))
+#endif /* !VDEC_USE_PVDEC_SEC */
+
+/* Skip */
+#define CMD_CONDITIONAL_SKIP (0x80000000)
+#define CMD_CONDITIONAL_SKIP_DWSIZE (1)
+#define CMD_CONDITIONAL_SKIP_DWORDS (0x0000ffff)
+#define CMD_CONDITIONAL_SKIP_CONTEXT_SWITCH BIT(20)
+
+/* DMA */
+#define CMD_DMA (0xE0000000)
+#define CMD_DMA_DMA_TYPE_MASK (0x03000000)
+#define CMD_DMA_DMA_TYPE_SHIFT (24)
+#define CMD_DMA_FLAG_MASK (0x00100000)
+#define CMD_DMA_FLAG_SHIFT (20)
+#define CMD_DMA_DMA_SIZE_MASK (0x000fffff)
+
+#define CMD_DMA_OFFSET_FLAG (0x00100000)
+
+#define CMD_DMA_MAX_OFFSET (0xFFF)
+#define CMD_DMA_TYPE_VLC_TABLE (0 << CMD_DMA_DMA_TYPE_SHIFT)
+#define CMD_DMA_TYPE_PROBABILITY_DATA BIT(CMD_DMA_DMA_TYPE_SHIFT)
+
+struct dma_cmd {
+ unsigned int cmd;
+ unsigned int dev_virt_add;
+};
+
+#define CMD_DMA_DWSIZE (sizeof(struct dma_cmd) / sizeof(unsigned int))
+
+struct dma_cmd_offset_dwsize {
+ unsigned int cmd;
+ unsigned int dev_virt_add;
+ unsigned int byte_offset;
+};
+
+#define CMD_DMA_OFFSET_DWSIZE (sizeof(struct dma_cmd_offset_dwsize) / sizeof(unsigned int))
+
+/* HOST COPY */
+#define CMD_HOST_COPY (0xF0000000)
+#define CMD_HOST_COPY_SIZE_MASK (0x000fffff)
+
+struct host_copy_cmd {
+ unsigned int cmd;
+ unsigned int src_dev_virt_add;
+ unsigned int dst_dev_virt_add;
+};
+
+#define CMD_HOST_COPY_DWSIZE (sizeof(struct host_copy_cmd) / sizeof(unsigned int))
+
+/* Shift register setup and Bitstream DMA */
+#define CMD_SR_SETUP (0xB0000000)
+#define CMD_SR_ENABLE_RBDU_EXTRACTION (0x00000001)
+#define CMD_SR_ENABLE_AES_COUNTER (0x00000002)
+#define CMD_SR_VERIFY_STARTCODE (0x00000004)
+#define CMD_SR_BITSTR_ADDR_DEREF (0x00000008)
+#define CMD_SR_BITSTR_PARSE_KEY (0x00000010)
+
+struct sr_setup_cmd {
+ unsigned int cmd;
+ unsigned int bitstream_offset_bits;
+ unsigned int bitstream_size_bytes;
+};
+
+#define CMD_SR_DWSIZE (sizeof(struct sr_setup_cmd) / sizeof(unsigned int))
+
+#define CMD_BITSTREAM_DMA (0xA0000000)
+#define CMD_BITSTREAM_DMA_DWSIZE (2)
+/* VC1 Parse Header Command */
+#define CMD_PARSE_HEADER (0x30000000)
+#define CMD_PARSE_HEADER_CONTEXT_MASK (0x000000ff)
+#define CMD_PARSE_HEADER_NEWSLICE (0x00000001)
+#define CMD_PARSE_HEADER_SKIP_PIC (0x00000002)
+#define CMD_PARSE_HEADER_ONEPASSPARSE (0x00000004)
+#define CMD_PARSE_HEADER_NUMSLICE_MINUS1 (0x00ffff00)
+
+struct parse_header_cmd {
+ unsigned int cmd;
+ unsigned int seq_hdr_data;
+ unsigned int pic_dimensions;
+ unsigned int bitplane_addr[3];
+ unsigned int vlc_table_addr;
+};
+
+#define CMD_PARSE_DWSIZE (sizeof(struct parse_header_cmd) / sizeof(unsigned int))
+
+#define CMD_SLICE_INFO (0x20000000)
+#define CMD_SLICE_INFO_SLICENUM (0xff000000)
+#define CMD_SLICE_INFO_FIRSTMBY (0x00ff0000)
+#define CMD_SLICE_INFO_MBBITOFFSET (0x0000ffff)
+
+struct slice_info {
+ unsigned char slice_num;
+ unsigned char slice_first_mby;
+ unsigned short slice_mb_bitoffset;
+};
+
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+/* VDEC extension */
+#define CMD_VDEC_EXT (0xC0000000)
+#ifdef VDEC_USE_PVDEC_SEC
+/*
+ * Used only between firmware secure modules FWBSP->FWDEC,
+ * thus the structure is defined in firmware structures.h
+ */
+#define CMD_VDEC_SECURE_EXT (0x40000000)
+#endif/* VDEC_USE_PVDEC_SEC */
+
+#define MEM2REG_SIZE_HOST_PART_MASK 0x0000FFFF
+#define MEM2REG_SIZE_HOST_PART_SHIFT 0
+
+#define MEM2REG_SIZE_BUF_TOTAL_MASK 0xFFFF0000
+#define MEM2REG_SIZE_BUF_TOTAL_SHIFT 16
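+
+/*
+ * Illustrative packing of vdec_ext_cmd.mem_to_reg_size below (sketch;
+ * "total" and "host_part" are hypothetical dword counts):
+ *
+ *   mem_to_reg_size =
+ *       ((total << MEM2REG_SIZE_BUF_TOTAL_SHIFT) &
+ *        MEM2REG_SIZE_BUF_TOTAL_MASK) |
+ *       ((host_part << MEM2REG_SIZE_HOST_PART_SHIFT) &
+ *        MEM2REG_SIZE_HOST_PART_MASK);
+ */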
+
+struct vdec_ext_cmd {
+ unsigned int cmd;
+ unsigned int trans_id;
+ unsigned int hdr_addr;
+ unsigned int hdr_size;
+ unsigned int ctx_save_addr;
+ unsigned int ctx_load_addr;
+ unsigned int buf_ctrl_addr;
+ unsigned int seq_addr;
+ unsigned int pps_addr;
+ unsigned int pps_2addr;
+ unsigned int mem_to_reg_addr;
+ /* 31-16: buff size, 15-0: size filled by host; dwords */
+ unsigned int mem_to_reg_size;
+ unsigned int slice_params_addr;
+ unsigned int slice_params_size; /* dwords */
+ unsigned int last_luma_recon;
+ unsigned int last_chroma_recon;
+ unsigned int luma_err_base;
+ unsigned int chroma_err_base;
+ unsigned int scaled_display_size;
+ unsigned int horz_scale_control;
+ unsigned int vert_scale_control;
+ unsigned int scale_output_size;
+ unsigned int vlc_idx_table_size;
+ unsigned int vlc_idx_table_addr;
+ unsigned int vlc_tables_size;
+ unsigned int vlc_tables_addr;
+ unsigned int display_picture_size;
+ unsigned int parser_mode;
+ /* needed for separate colour planes */
+ unsigned int intra_buf_base_addr;
+ unsigned int intra_buf_size_per_plane;
+ unsigned int intra_buf_size_per_pipe;
+ unsigned int chroma2reconstructed_addr;
+ unsigned int luma_alt_addr;
+ unsigned int chroma_alt_addr;
+ unsigned int chroma2alt_addr;
+ unsigned int aux_line_buf_size_per_pipe;
+ unsigned int aux_line_buffer_base_addr;
+ unsigned int alt_output_pict_rotation;
+ /* miscellaneous flags */
+ struct {
+ unsigned is_chromainterleaved : 1;
+ unsigned is_packedformat : 1;
+ unsigned is_discontinuousmbs : 1;
+ };
+};
+
+#define CMD_VDEC_EXT_DWSIZE (sizeof(struct vdec_ext_cmd) / sizeof(unsigned int))
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+
+/* Completion */
+#define CMD_COMPLETION (0x60000000)
+#define CMD_COMPLETION_DWSIZE (1)
+
+#ifdef VDEC_USE_PVDEC_SEC
+/* Slice done */
+#define CMD_SLICE_DONE (0x70000000)
+#define CMD_SLICE_DONE_DWSIZE (1)
+#endif /* VDEC_USE_PVDEC_SEC */
+
+/* Bitstream segments */
+#define CMD_BITSTREAM_SEGMENTS (0xD0000000)
+#define CMD_BITSTREAM_SEGMENTS_MINUS1_MASK (0x0000001F)
+#define CMD_BITSTREAM_PARSE_BLK_MASK (0x0000FF00)
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+#define CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK (0x00000020)
+#define CMD_BITSTREAM_EOP_MASK (0x00000040)
+#define CMD_BITSTREAM_BS_TOT_SIZE_WORD_OFFSET (1)
+#define CMD_BITSTREAM_BS_SEG_LIST_WORD_OFFSET (2)
+#define CMD_BITSTREAM_HDR_DW_SIZE CMD_BITSTREAM_BS_SEG_LIST_WORD_OFFSET
+
+#define CMD_BITSTREAM_SEGMENTS_MAX_NUM (60)
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+
+#ifdef VDEC_USE_PVDEC_COMPATIBILITY
+/* Signatures */
+/* Signature set ids (see hwSignatureModules.c for exact order). */
+/* -- FRONT END/ENTROPY_PIPE ----------------------------------- */
+/*
+ * Signature group 0:
+ * REG(PVDEC_ENTROPY, CR_SR_SIGNATURE)
+ * REG(MSVDX_VEC, CR_SR_CRC)
+ */
+#define PVDEC_SIGNATURE_GROUP_0 BIT(0)
+/*
+ * Signature group 1:
+ * REG(PVDEC_ENTROPY, CR_HEVC_PARSER_SIGNATURE)
+ * REG(PVDEC_ENTROPY, CR_ENCAP_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_1 BIT(1)
+/*
+ * Signature group 2:
+ * REG(PVDEC_ENTROPY, CR_GENC_ENGINE_OUTPUT_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_2 BIT(2)
+/*
+ * Signature group 3:
+ * REGREP(PVDEC_ENTROPY, CR_GENC_BUFFER_SIGNATURE, 0)
+ * REGREP(PVDEC_ENTROPY, CR_GENC_BUFFER_SIGNATURE, 1)
+ * REGREP(PVDEC_ENTROPY, CR_GENC_BUFFER_SIGNATURE, 2)
+ * REGREP(PVDEC_ENTROPY, CR_GENC_BUFFER_SIGNATURE, 3)
+ * REG( PVDEC_ENTROPY, CR_GENC_FRAGMENT_SIGNATURE)
+ * REG( PVDEC_ENTROPY, CR_GENC_FRAGMENT_READ_SIGNATURE)
+ * REG( PVDEC_ENTROPY, CR_GENC_FRAGMENT_WRADDR_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_3 BIT(3)
+/* -- GENC_DEC -------------------------------------------------- */
+/*
+ * Signature group 4:
+ * REG( PVDEC_VEC_BE, CR_GDEC_FRAGMENT_REQ_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_GDEC_SYS_WR_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_GDEC_MEM2REG_SYS_WR_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_SLICE_STRUCTURE_REQ_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_SLICE_STRUCTURE_OVER1K_REQ_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_MEM_STRUCTURE_REQ_SIGNATURE)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_DATA_REQ_SIGNATURE, 0)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_DATA_REQ_SIGNATURE, 1)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_DATA_REQ_SIGNATURE, 2)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_DATA_REQ_SIGNATURE, 3)
+ */
+#define PVDEC_SIGNATURE_GROUP_4 BIT(4)
+/*
+ * Signature group 5:
+ * REG( PVDEC_VEC_BE, CR_GDEC_FRAGMENT_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_SLICE_STRUCTURE_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_SLICE_STRUCTURE_OVER1K_SIGNATURE)
+ * REG( PVDEC_VEC_BE, CR_MEM_STRUCTURE_SIGNATURE)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_BUFFER_SIGNATURE, 0)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_BUFFER_SIGNATURE, 1)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_BUFFER_SIGNATURE, 2)
+ * REGREP(PVDEC_VEC_BE, CR_GDEC_BUFFER_SIGNATURE, 3)
+ */
+#define PVDEC_SIGNATURE_GROUP_5 BIT(5)
+/* -- RESIDUAL AND COMMAND DEBUG--------------------------------- */
+/*
+ * Signature group 12:
+ * REG(PVDEC_VEC_BE, CR_DECODE_TO_COMMAND_PRIME_SIGNATURE)
+ * REG(PVDEC_VEC_BE, CR_DECODE_TO_COMMAND_SECOND_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_12 BIT(12)
+/*
+ * Signature group 13:
+ * REG(PVDEC_VEC_BE, CR_DECODE_TO_RESIDUAL_PRIME_SIGNATURE)
+ * REG(PVDEC_VEC_BE, CR_DECODE_TO_RESIDUAL_SECOND_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_13 BIT(13)
+/*
+ * Signature group 14:
+ * REG(PVDEC_VEC_BE, CR_COMMAND_ABOVE_READ_SIGNATURE)
+ * REG(PVDEC_VEC_BE, CR_COMMAND_ABOVE_WRITE_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_14 BIT(14)
+/*
+ * Signature group 15:
+ * REG(PVDEC_VEC_BE, CR_TEMPORAL_READ_SIGNATURE)
+ * REG(PVDEC_VEC_BE, CR_TEMPORAL_WRITE_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_15 BIT(15)
+/* --VEC--------------------------------------------------------- */
+/*
+ * Signature group 16:
+ * REG(PVDEC_VEC_BE, CR_COMMAND_OUTPUT_SIGNATURE)
+ * REG(MSVDX_VEC, CR_VEC_IXFORM_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_16 BIT(16)
+/*
+ * Signature group 17:
+ * REG(PVDEC_VEC_BE, CR_RESIDUAL_OUTPUT_SIGNATURE)
+ * REG(MSVDX_VEC, CR_VEC_COMMAND_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_17 BIT(17)
+/* --VDMC-------------------------------------------------------- */
+/*
+ * Signature group 18:
+ * REG(MSVDX_VDMC, CR_VDMC_REFERENCE_CACHE_SIGNATURE)
+ * REG(MSVDX_VDMC, CR_VDMC_REFERENCE_CACHE_MEM_WADDR_SIGNATURE)
+ * REG(MSVDX_VDMC, CR_VDMC_REFERENCE_CACHE_MEM_RADDR_SIGNATURE)
+ * REG(MSVDX_VDMC, CR_VDMC_REFERENCE_CACHE_MEM_WDATA_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_18 BIT(18)
+/*
+ * Signature group 19:
+ * REG(MSVDX_VDMC, CR_VDMC_2D_FILTER_PIPELINE_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_19 BIT(19)
+/*
+ * Signature group 20:
+ * REG(MSVDX_VDMC, CR_VDMC_PIXEL_RECONSTRUCTION_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_20 BIT(20)
+/*
+ * Signature group 21:
+ * REG(MSVDX_VDMC, CR_VDMC_MCU_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_21 BIT(21)
+/* ---VDEB------------------------------------------------------- */
+/*
+ * Signature group 22:
+ * REG(MSVDX_VDEB, CR_VDEB_SYS_MEM_RDATA_LUMA_SIGNATURE)
+ * REG(MSVDX_VDEB, CR_VDEB_SYS_MEM_RDATA_CHROMA_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_22 BIT(22)
+/*
+ * Signature group 23:
+ * REG(MSVDX_VDEB, CR_VDEB_SYS_MEM_ADDR_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_23 BIT(23)
+/*
+ * Signature group 24:
+ * REG(MSVDX_VDEB, CR_VDEB_SYS_MEM_WDATA_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_24 BIT(24)
+/* ---SCALER----------------------------------------------------- */
+/*
+ * Signature group 25:
+ * REG(MSVDX_VDEB, CR_VDEB_SCALE_ADDR_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_25 BIT(25)
+/*
+ * Signature group 26:
+ * REG(MSVDX_VDEB, CR_VDEB_SCALE_WDATA_SIGNATURE)
+ */
+#define PVDEC_SIGNATURE_GROUP_26 BIT(26)
+/* ---PICTURE CHECKSUM------------------------------------------- */
+/*
+ * Signature group 27:
+ * REG(MSVDX_VDEB, CR_VDEB_HEVC_CHECKSUM_LUMA)
+ * REG(MSVDX_VDEB, CR_VDEB_HEVC_CHECKSUM_CB)
+ * REG(MSVDX_VDEB, CR_VDEB_HEVC_CHECKSUM_CR)
+ */
+#define PVDEC_SIGNATURE_GROUP_27 BIT(27)
+#define PVDEC_SIGNATURE_NEW_METHOD BIT(31)
+
+/* Debug messages */
+#define DEBUG_DATA_TYPE_MASK 0xF
+#define DEBUG_DATA_TYPE_SHIFT 28
+
+#define DEBUG_DATA_MSG_TYPE_MASK 0x1
+#define DEBUG_DATA_MSG_TYPE_SHIFT 15
+
+#define DEBUG_DATA_MSG_ARG_COUNT_MASK 0x7
+#define DEBUG_DATA_MSG_ARG_COUNT_SHIFT 12
+
+#define DEBUG_DATA_MSG_LINE_NO_MASK 0xFFF
+#define DEBUG_DATA_MSG_LINE_NO_SHIFT 0
+
+#define DEBUG_DATA_TYPE_HEADER (0)
+#define DEBUG_DATA_TYPE_STRING (1)
+#define DEBUG_DATA_TYPE_PARAMS (2)
+#define DEBUG_DATA_TYPE_MSG (3)
+#define DEBUG_DATA_TYPE_PERF (6)
+
+#define DEBUG_DATA_MSG_TYPE_LOG 0
+#define DEBUG_DATA_MSG_TYPE_ASSERT 1
+
+#define DEBUG_DATA_TYPE_PERF_INC_TIME_MASK 0x1
+#define DEBUG_DATA_TYPE_PERF_INC_TIME_SHIFT 28
+#define DEBUG_DATA_TYPE_PERF_INC_TIME 0x1
+
+#define DEBUG_DATA_SET_TYPE(val, type, data_type) \
+ ({ \
+ data_type __val = val; \
+ ((__val) = (__val & ~(DEBUG_DATA_TYPE_MASK << DEBUG_DATA_TYPE_SHIFT)) | \
+ ((type) << DEBUG_DATA_TYPE_SHIFT)); })
+
+#define DEBUG_DATA_MSG_SET_ARG_COUNT(val, ac, data_type) \
+ ({ \
+ data_type __val = val; \
+ (__val = (__val & \
+ ~(DEBUG_DATA_MSG_ARG_COUNT_MASK << DEBUG_DATA_MSG_ARG_COUNT_SHIFT)) \
+ | ((ac) << DEBUG_DATA_MSG_ARG_COUNT_SHIFT)); })
+
+#define DEBUG_DATA_MSG_SET_LINE_NO(val, ln, type) \
+ ({ \
+ type __val = val; \
+ (__val = (__val & \
+ ~(DEBUG_DATA_MSG_LINE_NO_MASK << DEBUG_DATA_MSG_LINE_NO_SHIFT)) \
+ | ((ln) << DEBUG_DATA_MSG_LINE_NO_SHIFT)); })
+
+#define DEBUG_DATA_MSG_SET_TYPE(val, tp, type) \
+ ({ \
+ type __val = val; \
+ (__val = (__val & \
+ ~(DEBUG_DATA_MSG_TYPE_MASK << DEBUG_DATA_MSG_TYPE_SHIFT)) \
+ | ((tp) << DEBUG_DATA_MSG_TYPE_SHIFT)); })
+
+#define DEBUG_DATA_GET_TYPE(val) \
+ (((val) >> DEBUG_DATA_TYPE_SHIFT) & DEBUG_DATA_TYPE_MASK)
+#define DEBUG_DATA_TYPE_PERF_IS_INC_TIME(val) \
+ (((val) >> DEBUG_DATA_TYPE_PERF_INC_TIME_SHIFT) \
+ & DEBUG_DATA_TYPE_PERF_INC_TIME_MASK)
+#define DEBUG_DATA_MSG_GET_ARG_COUNT(val) \
+ (((val) >> DEBUG_DATA_MSG_ARG_COUNT_SHIFT) \
+ & DEBUG_DATA_MSG_ARG_COUNT_MASK)
+#define DEBUG_DATA_MSG_GET_LINE_NO(val) \
+ (((val) >> DEBUG_DATA_MSG_LINE_NO_SHIFT) \
+ & DEBUG_DATA_MSG_LINE_NO_MASK)
+#define DEBUG_DATA_MSG_GET_TYPE(val) \
+ (((val) >> DEBUG_DATA_MSG_TYPE_SHIFT) & DEBUG_DATA_MSG_TYPE_MASK)
+#define DEBUG_DATA_MSG_TYPE_IS_ASSERT(val) \
+ (DEBUG_DATA_MSG_GET_TYPE(val) == DEBUG_DATA_MSG_TYPE_ASSERT \
+ ? IMG_TRUE : IMG_FALSE)
+#define DEBUG_DATA_MSG_TYPE_IS_LOG(val) \
+ (DEBUG_DATA_MSG_GET_TYPE(val) == DEBUG_DATA_MSG_TYPE_LOG ? \
+ IMG_TRUE : IMG_FALSE)
+
+#define DEBUG_DATA_MSG_LAT(ln, ac, tp) \
+ (((ln) << DEBUG_DATA_MSG_LINE_NO_SHIFT) | \
+ ((ac) << DEBUG_DATA_MSG_ARG_COUNT_SHIFT) | \
+ ((tp) << DEBUG_DATA_MSG_TYPE_SHIFT))
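+
+/*
+ * Illustrative round trip (hypothetical line number and argument count,
+ * sketch only):
+ *
+ *   w = DEBUG_DATA_MSG_LAT(123, 2, DEBUG_DATA_MSG_TYPE_LOG);
+ *   DEBUG_DATA_MSG_GET_LINE_NO(w)   -> 123
+ *   DEBUG_DATA_MSG_GET_ARG_COUNT(w) -> 2
+ *   DEBUG_DATA_MSG_TYPE_IS_LOG(w)   -> IMG_TRUE
+ */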
+/* FWBSP-mode specific defines. */
+#ifdef VDEC_USE_PVDEC_SEC
+/**
+ * FWBSP_ENC_BSTR_BUF_QUEUE_LEN - Suggested number of bitstream buffers submitted (queued)
+ * to firmware for processing at the same time.
+ */
+#define FWBSP_ENC_BSTR_BUF_QUEUE_LEN 1
+
+#endif /* VDEC_USE_PVDEC_SEC */
+
+#endif /* VDEC_USE_PVDEC_COMPATIBILITY */
+#endif /* FW_INTERFACE_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264_idx.h b/drivers/media/platform/vxe-vxd/decoder/h264_idx.h
new file mode 100644
index 000000000000..5fd050f664b3
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264_idx.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * h264 idx table definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ */
+
+#ifndef __H264_IDX_H__
+#define __H264_IDX_H__
+
+#include <linux/types.h>
+
+static unsigned short h264_vlc_index_data[38][3] = {
+ { 2, 5, 0 }, /* NumCoeffTrailingOnes_Table9-5_nC_0-1.out */
+ { 0, 3, 76 }, /* NumCoeffTrailingOnes_Table9-5_nC_2-3.out */
+ { 0, 3, 160 }, /* NumCoeffTrailingOnes_Table9-5_nC_4-7.out */
+ { 0, 2, 231 }, /* NumCoeffTrailingOnesFixedLen.out */
+ { 2, 2, 244 }, /* NumCoeffTrailingOnesChromaDC_YUV420.out */
+ { 2, 5, 261 }, /* NumCoeffTrailingOnesChromaDC_YUV422.out */
+ { 2, 5, 301 }, /* TotalZeros_00.out */
+ { 0, 2, 326 }, /* TotalZeros_01.out */
+ { 0, 2, 345 }, /* TotalZeros_02.out */
+ { 0, 2, 363 }, /* TotalZeros_03.out */
+ { 0, 2, 379 }, /* TotalZeros_04.out */
+ { 0, 2, 394 }, /* TotalZeros_05.out */
+ { 0, 2, 406 }, /* TotalZeros_06.out */
+ { 0, 1, 418 }, /* TotalZeros_07.out */
+ { 0, 1, 429 }, /* TotalZeros_08.out */
+ { 0, 1, 438 }, /* TotalZeros_09.out */
+ { 2, 2, 446 }, /* TotalZeros_10.out */
+ { 2, 2, 452 }, /* TotalZeros_11.out */
+ { 2, 1, 456 }, /* TotalZeros_12.out */
+ { 0, 0, 459 }, /* TotalZeros_13.out */
+ { 0, 0, 461 }, /* TotalZeros_14.out */
+ { 2, 2, 463 }, /* TotalZerosChromaDC_YUV420_00.out */
+ { 2, 1, 467 }, /* TotalZerosChromaDC_YUV420_01.out */
+ { 0, 0, 470 }, /* TotalZerosChromaDC_YUV420_02.out */
+ { 0, 0, 472 }, /* Run_00.out */
+ { 2, 1, 474 }, /* Run_01.out */
+ { 0, 1, 477 }, /* Run_02.out */
+ { 0, 1, 481 }, /* Run_03.out */
+ { 1, 1, 487 }, /* Run_04.out */
+ { 0, 2, 494 }, /* Run_05.out */
+ { 0, 2, 502 }, /* Run_06.out */
+ { 2, 4, 520 }, /* TotalZerosChromaDC_YUV422_00.out */
+ { 2, 2, 526 }, /* TotalZerosChromaDC_YUV422_01.out */
+ { 0, 1, 530 }, /* TotalZerosChromaDC_YUV422_02.out */
+ { 1, 2, 534 }, /* TotalZerosChromaDC_YUV422_03.out */
+ { 0, 0, 538 }, /* TotalZerosChromaDC_YUV422_04.out */
+ { 0, 0, 540 }, /* TotalZerosChromaDC_YUV422_05.out */
+ { 0, 0, 542 }, /* TotalZerosChromaDC_YUV422_06.out */
+};
+
+static const unsigned char h264_vlc_index_size = 38;
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.c b/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.c
new file mode 100644
index 000000000000..3973749eac58
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.c
@@ -0,0 +1,3051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * h.264 secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "bspp_int.h"
+#include "h264_secure_parser.h"
+#include "pixel_api.h"
+#include "swsr.h"
+#include "vdec_defs.h"
+
+/*
+ * Reduce the DPB to 1 when there is no picture reordering
+ * (see REDUCED_DPB_NO_PIC_REORDERING below).
+ */
+#define SL_MAX_REF_IDX 32
+#define VUI_CPB_CNT_MAX 32
+#define MAX_SPS_COUNT 32
+#define MAX_PPS_COUNT 256
+/* changed from 810 */
+#define MAX_SLICE_GROUPMBS 65536
+#define MAX_SLICEGROUP_COUNT 8
+#define MAX_WIDTH_IN_MBS 256
+#define MAX_HEIGHT_IN_MBS 256
+#define MAX_COLOR_PLANE 4
+#define H264_MAX_SGM_SIZE 8196
+
+#define H264_MAX_CHROMA_QP_INDEX_OFFSET (12)
+#define H264_MIN_CHROMA_QP_INDEX_OFFSET (-12)
+
+/*
+ * AVC Profile IDC definitions
+ */
+enum h264_profile_idc {
+ h264_profile_cavlc444 = 44, /* YUV 4:4:4/14 "CAVLC 4:4:4" */
+ h264_profile_baseline = 66, /* YUV 4:2:0/8 "Baseline" */
+ h264_profile_main = 77, /* YUV 4:2:0/8 "Main" */
+ h264_profile_scalable = 83, /* YUV 4:2:0/8 "Scalable" */
+ h264_profile_extended = 88, /* YUV 4:2:0/8 "Extended" */
+ h264_profile_high = 100, /* YUV 4:2:0/8 "High" */
+ h264_profile_hig10 = 110, /* YUV 4:2:0/10 "High 10" */
+ h264_profile_mvc_high = 118, /* YUV 4:2:0/8 "Multiview High" */
+ h264_profile_high422 = 122, /* YUV 4:2:2/10 "High 4:2:2" */
+ h264_profile_mvc_stereo = 128, /* YUV 4:2:0/8 "Stereo High" */
+ h264_profile_high444 = 244, /* YUV 4:4:4/14 "High 4:4:4" */
+ h264_profile_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Remap H.264 colour format into internal representation.
+ */
+static const enum pixel_fmt_idc pixel_format_idc[] = {
+ PIXEL_FORMAT_MONO,
+ PIXEL_FORMAT_420,
+ PIXEL_FORMAT_422,
+ PIXEL_FORMAT_444,
+};
+
+/*
+ * Pixel Aspect Ratio
+ */
+static const unsigned short pixel_aspect[17][2] = {
+ { 0, 1 },
+ { 1, 1 },
+ { 12, 11 },
+ { 10, 11 },
+ { 16, 11 },
+ { 40, 33 },
+ { 24, 11 },
+ { 20, 11 },
+ { 32, 11 },
+ { 80, 33 },
+ { 18, 11 },
+ { 15, 11 },
+ { 64, 33 },
+ { 160, 99 },
+ { 4, 3 },
+ { 3, 2 },
+ { 2, 1 },
+};
+
+/*
+ * Table 7-3, 7-4: Default Scaling lists
+ */
+static const unsigned char default_4x4_intra[16] = {
+ 6, 13, 13, 20,
+ 20, 20, 28, 28,
+ 28, 28, 32, 32,
+ 32, 37, 37, 42
+};
+
+static const unsigned char default_4x4_inter[16] = {
+ 10, 14, 14, 20,
+ 20, 20, 24, 24,
+ 24, 24, 27, 27,
+ 27, 30, 30, 34
+};
+
+static const unsigned char default_8x8_intra[64] = {
+ 6, 10, 10, 13, 11, 13, 16, 16,
+ 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25,
+ 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29,
+ 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36,
+ 36, 36, 38, 38, 38, 40, 40, 42
+};
+
+static const unsigned char default_8x8_inter[64] = {
+ 9, 13, 13, 15, 13, 15, 17, 17,
+ 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25,
+ 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30,
+ 30, 30, 32, 32, 32, 33, 33, 35
+};
+
+/*
+ * to be used if no quantisation matrix is chosen
+ */
+static const unsigned char default_4x4_org[16] = {
+ 16, 16, 16, 16,
+ 16, 16, 16, 16,
+ 16, 16, 16, 16,
+ 16, 16, 16, 16
+};
+
+/*
+ * to be used if no quantisation matrix is chosen
+ */
+static const unsigned char default_8x8_org[64] = {
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+/*
+ * source: ITU-T H.264 2010/03, page 20 Table 6-1
+ */
+static const int bspp_subheightc[] = { -1, 2, 1, 1 };
+
+/*
+ * source: ITU-T H.264 2010/03, page 20 Table 6-1
+ */
+static const int bspp_subwidthc[] = { -1, 2, 2, 1 };
+
+/*
+ * Inline helpers for minimum and maximum values
+ */
+static inline unsigned int umin(unsigned int a, unsigned int b)
+{
+ return a < b ? a : b;
+}
+
+static inline int smin(int a, int b)
+{
+ return a < b ? a : b;
+}
+
+static inline int smax(int a, int b)
+{
+ return a > b ? a : b;
+}
+
+static void set_if_not_determined_yet(int *determined,
+ unsigned char condition,
+ int *target,
+ unsigned int value)
+{
+ if ((!(*determined)) && (condition)) {
+ *target = value;
+ *determined = 1;
+ }
+}
+
+static int bspp_h264_get_subwidthc(int chroma_format_idc, int separate_colour_plane_flag)
+{
+ return bspp_subwidthc[chroma_format_idc];
+}
+
+static int bspp_h264_get_subheightc(int chroma_format_idc, int separate_colour_plane_flag)
+{
+ return bspp_subheightc[chroma_format_idc];
+}
+
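+/*
+ * Worked values: h264ceillog2(1) == 0, h264ceillog2(8) == 3,
+ * h264ceillog2(9) == 4, i.e. ceil(log2(value)) for value >= 1.
+ */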
+static unsigned int h264ceillog2(unsigned int value)
+{
+ unsigned int status = 0;
+
+ value -= 1;
+ while (value > 0) {
+ value >>= 1;
+ status++;
+ }
+ return status;
+}
+
+/*
+ * @Function bspp_h264_set_default_vui
+ * @Description Sets default values of the VUI info
+ */
+static void bspp_h264_set_default_vui(struct bspp_h264_vui_info *vui_info)
+{
+ unsigned int *nal_hrd_bitrate_valueminus1 = NULL;
+ unsigned int *vcl_hrd_bitrate_valueminus1 = NULL;
+ unsigned int *nal_hrd_cpbsize_valueminus1 = NULL;
+ unsigned int *vcl_hrd_cpbsize_valueminus1 = NULL;
+ unsigned char *nal_hrd_cbr_flag = NULL;
+ unsigned char *vcl_hrd_cbr_flag = NULL;
+
+ /* Saving pointers */
+ nal_hrd_bitrate_valueminus1 = vui_info->nal_hrd_parameters.bit_rate_value_minus1;
+ vcl_hrd_bitrate_valueminus1 = vui_info->vcl_hrd_parameters.bit_rate_value_minus1;
+
+ nal_hrd_cpbsize_valueminus1 = vui_info->nal_hrd_parameters.cpb_size_value_minus1;
+ vcl_hrd_cpbsize_valueminus1 = vui_info->vcl_hrd_parameters.cpb_size_value_minus1;
+
+ nal_hrd_cbr_flag = vui_info->nal_hrd_parameters.cbr_flag;
+ vcl_hrd_cbr_flag = vui_info->vcl_hrd_parameters.cbr_flag;
+
+ /* Cleaning sVUIInfo */
+ if (vui_info->nal_hrd_parameters.bit_rate_value_minus1)
+ memset(vui_info->nal_hrd_parameters.bit_rate_value_minus1, 0x00,
+ VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (vui_info->nal_hrd_parameters.cpb_size_value_minus1)
+ memset(vui_info->nal_hrd_parameters.cpb_size_value_minus1, 0x00,
+ VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (vui_info->vcl_hrd_parameters.cpb_size_value_minus1)
+ memset(vui_info->vcl_hrd_parameters.cpb_size_value_minus1, 0x00,
+ VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (vui_info->nal_hrd_parameters.cbr_flag)
+ memset(vui_info->nal_hrd_parameters.cbr_flag, 0x00,
+ VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+ if (vui_info->vcl_hrd_parameters.cbr_flag)
+ memset(vui_info->vcl_hrd_parameters.cbr_flag, 0x00,
+ VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+ /* Make sure you set default for everything */
+ memset(vui_info, 0, sizeof(*vui_info));
+ vui_info->video_format = 5;
+ vui_info->colour_primaries = 2;
+ vui_info->transfer_characteristics = 2;
+ vui_info->matrix_coefficients = 2;
+ vui_info->motion_vectors_over_pic_boundaries_flag = 1;
+ vui_info->max_bytes_per_pic_denom = 2;
+ vui_info->max_bits_per_mb_denom = 1;
+ vui_info->log2_max_mv_length_horizontal = 16;
+ vui_info->log2_max_mv_length_vertical = 16;
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+ vui_info->max_dec_frame_buffering = 1;
+ vui_info->num_reorder_frames = 0;
+#else
+ vui_info->max_dec_frame_buffering = 0;
+ vui_info->num_reorder_frames = vui_info->max_dec_frame_buffering;
+#endif
+
+ /* Restoring pointers */
+ vui_info->nal_hrd_parameters.bit_rate_value_minus1 = nal_hrd_bitrate_valueminus1;
+ vui_info->vcl_hrd_parameters.bit_rate_value_minus1 = vcl_hrd_bitrate_valueminus1;
+
+ vui_info->nal_hrd_parameters.cpb_size_value_minus1 = nal_hrd_cpbsize_valueminus1;
+ vui_info->vcl_hrd_parameters.cpb_size_value_minus1 = vcl_hrd_cpbsize_valueminus1;
+
+ vui_info->nal_hrd_parameters.cbr_flag = nal_hrd_cbr_flag;
+ vui_info->vcl_hrd_parameters.cbr_flag = vcl_hrd_cbr_flag;
+}
+
+/*
+ * @Function bspp_h264_hrd_param_parser
+ * @Description Parse the HRD parameter
+ */
+static enum bspp_error_type bspp_h264_hrd_param_parser
+ (void *swsr_context,
+ struct bspp_h264_hrdparam_info *h264_hrd_param_info)
+{
+ unsigned int sched_sel_idx;
+
+ VDEC_ASSERT(swsr_context);
+ h264_hrd_param_info->cpb_cnt_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+
+ if (h264_hrd_param_info->cpb_cnt_minus1 >= 32)
+ pr_info("pb_cnt_minus1 is not within the range");
+
+ h264_hrd_param_info->bit_rate_scale = swsr_read_bits(swsr_context, 4);
+ h264_hrd_param_info->cpb_size_scale = swsr_read_bits(swsr_context, 4);
+
+ if (!h264_hrd_param_info->bit_rate_value_minus1) {
+ h264_hrd_param_info->bit_rate_value_minus1 = kcalloc
+ (VDEC_H264_MAXIMUMVALUEOFCPB_CNT,
+ sizeof(unsigned int), GFP_KERNEL);
+ VDEC_ASSERT(h264_hrd_param_info->bit_rate_value_minus1);
+ if (!h264_hrd_param_info->bit_rate_value_minus1)
+ return BSPP_ERROR_OUT_OF_MEMORY;
+ }
+
+ if (!h264_hrd_param_info->cpb_size_value_minus1) {
+ h264_hrd_param_info->cpb_size_value_minus1 = kcalloc
+ (VDEC_H264_MAXIMUMVALUEOFCPB_CNT,
+ sizeof(unsigned int),
+ GFP_KERNEL);
+ VDEC_ASSERT(h264_hrd_param_info->cpb_size_value_minus1);
+ if (!h264_hrd_param_info->cpb_size_value_minus1)
+ return BSPP_ERROR_OUT_OF_MEMORY;
+ }
+
+ if (!h264_hrd_param_info->cbr_flag) {
+ h264_hrd_param_info->cbr_flag =
+ kcalloc(VDEC_H264_MAXIMUMVALUEOFCPB_CNT, sizeof(unsigned char), GFP_KERNEL);
+ VDEC_ASSERT(h264_hrd_param_info->cbr_flag);
+ if (!h264_hrd_param_info->cbr_flag)
+ return BSPP_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (sched_sel_idx = 0; sched_sel_idx <= h264_hrd_param_info->cpb_cnt_minus1;
+ sched_sel_idx++) {
+ h264_hrd_param_info->bit_rate_value_minus1[sched_sel_idx] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+
+ if (h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] == 0xffffffff)
+ /* 65-bit pattern (32 zeros, a 1, 32 zeros) reads as 0xffffffff; treat the value as 0 */
+ h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] = 0;
+
+ h264_hrd_param_info->cbr_flag[sched_sel_idx] = swsr_read_bits(swsr_context, 1);
+ }
+
+ h264_hrd_param_info->initial_cpb_removal_delay_length_minus1 = swsr_read_bits(swsr_context,
+ 5);
+ h264_hrd_param_info->cpb_removal_delay_length_minus1 = swsr_read_bits(swsr_context, 5);
+ h264_hrd_param_info->dpb_output_delay_length_minus1 = swsr_read_bits(swsr_context, 5);
+ h264_hrd_param_info->time_offset_length = swsr_read_bits(swsr_context, 5);
+
+ return BSPP_ERROR_NONE;
+}
+
+/*
+ * @Function bspp_h264_get_default_hrd_param
+ * @Description Get default value of the HRD parameter
+ */
+static void bspp_h264_get_default_hrd_param(struct bspp_h264_hrdparam_info *h264_hrd_param_info)
+{
+ /* other parameters already set to '0' */
+ h264_hrd_param_info->initial_cpb_removal_delay_length_minus1 = 23;
+ h264_hrd_param_info->cpb_removal_delay_length_minus1 = 23;
+ h264_hrd_param_info->dpb_output_delay_length_minus1 = 23;
+ h264_hrd_param_info->time_offset_length = 24;
+}
+
+/*
+ * @Function bspp_h264_vui_parser
+ * @Description Parse the VUI info
+ */
+static enum bspp_error_type bspp_h264_vui_parser(void *swsr_context,
+ struct bspp_h264_vui_info *vui_info,
+ struct bspp_h264_sps_info *sps_info)
+{
+ enum bspp_error_type vui_parser_error = BSPP_ERROR_NONE;
+
+ vui_info->aspect_ratio_info_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->aspect_ratio_info_present_flag) {
+ vui_info->aspect_ratio_idc = swsr_read_bits(swsr_context, 8);
+ /* Extended SAR */
+ if (vui_info->aspect_ratio_idc == 255) {
+ vui_info->sar_width = swsr_read_bits(swsr_context, 16);
+ vui_info->sar_height = swsr_read_bits(swsr_context, 16);
+ } else if (vui_info->aspect_ratio_idc < 17) {
+ vui_info->sar_width = pixel_aspect[vui_info->aspect_ratio_idc][0];
+ vui_info->sar_height = pixel_aspect[vui_info->aspect_ratio_idc][1];
+ } else {
+ /* we can treat this as an aux data error */
+ vui_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+ }
+
+ vui_info->overscan_info_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->overscan_info_present_flag)
+ vui_info->overscan_appropriate_flag = swsr_read_bits(swsr_context, 1);
+
+ vui_info->video_signal_type_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->video_signal_type_present_flag) {
+ vui_info->video_format = swsr_read_bits(swsr_context, 3);
+ vui_info->video_full_range_flag = swsr_read_bits(swsr_context, 1);
+ vui_info->colour_description_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->colour_description_present_flag) {
+ vui_info->colour_primaries = swsr_read_bits(swsr_context, 8);
+ vui_info->transfer_characteristics = swsr_read_bits(swsr_context, 8);
+ vui_info->matrix_coefficients = swsr_read_bits(swsr_context, 8);
+ }
+ }
+
+ vui_info->chroma_location_info_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->chroma_location_info_present_flag) {
+ vui_info->chroma_sample_loc_type_top_field = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ vui_info->chroma_sample_loc_type_bottom_field = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ }
+
+ vui_info->timing_info_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->timing_info_present_flag) {
+ vui_info->num_units_in_tick = swsr_read_bits(swsr_context, 16);
+ vui_info->num_units_in_tick <<= 16; /* SR can only do up to 31 bit reads */
+ vui_info->num_units_in_tick |= swsr_read_bits(swsr_context, 16);
+ vui_info->time_scale = swsr_read_bits(swsr_context, 16);
+ vui_info->time_scale <<= 16; /* SR can only do up to 31 bit reads */
+ vui_info->time_scale |= swsr_read_bits(swsr_context, 16);
+ if (!vui_info->num_units_in_tick || !vui_info->time_scale)
+ vui_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+ vui_info->fixed_frame_rate_flag = swsr_read_bits(swsr_context, 1);
+ }
+
+ /* no default values */
+ vui_info->nal_hrd_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->nal_hrd_parameters_present_flag)
+ vui_parser_error |= bspp_h264_hrd_param_parser(swsr_context,
+ &vui_info->nal_hrd_parameters);
+ else
+ bspp_h264_get_default_hrd_param(&vui_info->nal_hrd_parameters);
+
+ vui_info->vcl_hrd_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+
+ if (vui_info->vcl_hrd_parameters_present_flag)
+ vui_parser_error |= bspp_h264_hrd_param_parser(swsr_context,
+ &vui_info->vcl_hrd_parameters);
+ else
+ bspp_h264_get_default_hrd_param(&vui_info->vcl_hrd_parameters);
+
+ if (vui_info->nal_hrd_parameters_present_flag || vui_info->vcl_hrd_parameters_present_flag)
+ vui_info->low_delay_hrd_flag = swsr_read_bits(swsr_context, 1);
+
+ vui_info->pic_struct_present_flag = swsr_read_bits(swsr_context, 1);
+ vui_info->bitstream_restriction_flag = swsr_read_bits(swsr_context, 1);
+ if (vui_info->bitstream_restriction_flag) {
+ vui_info->motion_vectors_over_pic_boundaries_flag = swsr_read_bits(swsr_context, 1);
+ vui_info->max_bytes_per_pic_denom = swsr_read_unsigned_expgoulomb(swsr_context);
+ vui_info->max_bits_per_mb_denom = swsr_read_unsigned_expgoulomb(swsr_context);
+ vui_info->log2_max_mv_length_horizontal =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ vui_info->log2_max_mv_length_vertical = swsr_read_unsigned_expgoulomb(swsr_context);
+ vui_info->num_reorder_frames = swsr_read_unsigned_expgoulomb(swsr_context);
+ vui_info->max_dec_frame_buffering = swsr_read_unsigned_expgoulomb(swsr_context);
+ }
+
+ if ((sps_info->profile_idc == h264_profile_baseline ||
+ sps_info->profile_idc == h264_profile_extended) &&
+ sps_info->max_num_ref_frames == 1) {
+ vui_info->bitstream_restriction_flag = 1;
+ vui_info->num_reorder_frames = 0;
+ vui_info->max_dec_frame_buffering = 1;
+ }
+
+ if (vui_info->num_reorder_frames > 32)
+ vui_parser_error |= BSPP_ERROR_UNSUPPORTED;
+
+ return vui_parser_error;
+}
+
+/*
+ * Parse scaling list
+ */
+static enum bspp_error_type bspp_h264_scl_listparser(void *swsr_context,
+ unsigned char *scaling_list,
+ unsigned char sizeof_scaling_list,
+ unsigned char *usedefaultscalingmatrixflag)
+{
+ enum bspp_error_type parse_error = BSPP_ERROR_NONE;
+ int delta_scale;
+ unsigned int lastscale = 8;
+ unsigned int nextscale = 8;
+ unsigned int j;
+
+ VDEC_ASSERT(swsr_context);
+ VDEC_ASSERT(scaling_list);
+ VDEC_ASSERT(usedefaultscalingmatrixflag);
+
+ if (!scaling_list || !swsr_context || !usedefaultscalingmatrixflag) {
+ parse_error = BSPP_ERROR_UNRECOVERABLE;
+ return parse_error;
+ }
+
+ /* 7.3.2.1.1 */
+ for (j = 0; j < sizeof_scaling_list; j++) {
+ if (nextscale != 0) {
+ delta_scale = swsr_read_signed_expgoulomb(swsr_context);
+ if ((-128 > delta_scale) || delta_scale > 127)
+ parse_error |= BSPP_ERROR_INVALID_VALUE;
+ nextscale = (lastscale + delta_scale + 256) & 0xff;
+ *usedefaultscalingmatrixflag = (j == 0 && nextscale == 0);
+ }
+ scaling_list[j] = (nextscale == 0) ? lastscale : nextscale;
+ lastscale = scaling_list[j];
+ }
+ return parse_error;
+}
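+
+/*
+ * Worked example of the 7.3.2.1.1 recurrence above (hypothetical
+ * bitstream): starting from lastscale = 8, delta_scale values 2 then -1
+ * give nextscale = (8 + 2 + 256) & 0xff = 10 and
+ * (10 - 1 + 256) & 0xff = 9, so the first two list entries are 10, 9.
+ */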
+
+/*
+ * Parse the SPS NAL unit
+ */
+static enum bspp_error_type bspp_h264_sps_parser(void *swsr_context,
+ void *str_res,
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info)
+{
+ unsigned int i;
+ unsigned char scaling_list_num;
+ struct bspp_h264_sps_info *sps_info;
+ struct bspp_h264_vui_info *vui_info;
+ enum bspp_error_type sps_parser_error = BSPP_ERROR_NONE;
+ enum bspp_error_type vui_parser_error = BSPP_ERROR_NONE;
+
+ sps_info = &h264_seq_hdr_info->sps_info;
+ vui_info = &h264_seq_hdr_info->vui_info;
+
+ /* Always set the default VUI/MVCExt; their values
+ * may be used even if VUI/MVCExt are not present
+ */
+ bspp_h264_set_default_vui(vui_info);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Parsing Sequence Parameter Set");
+#endif
+ sps_info->profile_idc = swsr_read_bits(swsr_context, 8);
+ if (sps_info->profile_idc != H264_PROFILE_BASELINE &&
+ sps_info->profile_idc != H264_PROFILE_MAIN &&
+ sps_info->profile_idc != H264_PROFILE_SCALABLE &&
+ sps_info->profile_idc != H264_PROFILE_EXTENDED &&
+ sps_info->profile_idc != H264_PROFILE_HIGH &&
+ sps_info->profile_idc != H264_PROFILE_HIGH10 &&
+ sps_info->profile_idc != H264_PROFILE_MVC_HIGH &&
+ sps_info->profile_idc != H264_PROFILE_HIGH422 &&
+ sps_info->profile_idc != H264_PROFILE_CAVLC444 &&
+ sps_info->profile_idc != H264_PROFILE_MVC_STEREO &&
+ sps_info->profile_idc != H264_PROFILE_HIGH444) {
+ pr_err("Invalid Profile ID [%d],Parsed by BSPP", sps_info->profile_idc);
+ return BSPP_ERROR_UNSUPPORTED;
+ }
+ sps_info->constraint_set_flags = swsr_read_bits(swsr_context, 8);
+ sps_info->level_idc = swsr_read_bits(swsr_context, 8);
+
+ /* sequence parameter set id */
+ sps_info->seq_parameter_set_id = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->seq_parameter_set_id >= MAX_SPS_COUNT) {
+ pr_err("SPS ID [%d] goes beyond the limit", sps_info->seq_parameter_set_id);
+ return BSPP_ERROR_UNSUPPORTED;
+ }
+
+ /* High profile settings */
+ if (sps_info->profile_idc == H264_PROFILE_HIGH ||
+ sps_info->profile_idc == H264_PROFILE_HIGH10 ||
+ sps_info->profile_idc == H264_PROFILE_HIGH422 ||
+ sps_info->profile_idc == H264_PROFILE_HIGH444 ||
+ sps_info->profile_idc == H264_PROFILE_CAVLC444 ||
+ sps_info->profile_idc == H264_PROFILE_MVC_HIGH ||
+ sps_info->profile_idc == H264_PROFILE_MVC_STEREO) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("This is High Profile Bitstream");
+#endif
+ sps_info->chroma_format_idc = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->chroma_format_idc > 3) {
+ pr_err("chroma_format_idc[%d] is not within the range",
+ sps_info->chroma_format_idc);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+ if (sps_info->chroma_format_idc == 3)
+ sps_info->separate_colour_plane_flag = swsr_read_bits(swsr_context, 1);
+ else
+ sps_info->separate_colour_plane_flag = 0;
+
+ sps_info->bit_depth_luma_minus8 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->bit_depth_luma_minus8 > 6)
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+ sps_info->bit_depth_chroma_minus8 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->bit_depth_chroma_minus8 > 6)
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+ sps_info->qpprime_y_zero_transform_bypass_flag = swsr_read_bits(swsr_context, 1);
+ sps_info->seq_scaling_matrix_present_flag = swsr_read_bits(swsr_context, 1);
+ if (sps_info->seq_scaling_matrix_present_flag) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("seq_scaling_matrix_present_flag is available");
+#endif
+ scaling_list_num = (sps_info->chroma_format_idc != 3) ? 8 : 12;
+
+ if (!sps_info->scllst4x4seq) {
+ sps_info->scllst4x4seq =
+ kmalloc((sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+ [H264FW_4X4_SIZE])), GFP_KERNEL);
+ if (!sps_info->scllst4x4seq) {
+ sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ } else {
+ VDEC_ASSERT(sps_info->scllst4x4seq);
+ memset(sps_info->scllst4x4seq, 0x00,
+ sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+ [H264FW_4X4_SIZE]));
+ }
+ }
+ if (!sps_info->scllst8x8seq) {
+ sps_info->scllst8x8seq =
+ kmalloc((sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE])), GFP_KERNEL);
+ if (!sps_info->scllst8x8seq) {
+ sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ } else {
+ VDEC_ASSERT(sps_info->scllst8x8seq);
+ memset(sps_info->scllst8x8seq, 0x00,
+ sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE]));
+ }
+ }
+
+ {
+ unsigned char(*scllst4x4seq)[H264FW_NUM_4X4_LISTS]
+ [H264FW_4X4_SIZE] =
+ (unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+ sps_info->scllst4x4seq;
+ unsigned char(*scllst8x8seq)[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE] =
+ (unsigned char (*)[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE])
+ sps_info->scllst8x8seq;
+
+ for (i = 0; i < scaling_list_num; i++) {
+ unsigned char *ptr =
+ &sps_info->usedefaultscalingmatrixflag_seq[i];
+
+ sps_info->seq_scaling_list_present_flag[i] =
+ swsr_read_bits(swsr_context, 1);
+ if (sps_info->seq_scaling_list_present_flag[i]) {
+ if (i < 6) {
+ sps_parser_error |=
+ bspp_h264_scl_listparser
+ (swsr_context,
+ (*scllst4x4seq)[i], 16,
+ ptr);
+ } else {
+ sps_parser_error |=
+ bspp_h264_scl_listparser
+ (swsr_context,
+ (*scllst8x8seq)[i - 6], 64,
+ ptr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ /* default values in here */
+ sps_info->chroma_format_idc = 1;
+ sps_info->bit_depth_luma_minus8 = 0;
+ sps_info->bit_depth_chroma_minus8 = 0;
+ sps_info->qpprime_y_zero_transform_bypass_flag = 0;
+ sps_info->seq_scaling_matrix_present_flag = 0;
+ }
+
+ sps_info->log2_max_frame_num_minus4 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->log2_max_frame_num_minus4 > 12) {
+ pr_err("log2_max_frame_num_minus4[%d] is not within range [0 - 12]",
+ sps_info->log2_max_frame_num_minus4);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+
+ sps_info->pic_order_cnt_type = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->pic_order_cnt_type > 2) {
+ pr_err("pic_order_cnt_type[%d] is not within range [0 - 2]",
+ sps_info->pic_order_cnt_type);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+
+ if (sps_info->pic_order_cnt_type == 0) {
+ sps_info->log2_max_pic_order_cnt_lsb_minus4 = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if (sps_info->log2_max_pic_order_cnt_lsb_minus4 > 12) {
+ pr_err("log2_max_pic_order_cnt_lsb_minus4[%d] is not within range [0 - 12]",
+ sps_info->log2_max_pic_order_cnt_lsb_minus4);
+ sps_info->log2_max_pic_order_cnt_lsb_minus4 = 12;
+ sps_parser_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+ } else if (sps_info->pic_order_cnt_type == 1) {
+ sps_info->delta_pic_order_always_zero_flag = swsr_read_bits(swsr_context, 1);
+ sps_info->offset_for_non_ref_pic = swsr_read_signed_expgoulomb(swsr_context);
+ sps_info->offset_for_top_to_bottom_field = swsr_read_signed_expgoulomb
+ (swsr_context);
+ sps_info->num_ref_frames_in_pic_order_cnt_cycle = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if (sps_info->num_ref_frames_in_pic_order_cnt_cycle > 255) {
+ pr_err("num_ref_frames_in_pic_order_cnt_cycle[%d] is not within range [0 - 256]",
+ sps_info->num_ref_frames_in_pic_order_cnt_cycle);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+
+ if (!sps_info->offset_for_ref_frame) {
+ sps_info->offset_for_ref_frame =
+ kmalloc((H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int)),
+ GFP_KERNEL);
+ if (!sps_info->offset_for_ref_frame) {
+ pr_err("out of memory");
+ sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ if (sps_info->offset_for_ref_frame) {
+ VDEC_ASSERT(sps_info->num_ref_frames_in_pic_order_cnt_cycle <=
+ H264FW_MAX_CYCLE_REF_FRAMES);
+ memset(sps_info->offset_for_ref_frame, 0x00,
+ (H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int)));
+ for (i = 0; i < sps_info->num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ /* check the max value and if it crosses then exit from the loop */
+ sps_info->offset_for_ref_frame[i] = swsr_read_signed_expgoulomb
+ (swsr_context);
+ }
+ }
+ } else if (sps_info->pic_order_cnt_type != 2) {
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+ sps_info->max_num_ref_frames = swsr_read_unsigned_expgoulomb(swsr_context);
+
+ if (sps_info->max_num_ref_frames > 16) {
+ pr_err("num_ref_frames[%d] is not within range [0 - 16]",
+ sps_info->max_num_ref_frames);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+ sps_info->gaps_in_frame_num_value_allowed_flag = swsr_read_bits(swsr_context, 1);
+ sps_info->pic_width_in_mbs_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->pic_width_in_mbs_minus1 >= MAX_WIDTH_IN_MBS) {
+ pr_err("pic_width_in_mbs_minus1[%d] is not within range",
+ sps_info->pic_width_in_mbs_minus1);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+ sps_info->pic_height_in_map_units_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (sps_info->pic_height_in_map_units_minus1 >= MAX_HEIGHT_IN_MBS) {
+ pr_err("pic_height_in_map_units_minus1[%d] is not within range",
+ sps_info->pic_height_in_map_units_minus1);
+ sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+ }
+
+ sps_info->frame_mbs_only_flag = swsr_read_bits(swsr_context, 1);
+ if (!sps_info->frame_mbs_only_flag)
+ sps_info->mb_adaptive_frame_field_flag = swsr_read_bits(swsr_context, 1);
+ else
+ sps_info->mb_adaptive_frame_field_flag = 0;
+
+ sps_info->direct_8x8_inference_flag = swsr_read_bits(swsr_context, 1);
+
+ sps_info->frame_cropping_flag = swsr_read_bits(swsr_context, 1);
+ if (sps_info->frame_cropping_flag) {
+ sps_info->frame_crop_left_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+ sps_info->frame_crop_right_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+ sps_info->frame_crop_top_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+ sps_info->frame_crop_bottom_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+ } else {
+ sps_info->frame_crop_left_offset = 0;
+ sps_info->frame_crop_right_offset = 0;
+ sps_info->frame_crop_top_offset = 0;
+ sps_info->frame_crop_bottom_offset = 0;
+ }
+
+ sps_info->vui_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+ /* initialise matrix_coefficients to 2 (unspecified) */
+ vui_info->matrix_coefficients = 2;
+
+ if (sps_info->vui_parameters_present_flag) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("vui_parameters_present_flag is available");
+#endif
+ /* save the SPS parse error in temp variable */
+ vui_parser_error = bspp_h264_vui_parser(swsr_context, vui_info, sps_info);
+ if (vui_parser_error != BSPP_ERROR_NONE)
+ sps_parser_error |= BSPP_ERROR_AUXDATA;
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+ vui_info->max_dec_frame_buffering = 1;
+ vui_info->num_reorder_frames = 0;
+#endif
+ }
+
+ if (sps_info->profile_idc == H264_PROFILE_MVC_HIGH ||
+ sps_info->profile_idc == H264_PROFILE_MVC_STEREO) {
+ pr_err("No MVC Support for this version\n");
+ }
+
+ if (swsr_check_exception(swsr_context) != SWSR_EXCEPT_NO_EXCEPTION)
+ sps_parser_error |= BSPP_ERROR_INSUFFICIENT_DATA;
+
+ return sps_parser_error;
+}
+
+/*
+ * Parse the PPS NAL unit
+ */
+static enum bspp_error_type bspp_h264_pps_parser(void *swsr_context,
+ void *str_res,
+ struct bspp_h264_pps_info *h264_pps_info)
+{
+ int i, group, chroma_format_idc;
+ unsigned int number_bits_per_slicegroup_id;
+ unsigned char n_scaling_list;
+ unsigned char more_rbsp_data;
+ unsigned int result;
+ enum bspp_error_type pps_parse_error = BSPP_ERROR_NONE;
+
+ VDEC_ASSERT(swsr_context);
+
+ h264_pps_info->pps_id = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (h264_pps_info->pps_id >= MAX_PPS_COUNT) {
+ pr_err("Picture Parameter Set(PPS) ID is not within the range");
+ h264_pps_info->pps_id = (int)BSPP_INVALID;
+ return BSPP_ERROR_UNSUPPORTED;
+ }
+ h264_pps_info->seq_parameter_set_id = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (h264_pps_info->seq_parameter_set_id >= MAX_SPS_COUNT) {
+		pr_err("Sequence Parameter Set (SPS) ID is not within the range");
+ h264_pps_info->seq_parameter_set_id = (int)BSPP_INVALID;
+ return BSPP_ERROR_UNSUPPORTED;
+ }
+
+ {
+ /*
+		 * Get the chroma_format_idc from the SPS, because MVC shares SPS
+		 * and subset SPS ids (H.7.4.1.2.1).
+		 * At this point it is not clear whether this PPS refers to an SPS
+		 * or a subset SPS. For the purpose of chroma_format_idc, however,
+		 * it should be fine to try to locate a subset SPS if there isn't
+		 * a normal one.
+ */
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+ struct bspp_sequence_hdr_info *seq_hdr_info;
+
+ seq_hdr_info = bspp_get_sequ_hdr(str_res, h264_pps_info->seq_parameter_set_id);
+
+ if (!seq_hdr_info) {
+ seq_hdr_info = bspp_get_sequ_hdr(str_res,
+ h264_pps_info->seq_parameter_set_id + 32);
+ if (!seq_hdr_info)
+ return BSPP_ERROR_NO_SEQUENCE_HDR;
+ }
+
+ h264_seq_hdr_info =
+ (struct bspp_h264_seq_hdr_info *)seq_hdr_info->secure_sequence_info;
+
+ chroma_format_idc = h264_seq_hdr_info->sps_info.chroma_format_idc;
+ }
+
+ h264_pps_info->entropy_coding_mode_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->pic_order_present_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->num_slice_groups_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+ if ((h264_pps_info->num_slice_groups_minus1 + 1) >
+ MAX_SLICEGROUP_COUNT) {
+ h264_pps_info->num_slice_groups_minus1 =
+ MAX_SLICEGROUP_COUNT - 1;
+ pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ }
+
+ if (h264_pps_info->num_slice_groups_minus1 > 0) {
+ h264_pps_info->slice_group_map_type = swsr_read_unsigned_expgoulomb(swsr_context);
+#ifdef DEBUG_DECODER_DRIVER
+		pr_info("slice_group_map_type is : %d, parsed by BSPP",
+			h264_pps_info->slice_group_map_type);
+#endif
+ if (h264_pps_info->slice_group_map_type > 6) {
+			pr_err("slice_group_map_type [%d] is not within the range [0 - 6]",
+ h264_pps_info->slice_group_map_type);
+ pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ }
+
+ if (h264_pps_info->slice_group_map_type == 0) {
+ for (group = 0; group <= h264_pps_info->num_slice_groups_minus1; group++) {
+ h264_pps_info->run_length_minus1[group] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ }
+ } else if (h264_pps_info->slice_group_map_type == 2) {
+ for (group = 0; group < h264_pps_info->num_slice_groups_minus1; group++) {
+ h264_pps_info->top_left[group] = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ h264_pps_info->bottom_right[group] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ }
+ } else if (h264_pps_info->slice_group_map_type == 3 ||
+ h264_pps_info->slice_group_map_type == 4 ||
+ h264_pps_info->slice_group_map_type == 5) {
+ h264_pps_info->slice_group_change_direction_flag = swsr_read_bits
+ (swsr_context, 1);
+ h264_pps_info->slice_group_change_rate_minus1 =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ } else if (h264_pps_info->slice_group_map_type == 6) {
+ h264_pps_info->pic_size_in_map_unit = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if (h264_pps_info->pic_size_in_map_unit >= H264_MAX_SGM_SIZE) {
+ pr_err("pic_size_in_map_units_minus1 [%d] is not within the range",
+ h264_pps_info->pic_size_in_map_unit);
+ pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ }
+ number_bits_per_slicegroup_id = h264ceillog2
+ (h264_pps_info->num_slice_groups_minus1 + 1);
+
+ if ((h264_pps_info->pic_size_in_map_unit + 1) >
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum) {
+ unsigned char *slice_group_id =
+ kmalloc(((h264_pps_info->pic_size_in_map_unit + 1) *
+ sizeof(unsigned char)),
+ GFP_KERNEL);
+ if (!slice_group_id) {
+ pr_err("out of memory");
+ pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ } else {
+ pr_err("reallocating SGM info from size %lu bytes to size %lu bytes",
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum *
+ sizeof(unsigned char),
+ (h264_pps_info->pic_size_in_map_unit + 1) *
+ sizeof(unsigned char));
+ if (h264_pps_info->h264_ppssgm_info.slice_group_id) {
+ memcpy
+ (slice_group_id,
+ h264_pps_info->h264_ppssgm_info.slice_group_id,
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum *
+ sizeof(unsigned char));
+ kfree
+ (h264_pps_info->h264_ppssgm_info.slice_group_id);
+ }
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum =
+ (h264_pps_info->pic_size_in_map_unit + 1);
+ h264_pps_info->h264_ppssgm_info.slice_group_id =
+ slice_group_id;
+ }
+ }
+
+ VDEC_ASSERT((h264_pps_info->pic_size_in_map_unit + 1) <=
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum);
+ for (i = 0; i <= h264_pps_info->pic_size_in_map_unit; i++)
+ h264_pps_info->h264_ppssgm_info.slice_group_id[i] =
+ swsr_read_bits(swsr_context, number_bits_per_slicegroup_id);
+ }
+ }
+
+ for (i = 0; i < H264FW_MAX_REFPIC_LISTS; i++) {
+ h264_pps_info->num_ref_idx_lx_active_minus1[i] = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if (h264_pps_info->num_ref_idx_lx_active_minus1[i] >=
+ SL_MAX_REF_IDX) {
+ pr_err("num_ref_idx_lx_active_minus1[%d] [%d] is not within the range",
+ i, h264_pps_info->num_ref_idx_lx_active_minus1[i]);
+ pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ }
+ }
+
+ h264_pps_info->weighted_pred_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->weighted_bipred_idc = swsr_read_bits(swsr_context, 2);
+ h264_pps_info->pic_init_qp_minus26 = swsr_read_signed_expgoulomb(swsr_context);
+	if (h264_pps_info->pic_init_qp_minus26 > 25)
+		pr_err("pic_init_qp_minus26[%d] is not within the range [-26, 25]",
+		       h264_pps_info->pic_init_qp_minus26);
+
+ h264_pps_info->pic_init_qs_minus26 = swsr_read_signed_expgoulomb(swsr_context);
+	if (h264_pps_info->pic_init_qs_minus26 > 25)
+		pr_err("pic_init_qs_minus26[%d] is not within the range [-26, 25]",
+		       h264_pps_info->pic_init_qs_minus26);
+
+ h264_pps_info->chroma_qp_index_offset = swsr_read_signed_expgoulomb(swsr_context);
+ if (h264_pps_info->chroma_qp_index_offset > H264_MAX_CHROMA_QP_INDEX_OFFSET)
+ h264_pps_info->chroma_qp_index_offset = H264_MAX_CHROMA_QP_INDEX_OFFSET;
+
+ else if (h264_pps_info->chroma_qp_index_offset < H264_MIN_CHROMA_QP_INDEX_OFFSET)
+ h264_pps_info->chroma_qp_index_offset = H264_MIN_CHROMA_QP_INDEX_OFFSET;
+
+ h264_pps_info->deblocking_filter_control_present_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->constrained_intra_pred_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->redundant_pic_cnt_present_flag = swsr_read_bits(swsr_context, 1);
+
+ /* Check for more rbsp data. */
+ result = swsr_check_more_rbsp_data(swsr_context, &more_rbsp_data);
+ if (result == 0 && more_rbsp_data) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("More RBSP data is available");
+#endif
+ /* Fidelity Range Extensions Stuff */
+ h264_pps_info->transform_8x8_mode_flag = swsr_read_bits(swsr_context, 1);
+ h264_pps_info->pic_scaling_matrix_present_flag = swsr_read_bits(swsr_context, 1);
+ if (h264_pps_info->pic_scaling_matrix_present_flag) {
+ if (!h264_pps_info->scllst4x4pic) {
+ h264_pps_info->scllst4x4pic =
+ kmalloc((sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+ [H264FW_4X4_SIZE])), GFP_KERNEL);
+ if (!h264_pps_info->scllst4x4pic) {
+ pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ } else {
+ VDEC_ASSERT(h264_pps_info->scllst4x4pic);
+ memset(h264_pps_info->scllst4x4pic, 0x00,
+ sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+ [H264FW_4X4_SIZE]));
+ }
+ }
+ if (!h264_pps_info->scllst8x8pic) {
+ h264_pps_info->scllst8x8pic =
+ kmalloc((sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE])), GFP_KERNEL);
+ if (!h264_pps_info->scllst8x8pic) {
+ pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+ } else {
+ VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+ memset(h264_pps_info->scllst8x8pic, 0x00,
+ sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+ [H264FW_8X8_SIZE]));
+ }
+ }
+ {
+ unsigned char(*scllst4x4pic)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+ (unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+ h264_pps_info->scllst4x4pic;
+ unsigned char(*scllst8x8pic)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+ (unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])
+ h264_pps_info->scllst8x8pic;
+
+ /*
+				 * For chroma_format_idc == 3 (YUV 4:4:4) the total number
+				 * of lists is 12 if transform_8x8_mode_flag is enabled,
+				 * otherwise 6.
+ */
+ n_scaling_list = 6 + (chroma_format_idc != 3 ? 2 : 6) *
+ h264_pps_info->transform_8x8_mode_flag;
+ if (n_scaling_list > 12)
+ pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+
+ VDEC_ASSERT(h264_pps_info->scllst4x4pic);
+ VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+ for (i = 0; i < n_scaling_list; i++) {
+ unsigned char *ptr =
+ &h264_pps_info->usedefaultscalingmatrixflag_pic[i];
+
+ h264_pps_info->pic_scaling_list_present_flag[i] =
+ swsr_read_bits(swsr_context, 1);
+ if (h264_pps_info->pic_scaling_list_present_flag[i]) {
+ if (i < 6)
+ pps_parse_error |=
+ bspp_h264_scl_listparser
+ (swsr_context,
+ (*scllst4x4pic)[i], 16, ptr);
+ else
+ pps_parse_error |=
+ bspp_h264_scl_listparser
+ (swsr_context,
+ (*scllst8x8pic)[i - 6], 64, ptr);
+ }
+ }
+ }
+ }
+ h264_pps_info->second_chroma_qp_index_offset = swsr_read_signed_expgoulomb
+ (swsr_context);
+
+ if (h264_pps_info->second_chroma_qp_index_offset > H264_MAX_CHROMA_QP_INDEX_OFFSET)
+ h264_pps_info->second_chroma_qp_index_offset =
+ H264_MAX_CHROMA_QP_INDEX_OFFSET;
+ else if (h264_pps_info->second_chroma_qp_index_offset <
+ H264_MIN_CHROMA_QP_INDEX_OFFSET)
+ h264_pps_info->second_chroma_qp_index_offset =
+ H264_MIN_CHROMA_QP_INDEX_OFFSET;
+ } else {
+ h264_pps_info->second_chroma_qp_index_offset =
+ h264_pps_info->chroma_qp_index_offset;
+ }
+
+ if (swsr_check_exception(swsr_context) != SWSR_EXCEPT_NO_EXCEPTION)
+ pps_parse_error |= BSPP_ERROR_INSUFFICIENT_DATA;
+
+ return pps_parse_error;
+}
+
+static int bspp_h264_release_sequ_hdr_info(void *str_alloc, void *secure_sps_info)
+{
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info =
+ (struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+ if (!h264_seq_hdr_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ return 0;
+}
+
+static int bspp_h264_reset_seq_hdr_info(void *secure_sps_info)
+{
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info = NULL;
+ unsigned int *nal_hrd_bitrate_valueminus1 = NULL;
+ unsigned int *vcl_hrd_bitrate_valueminus1 = NULL;
+ unsigned int *nal_hrd_cpbsize_valueminus1 = NULL;
+ unsigned int *vcl_hrd_cpbsize_valueminus1 = NULL;
+ unsigned char *nal_hrd_cbrflag = NULL;
+ unsigned char *vcl_hrd_cbrflag = NULL;
+ unsigned int *offset_for_ref_frame = NULL;
+ unsigned char *scllst4x4seq = NULL;
+ unsigned char *scllst8x8seq = NULL;
+
+ if (!secure_sps_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+ offset_for_ref_frame = h264_seq_hdr_info->sps_info.offset_for_ref_frame;
+ scllst4x4seq = h264_seq_hdr_info->sps_info.scllst4x4seq;
+ scllst8x8seq = h264_seq_hdr_info->sps_info.scllst8x8seq;
+ nal_hrd_bitrate_valueminus1 =
+ h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1;
+ vcl_hrd_bitrate_valueminus1 =
+ h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1;
+ nal_hrd_cpbsize_valueminus1 =
+ h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1;
+ vcl_hrd_cpbsize_valueminus1 =
+ h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1;
+ nal_hrd_cbrflag = h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag;
+ vcl_hrd_cbrflag = h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag;
+
+ /* Cleaning vui_info */
+ if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1)
+ memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1,
+ 0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1)
+ memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1,
+ 0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1)
+ memset(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1,
+ 0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+ if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag)
+ memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag,
+ 0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+ if (h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag)
+ memset(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag,
+ 0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+ /* Cleaning sps_info */
+ if (h264_seq_hdr_info->sps_info.offset_for_ref_frame)
+ memset(h264_seq_hdr_info->sps_info.offset_for_ref_frame, 0x00,
+ H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int));
+
+ if (h264_seq_hdr_info->sps_info.scllst4x4seq)
+ memset(h264_seq_hdr_info->sps_info.scllst4x4seq, 0x00,
+ sizeof(unsigned char[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]));
+
+ if (h264_seq_hdr_info->sps_info.scllst8x8seq)
+ memset(h264_seq_hdr_info->sps_info.scllst8x8seq, 0x00,
+ sizeof(unsigned char[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]));
+
+ /* Erasing the structure */
+ memset(h264_seq_hdr_info, 0, sizeof(*h264_seq_hdr_info));
+
+ /* Restoring pointers */
+ h264_seq_hdr_info->sps_info.offset_for_ref_frame = offset_for_ref_frame;
+ h264_seq_hdr_info->sps_info.scllst4x4seq = scllst4x4seq;
+ h264_seq_hdr_info->sps_info.scllst8x8seq = scllst8x8seq;
+
+ h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1 =
+ nal_hrd_bitrate_valueminus1;
+ h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1 =
+ vcl_hrd_bitrate_valueminus1;
+
+ h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1 =
+ nal_hrd_cpbsize_valueminus1;
+ h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1 =
+ vcl_hrd_cpbsize_valueminus1;
+
+ h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag = nal_hrd_cbrflag;
+ h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag = vcl_hrd_cbrflag;
+
+ return 0;
+}
+
+static int bspp_h264_reset_pps_info(void *secure_pps_info)
+{
+ struct bspp_h264_pps_info *h264_pps_info = NULL;
+ unsigned short slicegroupidnum = 0;
+ unsigned char *slice_group_id = NULL;
+ unsigned char *scllst4x4pic = NULL;
+ unsigned char *scllst8x8pic = NULL;
+
+ if (!secure_pps_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ h264_pps_info = (struct bspp_h264_pps_info *)secure_pps_info;
+
+ /*
+	 * Store temp values (we want to keep the SGM structure as it
+	 * may be useful again, instead of reallocating it later)
+ */
+ slice_group_id = h264_pps_info->h264_ppssgm_info.slice_group_id;
+ slicegroupidnum = h264_pps_info->h264_ppssgm_info.slicegroupidnum;
+ scllst4x4pic = h264_pps_info->scllst4x4pic;
+ scllst8x8pic = h264_pps_info->scllst8x8pic;
+
+ if (h264_pps_info->h264_ppssgm_info.slice_group_id)
+ memset(h264_pps_info->h264_ppssgm_info.slice_group_id, 0x00,
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum * sizeof(unsigned char));
+
+ if (h264_pps_info->scllst4x4pic)
+ memset(h264_pps_info->scllst4x4pic, 0x00,
+ sizeof(unsigned char[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]));
+
+ if (h264_pps_info->scllst8x8pic)
+ memset(h264_pps_info->scllst8x8pic, 0x00,
+ sizeof(unsigned char[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]));
+
+ /* Erasing the structure */
+ memset(h264_pps_info, 0x00, sizeof(*h264_pps_info));
+
+ /* Copy the temp variable back */
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum = slicegroupidnum;
+ h264_pps_info->h264_ppssgm_info.slice_group_id = slice_group_id;
+ h264_pps_info->scllst4x4pic = scllst4x4pic;
+ h264_pps_info->scllst8x8pic = scllst8x8pic;
+
+ return 0;
+}
+
+static enum bspp_error_type bspp_h264_pict_hdr_parser
+ (void *swsr_context, void *str_res,
+ struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+ struct bspp_pps_info **pps_info,
+ struct bspp_sequence_hdr_info **seq_hdr_info,
+ enum h264_nalunittype nal_unit_type,
+ unsigned char nal_ref_idc)
+{
+ enum bspp_error_type slice_parse_error = BSPP_ERROR_NONE;
+ struct bspp_h264_pps_info *h264_pps_info;
+ struct bspp_pps_info *pps_info_loc;
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+ struct bspp_sequence_hdr_info *seq_hdr_info_loc;
+ int id_loc;
+
+ VDEC_ASSERT(swsr_context);
+
+ memset(h264_slice_hdr_info, 0, sizeof(*h264_slice_hdr_info));
+
+ h264_slice_hdr_info->first_mb_in_slice = swsr_read_unsigned_expgoulomb(swsr_context);
+ h264_slice_hdr_info->slice_type = (enum bspp_h264_slice_type)swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if ((unsigned int)h264_slice_hdr_info->slice_type > 9) {
+ pr_err("Slice Type [%d] invalid, set to P", h264_slice_hdr_info->slice_type);
+ h264_slice_hdr_info->slice_type = (enum bspp_h264_slice_type)0;
+ slice_parse_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+ h264_slice_hdr_info->slice_type =
+ (enum bspp_h264_slice_type)(h264_slice_hdr_info->slice_type % 5);
+
+ h264_slice_hdr_info->pps_id = swsr_read_unsigned_expgoulomb(swsr_context);
+ if (h264_slice_hdr_info->pps_id >= MAX_PPS_COUNT) {
+ pr_err("Picture Parameter ID [%d] invalid, set to 0", h264_slice_hdr_info->pps_id);
+ h264_slice_hdr_info->pps_id = 0;
+ slice_parse_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+
+ /* Set relevant PPS and SPS */
+ pps_info_loc = bspp_get_pps_hdr(str_res, h264_slice_hdr_info->pps_id);
+
+ if (!pps_info_loc) {
+ slice_parse_error |= BSPP_ERROR_NO_PPS;
+ goto error;
+ }
+ h264_pps_info = (struct bspp_h264_pps_info *)pps_info_loc->secure_pps_info;
+ if (!h264_pps_info) {
+ slice_parse_error |= BSPP_ERROR_NO_PPS;
+ goto error;
+ }
+ VDEC_ASSERT(h264_pps_info->pps_id == h264_slice_hdr_info->pps_id);
+ *pps_info = pps_info_loc;
+
+	/*
+	 * seq_parameter_set_id is always in range 0-31, so we can add an
+	 * offset indicating a subset sequence header
+	 */
+ id_loc = h264_pps_info->seq_parameter_set_id;
+ id_loc = (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SUBSET_SPS) ? id_loc + 32 : id_loc;
+
+ seq_hdr_info_loc = bspp_get_sequ_hdr(str_res, id_loc);
+
+ if (!seq_hdr_info_loc) {
+ slice_parse_error |= BSPP_ERROR_NO_SEQUENCE_HDR;
+ goto error;
+ }
+ h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)seq_hdr_info_loc->secure_sequence_info;
+ VDEC_ASSERT((unsigned int)h264_seq_hdr_info->sps_info.seq_parameter_set_id ==
+ h264_pps_info->seq_parameter_set_id);
+ *seq_hdr_info = seq_hdr_info_loc;
+
+ /*
+ * For MINIMAL parsing in secure mode, slice header parsing can stop
+	 * here; this may be problematic with field-coded streams and with
+	 * splitting fields
+ */
+ if (h264_seq_hdr_info->sps_info.separate_colour_plane_flag)
+ h264_slice_hdr_info->colour_plane_id = swsr_read_bits(swsr_context, 2);
+
+ else
+ h264_slice_hdr_info->colour_plane_id = 0;
+
+ h264_slice_hdr_info->frame_num = swsr_read_bits
+ (swsr_context,
+ h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4
+ + 4);
+
+ VDEC_ASSERT(h264_slice_hdr_info->frame_num <
+ (1UL << (h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4 + 4)));
+
+ if (!h264_seq_hdr_info->sps_info.frame_mbs_only_flag) {
+ if (h264_slice_hdr_info->slice_type == B_SLICE &&
+ !h264_seq_hdr_info->sps_info.direct_8x8_inference_flag)
+ slice_parse_error |= BSPP_ERROR_INVALID_VALUE;
+
+ h264_slice_hdr_info->field_pic_flag = swsr_read_bits(swsr_context, 1);
+ if (h264_slice_hdr_info->field_pic_flag)
+ h264_slice_hdr_info->bottom_field_flag = swsr_read_bits(swsr_context, 1);
+ else
+ h264_slice_hdr_info->bottom_field_flag = 0;
+ } else {
+ h264_slice_hdr_info->field_pic_flag = 0;
+ h264_slice_hdr_info->bottom_field_flag = 0;
+ }
+
+ /*
+ * At this point we have everything we need, but we still lack all the
+ * conditions for detecting new pictures (needed for error cases)
+ */
+ if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+ h264_slice_hdr_info->idr_pic_id = swsr_read_unsigned_expgoulomb(swsr_context);
+
+ if (h264_seq_hdr_info->sps_info.pic_order_cnt_type == 0) {
+ h264_slice_hdr_info->pic_order_cnt_lsb = swsr_read_bits
+ (swsr_context,
+ h264_seq_hdr_info->sps_info.log2_max_pic_order_cnt_lsb_minus4 + 4);
+ if (h264_pps_info->pic_order_present_flag && !h264_slice_hdr_info->field_pic_flag)
+ h264_slice_hdr_info->delta_pic_order_cnt_bottom =
+ swsr_read_signed_expgoulomb(swsr_context);
+ }
+
+ if (h264_seq_hdr_info->sps_info.pic_order_cnt_type == 1 &&
+ !h264_seq_hdr_info->sps_info.delta_pic_order_always_zero_flag) {
+ h264_slice_hdr_info->delta_pic_order_cnt[0] = swsr_read_signed_expgoulomb
+ (swsr_context);
+ if (h264_pps_info->pic_order_present_flag && !h264_slice_hdr_info->field_pic_flag)
+ h264_slice_hdr_info->delta_pic_order_cnt[1] = swsr_read_signed_expgoulomb
+ (swsr_context);
+ }
+
+ if (h264_pps_info->redundant_pic_cnt_present_flag)
+ h264_slice_hdr_info->redundant_pic_cnt =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+
+ /* For FMO streams, we need to go further */
+ if (h264_pps_info->num_slice_groups_minus1 != 0 &&
+ h264_pps_info->slice_group_map_type >= 3 &&
+ h264_pps_info->slice_group_map_type <= 5) {
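+		/* direct_spatial_mv_pred_flag: present only for B slices, value discarded */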
+ if (h264_slice_hdr_info->slice_type == B_SLICE)
+ swsr_read_bits(swsr_context, 1);
+
+ if (h264_slice_hdr_info->slice_type == P_SLICE ||
+ h264_slice_hdr_info->slice_type == SP_SLICE ||
+ h264_slice_hdr_info->slice_type == B_SLICE) {
+ h264_slice_hdr_info->num_ref_idx_active_override_flag =
+ swsr_read_bits(swsr_context, 1);
+ if (h264_slice_hdr_info->num_ref_idx_active_override_flag) {
+ h264_slice_hdr_info->num_ref_idx_lx_active_minus1[0] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ if (h264_slice_hdr_info->slice_type == B_SLICE)
+ h264_slice_hdr_info->num_ref_idx_lx_active_minus1[1] =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ }
+ }
+
+ if (h264_slice_hdr_info->slice_type != SI_SLICE &&
+ h264_slice_hdr_info->slice_type != I_SLICE) {
+ /* Reference picture list modification */
+ /* parse reordering info and pack into commands */
+ unsigned int i;
+ unsigned int cmd_num, list_num;
+ unsigned int command;
+
+ i = (h264_slice_hdr_info->slice_type == B_SLICE) ? 2 : 1;
+
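+			/*
+			 * ref_pic_list_modification_flag_lX, then a run of
+			 * modification_of_pic_nums_idc commands terminated by 3
+			 */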
+ for (list_num = 0; list_num < i; list_num++) {
+ cmd_num = 0;
+ if (swsr_read_bits(swsr_context, 1)) {
+ do {
+ command =
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ if (command != 3) {
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ cmd_num++;
+ }
+ } while (command != 3 && cmd_num <= SL_MAX_REF_IDX);
+ }
+ }
+ }
+
+ if ((h264_pps_info->weighted_pred_flag &&
+ h264_slice_hdr_info->slice_type == P_SLICE) ||
+ (h264_pps_info->weighted_bipred_idc &&
+ h264_slice_hdr_info->slice_type == B_SLICE)) {
+ int mono_chrome;
+ unsigned int list, i, j, k;
+
+ mono_chrome = (!h264_seq_hdr_info->sps_info.chroma_format_idc) ? 1 : 0;
+
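+			/*
+			 * pred_weight_table(): skip luma_log2_weight_denom and,
+			 * unless monochrome, chroma_log2_weight_denom
+			 */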
+ swsr_read_unsigned_expgoulomb(swsr_context);
+ if (!mono_chrome)
+ swsr_read_unsigned_expgoulomb(swsr_context);
+
+ k = (h264_slice_hdr_info->slice_type == B_SLICE) ? 2 : 1;
+
+ for (list = 0; list < k; list++) {
+ for (i = 0;
+ i <=
+ h264_slice_hdr_info->num_ref_idx_lx_active_minus1[list];
+ i++) {
+ if (swsr_read_bits(swsr_context, 1)) {
+ swsr_read_signed_expgoulomb(swsr_context);
+ swsr_read_signed_expgoulomb(swsr_context);
+ }
+
+ if (!mono_chrome && (swsr_read_bits(swsr_context, 1))) {
+ for (j = 0; j < 2; j++) {
+ swsr_read_signed_expgoulomb
+ (swsr_context);
+ swsr_read_signed_expgoulomb
+ (swsr_context);
+ }
+ }
+ }
+ }
+ }
+
+ if (nal_ref_idc != 0) {
+ unsigned int memmanop;
+
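+			/*
+			 * dec_ref_pic_marking(): IDR slices carry
+			 * no_output_of_prior_pics_flag and long_term_reference_flag
+			 */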
+ if (nal_unit_type == H264_NALTYPE_IDR_SLICE) {
+ swsr_read_bits(swsr_context, 1);
+ swsr_read_bits(swsr_context, 1);
+ }
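+			/* adaptive_ref_pic_marking_mode_flag */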
+ if (swsr_read_bits(swsr_context, 1)) {
+ do {
+					/* memory_management_control_operation: valid values 0 - 6 */
+ memmanop = swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ if (memmanop != 0 && memmanop != 5) {
+ if (memmanop == 3) {
+ swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ } else {
+ swsr_read_unsigned_expgoulomb
+ (swsr_context);
+ }
+ }
+ } while (memmanop != 0);
+ }
+ }
+
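+		/* cabac_init_idc: present for CABAC when not an I/SI slice */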
+ if (h264_pps_info->entropy_coding_mode_flag &&
+ h264_slice_hdr_info->slice_type != I_SLICE)
+ swsr_read_unsigned_expgoulomb(swsr_context);
+
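+		/* slice_qp_delta */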
+ swsr_read_signed_expgoulomb(swsr_context);
+
+ if (h264_slice_hdr_info->slice_type == SP_SLICE ||
+ h264_slice_hdr_info->slice_type == SI_SLICE) {
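+			/* sp_for_switch_flag (SP slices only) */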
+ if (h264_slice_hdr_info->slice_type == SP_SLICE)
+ swsr_read_bits(swsr_context, 1);
+
+ /* slice_qs_delta */
+ swsr_read_signed_expgoulomb(swsr_context);
+ }
+
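+		/*
+		 * disable_deblocking_filter_idc; when it is not 1,
+		 * slice_alpha_c0_offset_div2 and slice_beta_offset_div2 follow
+		 */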
+ if (h264_pps_info->deblocking_filter_control_present_flag) {
+ if (swsr_read_unsigned_expgoulomb(swsr_context) != 1) {
+ swsr_read_signed_expgoulomb(swsr_context);
+ swsr_read_signed_expgoulomb(swsr_context);
+ }
+ }
+
+ if (h264_pps_info->slice_group_map_type >= 3 &&
+ h264_pps_info->slice_group_map_type <= 5) {
+ unsigned int num_slice_group_map_units =
+ (h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1) *
+ (h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1);
+
+ unsigned short slice_group_change_rate =
+ (h264_pps_info->slice_group_change_rate_minus1 + 1);
+
+ unsigned int width = h264ceillog2(num_slice_group_map_units /
+ slice_group_change_rate +
+ (num_slice_group_map_units % slice_group_change_rate ==
+ 0 ? 0 : 1) + 1); /* (7-32) */
+ h264_slice_hdr_info->slice_group_change_cycle = swsr_read_bits(swsr_context,
+ width);
+ }
+ }
+
+error:
+ return slice_parse_error;
+}
+
+static void bspp_h264_select_scaling_list(struct h264fw_picture_ps *h264fw_pps_info,
+ struct bspp_h264_pps_info *h264_pps_info,
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info)
+{
+ unsigned int num8x8_lists;
+ unsigned int i;
+ const unsigned char *quant_matrix = NULL;
+ unsigned char (*scllst4x4pic)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+ (unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])h264_pps_info->scllst4x4pic;
+ unsigned char (*scllst8x8pic)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+ (unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])h264_pps_info->scllst8x8pic;
+
+ unsigned char (*scllst4x4seq)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+ (unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+ h264_seq_hdr_info->sps_info.scllst4x4seq;
+ unsigned char (*scllst8x8seq)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+ (unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])
+ h264_seq_hdr_info->sps_info.scllst8x8seq;
+
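+	/*
+	 * Scaling list selection follows H.264 Table 7-2: PPS-level lists
+	 * override SPS-level ones, fall-back rule B is used when an SPS
+	 * matrix is present and rule A when no list is transmitted at all.
+	 */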
+ if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+ VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst4x4seq);
+ VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst8x8seq);
+ }
+
+ if (h264_pps_info->pic_scaling_matrix_present_flag) {
+ for (i = 0; i < H264FW_NUM_4X4_LISTS; i++) {
+ if (h264_pps_info->pic_scaling_list_present_flag[i]) {
+ if (h264_pps_info->usedefaultscalingmatrixflag_pic[i])
+ quant_matrix =
+ (i > 2) ? default_4x4_inter : default_4x4_intra;
+ else
+ quant_matrix = (*scllst4x4pic)[i];
+
+ } else {
+ if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+ /* SPS matrix present - use fallback rule B */
+ /* first 4x4 Intra list */
+ if (i == 0) {
+ if
+ (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i] &&
+ !h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq[i]) {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst4x4seq);
+ if (scllst4x4seq)
+ quant_matrix = (*scllst4x4seq)[i];
+ } else {
+ quant_matrix = default_4x4_intra;
+ }
+ }
+ /* first 4x4 Inter list */
+ else if (i == 3) {
+ if
+ (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i] &&
+ !h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq[i]) {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst4x4seq);
+ if (scllst4x4seq)
+ quant_matrix = (*scllst4x4seq)[i];
+ } else {
+ quant_matrix = default_4x4_inter;
+ }
+ } else {
+ quant_matrix =
+ h264fw_pps_info->scalinglist4x4[i - 1];
+ }
+ } else {
+ /* SPS matrix not present - use fallback rule A */
+ /* first 4x4 Intra list */
+ if (i == 0)
+ quant_matrix = default_4x4_intra;
+					/* first 4x4 Inter list */
+ else if (i == 3)
+ quant_matrix = default_4x4_inter;
+ else
+ quant_matrix =
+ h264fw_pps_info->scalinglist4x4[i - 1];
+ }
+ }
+ if (!quant_matrix) {
+ VDEC_ASSERT(0);
+ return;
+ }
+ /* copy correct 4x4 list to output - as selected by PPS */
+ memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist4x4[i]));
+ }
+ } else {
+ /* PPS matrix not present, use SPS information */
+ if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+ for (i = 0; i < H264FW_NUM_4X4_LISTS; i++) {
+ if (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i]) {
+ if
+ (h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq
+ [i]) {
+ quant_matrix = (i > 2) ? default_4x4_inter
+ : default_4x4_intra;
+ } else {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst4x4seq);
+ if (scllst4x4seq)
+ quant_matrix = (*scllst4x4seq)[i];
+ }
+ } else {
+ /* SPS list not present - use fallback rule A */
+ /* first 4x4 Intra list */
+ if (i == 0)
+ quant_matrix = default_4x4_intra;
+ else if (i == 3) /* first 4x4 Inter list */
+ quant_matrix = default_4x4_inter;
+ else
+ quant_matrix =
+ h264fw_pps_info->scalinglist4x4[i - 1];
+ }
+ if (quant_matrix) {
+ /* copy correct 4x4 list to output - as selected by SPS */
+ memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist4x4[i]));
+ }
+ }
+ } else {
+ /* SPS matrix not present - use flat lists */
+ quant_matrix = default_4x4_org;
+ for (i = 0; i < H264FW_NUM_4X4_LISTS; i++)
+ memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist4x4[i]));
+ }
+ }
+
+ /* 8x8 matrices */
+ num8x8_lists = (h264_seq_hdr_info->sps_info.chroma_format_idc == 3) ? 6 : 2;
+ if (h264_pps_info->transform_8x8_mode_flag) {
+ unsigned char *seq_scllstflg =
+ h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag;
+ unsigned char *def_sclmatflg_seq =
+ h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq;
+
+ if (h264_pps_info->pic_scaling_matrix_present_flag) {
+ for (i = 0; i < num8x8_lists; i++) {
+ if (h264_pps_info->pic_scaling_list_present_flag[i +
+ H264FW_NUM_4X4_LISTS]) {
+ if (h264_pps_info->usedefaultscalingmatrixflag_pic[i +
+ H264FW_NUM_4X4_LISTS]) {
+ quant_matrix = (i & 0x1) ? default_8x8_inter
+ : default_8x8_intra;
+ } else {
+ VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+ if (scllst8x8pic)
+ quant_matrix = (*scllst8x8pic)[i];
+ }
+ } else {
+ if
+ (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+ /* SPS matrix present - use fallback rule B */
+ /* list 6 - first 8x8 Intra list */
+ if (i == 0) {
+ if (seq_scllstflg[i +
+ H264FW_NUM_4X4_LISTS] &&
+ !def_sclmatflg_seq[i +
+ H264FW_NUM_4X4_LISTS]) {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst8x8seq);
+ if (scllst8x8seq)
+ quant_matrix = (*scllst8x8seq)[i];
+ } else {
+ quant_matrix = default_8x8_intra;
+ }
+ /* list 7 - first 8x8 Inter list */
+ } else if (i == 1) {
+ if (seq_scllstflg[i +
+ H264FW_NUM_4X4_LISTS] &&
+ !def_sclmatflg_seq[i +
+ H264FW_NUM_4X4_LISTS]) {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst8x8seq);
+ if (scllst8x8seq)
+ quant_matrix = (*scllst8x8seq)[i];
+ } else {
+ quant_matrix = default_8x8_inter;
+ }
+ } else {
+ quant_matrix =
+ h264fw_pps_info->scalinglist8x8[i - 2];
+ }
+ } else {
+ /* SPS matrix not present - use fallback rule A */
+ /* list 6 - first 8x8 Intra list */
+ if (i == 0)
+ quant_matrix = default_8x8_intra;
+ /* list 7 - first 8x8 Inter list */
+ else if (i == 1)
+ quant_matrix = default_8x8_inter;
+ else
+ quant_matrix =
+ h264fw_pps_info->scalinglist8x8[i - 2];
+ }
+ }
+ if (quant_matrix) {
+ /* copy correct 8x8 list to output - as selected by PPS */
+ memcpy(h264fw_pps_info->scalinglist8x8[i], quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist8x8[i]));
+ }
+ }
+ } else {
+ /* PPS matrix not present, use SPS information */
+ if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+ for (i = 0; i < num8x8_lists; i++) {
+ if (seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+ def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS]) {
+ quant_matrix =
+ (i & 0x1) ? default_8x8_inter :
+ default_8x8_intra;
+ } else if ((seq_scllstflg[i + H264FW_NUM_4X4_LISTS]) &&
+ !(def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS])) {
+ VDEC_ASSERT
+ (h264_seq_hdr_info->sps_info.scllst8x8seq);
+ if (scllst8x8seq)
+ quant_matrix = (*scllst8x8seq)[i];
+ } else if (!(seq_scllstflg[i + H264FW_NUM_4X4_LISTS]) &&
+ (i == 0)) {
+ /* SPS list not present - use fallback rule A */
+ /* list 6 - first 8x8 Intra list */
+ quant_matrix = default_8x8_intra;
+ } else if (!(seq_scllstflg[i + H264FW_NUM_4X4_LISTS]) &&
+ (i == 1)) {
+ /* list 7 - first 8x8 Inter list */
+ quant_matrix = default_8x8_inter;
+ } else {
+ quant_matrix =
+ h264fw_pps_info->scalinglist8x8
+ [i - 2];
+ }
+ if (quant_matrix) {
+ /* copy correct 8x8 list to output -
+ * as selected by SPS
+ */
+ memcpy(h264fw_pps_info->scalinglist8x8[i],
+ quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist8x8[i]));
+ }
+ }
+ } else {
+ /* SPS matrix not present - use flat lists */
+ quant_matrix = default_8x8_org;
+ for (i = 0; i < num8x8_lists; i++)
+ memcpy(h264fw_pps_info->scalinglist8x8[i], quant_matrix,
+ sizeof(h264fw_pps_info->scalinglist8x8[i]));
+ }
+ }
+ }
+}
+
+static void bspp_h264_fwpps_populate(struct bspp_h264_pps_info *h264_pps_info,
+ struct h264fw_picture_ps *h264fw_pps_info)
+{
+ h264fw_pps_info->deblocking_filter_control_present_flag =
+ h264_pps_info->deblocking_filter_control_present_flag;
+ h264fw_pps_info->transform_8x8_mode_flag = h264_pps_info->transform_8x8_mode_flag;
+ h264fw_pps_info->entropy_coding_mode_flag = h264_pps_info->entropy_coding_mode_flag;
+ h264fw_pps_info->redundant_pic_cnt_present_flag =
+ h264_pps_info->redundant_pic_cnt_present_flag;
+ h264fw_pps_info->weighted_bipred_idc = h264_pps_info->weighted_bipred_idc;
+ h264fw_pps_info->weighted_pred_flag = h264_pps_info->weighted_pred_flag;
+ h264fw_pps_info->pic_order_present_flag = h264_pps_info->pic_order_present_flag;
+ h264fw_pps_info->pic_init_qp = h264_pps_info->pic_init_qp_minus26 + 26;
+ h264fw_pps_info->constrained_intra_pred_flag = h264_pps_info->constrained_intra_pred_flag;
+ VDEC_ASSERT(sizeof(h264fw_pps_info->num_ref_lx_active_minus1) ==
+ sizeof(h264_pps_info->num_ref_idx_lx_active_minus1));
+ VDEC_ASSERT(sizeof(h264fw_pps_info->num_ref_lx_active_minus1) ==
+ sizeof(unsigned char) * H264FW_MAX_REFPIC_LISTS);
+ memcpy(h264fw_pps_info->num_ref_lx_active_minus1,
+ h264_pps_info->num_ref_idx_lx_active_minus1,
+ sizeof(h264fw_pps_info->num_ref_lx_active_minus1));
+ h264fw_pps_info->slice_group_map_type = h264_pps_info->slice_group_map_type;
+ h264fw_pps_info->num_slice_groups_minus1 = h264_pps_info->num_slice_groups_minus1;
+ h264fw_pps_info->slice_group_change_rate_minus1 =
+ h264_pps_info->slice_group_change_rate_minus1;
+ h264fw_pps_info->chroma_qp_index_offset = h264_pps_info->chroma_qp_index_offset;
+ h264fw_pps_info->second_chroma_qp_index_offset =
+ h264_pps_info->second_chroma_qp_index_offset;
+}
+
+static void bspp_h264_fwseq_hdr_populate(struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+ struct h264fw_sequence_ps *h264_fwseq_hdr_info)
+{
+ /* Basic SPS */
+ h264_fwseq_hdr_info->profile_idc = h264_seq_hdr_info->sps_info.profile_idc;
+ h264_fwseq_hdr_info->chroma_format_idc = h264_seq_hdr_info->sps_info.chroma_format_idc;
+ h264_fwseq_hdr_info->separate_colour_plane_flag =
+ h264_seq_hdr_info->sps_info.separate_colour_plane_flag;
+ h264_fwseq_hdr_info->bit_depth_luma_minus8 =
+ h264_seq_hdr_info->sps_info.bit_depth_luma_minus8;
+ h264_fwseq_hdr_info->bit_depth_chroma_minus8 =
+ h264_seq_hdr_info->sps_info.bit_depth_chroma_minus8;
+ h264_fwseq_hdr_info->delta_pic_order_always_zero_flag =
+ h264_seq_hdr_info->sps_info.delta_pic_order_always_zero_flag;
+ h264_fwseq_hdr_info->log2_max_pic_order_cnt_lsb =
+ h264_seq_hdr_info->sps_info.log2_max_pic_order_cnt_lsb_minus4 + 4;
+ h264_fwseq_hdr_info->max_num_ref_frames = h264_seq_hdr_info->sps_info.max_num_ref_frames;
+ h264_fwseq_hdr_info->log2_max_frame_num =
+ h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4 + 4;
+ h264_fwseq_hdr_info->pic_order_cnt_type = h264_seq_hdr_info->sps_info.pic_order_cnt_type;
+ h264_fwseq_hdr_info->frame_mbs_only_flag = h264_seq_hdr_info->sps_info.frame_mbs_only_flag;
+ h264_fwseq_hdr_info->gaps_in_frame_num_value_allowed_flag =
+ h264_seq_hdr_info->sps_info.gaps_in_frame_num_value_allowed_flag;
+ h264_fwseq_hdr_info->constraint_set_flags =
+ h264_seq_hdr_info->sps_info.constraint_set_flags;
+ h264_fwseq_hdr_info->level_idc = h264_seq_hdr_info->sps_info.level_idc;
+ h264_fwseq_hdr_info->num_ref_frames_in_pic_order_cnt_cycle =
+ h264_seq_hdr_info->sps_info.num_ref_frames_in_pic_order_cnt_cycle;
+ h264_fwseq_hdr_info->mb_adaptive_frame_field_flag =
+ h264_seq_hdr_info->sps_info.mb_adaptive_frame_field_flag;
+ h264_fwseq_hdr_info->offset_for_non_ref_pic =
+ h264_seq_hdr_info->sps_info.offset_for_non_ref_pic;
+ h264_fwseq_hdr_info->offset_for_top_to_bottom_field =
+ h264_seq_hdr_info->sps_info.offset_for_top_to_bottom_field;
+ h264_fwseq_hdr_info->pic_width_in_mbs_minus1 =
+ h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1;
+ h264_fwseq_hdr_info->pic_height_in_map_units_minus1 =
+ h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1;
+ h264_fwseq_hdr_info->direct_8x8_inference_flag =
+ h264_seq_hdr_info->sps_info.direct_8x8_inference_flag;
+ h264_fwseq_hdr_info->qpprime_y_zero_transform_bypass_flag =
+ h264_seq_hdr_info->sps_info.qpprime_y_zero_transform_bypass_flag;
+
+ if (h264_seq_hdr_info->sps_info.offset_for_ref_frame)
+ memcpy(h264_fwseq_hdr_info->offset_for_ref_frame,
+ h264_seq_hdr_info->sps_info.offset_for_ref_frame,
+ sizeof(h264_fwseq_hdr_info->offset_for_ref_frame));
+ else
+ memset(h264_fwseq_hdr_info->offset_for_ref_frame, 0x00,
+ sizeof(h264_fwseq_hdr_info->offset_for_ref_frame));
+
+ memset(h264_fwseq_hdr_info->anchor_inter_view_reference_id_list, 0x00,
+ sizeof(h264_fwseq_hdr_info->anchor_inter_view_reference_id_list));
+ memset(h264_fwseq_hdr_info->non_anchor_inter_view_reference_id_list, 0x00,
+ sizeof(h264_fwseq_hdr_info->non_anchor_inter_view_reference_id_list));
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+ /* From VUI */
+ h264_fwseq_hdr_info->max_dec_frame_buffering =
+ h264_seq_hdr_info->vui_info.max_dec_frame_buffering;
+ h264_fwseq_hdr_info->num_reorder_frames = h264_seq_hdr_info->vui_info.num_reorder_frames;
+#else
+ /* From VUI */
+ if (h264_seq_hdr_info->vui_info.bitstream_restriction_flag) {
+ VDEC_ASSERT(h264_seq_hdr_info->sps_info.vui_parameters_present_flag);
+ h264_fwseq_hdr_info->max_dec_frame_buffering =
+ h264_seq_hdr_info->vui_info.max_dec_frame_buffering;
+ h264_fwseq_hdr_info->num_reorder_frames =
+ h264_seq_hdr_info->vui_info.num_reorder_frames;
+ } else {
+ h264_fwseq_hdr_info->max_dec_frame_buffering = 1;
+ h264_fwseq_hdr_info->num_reorder_frames = 16;
+ }
+#endif
+}
+
+static void bspp_h264_commonseq_hdr_populate(struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+ struct vdec_comsequ_hdrinfo *comseq_hdr_info)
+{
+ struct bspp_h264_sps_info *sps_info = &h264_seq_hdr_info->sps_info;
+ struct bspp_h264_vui_info *vui_info = &h264_seq_hdr_info->vui_info;
+
+ comseq_hdr_info->codec_profile = sps_info->profile_idc;
+ comseq_hdr_info->codec_level = sps_info->level_idc;
+
+ if (sps_info->vui_parameters_present_flag && vui_info->timing_info_present_flag) {
+ comseq_hdr_info->frame_rate_num = vui_info->time_scale;
+ comseq_hdr_info->frame_rate_den = 2 * vui_info->num_units_in_tick;
+ comseq_hdr_info->frame_rate = ((long)comseq_hdr_info->frame_rate_num) /
+ ((long)comseq_hdr_info->frame_rate_den);
+ }
+
+	/*
+	 * If a colour space description was present in the VUI parameters,
+	 * copy it into the common sequence header info for use by the
+	 * application.
+	 */
+	if (vui_info->video_signal_type_present_flag &&
+	    vui_info->colour_description_present_flag) {
+ comseq_hdr_info->color_space_info.is_present = TRUE;
+ comseq_hdr_info->color_space_info.color_primaries = vui_info->colour_primaries;
+ comseq_hdr_info->color_space_info.transfer_characteristics =
+ vui_info->transfer_characteristics;
+ comseq_hdr_info->color_space_info.matrix_coefficients =
+ vui_info->matrix_coefficients;
+ }
+
+ if (vui_info->aspect_ratio_info_present_flag) {
+ comseq_hdr_info->aspect_ratio_num = vui_info->sar_width;
+ comseq_hdr_info->aspect_ratio_den = vui_info->sar_height;
+ }
+
+ comseq_hdr_info->interlaced_frames = sps_info->frame_mbs_only_flag ? 0 : 1;
+
+ /* pixel_info populate */
+ VDEC_ASSERT(sps_info->chroma_format_idc < 4);
+ comseq_hdr_info->pixel_info.chroma_fmt = (sps_info->chroma_format_idc == 0) ? 0 : 1;
+ comseq_hdr_info->pixel_info.chroma_fmt_idc = pixel_format_idc[sps_info->chroma_format_idc];
+ comseq_hdr_info->pixel_info.chroma_interleave =
+ ((sps_info->chroma_format_idc == 0) ||
+ (sps_info->chroma_format_idc == 3 && sps_info->separate_colour_plane_flag)) ?
+ PIXEL_INVALID_CI : PIXEL_UV_ORDER;
+ comseq_hdr_info->pixel_info.num_planes =
+ (sps_info->chroma_format_idc == 0) ? 1 :
+ (sps_info->chroma_format_idc == 3 && sps_info->separate_colour_plane_flag) ? 3 : 2;
+ comseq_hdr_info->pixel_info.bitdepth_y = sps_info->bit_depth_luma_minus8 + 8;
+ comseq_hdr_info->pixel_info.bitdepth_c = sps_info->bit_depth_chroma_minus8 + 8;
+ comseq_hdr_info->pixel_info.mem_pkg =
+ (comseq_hdr_info->pixel_info.bitdepth_y > 8 ||
+ comseq_hdr_info->pixel_info.bitdepth_c > 8) ?
+ PIXEL_BIT10_MSB_MP : PIXEL_BIT8_MP;
+ comseq_hdr_info->pixel_info.pixfmt =
+ pixel_get_pixfmt(comseq_hdr_info->pixel_info.chroma_fmt_idc,
+ comseq_hdr_info->pixel_info.chroma_interleave,
+ comseq_hdr_info->pixel_info.mem_pkg,
+ comseq_hdr_info->pixel_info.bitdepth_y,
+ comseq_hdr_info->pixel_info.bitdepth_c,
+ comseq_hdr_info->pixel_info.num_planes);
+
+ /* max_frame_size populate */
+ comseq_hdr_info->max_frame_size.width = (sps_info->pic_width_in_mbs_minus1 + 1) * 16;
+ /*
+	 * The H264 coded size is always MB aligned. For sequences which may contain
+	 * field-coded pictures, as indicated by frame_mbs_only_flag being zero,
+	 * pic_height_in_map_units_minus1 refers to the field height in MBs, so the
+	 * actual frame height is Field_MBs_InHeight * 32
+ */
+ comseq_hdr_info->max_frame_size.height = (sps_info->pic_height_in_map_units_minus1 + 1) *
+ (sps_info->frame_mbs_only_flag ? 1 : 2) * 16;
+
+	/* Passing 2*N to vxd_dec so that get_nbuffers can use the formula N+3 for all codecs */
+ comseq_hdr_info->max_ref_frame_num = 2 * sps_info->max_num_ref_frames;
+
+ comseq_hdr_info->field_codec_mblocks = sps_info->mb_adaptive_frame_field_flag;
+ comseq_hdr_info->min_pict_buf_num = vui_info->max_dec_frame_buffering;
+
+ /* orig_display_region populate */
+ if (sps_info->frame_cropping_flag) {
+ int sub_width_c, sub_height_c, crop_unit_x, crop_unit_y;
+ int frame_crop_left, frame_crop_right, frame_crop_top, frame_crop_bottom;
+
+ sub_width_c = bspp_h264_get_subwidthc(sps_info->chroma_format_idc,
+ sps_info->separate_colour_plane_flag);
+
+ sub_height_c = bspp_h264_get_subheightc(sps_info->chroma_format_idc,
+ sps_info->separate_colour_plane_flag);
+
+ /* equation source: ITU-T H.264 2010/03, page 77 */
+ /* ChromaArrayType == 0 */
+ if (sps_info->separate_colour_plane_flag || sps_info->chroma_format_idc == 0) {
+ /* (7-18) */
+ crop_unit_x = 1;
+ /* (7-19) */
+ crop_unit_y = 2 - sps_info->frame_mbs_only_flag;
+ /* ChromaArrayType == chroma_format_idc */
+ } else {
+ /* (7-20) */
+ crop_unit_x = sub_width_c;
+ /* (7-21) */
+ crop_unit_y = sub_height_c * (2 - sps_info->frame_mbs_only_flag);
+ }
+
+ VDEC_ASSERT(sps_info->frame_crop_left_offset <=
+ (comseq_hdr_info->max_frame_size.width / crop_unit_x) -
+ (sps_info->frame_crop_right_offset + 1));
+
+ VDEC_ASSERT(sps_info->frame_crop_top_offset <=
+ (comseq_hdr_info->max_frame_size.height / crop_unit_y) -
+ (sps_info->frame_crop_bottom_offset + 1));
+ frame_crop_left = crop_unit_x * sps_info->frame_crop_left_offset;
+ frame_crop_right = comseq_hdr_info->max_frame_size.width -
+ (crop_unit_x * sps_info->frame_crop_right_offset);
+ frame_crop_top = crop_unit_y * sps_info->frame_crop_top_offset;
+ frame_crop_bottom = comseq_hdr_info->max_frame_size.height -
+ (crop_unit_y * sps_info->frame_crop_bottom_offset);
+ comseq_hdr_info->orig_display_region.left_offset = (unsigned int)frame_crop_left;
+ comseq_hdr_info->orig_display_region.top_offset = (unsigned int)frame_crop_top;
+ comseq_hdr_info->orig_display_region.width = (frame_crop_right - frame_crop_left);
+ comseq_hdr_info->orig_display_region.height = (frame_crop_bottom - frame_crop_top);
+ } else {
+ comseq_hdr_info->orig_display_region.left_offset = 0;
+ comseq_hdr_info->orig_display_region.top_offset = 0;
+ comseq_hdr_info->orig_display_region.width = comseq_hdr_info->max_frame_size.width;
+ comseq_hdr_info->orig_display_region.height =
+ comseq_hdr_info->max_frame_size.height;
+ }
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+ comseq_hdr_info->max_reorder_picts = vui_info->max_dec_frame_buffering;
+#else
+ if (sps_info->vui_parameters_present_flag && vui_info->bitstream_restriction_flag)
+ comseq_hdr_info->max_reorder_picts = vui_info->max_dec_frame_buffering;
+ else
+ comseq_hdr_info->max_reorder_picts = 0;
+#endif
+ comseq_hdr_info->separate_chroma_planes =
+ h264_seq_hdr_info->sps_info.separate_colour_plane_flag ? 1 : 0;
+}
+
+static void bspp_h264_pict_hdr_populate(enum h264_nalunittype nal_unit_type,
+ struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+ struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ struct bspp_pict_hdr_info *pict_hdr_info)
+{
+ /*
+	 * H264 has a slice coding type, not a picture one. Contrary to the rest of
+	 * the standards, bReference is set explicitly from the NAL externally (see
+	 * just below the call to bspp_h264_pict_hdr_populate);
+	 * pict_hdr_info->bReference is therefore not set here.
+ */
+ pict_hdr_info->intra_coded = (nal_unit_type == H264_NALTYPE_IDR_SLICE) ? 1 : 0;
+ pict_hdr_info->field = h264_slice_hdr_info->field_pic_flag;
+
+ pict_hdr_info->post_processing = 0;
+ /* For H264 Maximum and Coded sizes are the same */
+ pict_hdr_info->coded_frame_size.width = comseq_hdr_info->max_frame_size.width;
+ /* For H264 Maximum and Coded sizes are the same */
+ pict_hdr_info->coded_frame_size.height = comseq_hdr_info->max_frame_size.height;
+ /*
+ * For H264 Encoded Display size has been precomputed as part of the
+ * common sequence info
+ */
+ pict_hdr_info->disp_info.enc_disp_region = comseq_hdr_info->orig_display_region;
+ /*
+ * For H264 there is no resampling, so encoded and actual display
+ * regions are the same
+ */
+ pict_hdr_info->disp_info.disp_region = comseq_hdr_info->orig_display_region;
+ /* H264 does not have that */
+ pict_hdr_info->disp_info.num_pan_scan_windows = 0;
+ memset(pict_hdr_info->disp_info.pan_scan_windows, 0,
+ sizeof(pict_hdr_info->disp_info.pan_scan_windows));
+}
+
+static int bspp_h264_destroy_seq_hdr_info(const void *secure_sps_info)
+{
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info = NULL;
+
+ if (!secure_sps_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+ /* Cleaning vui_info */
+ kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1);
+ kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1);
+ kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag);
+ kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1);
+ kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1);
+ kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag);
+
+ /* Cleaning sps_info */
+ kfree(h264_seq_hdr_info->sps_info.offset_for_ref_frame);
+ kfree(h264_seq_hdr_info->sps_info.scllst4x4seq);
+ kfree(h264_seq_hdr_info->sps_info.scllst8x8seq);
+
+ return 0;
+}
+
+static int bspp_h264_destroy_pps_info(const void *secure_pps_info)
+{
+ struct bspp_h264_pps_info *h264_pps_info = NULL;
+
+ if (!secure_pps_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ h264_pps_info = (struct bspp_h264_pps_info *)secure_pps_info;
+ kfree(h264_pps_info->h264_ppssgm_info.slice_group_id);
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum = 0;
+ kfree(h264_pps_info->scllst4x4pic);
+ kfree(h264_pps_info->scllst8x8pic);
+
+ return 0;
+}
+
+static int bspp_h264_destroy_data(enum bspp_unit_type data_type, void *data_handle)
+{
+ int result = 0;
+
+ if (!data_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ switch (data_type) {
+ case BSPP_UNIT_SEQUENCE:
+ result = bspp_h264_destroy_seq_hdr_info(data_handle);
+ break;
+ case BSPP_UNIT_PPS:
+ result = bspp_h264_destroy_pps_info(data_handle);
+ break;
+ default:
+ break;
+ }
+ return result;
+}
+
+static void bspp_h264_generate_slice_groupmap(struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+ struct bspp_h264_pps_info *h264_pps_info,
+ unsigned char *map_unit_to_slice_groupmap,
+ unsigned int map_size)
+{
+ int group;
+ unsigned int num_slice_group_mapunits;
+ unsigned int i = 0, j, k = 0;
+ unsigned char num_slice_groups = h264_pps_info->num_slice_groups_minus1 + 1;
+ unsigned int pic_width_in_mbs = h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1;
+ unsigned int pic_height_in_map_units =
+ h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1;
+
+ num_slice_group_mapunits = map_size;
+ if (h264_pps_info->slice_group_map_type == 6) {
+ if ((unsigned int)num_slice_groups != num_slice_group_mapunits) {
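+			/* intentionally failing assert: string literal == NULL is always false */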
+			VDEC_ASSERT("wrong pps->num_slice_group_map_units_minus1 for used SPS and FMO type 6" ==
+				    NULL);
+ if (num_slice_group_mapunits >
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum)
+ num_slice_group_mapunits =
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum;
+ }
+ }
+
+ /* only one slice group */
+ if (h264_pps_info->num_slice_groups_minus1 == 0) {
+ memset(map_unit_to_slice_groupmap, 0, map_size * sizeof(unsigned char));
+ return;
+ }
+ if (h264_pps_info->num_slice_groups_minus1 >= MAX_SLICEGROUP_COUNT) {
+ memset(map_unit_to_slice_groupmap, 0, map_size * sizeof(unsigned char));
+ return;
+ }
+ if (h264_pps_info->slice_group_map_type == 0) {
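+		/* interleaved slice group map (H.264 clause 8.2.2.1) */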
+ do {
+ for (group =
+ 0;
+ group <= h264_pps_info->num_slice_groups_minus1 &&
+ i < num_slice_group_mapunits;
+ i += h264_pps_info->run_length_minus1[group++] + 1) {
+ for (j = 0;
+ j <= h264_pps_info->run_length_minus1[group] &&
+ i + j < num_slice_group_mapunits;
+ j++)
+ map_unit_to_slice_groupmap[i + j] = group;
+ }
+ } while (i < num_slice_group_mapunits);
+ } else if (h264_pps_info->slice_group_map_type == 1) {
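+		/* dispersed slice group map (H.264 clause 8.2.2.2) */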
+ for (i = 0; i < num_slice_group_mapunits; i++) {
+ map_unit_to_slice_groupmap[i] = ((i % pic_width_in_mbs) +
+ (((i / pic_width_in_mbs) *
+ (h264_pps_info->num_slice_groups_minus1 + 1)) / 2)) %
+ (h264_pps_info->num_slice_groups_minus1 + 1);
+ }
+ } else if (h264_pps_info->slice_group_map_type == 2) {
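+		/* foreground with left-over slice group map (H.264 clause 8.2.2.3) */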
+ unsigned int y_top_left, x_top_left, y_bottom_right, x_bottom_right, x, y;
+
+ for (i = 0; i < num_slice_group_mapunits; i++)
+ map_unit_to_slice_groupmap[i] = h264_pps_info->num_slice_groups_minus1;
+
+ for (group = h264_pps_info->num_slice_groups_minus1 - 1; group >= 0; group--) {
+ y_top_left = h264_pps_info->top_left[group] / pic_width_in_mbs;
+ x_top_left = h264_pps_info->top_left[group] % pic_width_in_mbs;
+ y_bottom_right = h264_pps_info->bottom_right[group] / pic_width_in_mbs;
+ x_bottom_right = h264_pps_info->bottom_right[group] % pic_width_in_mbs;
+ for (y = y_top_left; y <= y_bottom_right; y++)
+ for (x = x_top_left; x <= x_bottom_right; x++) {
+ if (h264_pps_info->top_left[group] >
+ h264_pps_info->bottom_right[group] ||
+ h264_pps_info->bottom_right[group] >=
+ num_slice_group_mapunits)
+ continue;
+ map_unit_to_slice_groupmap[y * pic_width_in_mbs +
+ x] = group;
+ }
+ }
+ } else if (h264_pps_info->slice_group_map_type == 3) {
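+		/* box-out slice group map (H.264 clause 8.2.2.4) */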
+ int left_bound, top_bound, right_bound, bottom_bound;
+ int x, y, x_dir, y_dir;
+ int map_unit_vacant;
+
+ unsigned int mapunits_in_slicegroup_0 =
+ umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+ h264_slice_hdr_info->slice_group_change_cycle),
+ (unsigned int)num_slice_group_mapunits);
+
+ for (i = 0; i < num_slice_group_mapunits; i++)
+ map_unit_to_slice_groupmap[i] = 2;
+
+ x = (pic_width_in_mbs - h264_pps_info->slice_group_change_direction_flag) / 2;
+ y = (pic_height_in_map_units - h264_pps_info->slice_group_change_direction_flag) /
+ 2;
+
+ left_bound = x;
+ top_bound = y;
+ right_bound = x;
+ bottom_bound = y;
+
+ x_dir = h264_pps_info->slice_group_change_direction_flag - 1;
+ y_dir = h264_pps_info->slice_group_change_direction_flag;
+
+ for (k = 0; k < num_slice_group_mapunits; k += map_unit_vacant) {
+ map_unit_vacant =
+ (map_unit_to_slice_groupmap[y * pic_width_in_mbs + x] ==
+ 2);
+ if (map_unit_vacant)
+ map_unit_to_slice_groupmap[y * pic_width_in_mbs + x] =
+ (k >= mapunits_in_slicegroup_0);
+
+ if (x_dir == -1 && x == left_bound) {
+ left_bound = smax(left_bound - 1, 0);
+ x = left_bound;
+ x_dir = 0;
+ y_dir = 2 * h264_pps_info->slice_group_change_direction_flag - 1;
+ } else if (x_dir == 1 && x == right_bound) {
+ right_bound = smin(right_bound + 1, (int)pic_width_in_mbs - 1);
+ x = right_bound;
+ x_dir = 0;
+ y_dir = 1 - 2 * h264_pps_info->slice_group_change_direction_flag;
+ } else if (y_dir == -1 && y == top_bound) {
+ top_bound = smax(top_bound - 1, 0);
+ y = top_bound;
+ x_dir = 1 - 2 * h264_pps_info->slice_group_change_direction_flag;
+ y_dir = 0;
+ } else if (y_dir == 1 && y == bottom_bound) {
+ bottom_bound = smin(bottom_bound + 1,
+ (int)pic_height_in_map_units - 1);
+ y = bottom_bound;
+ x_dir = 2 * h264_pps_info->slice_group_change_direction_flag - 1;
+ y_dir = 0;
+ } else {
+ x = x + x_dir;
+ y = y + y_dir;
+ }
+ }
+ } else if (h264_pps_info->slice_group_map_type == 4) {
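+		/* raster scan slice group map (H.264 clause 8.2.2.5) */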
+ unsigned int mapunits_in_slicegroup_0 =
+ umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+ h264_slice_hdr_info->slice_group_change_cycle),
+ (unsigned int)num_slice_group_mapunits);
+ unsigned int sizeof_upper_left_group =
+ h264_pps_info->slice_group_change_direction_flag ?
+ (num_slice_group_mapunits -
+ mapunits_in_slicegroup_0) : mapunits_in_slicegroup_0;
+ for (i = 0; i < num_slice_group_mapunits; i++) {
+ if (i < sizeof_upper_left_group)
+ map_unit_to_slice_groupmap[i] =
+ h264_pps_info->slice_group_change_direction_flag;
+
+ else
+ map_unit_to_slice_groupmap[i] = 1 -
+ h264_pps_info->slice_group_change_direction_flag;
+ }
+ } else if (h264_pps_info->slice_group_map_type == 5) {
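+		/* wipe slice group map (H.264 clause 8.2.2.6) */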
+ unsigned int mapunits_in_slicegroup_0 =
+ umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+ h264_slice_hdr_info->slice_group_change_cycle),
+ (unsigned int)num_slice_group_mapunits);
+ unsigned int sizeof_upper_left_group =
+ h264_pps_info->slice_group_change_direction_flag ?
+ (num_slice_group_mapunits -
+ mapunits_in_slicegroup_0) : mapunits_in_slicegroup_0;
+
+ for (j = 0; j < (unsigned int)pic_width_in_mbs; j++) {
+ for (i = 0; i < (unsigned int)pic_height_in_map_units; i++) {
+ if (k++ < sizeof_upper_left_group)
+ map_unit_to_slice_groupmap[i * pic_width_in_mbs + j] =
+ h264_pps_info->slice_group_change_direction_flag;
+ else
+ map_unit_to_slice_groupmap[i * pic_width_in_mbs + j] =
+ 1 -
+ h264_pps_info->slice_group_change_direction_flag;
+ }
+ }
+ } else if (h264_pps_info->slice_group_map_type == 6) {
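+		/* explicit slice group map (H.264 clause 8.2.2.7) */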
+ VDEC_ASSERT(num_slice_group_mapunits <=
+ h264_pps_info->h264_ppssgm_info.slicegroupidnum);
+ for (i = 0; i < num_slice_group_mapunits; i++)
+ map_unit_to_slice_groupmap[i] =
+ h264_pps_info->h264_ppssgm_info.slice_group_id[i];
+ }
+}
+
+static int bspp_h264_parse_mvc_slice_extension(void *swsr_context,
+ struct bspp_h264_inter_pict_ctx *inter_pict_ctx)
+{
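+	/*
+	 * nal_unit_header_mvc_extension() (H.264 Annex H): when
+	 * svc_extension_flag is 0, skip non_idr_flag and priority_id (7 bits),
+	 * read view_id (10 bits), then skip temporal_id, anchor_pic_flag,
+	 * inter_view_flag and the reserved bit (6 bits)
+	 */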
+ if (!swsr_read_bits(swsr_context, 1)) {
+ swsr_read_bits(swsr_context, 7);
+ inter_pict_ctx->current_view_id = swsr_read_bits(swsr_context, 10);
+ swsr_read_bits(swsr_context, 6);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int bspp_h264_unitparser_compile_sgmdata
+ (struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+ struct bspp_h264_pps_info *h264_pps_info,
+ struct bspp_pict_hdr_info *pict_hdr_info)
+{
+	memset(&pict_hdr_info->pict_sgm_data, 0, sizeof(pict_hdr_info->pict_sgm_data));
+
+ pict_hdr_info->pict_sgm_data.id = 1;
+
+ /* Allocate memory for SGM. */
+ pict_hdr_info->pict_sgm_data.size =
+ (h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1) *
+ (h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1);
+
+ pict_hdr_info->pict_sgm_data.pic_data = kmalloc((pict_hdr_info->pict_sgm_data.size),
+ GFP_KERNEL);
+ VDEC_ASSERT(pict_hdr_info->pict_sgm_data.pic_data);
+ if (!pict_hdr_info->pict_sgm_data.pic_data) {
+ pict_hdr_info->pict_sgm_data.id = BSPP_INVALID;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ bspp_h264_generate_slice_groupmap(h264_slice_hdr_info, h264_seq_hdr_info, h264_pps_info,
+ pict_hdr_info->pict_sgm_data.pic_data,
+ pict_hdr_info->pict_sgm_data.size);
+
+	/* check the discontinuous_mbs flag in the current frame for FMO */
+ /* NO FMO support */
+ pict_hdr_info->discontinuous_mbs = 0;
+
+ return 0;
+}
+
+static int bspp_h264_unit_parser(void *swsr_context, struct bspp_unit_data *unit_data)
+{
+ unsigned int result = 0;
+ enum bspp_error_type parse_error = BSPP_ERROR_NONE;
+ enum h264_nalunittype nal_unit_type;
+ unsigned char nal_ref_idc;
+ struct bspp_h264_inter_pict_ctx *interpicctx;
+ struct bspp_sequence_hdr_info *out_seq_info;
+ unsigned char id;
+
+ interpicctx = &unit_data->parse_state->inter_pict_ctx->h264_ctx;
+ out_seq_info = unit_data->out.sequ_hdr_info;
+
+ /* At this point we should be EXACTLY at the NALTYPE byte */
+ /* parse the nal header type */
+ swsr_read_bits(swsr_context, 1);
+ nal_ref_idc = swsr_read_bits(swsr_context, 2);
+ nal_unit_type = (enum h264_nalunittype)swsr_read_bits(swsr_context, 5);
+
+ switch (unit_data->unit_type) {
+ case BSPP_UNIT_SEQUENCE:
+ VDEC_ASSERT(nal_unit_type == H264_NALTYPE_SEQUENCE_PARAMETER_SET ||
+ nal_unit_type == H264_NALTYPE_SUBSET_SPS);
+ {
+ unsigned char id_loc;
+ /* Parse SPS structure */
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info =
+ (struct bspp_h264_seq_hdr_info *)(out_seq_info->secure_sequence_info);
+ /* FW SPS Data structure */
+ struct bspp_ddbuf_array_info *tmp = &out_seq_info->fw_sequence;
+ struct h264fw_sequence_ps *h264_fwseq_hdr_info =
+ (struct h264fw_sequence_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr
+ + tmp->buf_offset);
+ /* Common Sequence Header Info */
+ struct vdec_comsequ_hdrinfo *comseq_hdr_info =
+ &out_seq_info->sequ_hdr_info.com_sequ_hdr_info;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Unit Parser:Found SEQUENCE_PARAMETER_SET NAL unit");
+#endif
+ VDEC_ASSERT(h264_seq_hdr_info);
+ VDEC_ASSERT(h264_fwseq_hdr_info);
+ if (!h264_seq_hdr_info)
+ return IMG_ERROR_ALREADY_COMPLETE;
+
+ if (!h264_fwseq_hdr_info)
+ return IMG_ERROR_ALREADY_COMPLETE;
+
+ /* Call SPS parser to populate the "Parse SPS Structure" */
+ unit_data->parse_error |=
+ bspp_h264_sps_parser(swsr_context, unit_data->str_res_handle,
+ h264_seq_hdr_info);
+ /* From "Parse SPS Structure" populate the "FW SPS Data Structure" */
+ bspp_h264_fwseq_hdr_populate(h264_seq_hdr_info, h264_fwseq_hdr_info);
+ /*
+ * From "Parse SPS Structure" populate the
+ * "Common Sequence Header Info"
+ */
+ bspp_h264_commonseq_hdr_populate(h264_seq_hdr_info, comseq_hdr_info);
+ /* Set the SPS ID */
+ /*
+ * seq_parameter_set_id is always in the range 0-31, so we can
+ * add an offset of 32 to indicate a subset sequence header
+ */
+ id_loc = h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+ out_seq_info->sequ_hdr_info.sequ_hdr_id =
+ (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SUBSET_SPS) ? id_loc + 32 : id_loc;
+
+ /*
+ * Set the first SPS ID as Active SPS ID for SEI parsing
+ * to cover the case of not having SeiBufferingPeriod to
+ * give us the SPS ID
+ */
+ if (interpicctx->active_sps_for_sei_parsing == BSPP_INVALID)
+ interpicctx->active_sps_for_sei_parsing =
+ h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+ }
+ break;
+
+ case BSPP_UNIT_PPS:
+ VDEC_ASSERT(nal_unit_type == H264_NALTYPE_PICTURE_PARAMETER_SET);
+ {
+ /* Parse PPS structure */
+ struct bspp_h264_pps_info *h264_pps_info =
+ (struct bspp_h264_pps_info *)(unit_data->out.pps_info->secure_pps_info);
+ /* FW PPS Data structure */
+ struct bspp_ddbuf_array_info *tmp = &unit_data->out.pps_info->fw_pps;
+ struct h264fw_picture_ps *h264fw_pps_info =
+ (struct h264fw_picture_ps *)((unsigned char *)
+ tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Unit Parser:Found PICTURE_PARAMETER_SET NAL unit");
+#endif
+ VDEC_ASSERT(h264_pps_info);
+ VDEC_ASSERT(h264fw_pps_info);
+
+ /* Call PPS parser to populate the "Parse PPS Structure" */
+ unit_data->parse_error |=
+ bspp_h264_pps_parser(swsr_context, unit_data->str_res_handle,
+ h264_pps_info);
+ /* From "Parse PPS Structure" populate the "FW PPS Data Structure"
+ * - the scaling lists
+ */
+ bspp_h264_fwpps_populate(h264_pps_info, h264fw_pps_info);
+ /* Set the PPS ID */
+ unit_data->out.pps_info->pps_id = h264_pps_info->pps_id;
+ }
+ break;
+
+ case BSPP_UNIT_PICTURE:
+ if (nal_unit_type == H264_NALTYPE_SLICE_PREFIX) {
+ if (bspp_h264_parse_mvc_slice_extension(swsr_context, interpicctx))
+ pr_err("%s: No MVC support\n", __func__);
+ } else if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE ||
+ nal_unit_type == H264_NALTYPE_IDR_SLICE) {
+ struct bspp_h264_slice_hdr_info h264_slice_hdr_info;
+ struct bspp_h264_pps_info *h264_pps_info;
+ struct bspp_pps_info *pps_info;
+ struct h264fw_picture_ps *h264fw_pps_info;
+ struct h264fw_sequence_ps *h264_fwseq_hdr_info;
+ struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+ struct bspp_sequence_hdr_info *sequ_hdr_info;
+ struct bspp_ddbuf_array_info *tmp1;
+ struct bspp_ddbuf_array_info *tmp2;
+ int current_pic_is_new = 0;
+ int determined = 0;
+ int id_loc;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Unit Parser:Found PICTURE DATA unit");
+#endif
+
+ unit_data->slice = 1;
+ unit_data->ext_slice = 0;
+
+ if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE) {
+ pr_err("%s: No SVC support\n", __func__);
+ }
+
+ VDEC_ASSERT(unit_data->out.pict_hdr_info);
+ if (!unit_data->out.pict_hdr_info)
+ return IMG_ERROR_CANCELLED;
+
+ /* Default */
+ unit_data->out.pict_hdr_info->discontinuous_mbs = 0;
+
+ /*
+ * Parse the Pic Header, return Parse SPS/PPS
+ * structures
+ */
+ parse_error = bspp_h264_pict_hdr_parser(swsr_context,
+ unit_data->str_res_handle,
+ &h264_slice_hdr_info,
+ &pps_info,
+ &sequ_hdr_info,
+ nal_unit_type,
+ nal_ref_idc);
+
+ if (parse_error) {
+ unit_data->parse_error |= parse_error;
+ return IMG_ERROR_CANCELLED;
+ }
+
+ /*
+ * We signal a closed GOP at every I frame. This does not
+ * conform 100% to the specification but ensures that
+ * seeking always works.
+ */
+ unit_data->new_closed_gop = h264_slice_hdr_info.slice_type ==
+ I_SLICE ? 1 : 0;
+
+ /*
+ * Now pps_info and sequ_hdr_info contain the
+ * PPS/SPS info related to this picture
+ */
+ h264_pps_info = (struct bspp_h264_pps_info *)pps_info->secure_pps_info;
+ h264_seq_hdr_info =
+ (struct bspp_h264_seq_hdr_info *)sequ_hdr_info->secure_sequence_info;
+
+ tmp1 = &pps_info->fw_pps;
+ tmp2 = &sequ_hdr_info->fw_sequence;
+
+ h264fw_pps_info = (struct h264fw_picture_ps *)((unsigned char *)
+ tmp1->ddbuf_info.cpu_virt_addr + tmp1->buf_offset);
+ h264_fwseq_hdr_info = (struct h264fw_sequence_ps *)((unsigned char *)
+ tmp2->ddbuf_info.cpu_virt_addr + tmp2->buf_offset);
+ VDEC_ASSERT(h264_slice_hdr_info.pps_id == h264_pps_info->pps_id);
+ VDEC_ASSERT(h264_pps_info->seq_parameter_set_id ==
+ (unsigned int)h264_seq_hdr_info->sps_info.seq_parameter_set_id);
+
+ /*
+ * Update the decoding-related FW SPS info for the current picture
+ * with any SEI data that was potentially received and relates to
+ * it. Until we receive the picture we do not know which sequence
+ * to update with the SEI data.
+ * Set from the last SEI; needed for decoding.
+ */
+ h264_fwseq_hdr_info->disable_vdmc_filt = interpicctx->disable_vdmc_filt;
+ h264_fwseq_hdr_info->transform4x4_mb_not_available =
+ interpicctx->b4x4transform_mb_unavailable;
+
+ /*
+ * Determine if current slice is a new picture, and update the related
+ * params for future reference
+ * Order of checks is important
+ */
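+ /*
+ * These checks appear to mirror the "first VCL NAL unit of a
+ * new picture" conditions in ISO/IEC 14496-10 section 7.4.1.2.4.
+ */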
+ {
+ struct bspp_parse_state *state = unit_data->parse_state;
+
+ set_if_not_determined_yet(&determined, state->new_view,
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet(&determined, state->next_pic_is_new,
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ (h264_slice_hdr_info.redundant_pic_cnt > 0),
+ &current_pic_is_new, 0);
+ set_if_not_determined_yet
+ (&determined,
+ (state->prev_frame_num !=
+ h264_slice_hdr_info.frame_num),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ (state->prev_pps_id != h264_slice_hdr_info.pps_id),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ (state->prev_field_pic_flag !=
+ h264_slice_hdr_info.field_pic_flag),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ ((h264_slice_hdr_info.field_pic_flag) &&
+ (state->prev_bottom_pic_flag !=
+ h264_slice_hdr_info.bottom_field_flag)),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ ((state->prev_nal_ref_idc == 0 || nal_ref_idc == 0) &&
+ (state->prev_nal_ref_idc != nal_ref_idc)),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ ((h264_seq_hdr_info->sps_info.pic_order_cnt_type == 0) &&
+ ((state->prev_pic_order_cnt_lsb !=
+ h264_slice_hdr_info.pic_order_cnt_lsb) ||
+ (state->prev_delta_pic_order_cnt_bottom !=
+ h264_slice_hdr_info.delta_pic_order_cnt_bottom))),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ ((h264_seq_hdr_info->sps_info.pic_order_cnt_type == 1) &&
+ ((state->prev_delta_pic_order_cnt[0] !=
+ h264_slice_hdr_info.delta_pic_order_cnt[0]) ||
+ (state->prev_delta_pic_order_cnt[1] !=
+ h264_slice_hdr_info.delta_pic_order_cnt[1]))),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet
+ (&determined,
+ ((state->prev_nal_unit_type ==
+ (int)H264_NALTYPE_IDR_SLICE ||
+ nal_unit_type == (int)H264_NALTYPE_IDR_SLICE) &&
+ (state->prev_nal_unit_type !=
+ (int)nal_unit_type)),
+ &current_pic_is_new, 1);
+ set_if_not_determined_yet(&determined,
+ ((state->prev_nal_unit_type ==
+ (int)H264_NALTYPE_IDR_SLICE) &&
+ (state->prev_idr_pic_id !=
+ h264_slice_hdr_info.idr_pic_id)),
+ &current_pic_is_new, 1);
+
+ /*
+ * Update whatever is not updated already in different places of
+ * the code or just needs to be updated here
+ */
+ state->prev_frame_num = h264_slice_hdr_info.frame_num;
+ state->prev_pps_id = h264_slice_hdr_info.pps_id;
+ state->prev_field_pic_flag =
+ h264_slice_hdr_info.field_pic_flag;
+ state->prev_nal_ref_idc = nal_ref_idc;
+ state->prev_pic_order_cnt_lsb =
+ h264_slice_hdr_info.pic_order_cnt_lsb;
+ state->prev_delta_pic_order_cnt_bottom =
+ h264_slice_hdr_info.delta_pic_order_cnt_bottom;
+ state->prev_delta_pic_order_cnt[0] =
+ h264_slice_hdr_info.delta_pic_order_cnt[0];
+ state->prev_delta_pic_order_cnt[1] =
+ h264_slice_hdr_info.delta_pic_order_cnt[1];
+ state->prev_nal_unit_type = (int)nal_unit_type;
+ state->prev_idr_pic_id = h264_slice_hdr_info.idr_pic_id;
+ }
+
+ /* Detect second field and manage the prev_bottom_pic_flag flag */
+ if (h264_slice_hdr_info.field_pic_flag && current_pic_is_new) {
+ unit_data->parse_state->prev_bottom_pic_flag =
+ h264_slice_hdr_info.bottom_field_flag;
+ }
+
+ /* Detect ASO: we just met a new picture */
+ id = h264_slice_hdr_info.colour_plane_id;
+ if (current_pic_is_new) {
+ unsigned int i;
+
+ for (i = 0; i < MAX_COMPONENTS; i++)
+ unit_data->parse_state->prev_first_mb_in_slice[i] = 0;
+ } else if (unit_data->parse_state->prev_first_mb_in_slice[id] >
+ h264_slice_hdr_info.first_mb_in_slice) {
+ /* We just found ASO */
+ unit_data->parse_state->discontinuous_mb = 1;
+ }
+ unit_data->parse_state->prev_first_mb_in_slice[id] =
+ h264_slice_hdr_info.first_mb_in_slice;
+
+ /* We may already know the MBs are discontinuous */
+ if (unit_data->parse_state->discontinuous_mb)
+ unit_data->out.pict_hdr_info->discontinuous_mbs =
+ unit_data->parse_state->discontinuous_mb;
+
+ /*
+ * We want to calculate the scaling lists and populate the VDEC
+ * Picture Header Info only once per picture/field, not for
+ * every slice.
+ */
+ if (current_pic_is_new) {
+ /* Common Sequence Header Info fetched */
+ struct vdec_comsequ_hdrinfo *comseq_hdr_info =
+ &sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info;
+ struct bspp_pict_data *type_pict_aux_data;
+
+ unit_data->parse_state->next_pic_is_new = 0;
+
+ /* Generate SGM for this picture */
+ if (h264_pps_info->num_slice_groups_minus1 != 0 &&
+ h264_pps_info->slice_group_map_type <= 6) {
+ bspp_h264_unitparser_compile_sgmdata
+ (&h264_slice_hdr_info,
+ h264_seq_hdr_info,
+ h264_pps_info,
+ unit_data->out.pict_hdr_info);
+ } else {
+ unit_data->out.pict_hdr_info->pict_sgm_data.pic_data = NULL;
+ unit_data->out.pict_hdr_info->pict_sgm_data.bufmap_id = 0;
+ unit_data->out.pict_hdr_info->pict_sgm_data.buf_offset = 0;
+ unit_data->out.pict_hdr_info->pict_sgm_data.id =
+ BSPP_INVALID;
+ unit_data->out.pict_hdr_info->pict_sgm_data.size = 0;
+ }
+
+ unit_data->parse_state->discontinuous_mb =
+ unit_data->out.pict_hdr_info->discontinuous_mbs;
+
+ /*
+ * Select the scaling lists based on h264_pps_info and
+ * h264_seq_hdr_info and pass them to h264fw_pps_info
+ */
+ bspp_h264_select_scaling_list(h264fw_pps_info,
+ h264_pps_info,
+ h264_seq_hdr_info);
+
+ /*
+ * Uses the common sequence/SINGLE-slice info to populate the
+ * VDEC Picture Header Info
+ */
+ bspp_h264_pict_hdr_populate(nal_unit_type, &h264_slice_hdr_info,
+ comseq_hdr_info,
+ unit_data->out.pict_hdr_info);
+
+ /* Store some raw bitstream fields for output. */
+ unit_data->out.pict_hdr_info->h264_pict_hdr_info.frame_num =
+ h264_slice_hdr_info.frame_num;
+ unit_data->out.pict_hdr_info->h264_pict_hdr_info.nal_ref_idc =
+ nal_ref_idc;
+
+ /*
+ * Update the display-related picture header information with
+ * the related parsed SEI data. The display-related SEI is
+ * used only for the first picture after the SEI.
+ */
+ if (!interpicctx->sei_info_attached_to_pic) {
+ interpicctx->sei_info_attached_to_pic = 1;
+ if (interpicctx->active_sps_for_sei_parsing !=
+ h264_seq_hdr_info->sps_info.seq_parameter_set_id) {
+ /*
+ * We tried to guess the SPS ID that we should use
+ * to parse the SEI, but we guessed wrong
+ */
+ pr_err("Parsed SEI with wrong SPS, data may be parsed wrong");
+ }
+ unit_data->out.pict_hdr_info->disp_info.repeat_first_fld =
+ interpicctx->repeat_first_field;
+ unit_data->out.pict_hdr_info->disp_info.max_frm_repeat =
+ interpicctx->max_frm_repeat;
+ /* SEI - Not supported */
+ }
+
+ /*
+ * For IDR slices, update the active
+ * sequence ID for SEI parsing
+ * (error resilience).
+ */
+ if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+ interpicctx->active_sps_for_sei_parsing =
+ h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+
+ /*
+ * Choose the appropriate auxiliary data
+ * structure to populate.
+ */
+ if (unit_data->parse_state->second_field_flag)
+ type_pict_aux_data =
+ &unit_data->out.pict_hdr_info->second_pict_aux_data;
+ else
+ type_pict_aux_data =
+ &unit_data->out.pict_hdr_info->pict_aux_data;
+
+ /*
+ * We have no container for the PPS that
+ * passes down to the kernel; for this
+ * reason the H.264 secure parser needs
+ * to populate that info into the
+ * picture header (Second)PictAuxData.
+ */
+ type_pict_aux_data->bufmap_id = pps_info->bufmap_id;
+ type_pict_aux_data->buf_offset = pps_info->buf_offset;
+ type_pict_aux_data->pic_data = (void *)h264fw_pps_info;
+ type_pict_aux_data->id = h264_pps_info->pps_id;
+ type_pict_aux_data->size = sizeof(struct h264fw_picture_ps);
+
+ pps_info->ref_count++;
+
+ /* This info comes from NAL directly */
+ unit_data->out.pict_hdr_info->ref = (nal_ref_idc == 0) ? 0 : 1;
+ }
+ if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+ unit_data->new_closed_gop = 1;
+
+ /* Return the SPS ID */
+ /*
+ * seq_parameter_set_id is always in the range 0-31,
+ * so we can add an offset of 32 to indicate a subset
+ * sequence header
+ */
+ id_loc = h264_pps_info->seq_parameter_set_id;
+ unit_data->pict_sequ_hdr_id =
+ (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type ==
+ H264_NALTYPE_SLICE_IDR_SCALABLE) ? id_loc + 32 : id_loc;
+
+ } else if (nal_unit_type == H264_NALTYPE_SLICE_PARTITION_A ||
+ nal_unit_type == H264_NALTYPE_SLICE_PARTITION_B ||
+ nal_unit_type == H264_NALTYPE_SLICE_PARTITION_C) {
+ unit_data->slice = 1;
+
+ pr_err("Unsupported Slice NAL type: %d", nal_unit_type);
+ unit_data->parse_error = BSPP_ERROR_UNSUPPORTED;
+ }
+ break;
+
+ case BSPP_UNIT_UNCLASSIFIED:
+ if (nal_unit_type == H264_NALTYPE_ACCESS_UNIT_DELIMITER) {
+ unit_data->parse_state->next_pic_is_new = 1;
+ } else if (nal_unit_type == H264_NALTYPE_SLICE_PREFIX ||
+ nal_unit_type == H264_NALTYPE_SUBSET_SPS) {
+ /* If MVC is disabled, do nothing */
+ } else {
+ /* Should not have any other type of unclassified data. */
+ pr_err("unclassified data detected!\n");
+ }
+ break;
+
+ case BSPP_UNIT_NON_PICTURE:
+ if (nal_unit_type == H264_NALTYPE_END_OF_SEQUENCE ||
+ nal_unit_type == H264_NALTYPE_END_OF_STREAM) {
+ unit_data->parse_state->next_pic_is_new = 1;
+ } else if (nal_unit_type == H264_NALTYPE_FILLER_DATA ||
+ nal_unit_type == H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION ||
+ nal_unit_type == H264_NALTYPE_AUXILIARY_SLICE) {
+ } else if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+ nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE) {
+ /* If MVC is disabled, do nothing */
+ } else {
+ /* Should not have any other type of non-picture data. */
+ VDEC_ASSERT(0);
+ }
+ break;
+
+ case BSPP_UNIT_UNSUPPORTED:
+ pr_err("Unsupported NAL type: %d", nal_unit_type);
+ unit_data->parse_error = BSPP_ERROR_UNKNOWN_DATAUNIT_DETECTED;
+ break;
+
+ default:
+ VDEC_ASSERT(0);
+ break;
+ }
+
+ return result;
+}
+
+static int bspp_h264releasedata(void *str_alloc, enum bspp_unit_type data_type, void *data_handle)
+{
+ int result = 0;
+
+ if (!data_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ switch (data_type) {
+ case BSPP_UNIT_SEQUENCE:
+ result = bspp_h264_release_sequ_hdr_info(str_alloc, data_handle);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static int bspp_h264resetdata(enum bspp_unit_type data_type, void *data_handle)
+{
+ int result = 0;
+
+ if (!data_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ switch (data_type) {
+ case BSPP_UNIT_SEQUENCE:
+ result = bspp_h264_reset_seq_hdr_info(data_handle);
+ break;
+ case BSPP_UNIT_PPS:
+ result = bspp_h264_reset_pps_info(data_handle);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static void bspp_h264parse_codecconfig(void *swsr_ctx,
+ unsigned int *unitcount,
+ unsigned int *unit_arraycount,
+ unsigned int *delimlength,
+ unsigned int *size_delimlength)
+{
+ unsigned long long value = 6;
+
+ /*
+ * Set the shift-register up to provide next 6 bytes
+ * without emulation prevention detection.
+ */
+ swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
+ /*
+ * Codec config header must be read for size delimited data (H.264)
+ * to get to the start of each unit.
+ * This parsing follows section 5.2.4.1.1 of ISO/IEC 14496-15:2004(E).
+ */
+ /* Configuration version. */
+ swsr_read_bits(swsr_ctx, 8);
+ /* AVC Profile Indication. */
+ swsr_read_bits(swsr_ctx, 8);
+ /* Profile compatibility. */
+ swsr_read_bits(swsr_ctx, 8);
+ /* AVC Level Indication. */
+ swsr_read_bits(swsr_ctx, 8);
+ *delimlength = ((swsr_read_bits(swsr_ctx, 8) & 0x3) + 1) * 8;
+ *unitcount = swsr_read_bits(swsr_ctx, 8) & 0x1f;
+
+ /* Size delimiter is only 2 bytes for H.264 codec configuration. */
+ *size_delimlength = 2 * 8;
+}
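+/*
+ * Illustrative example (assuming an ISO/IEC 14496-15 AVC configuration
+ * record): a record starting 01 64 00 28 FF E1 parses above as
+ * configuration version 1, profile_idc 0x64 (High), level_idc 0x28,
+ * delimiter length ((0xFF & 0x3) + 1) * 8 = 32 bits and
+ * 0xE1 & 0x1f = 1 sequence parameter set to follow.
+ */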
+
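+/*
+ * Presumably called once per unit while walking the codec config record:
+ * when the SPS count returned by bspp_h264parse_codecconfig() is
+ * exhausted, the next record byte holds the number of picture parameter
+ * sets, so a fresh count byte is consumed before decrementing.
+ */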
+static void bspp_h264update_unitcounts(void *swsr_ctx,
+ unsigned int *unitcount,
+ unsigned int *unit_arraycount)
+{
+ if (*unitcount == 0) {
+ unsigned long long value = 1;
+
+ /*
+ * Set the shift-register up to provide next 1 byte without
+ * emulation prevention detection.
+ */
+ swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
+ *unitcount = swsr_read_bits(swsr_ctx, 8);
+ }
+
+ (*unitcount)--;
+}
+
+/*
+ * Sets the parser configuration
+ */
+int bspp_h264_set_parser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *pparser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data)
+{
+ /* Set H.264 parser callbacks. */
+ pparser_callbacks->parse_unit_cb = bspp_h264_unit_parser;
+ pparser_callbacks->release_data_cb = bspp_h264releasedata;
+ pparser_callbacks->reset_data_cb = bspp_h264resetdata;
+ pparser_callbacks->destroy_data_cb = bspp_h264_destroy_data;
+ pparser_callbacks->parse_codec_config_cb = bspp_h264parse_codecconfig;
+ pparser_callbacks->update_unit_counts_cb = bspp_h264update_unitcounts;
+
+ /* Set H.264 specific features. */
+ pvidstd_features->seq_size = sizeof(struct bspp_h264_seq_hdr_info);
+ pvidstd_features->uses_pps = 1;
+ pvidstd_features->pps_size = sizeof(struct bspp_h264_pps_info);
+
+ /* Set H.264 specific shift register config. */
+ pswsr_ctx->emulation_prevention = SWSR_EMPREVENT_00000300;
+ pinterpict_data->h264_ctx.active_sps_for_sei_parsing = BSPP_INVALID;
+
+ if (bstr_format == VDEC_BSTRFORMAT_DEMUX_BYTESTREAM ||
+ bstr_format == VDEC_BSTRFORMAT_ELEMENTARY) {
+ pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SCP;
+ pswsr_ctx->sr_config.delim_length = 3 * 8;
+ pswsr_ctx->sr_config.scp_value = 0x000001;
+ } else if (bstr_format == VDEC_BSTRFORMAT_DEMUX_SIZEDELIMITED) {
+ pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SIZE;
+ /* Set the default size-delimiter number of bits */
+ pswsr_ctx->sr_config.delim_length = 4 * 8;
+ } else {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
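+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only):
+ *
+ *   struct bspp_parser_callbacks cbs;
+ *   struct bspp_vid_std_features features;
+ *   struct bspp_swsr_ctx swsr_ctx;
+ *   struct bspp_inter_pict_data inter_pict;
+ *
+ *   ret = bspp_h264_set_parser_config(VDEC_BSTRFORMAT_ELEMENTARY,
+ *                                     &features, &swsr_ctx, &cbs,
+ *                                     &inter_pict);
+ *
+ * On success, cbs.parse_unit_cb points at bspp_h264_unit_parser and the
+ * shift register is configured for 0x000001 start-code delimiters.
+ */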
+
+/*
+ * This function determines the BSPP unit type based on the
+ * provided bitstream (H264 specific) unit type
+ */
+void bspp_h264_determine_unittype(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unittype)
+{
+ unsigned char type = bitstream_unittype & 0x1f;
+
+ switch (type) {
+ case H264_NALTYPE_SLICE_PREFIX:
+ *bspp_unittype = disable_mvc ? BSPP_UNIT_UNCLASSIFIED : BSPP_UNIT_PICTURE;
+ break;
+ case H264_NALTYPE_SUBSET_SPS:
+ *bspp_unittype = disable_mvc ? BSPP_UNIT_UNCLASSIFIED : BSPP_UNIT_SEQUENCE;
+ break;
+ case H264_NALTYPE_SLICE_SCALABLE:
+ case H264_NALTYPE_SLICE_IDR_SCALABLE:
+ *bspp_unittype = disable_mvc ? BSPP_UNIT_NON_PICTURE : BSPP_UNIT_PICTURE;
+ break;
+ case H264_NALTYPE_SEQUENCE_PARAMETER_SET:
+ *bspp_unittype = BSPP_UNIT_SEQUENCE;
+ break;
+ case H264_NALTYPE_PICTURE_PARAMETER_SET:
+ *bspp_unittype = BSPP_UNIT_PPS;
+ break;
+ case H264_NALTYPE_SLICE:
+ case H264_NALTYPE_SLICE_PARTITION_A:
+ case H264_NALTYPE_SLICE_PARTITION_B:
+ case H264_NALTYPE_SLICE_PARTITION_C:
+ case H264_NALTYPE_IDR_SLICE:
+ *bspp_unittype = BSPP_UNIT_PICTURE;
+ break;
+ case H264_NALTYPE_ACCESS_UNIT_DELIMITER:
+ case H264_NALTYPE_SUPPLEMENTAL_ENHANCEMENT_INFO:
+ /*
+ * None of these NAL units should change the unit type if the
+ * current one is a picture, since they can occur anywhere, any
+ * number of times
+ */
+ *bspp_unittype = BSPP_UNIT_UNCLASSIFIED;
+ break;
+ case H264_NALTYPE_END_OF_SEQUENCE:
+ case H264_NALTYPE_END_OF_STREAM:
+ case H264_NALTYPE_FILLER_DATA:
+ case H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION:
+ case H264_NALTYPE_AUXILIARY_SLICE:
+ *bspp_unittype = BSPP_UNIT_NON_PICTURE;
+ break;
+ default:
+ *bspp_unittype = BSPP_UNIT_UNSUPPORTED;
+ break;
+ }
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.h b/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.h
new file mode 100644
index 000000000000..68789dfcc439
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264_secure_parser.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * h.264 secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __H264SECUREPARSER_H__
+#define __H264SECUREPARSER_H__
+
+#include "bspp_int.h"
+#include "vdec_defs.h"
+
+/*
+ * enum h264_nalunittype
+ * @Description Contains H264 NAL unit types
+ */
+enum h264_nalunittype {
+ H264_NALTYPE_UNSPECIFIED = 0,
+ H264_NALTYPE_SLICE = 1,
+ H264_NALTYPE_SLICE_PARTITION_A = 2,
+ H264_NALTYPE_SLICE_PARTITION_B = 3,
+ H264_NALTYPE_SLICE_PARTITION_C = 4,
+ H264_NALTYPE_IDR_SLICE = 5,
+ H264_NALTYPE_SUPPLEMENTAL_ENHANCEMENT_INFO = 6,
+ H264_NALTYPE_SEQUENCE_PARAMETER_SET = 7,
+ H264_NALTYPE_PICTURE_PARAMETER_SET = 8,
+ H264_NALTYPE_ACCESS_UNIT_DELIMITER = 9,
+ H264_NALTYPE_END_OF_SEQUENCE = 10,
+ H264_NALTYPE_END_OF_STREAM = 11,
+ H264_NALTYPE_FILLER_DATA = 12,
+ H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION = 13,
+ H264_NALTYPE_SLICE_PREFIX = 14,
+ H264_NALTYPE_SUBSET_SPS = 15,
+ H264_NALTYPE_AUXILIARY_SLICE = 19,
+ H264_NALTYPE_SLICE_SCALABLE = 20,
+ H264_NALTYPE_SLICE_IDR_SCALABLE = 21,
+ H264_NALTYPE_MAX = 31,
+ H264_NALTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct bspp_h264_sps_info
+ * @Description H264 SPS parsed information
+ */
+struct bspp_h264_sps_info {
+ unsigned int profile_idc;
+ unsigned int constraint_set_flags;
+ unsigned int level_idc;
+ unsigned char seq_parameter_set_id;
+ unsigned char chroma_format_idc;
+ int separate_colour_plane_flag;
+ unsigned int bit_depth_luma_minus8;
+ unsigned int bit_depth_chroma_minus8;
+ unsigned char qpprime_y_zero_transform_bypass_flag;
+ int seq_scaling_matrix_present_flag;
+ unsigned char seq_scaling_list_present_flag[12];
+ unsigned int log2_max_frame_num_minus4;
+ unsigned int pic_order_cnt_type;
+ unsigned int log2_max_pic_order_cnt_lsb_minus4;
+ int delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ unsigned int num_ref_frames_in_pic_order_cnt_cycle;
+ unsigned int *offset_for_ref_frame;
+ unsigned int max_num_ref_frames;
+ int gaps_in_frame_num_value_allowed_flag;
+ unsigned int pic_width_in_mbs_minus1;
+ unsigned int pic_height_in_map_units_minus1;
+ int frame_mbs_only_flag;
+ int mb_adaptive_frame_field_flag;
+ int direct_8x8_inference_flag;
+ int frame_cropping_flag;
+ unsigned int frame_crop_left_offset;
+ unsigned int frame_crop_right_offset;
+ unsigned int frame_crop_top_offset;
+ unsigned int frame_crop_bottom_offset;
+ int vui_parameters_present_flag;
+ /* mvc_vui_parameters_present_flag; UNUSED */
+ int bmvcvuiparameterpresentflag;
+ /*
+ * scaling lists are derived from both SPS and PPS information
+ * but will change whenever the PPS changes
+ * The derived set of tables are associated here with the PPS
+ * NB: These are in H.264 order
+ */
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char *scllst4x4seq;
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char *scllst8x8seq;
+ /* This is not directly parsed data, though it is extracted */
+ unsigned char usedefaultscalingmatrixflag_seq[12];
+};
+
+struct bspp_h264_hrdparam_info {
+ unsigned char cpb_cnt_minus1;
+ unsigned char bit_rate_scale;
+ unsigned char cpb_size_scale;
+ unsigned int *bit_rate_value_minus1;
+ unsigned int *cpb_size_value_minus1;
+ unsigned char *cbr_flag;
+ unsigned char initial_cpb_removal_delay_length_minus1;
+ unsigned char cpb_removal_delay_length_minus1;
+ unsigned char dpb_output_delay_length_minus1;
+ unsigned char time_offset_length;
+};
+
+struct bspp_h264_vui_info {
+ unsigned char aspect_ratio_info_present_flag;
+ unsigned int aspect_ratio_idc;
+ unsigned int sar_width;
+ unsigned int sar_height;
+ unsigned char overscan_info_present_flag;
+ unsigned char overscan_appropriate_flag;
+ unsigned char video_signal_type_present_flag;
+ unsigned int video_format;
+ unsigned char video_full_range_flag;
+ unsigned char colour_description_present_flag;
+ unsigned int colour_primaries;
+ unsigned int transfer_characteristics;
+ unsigned int matrix_coefficients;
+ unsigned char chroma_location_info_present_flag;
+ unsigned int chroma_sample_loc_type_top_field;
+ unsigned int chroma_sample_loc_type_bottom_field;
+ unsigned char timing_info_present_flag;
+ unsigned int num_units_in_tick;
+ unsigned int time_scale;
+ unsigned char fixed_frame_rate_flag;
+ unsigned char nal_hrd_parameters_present_flag;
+ struct bspp_h264_hrdparam_info nal_hrd_parameters;
+ unsigned char vcl_hrd_parameters_present_flag;
+ struct bspp_h264_hrdparam_info vcl_hrd_parameters;
+ unsigned char low_delay_hrd_flag;
+ unsigned char pic_struct_present_flag;
+ unsigned char bitstream_restriction_flag;
+ unsigned char motion_vectors_over_pic_boundaries_flag;
+ unsigned int max_bytes_per_pic_denom;
+ unsigned int max_bits_per_mb_denom;
+ unsigned int log2_max_mv_length_vertical;
+ unsigned int log2_max_mv_length_horizontal;
+ unsigned int num_reorder_frames;
+ unsigned int max_dec_frame_buffering;
+};
+
+/*
+ * struct bspp_h264_seq_hdr_info
+ * @Description Contains everything parsed from the Sequence Header.
+ */
+struct bspp_h264_seq_hdr_info {
+ /* Video sequence header information */
+ struct bspp_h264_sps_info sps_info;
+ /* VUI sequence header information. */
+ struct bspp_h264_vui_info vui_info;
+};
+
+/**
+ * struct bspp_h264_ppssgm_info - H264 PPS slice group map parse data.
+ * @slice_group_id: array of parsed slice_group_id values
+ * @slicegroupidnum: number of entries in @slice_group_id
+ */
+struct bspp_h264_ppssgm_info {
+ unsigned char *slice_group_id;
+ unsigned short slicegroupidnum;
+};
+
+/*
+ * struct bspp_h264_pps_info
+ * @Description This structure contains H264 PPS parse data.
+ */
+struct bspp_h264_pps_info {
+ /* pic_parameter_set_id: defines the PPS ID of the current PPS */
+ int pps_id;
+ /* seq_parameter_set_id: defines the SPS that current PPS points to */
+ int seq_parameter_set_id;
+ int entropy_coding_mode_flag;
+ int pic_order_present_flag;
+ unsigned char num_slice_groups_minus1;
+ unsigned char slice_group_map_type;
+ unsigned short run_length_minus1[8];
+ unsigned short top_left[8];
+ unsigned short bottom_right[8];
+ int slice_group_change_direction_flag;
+ unsigned short slice_group_change_rate_minus1;
+ unsigned short pic_size_in_map_unit;
+ struct bspp_h264_ppssgm_info h264_ppssgm_info;
+ unsigned char num_ref_idx_lx_active_minus1[H264FW_MAX_REFPIC_LISTS];
+ int weighted_pred_flag;
+ unsigned char weighted_bipred_idc;
+ int pic_init_qp_minus26;
+ int pic_init_qs_minus26;
+ int chroma_qp_index_offset;
+ int deblocking_filter_control_present_flag;
+ int constrained_intra_pred_flag;
+ int redundant_pic_cnt_present_flag;
+ int transform_8x8_mode_flag;
+ int pic_scaling_matrix_present_flag;
+ unsigned char pic_scaling_list_present_flag[12];
+ int second_chroma_qp_index_offset;
+
+ /*
+ * scaling lists are derived from both SPS and PPS information
+ * but will change whenever the PPS changes
+ * The derived set of tables are associated here with the PPS
+ * NB: These are in H.264 order
+ */
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char *scllst4x4pic;
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char *scllst8x8pic;
+ /* This is not directly parsed data, though it is extracted */
+ unsigned char usedefaultscalingmatrixflag_pic[12];
+};
+
+/*
+ * enum bspp_h264_slice_type
+ * @Description contains H264 slice types
+ */
+enum bspp_h264_slice_type {
+ P_SLICE = 0,
+ B_SLICE,
+ I_SLICE,
+ SP_SLICE,
+ SI_SLICE,
+ SLICE_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct bspp_h264_slice_hdr_info
+ * @Description This structure contains H264 slice header information
+ */
+struct bspp_h264_slice_hdr_info {
+ unsigned short first_mb_in_slice;
+ enum bspp_h264_slice_type slice_type;
+
+ /* data to ID new picture */
+ unsigned int pps_id;
+ unsigned int frame_num;
+ unsigned char colour_plane_id;
+ unsigned char field_pic_flag;
+ unsigned char bottom_field_flag;
+ unsigned int idr_pic_id;
+ unsigned int pic_order_cnt_lsb;
+ int delta_pic_order_cnt_bottom;
+ int delta_pic_order_cnt[2];
+ unsigned int redundant_pic_cnt;
+
+ /* Things we need to read out when doing In Secure */
+ unsigned char num_ref_idx_active_override_flag;
+ unsigned char num_ref_idx_lx_active_minus1[2];
+ unsigned short slice_group_change_cycle;
+};
+
+/*
+ * @Function bspp_h264_set_parser_config
+ * @Description Sets the parser configuration
+ */
+int bspp_h264_set_parser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *pparser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data);
+
+/*
+ * @Function bspp_h264_determine_unittype
+ * @Description This function determines the BSPP unit type based on the
+ * provided bitstream (H264 specific) unit type
+ */
+void bspp_h264_determine_unittype(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *pbsppunittype);
+
+#endif /* __H264SECUREPARSER_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264_vlc.h b/drivers/media/platform/vxe-vxd/decoder/h264_vlc.h
new file mode 100644
index 000000000000..7cd79ada8ecc
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264_vlc.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * h264 vlc table definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ */
+
+#ifndef __H264_VLC_H__
+#define __H264_VLC_H__
+
+#include <linux/types.h>
+
+static unsigned short h264_vlc_table_data[] = {
+/* NumCoeffTrailingOnes_Table9-5_nC_0-1.out */
+ 4, 0, 0,
+ 4, 1, 5,
+ 4, 2, 10,
+ 2, 1, 4,
+ 2, 1, 6,
+ 0, 1, 8,
+ 0, 2, 11,
+ 4, 0, 15,
+ 4, 1, 4,
+ 4, 1, 9,
+ 4, 0, 19,
+ 4, 1, 14,
+ 4, 1, 23,
+ 4, 1, 27,
+ 4, 1, 18,
+ 4, 1, 13,
+ 4, 1, 8,
+ 2, 5, 8,
+ 0, 1, 50,
+ 0, 0, 53,
+ 0, 0, 54,
+ 4, 2, 31,
+ 4, 2, 22,
+ 4, 2, 17,
+ 4, 2, 12,
+ 0, 2, 7,
+ 0, 2, 14,
+ 0, 2, 21,
+ 0, 2, 28,
+ 0, 1, 35,
+ 4, 5, 53,
+ 3, 5, 0,
+ 4, 2, 32,
+ 4, 2, 38,
+ 4, 2, 33,
+ 4, 2, 28,
+ 4, 2, 43,
+ 4, 2, 34,
+ 4, 2, 29,
+ 4, 2, 24,
+ 4, 2, 51,
+ 4, 2, 46,
+ 4, 2, 41,
+ 4, 2, 40,
+ 4, 2, 47,
+ 4, 2, 42,
+ 4, 2, 37,
+ 4, 2, 36,
+ 4, 2, 59,
+ 4, 2, 54,
+ 4, 2, 49,
+ 4, 2, 48,
+ 4, 2, 55,
+ 4, 2, 50,
+ 4, 2, 45,
+ 4, 2, 44,
+ 4, 2, 67,
+ 4, 2, 62,
+ 4, 2, 61,
+ 4, 2, 56,
+ 4, 2, 63,
+ 4, 2, 58,
+ 4, 2, 57,
+ 4, 2, 52,
+ 4, 1, 64,
+ 4, 1, 66,
+ 4, 1, 65,
+ 4, 1, 60,
+ 4, 1, 39,
+ 4, 1, 30,
+ 4, 1, 25,
+ 4, 1, 20,
+ 4, 0, 35,
+ 4, 0, 26,
+ 4, 0, 21,
+ 4, 0, 16,
+/* NumCoeffTrailingOnes_Table9-5_nC_2-3.out */
+ 0, 2, 16,
+ 0, 1, 73,
+ 0, 1, 76,
+ 0, 0, 79,
+ 4, 3, 19,
+ 4, 3, 15,
+ 4, 2, 10,
+ 4, 2, 10,
+ 4, 1, 5,
+ 4, 1, 5,
+ 4, 1, 5,
+ 4, 1, 5,
+ 4, 1, 0,
+ 4, 1, 0,
+ 4, 1, 0,
+ 4, 1, 0,
+ 2, 5, 8,
+ 0, 1, 49,
+ 0, 0, 52,
+ 0, 0, 53,
+ 4, 2, 35,
+ 4, 2, 22,
+ 4, 2, 21,
+ 4, 2, 12,
+ 0, 2, 7,
+ 0, 2, 14,
+ 0, 2, 21,
+ 1, 1, 28,
+ 0, 1, 34,
+ 4, 5, 63,
+ 3, 5, 0,
+ 4, 2, 47,
+ 4, 2, 38,
+ 4, 2, 37,
+ 4, 2, 32,
+ 4, 2, 43,
+ 4, 2, 34,
+ 4, 2, 33,
+ 4, 2, 28,
+ 4, 2, 44,
+ 4, 2, 46,
+ 4, 2, 45,
+ 4, 2, 40,
+ 4, 2, 51,
+ 4, 2, 42,
+ 4, 2, 41,
+ 4, 2, 36,
+ 4, 2, 59,
+ 4, 2, 54,
+ 4, 2, 53,
+ 4, 2, 52,
+ 4, 2, 55,
+ 4, 2, 50,
+ 4, 2, 49,
+ 4, 2, 48,
+ 0, 1, 3,
+ 4, 1, 58,
+ 4, 1, 56,
+ 4, 1, 61,
+ 4, 1, 60,
+ 4, 1, 62,
+ 4, 1, 57,
+ 4, 1, 67,
+ 4, 1, 66,
+ 4, 1, 65,
+ 4, 1, 64,
+ 4, 1, 39,
+ 4, 1, 30,
+ 4, 1, 29,
+ 4, 1, 24,
+ 4, 0, 20,
+ 4, 0, 26,
+ 4, 0, 25,
+ 4, 0, 16,
+ 4, 1, 31,
+ 4, 1, 18,
+ 4, 1, 17,
+ 4, 1, 8,
+ 4, 1, 27,
+ 4, 1, 14,
+ 4, 1, 13,
+ 4, 1, 4,
+ 4, 0, 23,
+ 4, 0, 9,
+/* NumCoeffTrailingOnes_Table9-5_nC_4-7.out */
+ 2, 1, 16,
+ 0, 2, 50,
+ 0, 1, 57,
+ 0, 1, 60,
+ 6, 0, 10,
+ 6, 0, 8,
+ 0, 0, 61,
+ 0, 0, 62,
+ 4, 3, 31,
+ 4, 3, 27,
+ 4, 3, 23,
+ 4, 3, 19,
+ 4, 3, 15,
+ 4, 3, 10,
+ 4, 3, 5,
+ 4, 3, 0,
+ 0, 2, 3,
+ 0, 2, 10,
+ 0, 3, 17,
+ 4, 2, 51,
+ 4, 2, 46,
+ 4, 2, 41,
+ 4, 2, 36,
+ 4, 2, 47,
+ 4, 2, 42,
+ 4, 2, 37,
+ 4, 2, 32,
+ 4, 2, 48,
+ 4, 2, 54,
+ 4, 2, 49,
+ 4, 2, 44,
+ 4, 2, 55,
+ 4, 2, 50,
+ 4, 2, 45,
+ 4, 2, 40,
+ 3, 3, 0,
+ 4, 3, 64,
+ 4, 3, 67,
+ 4, 3, 66,
+ 4, 3, 65,
+ 4, 3, 60,
+ 4, 3, 63,
+ 4, 3, 62,
+ 4, 3, 61,
+ 4, 3, 56,
+ 4, 3, 59,
+ 4, 3, 58,
+ 4, 3, 57,
+ 4, 3, 52,
+ 4, 2, 53,
+ 4, 2, 53,
+ 4, 2, 28,
+ 4, 2, 24,
+ 4, 2, 38,
+ 4, 2, 20,
+ 4, 2, 43,
+ 4, 2, 34,
+ 4, 2, 33,
+ 4, 2, 16,
+ 4, 1, 12,
+ 4, 1, 30,
+ 4, 1, 29,
+ 4, 1, 8,
+ 4, 1, 39,
+ 4, 1, 26,
+ 4, 1, 25,
+ 4, 1, 4,
+ 4, 0, 13,
+ 4, 0, 35,
+ 4, 0, 14,
+ 4, 0, 9,
+/* NumCoeffTrailingOnesFixedLen.out */
+ 2, 1, 8,
+ 5, 2, 6,
+ 5, 2, 10,
+ 5, 2, 14,
+ 5, 2, 18,
+ 5, 2, 22,
+ 5, 2, 26,
+ 5, 2, 30,
+ 5, 1, 4,
+ 0, 0, 2,
+ 5, 0, 2,
+ 3, 0, 0,
+ 4, 0, 0,
+/* NumCoeffTrailingOnesChromaDC_YUV420.out */
+ 4, 0, 5,
+ 4, 1, 0,
+ 4, 2, 10,
+ 0, 2, 1,
+ 1, 1, 8,
+ 0, 0, 10,
+ 4, 2, 16,
+ 4, 2, 12,
+ 4, 2, 8,
+ 4, 2, 15,
+ 4, 2, 9,
+ 4, 2, 4,
+ 4, 0, 19,
+ 4, 1, 18,
+ 4, 1, 17,
+ 4, 0, 14,
+ 4, 0, 13,
+/* NumCoeffTrailingOnesChromaDC_YUV422.out */
+ 4, 0, 0,
+ 4, 1, 5,
+ 4, 2, 10,
+ 0, 2, 4,
+ 4, 4, 15,
+ 4, 5, 19,
+ 2, 3, 9,
+ 4, 2, 27,
+ 4, 2, 23,
+ 4, 2, 18,
+ 4, 2, 14,
+ 4, 2, 13,
+ 4, 2, 9,
+ 4, 2, 8,
+ 4, 2, 4,
+ 0, 1, 5,
+ 0, 1, 8,
+ 0, 1, 11,
+ 0, 1, 14,
+ 1, 2, 17,
+ 4, 1, 22,
+ 4, 1, 17,
+ 4, 1, 16,
+ 4, 1, 12,
+ 4, 1, 31,
+ 4, 1, 26,
+ 4, 1, 21,
+ 4, 1, 20,
+ 4, 1, 35,
+ 4, 1, 30,
+ 4, 1, 25,
+ 4, 1, 24,
+ 4, 1, 34,
+ 4, 1, 33,
+ 4, 1, 29,
+ 4, 1, 28,
+ 3, 2, 0,
+ 3, 2, 0,
+ 3, 2, 0,
+ 4, 2, 32,
+/* TotalZeros_00.out */
+ 4, 0, 0,
+ 0, 0, 6,
+ 0, 0, 7,
+ 0, 0, 8,
+ 0, 0, 9,
+ 0, 0, 10,
+ 0, 2, 11,
+ 4, 0, 2,
+ 4, 0, 1,
+ 4, 0, 4,
+ 4, 0, 3,
+ 4, 0, 6,
+ 4, 0, 5,
+ 4, 0, 8,
+ 4, 0, 7,
+ 4, 0, 10,
+ 4, 0, 9,
+ 3, 2, 0,
+ 4, 2, 15,
+ 4, 2, 14,
+ 4, 2, 13,
+ 4, 1, 12,
+ 4, 1, 12,
+ 4, 1, 11,
+ 4, 1, 11,
+/* TotalZeros_01.out */
+ 1, 1, 8,
+ 0, 0, 14,
+ 0, 0, 15,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 2, 2,
+ 4, 2, 1,
+ 4, 2, 0,
+ 0, 1, 3,
+ 4, 1, 10,
+ 4, 1, 9,
+ 4, 1, 14,
+ 4, 1, 13,
+ 4, 1, 12,
+ 4, 1, 11,
+ 4, 0, 8,
+ 4, 0, 7,
+ 4, 0, 6,
+ 4, 0, 5,
+/* TotalZeros_02.out */
+ 0, 1, 8,
+ 0, 0, 13,
+ 0, 0, 14,
+ 4, 2, 7,
+ 4, 2, 6,
+ 4, 2, 3,
+ 4, 2, 2,
+ 4, 2, 1,
+ 0, 0, 4,
+ 4, 1, 12,
+ 4, 1, 10,
+ 4, 1, 9,
+ 4, 0, 13,
+ 4, 0, 11,
+ 4, 0, 8,
+ 4, 0, 5,
+ 4, 0, 4,
+ 4, 0, 0,
+/* TotalZeros_03.out */
+ 0, 1, 8,
+ 0, 0, 11,
+ 0, 0, 12,
+ 4, 2, 8,
+ 4, 2, 6,
+ 4, 2, 5,
+ 4, 2, 4,
+ 4, 2, 1,
+ 4, 1, 12,
+ 4, 1, 11,
+ 4, 1, 10,
+ 4, 1, 0,
+ 4, 0, 9,
+ 4, 0, 7,
+ 4, 0, 3,
+ 4, 0, 2,
+/* TotalZeros_04.out */
+ 2, 1, 8,
+ 0, 0, 10,
+ 0, 0, 11,
+ 4, 2, 7,
+ 4, 2, 6,
+ 4, 2, 5,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 0, 10,
+ 4, 1, 9,
+ 4, 1, 11,
+ 4, 0, 8,
+ 4, 0, 2,
+ 4, 0, 1,
+ 4, 0, 0,
+/* TotalZeros_05.out */
+ 2, 2, 8,
+ 4, 2, 9,
+ 4, 2, 7,
+ 4, 2, 6,
+ 4, 2, 5,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 2, 2,
+ 4, 0, 8,
+ 4, 1, 1,
+ 4, 2, 0,
+ 4, 2, 10,
+/* TotalZeros_06.out */
+ 2, 2, 8,
+ 4, 2, 8,
+ 4, 2, 6,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 2, 2,
+ 4, 1, 5,
+ 4, 1, 5,
+ 4, 0, 7,
+ 4, 1, 1,
+ 4, 2, 0,
+ 4, 2, 9,
+/* TotalZeros_07.out */
+ 2, 3, 4,
+ 0, 0, 8,
+ 4, 1, 5,
+ 4, 1, 4,
+ 4, 0, 7,
+ 4, 1, 1,
+ 4, 2, 2,
+ 4, 3, 0,
+ 4, 3, 8,
+ 4, 0, 6,
+ 4, 0, 3,
+/* TotalZeros_08.out */
+ 2, 3, 4,
+ 4, 1, 6,
+ 4, 1, 4,
+ 4, 1, 3,
+ 4, 0, 5,
+ 4, 1, 2,
+ 4, 2, 7,
+ 4, 3, 0,
+ 4, 3, 1,
+/* TotalZeros_09.out */
+ 2, 2, 4,
+ 4, 1, 5,
+ 4, 1, 4,
+ 4, 1, 3,
+ 4, 0, 2,
+ 4, 1, 6,
+ 4, 2, 0,
+ 4, 2, 1,
+/* TotalZeros_10.out */
+ 4, 0, 4,
+ 0, 0, 3,
+ 4, 2, 2,
+ 5, 0, 0,
+ 4, 0, 3,
+ 4, 0, 5,
+/* TotalZeros_11.out */
+ 4, 0, 3,
+ 4, 1, 2,
+ 4, 2, 4,
+ 5, 0, 0,
+/* TotalZeros_12.out */
+ 4, 0, 2,
+ 4, 1, 3,
+ 5, 0, 0,
+/* TotalZeros_13.out */
+ 5, 0, 0,
+ 4, 0, 2,
+/* TotalZeros_14.out */
+ 4, 0, 0,
+ 4, 0, 1,
+/* TotalZerosChromaDC_YUV420_00.out */
+ 4, 0, 0,
+ 4, 1, 1,
+ 4, 2, 2,
+ 4, 2, 3,
+/* TotalZerosChromaDC_YUV420_01.out */
+ 4, 0, 0,
+ 4, 1, 1,
+ 4, 1, 2,
+/* TotalZerosChromaDC_YUV420_02.out */
+ 4, 0, 1,
+ 4, 0, 0,
+/* Run_00.out */
+ 4, 0, 1,
+ 4, 0, 0,
+/* Run_01.out */
+ 4, 0, 0,
+ 4, 1, 1,
+ 4, 1, 2,
+/* Run_02.out */
+ 4, 1, 3,
+ 4, 1, 2,
+ 4, 1, 1,
+ 4, 1, 0,
+/* Run_03.out */
+ 0, 0, 4,
+ 4, 1, 2,
+ 4, 1, 1,
+ 4, 1, 0,
+ 4, 0, 4,
+ 4, 0, 3,
+/* Run_04.out */
+ 0, 1, 3,
+ 4, 1, 1,
+ 4, 1, 0,
+ 4, 1, 5,
+ 4, 1, 4,
+ 4, 1, 3,
+ 4, 1, 2,
+/* Run_05.out */
+ 4, 2, 1,
+ 4, 2, 2,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 2, 6,
+ 4, 2, 5,
+ 4, 1, 0,
+ 4, 1, 0,
+/* Run_06.out */
+ 2, 5, 8,
+ 4, 2, 6,
+ 4, 2, 5,
+ 4, 2, 4,
+ 4, 2, 3,
+ 4, 2, 2,
+ 4, 2, 1,
+ 4, 2, 0,
+ 4, 0, 7,
+ 4, 1, 8,
+ 4, 2, 9,
+ 4, 3, 10,
+ 4, 4, 11,
+ 4, 5, 12,
+ 2, 1, 1,
+ 4, 0, 13,
+ 4, 1, 14,
+ 3, 1, 0,
+/* TotalZerosChromaDC_YUV422_00.out */
+ 4, 0, 0,
+ 6, 0, 0,
+ 6, 0, 1,
+ 4, 3, 5,
+ 4, 4, 6,
+ 4, 4, 7,
+/* TotalZerosChromaDC_YUV422_01.out */
+ 6, 1, 1,
+ 4, 1, 1,
+ 4, 2, 2,
+ 4, 2, 0,
+/* TotalZerosChromaDC_YUV422_02.out */
+ 5, 0, 0,
+ 4, 1, 2,
+ 4, 1, 3,
+ 5, 0, 2,
+/* TotalZerosChromaDC_YUV422_03.out */
+ 6, 0, 0,
+ 4, 1, 3,
+ 4, 2, 0,
+ 4, 2, 4,
+/* TotalZerosChromaDC_YUV422_04.out */
+ 5, 0, 0,
+ 5, 0, 1,
+/* TotalZerosChromaDC_YUV422_05.out */
+ 5, 0, 0,
+ 4, 0, 2,
+/* TotalZerosChromaDC_YUV422_06.out */
+ 4, 0, 0,
+ 4, 0, 1
+};
+
+static const unsigned short h264_vlc_table_size = 544;
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264fw_data.h b/drivers/media/platform/vxe-vxd/decoder/h264fw_data.h
new file mode 100644
index 000000000000..e098d27948d0
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264fw_data.h
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the h264 parser firmware module.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+/* Include shared header version here to replace the standard version */
+#include "h264fw_data_shared.h"
+
+#ifndef _H264FW_DATA_H_
+#define _H264FW_DATA_H_
+
+#include "vdecfw_shared.h"
+
+/* Maximum number of alternative CPB specifications in the stream */
+#define H264_MAXIMUMVALUEOFCPB_CNT 32
+
+/*
+ * The maximum DPB size is related to the number of MVC views supported
+ * The size is defined in H.10.2 for the H.264 spec.
+ * If the number of views needs to be changed the DPB size should be too
+ * The limits are as follows:
+ * NumViews 1, 2, 4, 8, 16
+ * MaxDpbFrames: 16, 16, 32, 48, 64
+ */
+#ifdef H264_ENABLE_MVC
+#define H264FW_MAX_NUM_VIEWS 4
+#define H264FW_MAX_DPB_SIZE 32
+#define H264FW_MAX_NUM_MVC_REFS 16
+#else
+#define H264FW_MAX_NUM_VIEWS 1
+#define H264FW_MAX_DPB_SIZE 16
+#define H264FW_MAX_NUM_MVC_REFS 1
+#endif
+
+/* Maximum value for num_ref_frames_in_pic_order_cnt_cycle */
+#define H264FW_MAX_CYCLE_REF_FRAMES 256
+
+/* 4x4 scaling list size */
+#define H264FW_4X4_SIZE 16
+/* 8x8 scaling list size */
+#define H264FW_8X8_SIZE 64
+/* Number of 4x4 scaling lists */
+#define H264FW_NUM_4X4_LISTS 6
+/* Number of 8x8 scaling lists */
+#define H264FW_NUM_8X8_LISTS 6
+
+/* Number of reference picture lists */
+#define H264FW_MAX_REFPIC_LISTS 2
+
+/*
+ * The maximum number of slice groups
+ * remove if slice group map is prepared on the host
+ */
+#define H264FW_MAX_SLICE_GROUPS 8
+
+/* The maximum number of planes for 4:4:4 separate color plane streams */
+#define H264FW_MAX_PLANES 3
+
+#define H264_MAX_SGM_SIZE 8196
+
+#define IS_H264_HIGH_PROFILE(profile_idc, type) \
+ ({ \
+ type __profile_idc = profile_idc; \
+ ((__profile_idc) == H264_PROFILE_HIGH) || \
+ ((__profile_idc) == H264_PROFILE_HIGH10) || \
+ ((__profile_idc) == H264_PROFILE_HIGH422) || \
+ ((__profile_idc) == H264_PROFILE_HIGH444) || \
+ ((__profile_idc) == H264_PROFILE_CAVLC444) || \
+ ((__profile_idc) == H264_PROFILE_MVC_HIGH) || \
+ ((__profile_idc) == H264_PROFILE_MVC_STEREO); })
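+/*
+ * Example (illustrative): IS_H264_HIGH_PROFILE(H264_PROFILE_HIGH10,
+ * enum h264_eprofileidc) evaluates to true, whereas the same test on
+ * H264_PROFILE_BASELINE evaluates to false.
+ */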
+
+/*
+ * This type describes the H.264 NAL unit types
+ */
+enum h264_enaltype {
+ H264FW_NALTYPE_SLICE = 1,
+ H264FW_NALTYPE_IDRSLICE = 5,
+ H264FW_NALTYPE_SEI = 6,
+ H264FW_NALTYPE_SPS = 7,
+ H264FW_NALTYPE_PPS = 8,
+ H264FW_NALTYPE_AUD = 9,
+ H264FW_NALTYPE_EOSEQ = 10,
+ H264FW_NALTYPE_EOSTR = 11,
+ H264FW_NALTYPE_PREFIX = 14,
+ H264FW_NALTYPE_SUBSET_SPS = 15,
+ H264FW_NALTYPE_AUXILIARY_SLICE = 19,
+ H264FW_NALTYPE_EXTSLICE = 20,
+ H264FW_NALTYPE_EXTSLICE_DEPTH_VIEW = 21,
+ H264FW_NALTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * AVC Profile IDC definitions
+ */
+enum h264_eprofileidc {
+ /* YUV 4:4:4/14 "CAVLC 4:4:4" */
+ H264_PROFILE_CAVLC444 = 44,
+ /* YUV 4:2:0/8 "Baseline */
+ H264_PROFILE_BASELINE = 66,
+ /* YUV 4:2:0/8 "Main */
+ H264_PROFILE_MAIN = 77,
+ /* YUV 4:2:0/8 "Scalable" */
+ H264_PROFILE_SCALABLE = 83,
+ /* YUV 4:2:0/8 "Extended" */
+ H264_PROFILE_EXTENDED = 88,
+ /* YUV 4:2:0/8 "High" */
+ H264_PROFILE_HIGH = 100,
+ /* YUV 4:2:0/10 "High 10" */
+ H264_PROFILE_HIGH10 = 110,
+ /* YUV 4:2:0/8 "Multiview High" */
+ H264_PROFILE_MVC_HIGH = 118,
+ /* YUV 4:2:2/10 "High 4:2:2" */
+ H264_PROFILE_HIGH422 = 122,
+ /* YUV 4:2:0/8 "Stereo High" */
+ H264_PROFILE_MVC_STEREO = 128,
+ /* YUV 4:4:4/14 "High 4:4:4" */
+ H264_PROFILE_HIGH444 = 244,
+ H264_PROFILE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the constraint set flags
+ */
+enum h264fw_econstraint_flag {
+ /* Compatible with Baseline profile */
+ H264FW_CONSTRAINT_BASELINE_SHIFT = 7,
+ /* Compatible with Main profile */
+ H264FW_CONSTRAINT_MAIN_SHIFT = 6,
+ /* Compatible with Extended profile */
+ H264FW_CONSTRAINT_EXTENDED_SHIFT = 5,
+ /* Compatible with Intra profiles */
+ H264FW_CONSTRAINT_INTRA_SHIFT = 4,
+ /* Compatible with Multiview High profile */
+ H264FW_CONSTRAINT_MULTIHIGH_SHIFT = 3,
+ /* Compatible with Stereo High profile */
+ H264FW_CONSTRAINT_STEREOHIGH_SHIFT = 2,
+ /* Reserved flag */
+ H264FW_CONSTRAINT_RESERVED6_SHIFT = 1,
+ /* Reserved flag */
+ H264FW_CONSTRAINT_RESERVED7_SHIFT = 0,
+ H264FW_CONSTRAINT_FORCE32BITS = 0x7FFFFFFFU
+};
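+/*
+ * A sketch of the intended use (illustrative): with the flags packed as
+ * parsed, constraint_set0_flag sits in bit 7, so
+ *
+ *   baseline = (sps->constraint_set_flags >>
+ *               H264FW_CONSTRAINT_BASELINE_SHIFT) & 1;
+ *
+ * tests the stream's declared Baseline-profile compatibility.
+ */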
+
+/*
+ * This enum describes the reference status of an H.264 picture.
+ * Unpaired fields should have all eRefStatusX set to the same value
+ *
+ * For Frame, Mbaff, and Pair types individual fields and frame ref status
+ * should be set accordingly.
+ *
+ * eRefStatusFrame eRefStatusTop eRefStatusBottom
+ * UNUSED UNUSED UNUSED
+ * SHORTTERM SHORTTERM SHORTTERM
+ * LONGTERM LONGTERM LONGTERM
+ *
+ * UNUSED SHORT/LONGTERM UNUSED
+ * UNUSED UNUSED SHORT/LONGTERM
+ *
+ * SHORTTERM LONGTERM SHORTTERM
+ * SHORTTERM SHORTTERM LONGTERM
+ * NB: It is not clear from the spec if the Frame should be marked as short
+ * or long term in this case
+ */
+enum h264fw_ereference {
+ /* Picture is unused for reference */
+ H264FW_REF_UNUSED = 0,
+ /* used for short term reference */
+ H264FW_REF_SHORTTERM,
+ /* used for Long Term reference */
+ H264FW_REF_LONGTERM,
+ H264FW_REF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the picture structure.
+ */
+enum h264fw_epicture_type {
+ /* No valid picture */
+ H264FW_TYPE_NONE = 0,
+ /* Picture contains the top (even) lines of the frame */
+ H264FW_TYPE_TOP,
+ /* Picture contains the bottom (odd) lines of the frame */
+ H264FW_TYPE_BOTTOM,
+ /* Picture contains the entire frame */
+ H264FW_TYPE_FRAME,
+ /* Picture contains an MBAFF frame */
+ H264FW_TYPE_MBAFF,
+ /* Picture contains top and bottom lines of the frame */
+ H264FW_TYPE_PAIR,
+ H264FW_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This describes the SPS header data required by the H264 firmware that should
+ * be supplied by the Host.
+ */
+struct h264fw_sequence_ps {
+ /* syntax elements from SPS */
+ /* syntax element from bitstream - 8 bit */
+ unsigned char profile_idc;
+ /* syntax element from bitstream - 2 bit */
+ unsigned char chroma_format_idc;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char separate_colour_plane_flag;
+ /* syntax element from bitstream - 3 bit */
+ unsigned char bit_depth_luma_minus8;
+ /* syntax element from bitstream - 3 bit */
+ unsigned char bit_depth_chroma_minus8;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char delta_pic_order_always_zero_flag;
+ /* syntax element from bitstream - 4+ bit */
+ unsigned char log2_max_pic_order_cnt_lsb;
+ /* syntax element from bitstream - 5 bit */
+ unsigned char max_num_ref_frames;
+ /* syntax element from bitstream - 4+ bit */
+ unsigned char log2_max_frame_num;
+ /* syntax element from bitstream - 2 bit */
+ unsigned char pic_order_cnt_type;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char frame_mbs_only_flag;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char gaps_in_frame_num_value_allowed_flag;
+
+ /*
+ * constraint_set0 to constraint_set7 flags as they occur in the
+ * bitstream (including reserved values)
+ */
+ unsigned char constraint_set_flags;
+ /* syntax element from bitstream - 8 bit */
+ unsigned char level_idc;
+ /* syntax element from bitstream - 8 bit */
+ unsigned char num_ref_frames_in_pic_order_cnt_cycle;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char mb_adaptive_frame_field_flag;
+ /* syntax element from bitstream - 32 bit */
+ int offset_for_non_ref_pic;
+ /* syntax element from bitstream - 32 bit */
+ int offset_for_top_to_bottom_field;
+
+ /* syntax element from bitstream */
+ unsigned int pic_width_in_mbs_minus1;
+ /* syntax element from bitstream */
+ unsigned int pic_height_in_map_units_minus1;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char direct_8x8_inference_flag;
+ /* syntax element from bitstream */
+ unsigned char qpprime_y_zero_transform_bypass_flag;
+
+ /* syntax element from bitstream - 32 bit each */
+ int offset_for_ref_frame[H264FW_MAX_CYCLE_REF_FRAMES];
+
+ /* From VUI information */
+ unsigned char num_reorder_frames;
+ /*
+ * From VUI/MVC SEI; 0 indicates not set, and any actual value of
+ * 0 will be inferred by the firmware
+ */
+ unsigned char max_dec_frame_buffering;
+
+ /* From SPS MVC Extension - for the current view_id */
+ /* Number of views in this stream */
+ unsigned char num_views;
+ /* a Map in order of VOIdx of view_id's */
+ unsigned short view_ids[H264FW_MAX_NUM_VIEWS];
+
+ /* Disable VDMC horizontal/vertical filtering */
+ unsigned char disable_vdmc_filt;
+ /* Disable CABAC 4:4:4 4x4 transform as not available */
+ unsigned char transform4x4_mb_not_available;
+
+ /* anchor reference list */
+ unsigned short anchor_inter_view_reference_id_list[2]
+ [H264FW_MAX_NUM_VIEWS][H264FW_MAX_NUM_MVC_REFS];
+ /* nonanchor reference list */
+ unsigned short non_anchor_inter_view_reference_id_list[2]
+ [H264FW_MAX_NUM_VIEWS][H264FW_MAX_NUM_MVC_REFS];
+ /* number of elements in aui16AnchorInterViewReferenceIndiciesLX[] */
+ unsigned short num_anchor_refsx[2][H264FW_MAX_NUM_VIEWS];
+ /* number of elements in aui16NonAnchorInterViewReferenceIndiciesLX[] */
+ unsigned short num_non_anchor_refsx[2][H264FW_MAX_NUM_VIEWS];
+};
+
+/*
+ * This structure represents HRD parameters.
+ */
+struct h264fw_hrd {
+ /* cpb_cnt_minus1 */
+ unsigned char cpb_cnt_minus1;
+ /* bit_rate_scale */
+ unsigned char bit_rate_scale;
+ /* cpb_size_scale */
+ unsigned char cpb_size_scale;
+ /* bit_rate_value_minus1 */
+ unsigned int bit_rate_value_minus1[H264_MAXIMUMVALUEOFCPB_CNT];
+ /* cpb_size_value_minus1 */
+ unsigned int cpb_size_value_minus1[H264_MAXIMUMVALUEOFCPB_CNT];
+ /* cbr_flag */
+ unsigned char cbr_flag[H264_MAXIMUMVALUEOFCPB_CNT];
+ /* initial_cpb_removal_delay_length_minus1 */
+ unsigned char initial_cpb_removal_delay_length_minus1;
+ /* cpb_removal_delay_length_minus1 */
+ unsigned char cpb_removal_delay_length_minus1;
+ /* dpb_output_delay_length_minus1 */
+ unsigned char dpb_output_delay_length_minus1;
+ /* time_offset_length */
+ unsigned char time_offset_length;
+};
+
+/*
+ * This structure represents the VUI parameters data.
+ */
+struct h264fw_vui {
+ int aspect_ratio_info_present_flag;
+ unsigned char aspect_ratio_idc;
+ unsigned short sar_width;
+ unsigned short sar_height;
+ int overscan_info_present_flag;
+ int overscan_appropriate_flag;
+ int video_signal_type_present_flag;
+ unsigned char video_format;
+ int video_full_range_flag;
+ int colour_description_present_flag;
+ unsigned char colour_primaries;
+ unsigned char transfer_characteristics;
+ unsigned char matrix_coefficients;
+ int chroma_location_info_present_flag;
+ unsigned int chroma_sample_loc_type_top_field;
+ unsigned int chroma_sample_loc_type_bottom_field;
+ int timing_info_present_flag;
+ unsigned int num_units_in_tick;
+ unsigned int time_scale;
+ int fixed_frame_rate_flag;
+ int nal_hrd_parameters_present_flag;
+ struct h264fw_hrd nal_hrd_params;
+ int vcl_hrd_parameters_present_flag;
+ struct h264fw_hrd vcl_hrd_params;
+ int low_delay_hrd_flag;
+ int pic_struct_present_flag;
+ int bitstream_restriction_flag;
+ int motion_vectors_over_pic_boundaries_flag;
+ unsigned int max_bytes_per_pic_denom;
+ unsigned int max_bits_per_mb_denom;
+ unsigned int log2_max_mv_length_vertical;
+ unsigned int log2_max_mv_length_horizontal;
+ unsigned int num_reorder_frames;
+ unsigned int max_dec_frame_buffering;
+};
+
+/*
+ * This describes the HW specific SPS header data required by the H264
+ * firmware that should be supplied by the Host.
+ */
+struct h264fw_ddsequence_ps {
+ /* pre-packed registers derived from SPS */
+ /* Value for CR_VEC_ENTDEC_FE_CONTROL */
+ unsigned int regentdec_control;
+
+ /* NB: This register should contain the 4-bit SGM flag */
+ /* Value for CR_VEC_H264_FE_SPS0 & CR_VEC_H264_BE_SPS0 combined */
+ unsigned int reg_sps0;
+ /* Value of CR_VEC_H264_BE_INTRA_8x8 */
+ unsigned int reg_beintra;
+ /* Value of CR_VEC_H264_FE_CABAC444 */
+ unsigned int reg_fecaabac444;
+ /* Treat CABAC 4:4:4 4x4 transform as not available */
+ unsigned char transform4x4_mb_notavialbale;
+ /* Disable VDMC horizontal/vertical filtering */
+ unsigned char disable_vdmcfilt;
+};
+
+/*
+ * This describes the PPS header data required by the H264 firmware that should
+ * be supplied by the Host.
+ */
+struct h264fw_picture_ps {
+ /* syntax elements from the PPS */
+ /* syntax element from bitstream - 1 bit */
+ unsigned char deblocking_filter_control_present_flag;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char transform_8x8_mode_flag;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char entropy_coding_mode_flag;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char redundant_pic_cnt_present_flag;
+
+ /* syntax element from bitstream - 2 bit */
+ unsigned char weighted_bipred_idc;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char weighted_pred_flag;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char pic_order_present_flag;
+
+ /* 26 + syntax element from bitstream - 7 bit */
+ unsigned char pic_init_qp;
+ /* syntax element from bitstream - 1 bit */
+ unsigned char constrained_intra_pred_flag;
+ /* syntax element from bitstream - 5 bit each */
+ unsigned char num_ref_lx_active_minus1[H264FW_MAX_REFPIC_LISTS];
+
+ /* syntax element from bitstream - 3 bit */
+ unsigned char slice_group_map_type;
+ /* syntax element from bitstream - 3 bit */
+ unsigned char num_slice_groups_minus1;
+ /* syntax element from bitstream - 13 bit */
+ unsigned short slice_group_change_rate_minus1;
+
+ /* syntax element from bitstream */
+ unsigned int chroma_qp_index_offset;
+ /* syntax element from bitstream */
+ unsigned int second_chroma_qp_index_offset;
+
+ /* scaling lists are derived from both SPS and PPS information */
+ /* but will change whenever the PPS changes */
+ /* The derived set of tables are associated here with the PPS */
+ /* NB: These are in H.264 order */
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char scalinglist4x4[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE];
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char scalinglist8x8[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE];
+};
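+/*
+ * Note: as seen in bspp_h264_unit_parser(), an instance of this
+ * structure is what gets attached to the picture header aux data
+ * (pic_data, with size = sizeof(struct h264fw_picture_ps)), so the
+ * firmware receives the PPS alongside the picture.
+ */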
+
+/*
+ * This describes the HW specific PPS header data required by the H264
+ * firmware that should be supplied by the Host.
+ */
+struct h264fw_dd_picture_ps {
+ /* values derived from the PPS */
+ /* Value for MSVDX_CMDS_SLICE_PARAMS_MODE_CONFIG */
+ unsigned char vdmc_mode_config;
+
+ /* pre-packed registers derived from the PPS */
+ /* Value for CR_VEC_H264_FE_PPS0 & CR_VEC_H264_BE_PPS0 combined */
+ unsigned int reg_pps0;
+
+ /*
+ * scaling lists are derived from both SPS and PPS information
+ * but will change whenever the PPS changes
+ * The derived set of tables are associated here with the PPS
+ * But this will become invalid if the SPS changes and will have to be
+ * recalculated
+ * These tables MUST be aligned on a 32-bit boundary
+ * NB: These are in MSVDX order
+ */
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char scalinglist4x4[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE];
+ /* derived from SPS and PPS - 8 bit each */
+ unsigned char scalinglist8x8[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE];
+};
+
+/*
+ * This describes the H.264 parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the H264 firmware
+ * and should be supplied by the Host.
+ */
+struct h264fw_header_data {
+ /* Decode buffers and output control for the current picture */
+ /* Primary decode buffer base addresses */
+ struct vdecfw_image_buffer primary;
+ /* buffer base addresses for alternate output */
+ struct vdecfw_image_buffer alternate;
+ /* Output control: rotation, scaling, oold, etc. */
+ unsigned int pic_cmds[VDECFW_CMD_MAX];
+ /* Macroblock parameters base address for the picture */
+ unsigned int mbparams_base_address;
+
+ unsigned int mbparams_size_per_plane;
+
+ /* Buffers for context preload for colour plane switching (6.x.x) */
+ unsigned int preload_buffer_base_address
+ [H264FW_MAX_PLANES];
+
+ /*
+ * The slice group map should be calculated on the host (using
+ * some slice params) and its base address provided here.
+ */
+ /* Base address of active slice group map */
+ unsigned int slicegroupmap_base_address;
+
+ /* H264 specific control */
+ /* do second pass Intra Deblock on frame */
+	unsigned int do_old __attribute__ ((aligned(4)));
+	/* set to IMG_FALSE to disable second-pass deblock */
+	unsigned int two_pass_flag __attribute__ ((aligned(4)));
+	/* set to IMG_TRUE to disable MVC */
+	unsigned int disable_mvc __attribute__ ((aligned(4)));
+	/*
+	 * Do we have second PPS in uipSecondPPSInfoSource provided for the
+	 * second field
+	 */
+	unsigned int second_pps __attribute__ ((aligned(4)));
+};
+
+/*
+ * This describes an H.264 picture. It is part of the Context data
+ */
+struct h264fw_picture {
+ /* Primary (reconstructed) picture buffers */
+ struct vdecfw_image_buffer primary;
+ /* Secondary (alternative) picture buffers */
+ struct vdecfw_image_buffer alternate;
+ /* Macroblock parameters base address for the picture */
+ unsigned int mbparams_base_address;
+
+ /* Unique ID for this picture */
+ unsigned int transaction_id;
+ /* Picture type */
+ enum h264fw_epicture_type pricture_type;
+
+ /* Reference status of the picture */
+ enum h264fw_ereference ref_status_bottom;
+ /* Reference status of the picture */
+ enum h264fw_ereference ref_status_top;
+ /* Reference status of the picture */
+ enum h264fw_ereference ref_status_frame;
+
+ /* Frame Number */
+ unsigned int frame_number;
+ /* Short term reference info */
+	int frame_number_wrap;
+ /* long term reference number - should be 8-bit */
+ unsigned int longterm_frame_idx;
+
+ /* Top field order count for this picture */
+ int top_field_order_count;
+ /* Bottom field order count for this picture */
+ int bottom_field_order_count;
+ /* MVC view_id */
+ unsigned short view_id;
+	/*
+	 * When the picture is in the DPB: offset to use into the MSVDX DPB
+	 * register table when the current picture is in the same view as
+	 * this one.
+	 */
+ unsigned char view_dpb_offset;
+ /* Flags for this picture for the display process */
+ unsigned char display_flags;
+
+ /* IMG_FALSE if sent to display, or otherwise not needed for display */
+ unsigned char needed_for_output;
+};
+
+/*
+ * This structure describes frame data for POC calculation
+ */
+struct h264fw_poc_picture_data {
+ /* type 0,1,2 */
+ unsigned char mmco_5_flag;
+
+ /* type 0 */
+ unsigned char bottom_field_flag;
+ unsigned short pic_order_cnt_lsb;
+ int top_field_order_count;
+ int pic_order_count_msb;
+
+ /* type 1,2 */
+	short frame_num;
+ int frame_num_offset;
+
+ /* output */
+ int bottom_filed_order_count;
+};
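+
+/*
+ * Illustrative sketch only (not part of the firmware interface): how the
+ * fields above feed the picture order count type 0 derivation from
+ * section 8.2.1.1 of the H.264 spec. The helper name and the max_poc_lsb
+ * parameter (2 to the power of log2_max_pic_order_cnt_lsb from the SPS)
+ * are assumptions for this example.
+ */
+static inline int h264fw_poc_type0_msb(const struct h264fw_poc_picture_data *prev,
+				       unsigned short pic_order_cnt_lsb,
+				       int max_poc_lsb)
+{
+	/* detect LSB wrap-around relative to the previous reference picture */
+	if (pic_order_cnt_lsb < prev->pic_order_cnt_lsb &&
+	    (prev->pic_order_cnt_lsb - pic_order_cnt_lsb) >= (max_poc_lsb / 2))
+		return prev->pic_order_count_msb + max_poc_lsb;
+	if (pic_order_cnt_lsb > prev->pic_order_cnt_lsb &&
+	    (pic_order_cnt_lsb - prev->pic_order_cnt_lsb) > (max_poc_lsb / 2))
+		return prev->pic_order_count_msb - max_poc_lsb;
+	return prev->pic_order_count_msb;
+}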
+
+/*
+ * This structure describes picture data for determining Complementary
+ * Field Pairs
+ */
+struct h264fw_last_pic_data {
+ /* Unique ID for this picture */
+ unsigned int transaction_id;
+ /* Picture type */
+ enum h264fw_epicture_type picture_type;
+ /* Reference status of the picture */
+ enum h264fw_ereference ref_status_frame;
+ /* Frame Number */
+ unsigned int frame_number;
+
+ unsigned int luma_recon;
+ unsigned int chroma_recon;
+ unsigned int chroma_2_recon;
+ unsigned int luma_alter;
+ unsigned int chroma_alter;
+ unsigned int chroma_2_alter;
+ struct vdecfw_image_buffer primary;
+ struct vdecfw_image_buffer alternate;
+ unsigned int mbparams_base_address;
+ /* Top field order count for this picture */
+ int top_field_order_count;
+ /* Bottom field order count for this picture */
+ int bottom_field_order_count;
+};
+
+/*
+ * This describes the H.264 parser component "Context data", shown in the
+ * Firmware Memory Layout diagram. This data is the state preserved across
+ * pictures. It is loaded and saved by the Firmware, but requires the host to
+ * provide buffer(s) for this.
+ */
+struct h264fw_context_data {
+ /* Decoded Picture Buffer */
+ struct h264fw_picture dpb[H264FW_MAX_DPB_SIZE];
+ /*
+ * Inter-view reference components - also used as detail of the previous
+ * picture for any particular view, can be used to determine
+	 * complementary field pairs
+ */
+ struct h264fw_picture interview_prediction_ref[H264FW_MAX_NUM_VIEWS];
+ /* previous ref pic for type0, previous pic for type1&2 */
+ struct h264fw_poc_picture_data prev_poc_pic_data[H264FW_MAX_NUM_VIEWS];
+ /* previous picture information to detect complementary field pairs */
+ struct h264fw_last_pic_data last_pic_data[H264FW_MAX_NUM_VIEWS];
+ struct h264fw_last_pic_data last_displayed_pic_data
+ [H264FW_MAX_NUM_VIEWS];
+
+ /* previous reference frame number for each view */
+ unsigned short prev_ref_frame_num[H264FW_MAX_NUM_VIEWS];
+ /* Bitmap of used slots in each view DPB */
+ unsigned short dpb_bitmap[H264FW_MAX_NUM_VIEWS];
+
+ /* DPB size */
+ unsigned int dpb_size;
+ /* Number of pictures in DPB */
+ unsigned int dpb_fullness;
+
+ unsigned char prev_display_flags;
+ int prev_display;
+ int prev_release;
+ /* Active parameter sets */
+ /* Sequence Parameter Set data */
+ struct h264fw_sequence_ps sps;
+ /* Picture Parameter Set data */
+ struct h264fw_picture_ps pps;
+ /*
+ * Picture Parameter Set data for second field when in the same buffer
+ */
+ struct h264fw_picture_ps second_pps;
+
+ /* Set if stream is MVC */
+ int mvc;
+ /* DPB long term reference information */
+ int max_longterm_frame_idx[H264FW_MAX_NUM_VIEWS];
+};
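+
+/*
+ * Illustrative sketch only: one way a host-side consumer could scan
+ * dpb_bitmap for a free slot in a given view. The helper name is
+ * hypothetical and not part of the firmware interface.
+ */
+static inline int h264fw_first_free_dpb_slot(const struct h264fw_context_data *ctx,
+					     unsigned char view)
+{
+	unsigned short used = ctx->dpb_bitmap[view];
+	int i;
+
+	for (i = 0; i < H264FW_MAX_DPB_SIZE; i++)
+		if (!(used & (1U << i)))
+			return i;	/* first unused DPB slot */
+
+	return -1;	/* DPB full */
+}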
+
+#endif /* _H264FW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/h264fw_data_shared.h b/drivers/media/platform/vxe-vxd/decoder/h264fw_data_shared.h
new file mode 100644
index 000000000000..b8efd5f4c2f5
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/h264fw_data_shared.h
@@ -0,0 +1,759 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the h264 parser firmware module
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifdef USE_SHARING
+#endif
+
+#ifndef _H264FW_DATA_H_
+#define _H264FW_DATA_H_
+
+#include "vdecfw_share.h"
+#include "vdecfw_shared.h"
+
+#define H264_MAX_SPS_COUNT 32
+#define H264_MAX_PPS_COUNT 256
+
+#define H264_SCALING_LISTS_NUM_CHROMA_IDC_NON_3 (8)
+#define H264_SCALING_LISTS_NUM_CHROMA_IDC_3 (12)
+#define MAX_PIC_SCALING_LIST (12)
+
+/* Maximum number of alternative CPB specifications in the stream */
+#define H264_MAXIMUMVALUEOFCPB_CNT 32
+
+/*
+ * The maximum DPB size is related to the number of MVC views supported
+ * The size limits are defined in clause H.10.2 of the H.264 spec.
+ * If the number of views is changed, the DPB size must be updated to match.
+ * The limits are as follows:
+ * NumViews: 1, 2, 4, 8, 16
+ * MaxDpbFrames: 16, 16, 32, 48, 64
+ */
+
+#define H264FW_MAX_NUM_VIEWS 1
+#define H264FW_MAX_DPB_SIZE 16
+#define H264FW_MAX_NUM_MVC_REFS 1
+
+/* Number of H264 VLC table configuration registers */
+#define H264FW_NUM_VLC_REG 22
+
+/* Maximum value for num_ref_frames_in_pic_order_cnt_cycle */
+#define H264FW_MAX_CYCLE_REF_FRAMES 256
+
+/* 4x4 scaling list size */
+#define H264FW_4X4_SIZE 16
+/* 8x8 scaling list size */
+#define H264FW_8X8_SIZE 64
+/* Number of 4x4 scaling lists */
+#define H264FW_NUM_4X4_LISTS 6
+/* Number of 8x8 scaling lists */
+#define H264FW_NUM_8X8_LISTS 6
+
+/* Number of reference picture lists */
+#define H264FW_MAX_REFPIC_LISTS 2
+
+/*
+ * The maximum number of slice groups
+ * remove if slice group map is prepared on the host
+ */
+#define H264FW_MAX_SLICE_GROUPS 8
+
+/* The maximum number of planes for 4:4:4 separate colour plane streams */
+#define H264FW_MAX_PLANES 3
+
+#define H264_MAX_SGM_SIZE 8196
+
+#define IS_H264_HIGH_PROFILE(profile_idc, type) \
+ ({ \
+ type __profile_idc = profile_idc; \
+ (__profile_idc == H264_PROFILE_HIGH) || \
+ (__profile_idc == H264_PROFILE_HIGH10) || \
+ (__profile_idc == H264_PROFILE_HIGH422) || \
+ (__profile_idc == H264_PROFILE_HIGH444) || \
+ (__profile_idc == H264_PROFILE_CAVLC444) || \
+ (__profile_idc == H264_PROFILE_MVC_HIGH) || \
+	(__profile_idc == H264_PROFILE_MVC_STEREO); })
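+
+/*
+ * Usage sketch (illustrative only): profile_idc is parsed as an 8-bit
+ * value, so callers would typically instantiate the macro above with
+ * "unsigned char". The names in this example are hypothetical:
+ *
+ *	if (IS_H264_HIGH_PROFILE(sps_profile_idc, unsigned char))
+ *		parse_high_profile_sps_syntax();
+ */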
+
+/* This type describes the H.264 NAL unit types */
+enum h264_enaltype {
+ H264FW_NALTYPE_SLICE = 1,
+ H264FW_NALTYPE_IDRSLICE = 5,
+ H264FW_NALTYPE_SEI = 6,
+ H264FW_NALTYPE_SPS = 7,
+ H264FW_NALTYPE_PPS = 8,
+ H264FW_NALTYPE_AUD = 9,
+ H264FW_NALTYPE_EOSEQ = 10,
+ H264FW_NALTYPE_EOSTR = 11,
+ H264FW_NALTYPE_PREFIX = 14,
+ H264FW_NALTYPE_SUBSET_SPS = 15,
+ H264FW_NALTYPE_AUXILIARY_SLICE = 19,
+ H264FW_NALTYPE_EXTSLICE = 20,
+ H264FW_NALTYPE_EXTSLICE_DEPTH_VIEW = 21,
+ H264FW_NALTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* AVC Profile IDC definitions */
+enum h264_eprofileidc {
+ H264_PROFILE_CAVLC444 = 44,
+ H264_PROFILE_BASELINE = 66,
+ H264_PROFILE_MAIN = 77,
+ H264_PROFILE_SCALABLE = 83,
+ H264_PROFILE_EXTENDED = 88,
+ H264_PROFILE_HIGH = 100,
+ H264_PROFILE_HIGH10 = 110,
+ H264_PROFILE_MVC_HIGH = 118,
+ H264_PROFILE_HIGH422 = 122,
+ H264_PROFILE_MVC_STEREO = 128,
+ H264_PROFILE_HIGH444 = 244,
+ H264_PROFILE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* This type defines the constraint set flags */
+enum h264fw_econstraint_flag {
+ H264FW_CONSTRAINT_BASELINE_SHIFT = 7,
+ H264FW_CONSTRAINT_MAIN_SHIFT = 6,
+ H264FW_CONSTRAINT_EXTENDED_SHIFT = 5,
+ H264FW_CONSTRAINT_INTRA_SHIFT = 4,
+ H264FW_CONSTRAINT_MULTIHIGH_SHIFT = 3,
+ H264FW_CONSTRAINT_STEREOHIGH_SHIFT = 2,
+ H264FW_CONSTRAINT_RESERVED6_SHIFT = 1,
+ H264FW_CONSTRAINT_RESERVED7_SHIFT = 0,
+ H264FW_CONSTRAINT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This enum describes the reference status of an H.264 picture.
+ *
+ * Unpaired fields should have all eRefStatusX set to the same value
+ *
+ * For Frame, Mbaff, and Pair types individual fields and frame ref status
+ * should be set accordingly.
+ *
+ * eRefStatusFrame eRefStatusTop eRefStatusBottom
+ * UNUSED UNUSED UNUSED
+ * SHORTTERM SHORTTERM SHORTTERM
+ * LONGTERM LONGTERM LONGTERM
+ *
+ * UNUSED SHORT/LONGTERM UNUSED
+ * UNUSED UNUSED SHORT/LONGTERM
+ *
+ * SHORTTERM LONGTERM SHORTTERM
+ * SHORTTERM SHORTTERM LONGTERM
+ * - NB: It is not clear from the spec if the Frame should be marked as short
+ * or long term in this case
+ */
+enum h264fw_ereference {
+ H264FW_REF_UNUSED = 0,
+ H264FW_REF_SHORTTERM,
+ H264FW_REF_LONGTERM,
+ H264FW_REF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* This type defines the picture structure. */
+enum h264fw_epicture_type {
+ H264FW_TYPE_NONE = 0,
+ H264FW_TYPE_TOP,
+ H264FW_TYPE_BOTTOM,
+ H264FW_TYPE_FRAME,
+ H264FW_TYPE_MBAFF,
+ H264FW_TYPE_PAIR,
+ H264FW_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This describes the SPS header data required by the H264 firmware that should
+ * be supplied by the Host.
+ */
+struct h264fw_sequence_ps {
+ /* syntax elements from SPS */
+
+ /* syntax element from bitstream - 8 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, profile_idc);
+ /* syntax element from bitstream - 2 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_format_idc);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, separate_colour_plane_flag);
+ /* syntax element from bitstream - 3 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bit_depth_luma_minus8);
+ /* syntax element from bitstream - 3 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bit_depth_chroma_minus8);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, delta_pic_order_always_zero_flag);
+ /* syntax element from bitstream - 4+ bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_max_pic_order_cnt_lsb);
+
+ /* syntax element from bitstream - 5 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, max_num_ref_frames);
+ /* syntax element from bitstream - 4+ bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_max_frame_num);
+ /* syntax element from bitstream - 2 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pic_order_cnt_type);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, frame_mbs_only_flag);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, gaps_in_frame_num_value_allowed_flag);
+
+ /*
+	 * constraint_set0..7 flags as they occur in the bitstream
+ * (including reserved values)
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, constraint_set_flags);
+ /* syntax element from bitstream - 8 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, level_idc);
+ /* syntax element from bitstream - 8 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_ref_frames_in_pic_order_cnt_cycle);
+
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, mb_adaptive_frame_field_flag);
+ /* syntax element from bitstream - 32 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, offset_for_non_ref_pic);
+ /* syntax element from bitstream - 32 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, offset_for_top_to_bottom_field);
+
+ /* syntax element from bitstream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_width_in_mbs_minus1);
+ /* syntax element from bitstream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_height_in_map_units_minus1);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, direct_8x8_inference_flag);
+ /* syntax element from bitstream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, qpprime_y_zero_transform_bypass_flag);
+
+ /* syntax element from bitstream - 32 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, offset_for_ref_frame[H264FW_MAX_CYCLE_REF_FRAMES]);
+
+ /* From VUI information */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_reorder_frames);
+ /*
+ * From VUI/MVC SEI, 0 indicates not set, any actual 0
+ * value will be inferred by the firmware
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, max_dec_frame_buffering);
+
+ /* From SPS MVC Extension - for the current view_id */
+
+ /* Number of views in this stream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_views);
+	/* a map of view_ids, in VOIdx order */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, view_ids[H264FW_MAX_NUM_VIEWS]);
+
+ /* Disable VDMC horizontal/vertical filtering */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, disable_vdmc_filt);
+ /* Disable CABAC 4:4:4 4x4 transform as not available */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transform4x4_mb_not_available);
+
+ /* anchor reference list */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ anchor_inter_view_reference_id_list[2][H264FW_MAX_NUM_VIEWS]
+ [H264FW_MAX_NUM_MVC_REFS]);
+ /* nonanchor reference list */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ non_anchor_inter_view_reference_id_list[2][H264FW_MAX_NUM_VIEWS]
+ [H264FW_MAX_NUM_MVC_REFS]);
+	/* number of elements in anchor_inter_view_reference_id_list[] */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ num_anchor_refsx[2][H264FW_MAX_NUM_VIEWS]);
+	/* number of elements in non_anchor_inter_view_reference_id_list[] */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ num_non_anchor_refsx[2][H264FW_MAX_NUM_VIEWS]);
+};
+
+/*
+ * This structure represents HRD parameters.
+ */
+struct h264fw_hrd {
+ /* cpb_cnt_minus1; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cpb_cnt_minus1);
+ /* bit_rate_scale; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bit_rate_scale);
+ /* cpb_size_scale; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cpb_size_scale);
+ /* bit_rate_value_minus1 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ bit_rate_value_minus1[H264_MAXIMUMVALUEOFCPB_CNT]);
+ /* cpb_size_value_minus1 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ cpb_size_value_minus1[H264_MAXIMUMVALUEOFCPB_CNT]);
+ /* cbr_flag */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ cbr_flag[H264_MAXIMUMVALUEOFCPB_CNT]);
+ /* initial_cpb_removal_delay_length_minus1; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ initial_cpb_removal_delay_length_minus1);
+ /* cpb_removal_delay_length_minus1; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ cpb_removal_delay_length_minus1);
+ /* dpb_output_delay_length_minus1; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ dpb_output_delay_length_minus1);
+ /* time_offset_length; */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, time_offset_length);
+};
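+
+/*
+ * Illustrative sketch only: the scale/value pairs above combine as in
+ * H.264 Annex E (equation E-37). The helper name is hypothetical, and the
+ * example assumes the IMG_ALIGN_FIELD wrapper keeps the plain field names
+ * accessible.
+ */
+static inline unsigned long long h264fw_hrd_bit_rate(const struct h264fw_hrd *hrd,
+						     unsigned char sched_sel_idx)
+{
+	/* BitRate[i] = (bit_rate_value_minus1[i] + 1) << (6 + bit_rate_scale) */
+	return ((unsigned long long)hrd->bit_rate_value_minus1[sched_sel_idx] + 1)
+		<< (6 + hrd->bit_rate_scale);
+}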
+
+/*
+ * This structure represents the VUI parameters data.
+ */
+struct h264fw_vui {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, aspect_ratio_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, aspect_ratio_idc);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, sar_width);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, sar_height);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, overscan_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, overscan_appropriate_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, video_signal_type_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, video_format);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, video_full_range_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, colour_description_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, colour_primaries);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transfer_characteristics);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, matrix_coefficients);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, chroma_location_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_sample_loc_type_top_field);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_sample_loc_type_bottom_field);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, timing_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, num_units_in_tick);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, time_scale);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, fixed_frame_rate_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, nal_hrd_parameters_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ struct h264fw_hrd, nal_hrd_params);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, vcl_hrd_parameters_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ struct h264fw_hrd, vcl_hrd_params);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, low_delay_hrd_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pic_struct_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, bitstream_restriction_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, motion_vectors_over_pic_boundaries_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, max_bytes_per_pic_denom);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, max_bits_per_mb_denom);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, log2_max_mv_length_vertical);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, log2_max_mv_length_horizontal);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, num_reorder_frames);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, max_dec_frame_buffering);
+};
+
+/*
+ * This describes the HW specific SPS header data required by the H264
+ * firmware that should be supplied by the Host.
+ */
+struct h264fw_ddsequence_ps {
+ /* Value for CR_VEC_ENTDEC_FE_CONTROL */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, regentdec_control);
+
+ /* NB: This register should contain the 4-bit SGM flag */
+
+ /* Value for CR_VEC_H264_FE_SPS0 & CR_VEC_H264_BE_SPS0 combined */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, reg_sps0);
+ /* Value of CR_VEC_H264_BE_INTRA_8x8 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, reg_beintra);
+ /* Value of CR_VEC_H264_FE_CABAC444 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, reg_fecaabac444);
+
+ /* Treat CABAC 4:4:4 4x4 transform as not available */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ transform4x4_mb_notavialbale);
+ /* Disable VDMC horizontal/vertical filtering */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ disable_vdmcfilt);
+};
+
+/*
+ * This describes the PPS header data required by the H264 firmware that should
+ * be supplied by the Host.
+ */
+struct h264fw_picture_ps {
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ deblocking_filter_control_present_flag);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ transform_8x8_mode_flag);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ entropy_coding_mode_flag);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ redundant_pic_cnt_present_flag);
+
+ /* syntax element from bitstream - 2 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ weighted_bipred_idc);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ weighted_pred_flag);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ pic_order_present_flag);
+
+	/* 26 plus the pic_init_qp_minus26 syntax element - 7 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, pic_init_qp);
+ /* syntax element from bitstream - 1 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ constrained_intra_pred_flag);
+ /* syntax element from bitstream - 5 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ num_ref_lx_active_minus1[H264FW_MAX_REFPIC_LISTS]);
+
+ /* syntax element from bitstream - 3 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ slice_group_map_type);
+ /* syntax element from bitstream - 3 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ num_slice_groups_minus1);
+ /* syntax element from bitstream - 13 bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ slice_group_change_rate_minus1);
+
+ /* syntax element from bitstream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int,
+ chroma_qp_index_offset);
+ /* syntax element from bitstream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int,
+ second_chroma_qp_index_offset);
+
+ /*
+ * scaling lists are derived from both SPS and PPS information
+ * but will change whenever the PPS changes
+ * The derived set of tables are associated here with the PPS
+ * NB: These are in H.264 order
+ */
+
+ /* derived from SPS and PPS - 8 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ scalinglist4x4[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]);
+ /* derived from SPS and PPS - 8 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ scalinglist8x8[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]);
+};
+
+/*
+ * This describes the HW specific PPS header data required by the H264
+ * firmware that should be supplied by the Host.
+ */
+struct h264fw_dd_picture_ps {
+ /* Value for MSVDX_CMDS_SLICE_PARAMS_MODE_CONFIG */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ vdmc_mode_config);
+ /* Value for CR_VEC_H264_FE_PPS0 & CR_VEC_H264_BE_PPS0 combined */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, reg_pps0);
+
+ /*
+ * Scaling lists are derived from both SPS and PPS information
+ * but will change whenever the PPS changes. The derived set of tables
+ * are associated here with the PPS, but this will become invalid if
+ * the SPS changes and will have to be recalculated.
+ * These tables MUST be aligned on a 32-bit boundary
+ * NB: These are in MSVDX order
+ */
+
+ /* derived from SPS and PPS - 8 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ scalinglist4x4[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]);
+ /* derived from SPS and PPS - 8 bit each */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ scalinglist8x8[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]);
+};
+
+/*
+ * This describes the H.264 parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the H264 firmware
+ * and should be supplied by the Host.
+ */
+struct h264fw_header_data {
+ struct vdecfw_image_buffer primary;
+ struct vdecfw_image_buffer alternate;
+
+ /* Output control: rotation, scaling, oold, etc. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ pic_cmds[VDECFW_CMD_MAX]);
+ /* Macroblock parameters base address for the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ mbparams_base_address);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ mbparams_size_per_plane);
+ /* Buffers for context preload for colour plane switching (6.x.x) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ preload_buffer_base_address[H264FW_MAX_PLANES]);
+ /* Base address of active slice group map */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ slicegroupmap_base_address);
+
+ /* do second pass Intra Deblock on frame */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, do_old);
+ /* set to IMG_FALSE to disable second-pass deblock */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ two_pass_flag);
+ /* set to IMG_TRUE to disable MVC */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ disable_mvc);
+ /*
+ * Do we have second PPS in uipSecondPPSInfoSource provided
+ * for the second field.
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ second_pps);
+};
+
+/* This describes an H.264 picture. It is part of the Context data */
+struct h264fw_picture {
+ /* Primary (reconstructed) picture buffers */
+ struct vdecfw_image_buffer primary;
+ /* Secondary (alternative) picture buffers */
+ struct vdecfw_image_buffer alternate;
+ /* Macroblock parameters base address for the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, mbparams_base_address);
+
+ /* Unique ID for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, transaction_id);
+ /* Picture type */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_epicture_type, pricture_type);
+
+ /* Reference status of the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_ereference, ref_status_bottom);
+ /* Reference status of the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_ereference, ref_status_top);
+ /* Reference status of the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_ereference, ref_status_frame);
+
+ /* Frame Number */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, frame_number);
+ /* Short term reference info */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, frame_number_wrap);
+ /* long term reference number - should be 8-bit */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, longterm_frame_idx);
+
+ /* Top field order count for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, top_field_order_count);
+ /* Bottom field order count for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, bottom_field_order_count);
+
+ /* MVC view_id */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, view_id);
+
+	/*
+	 * When the picture is in the DPB: offset to use into the MSVDX DPB
+	 * register table when the current picture is in the same view as
+	 * this one.
+	 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, view_dpb_offset);
+ /* Flags for this picture for the display process */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, display_flags);
+
+ /* IMG_FALSE if sent to display, or otherwise not needed for display */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, needed_for_output);
+};
+
+/* This structure describes frame data for POC calculation */
+struct h264fw_poc_picture_data {
+ /* type 0,1,2 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, mmco_5_flag);
+
+ /* type 0 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bottom_field_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, pic_order_cnt_lsb);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, top_field_order_count);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pic_order_count_msb);
+
+ /* type 1,2 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, short, frame_num);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int, frame_num_offset);
+
+ /* output */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, bottom_filed_order_count);
+};
+
+/*
+ * This structure describes picture data for determining
+ * Complementary Field Pairs
+ */
+struct h264fw_last_pic_data {
+ /* Unique ID for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, transaction_id);
+ /* Picture type */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_epicture_type, picture_type);
+ /* Reference status of the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum h264fw_ereference, ref_status_frame);
+ /* Frame Number */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, frame_number);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, luma_recon);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_recon);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_2_recon);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, luma_alter);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_alter);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, chroma_2_alter);
+
+ struct vdecfw_image_buffer primary;
+ struct vdecfw_image_buffer alternate;
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, mbparams_base_address);
+ /* Top field order count for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, top_field_order_count);
+ /* Bottom field order count for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, bottom_field_order_count);
+};
+
+/*
+ * This describes the H.264 parser component "Context data", shown in the
+ * Firmware Memory Layout diagram. This data is the state preserved across
+ * pictures. It is loaded and saved by the Firmware, but requires the host to
+ * provide buffer(s) for this.
+ */
+struct h264fw_context_data {
+ struct h264fw_picture dpb[H264FW_MAX_DPB_SIZE];
+ /*
+ * Inter-view reference components - also used as detail of the previous
+ * picture for any particular view, can be used to determine
+	 * complementary field pairs
+ */
+ struct h264fw_picture interview_prediction_ref[H264FW_MAX_NUM_VIEWS];
+ /* previous ref pic for type0, previous pic for type1&2 */
+ struct h264fw_poc_picture_data prev_poc_pic_data[H264FW_MAX_NUM_VIEWS];
+ /* previous picture information to detect complementary field pairs */
+ struct h264fw_last_pic_data last_pic_data[H264FW_MAX_NUM_VIEWS];
+ struct h264fw_last_pic_data
+ last_displayed_pic_data[H264FW_MAX_NUM_VIEWS];
+
+ /* previous reference frame number for each view */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ prev_ref_frame_num[H264FW_MAX_NUM_VIEWS]);
+ /* Bitmap of used slots in each view DPB */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short,
+ dpb_bitmap[H264FW_MAX_NUM_VIEWS]);
+
+ /* DPB size */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, dpb_size);
+ /* Number of pictures in DPB */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ dpb_fullness);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ prev_display_flags);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int, prev_display);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int, prev_release);
+ /* Sequence Parameter Set data */
+ struct h264fw_sequence_ps sps;
+ /* Picture Parameter Set data */
+ struct h264fw_picture_ps pps;
+ /* Picture Parameter Set data for second field if in the same buffer */
+ struct h264fw_picture_ps second_pps;
+
+ /* Set if stream is MVC */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int, mvc);
+ /* DPB long term reference information */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int,
+ max_longterm_frame_idx[H264FW_MAX_NUM_VIEWS]);
+};
+
+#endif /* _H264FW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.c b/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.c
new file mode 100644
index 000000000000..35fbd7155420
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.c
@@ -0,0 +1,2895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * hevc secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp_int.h"
+#include "hevc_secure_parser.h"
+#include "hevcfw_data.h"
+#include "pixel_api.h"
+#include "swsr.h"
+#include "vdec_defs.h"
+#include "vdecdd_utils.h"
+
+#if defined(DEBUG_DECODER_DRIVER)
+#define BSPP_HEVC_SYNTAX(fmt, ...) pr_info("[hevc] " fmt, ## __VA_ARGS__)
+#else
+#define BSPP_HEVC_SYNTAX(fmt, ...)
+#endif
+
+static void HEVC_SWSR_U1(unsigned char *what, unsigned char *where, void *swsr_ctx)
+{
+ *where = swsr_read_bits(swsr_ctx, 1);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s, u(1) : %u", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_UN(unsigned char *what, unsigned int *where,
+ unsigned char numbits, void *swsr_ctx)
+{
+ *where = swsr_read_bits(swsr_ctx, numbits);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s, u(%u) : %u", what, numbits, *where);
+#endif
+}
+
+static void HEVC_SWSR_UE(unsigned char *what, unsigned int *where, void *swsr_ctx)
+{
+ *where = swsr_read_unsigned_expgoulomb(swsr_ctx);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s, ue(v) : %u", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_SE(unsigned char *what, int *where, void *swsr_ctx)
+{
+ *where = swsr_read_signed_expgoulomb(swsr_ctx);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, se(v) : %d", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_FN(unsigned char *what, unsigned char *where,
+ unsigned char numbits, unsigned char pattern,
+ enum bspp_error_type *bspperror, void *swsr_ctx)
+{
+ *where = swsr_read_bits(swsr_ctx, numbits);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s, f(%u) : %u", what, numbits, *where);
+#endif
+ if (*where != pattern) {
+ *bspperror |= BSPP_ERROR_INVALID_VALUE;
+ pr_warn("Invalid value of %s (f(%u), expected: %u, got: %u)",
+ what, numbits, pattern, *where);
+ }
+}
+
+static void HEVC_UCHECK(unsigned char *what, unsigned int val,
+ unsigned int expected,
+ enum bspp_error_type *bspperror)
+{
+ if (val != expected) {
+ *bspperror |= BSPP_ERROR_INVALID_VALUE;
+ pr_warn("Invalid value of %s (expected: %u, got: %u)",
+ what, expected, val);
+ }
+}
+
+static void HEVC_RANGEUCHECK(unsigned char *what, unsigned int val,
+ unsigned int min, unsigned int max,
+ enum bspp_error_type *bspperror)
+{
+ if ((min > 0 && val < min) || val > max) {
+ *bspperror |= BSPP_ERROR_INVALID_VALUE;
+ pr_warn("Value of %s out of range (expected: [%u, %u], got: %u)",
+ what, min, max, val);
+ }
+}
+
+static void HEVC_RANGESCHECK(unsigned char *what, int val, int min, int max,
+ enum bspp_error_type *bspperror)
+{
+ if (val < min || val > max) {
+ *bspperror |= BSPP_ERROR_INVALID_VALUE;
+ pr_warn("Value of %s out of range (expected: [%d, %d], got: %d)",
+ what, min, max, val);
+ }
+}
+
+#define HEVC_STATIC_ASSERT(expr) ((void)sizeof(unsigned char[1 - 2 * !(expr)]))
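+
+/*
+ * Illustrative note: the assert above relies on a negative array size being
+ * a compile-time error, e.g. HEVC_STATIC_ASSERT(1 + 1 == 2) compiles while
+ * HEVC_STATIC_ASSERT(1 + 1 == 3) does not.
+ */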
+
+#define HEVC_MIN(a, b, type) ({ \
+ type __a = a; \
+ type __b = b; \
+ (((__a) <= (__b)) ? (__a) : (__b)); })
+#define HEVC_MAX(a, b, type) ({ \
+ type __a = a; \
+ type __b = b; \
+ (((__a) >= (__b)) ? (__a) : (__b)); })
+#define HEVC_ALIGN(_val, _alignment, type) ({ \
+ type val = _val; \
+ type alignment = _alignment; \
+ (((val) + (alignment) - 1) & ~((alignment) - 1)); })
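+
+/*
+ * Example (illustrative only): HEVC_ALIGN rounds up to a power-of-two
+ * boundary, e.g. aligning a luma width to a 64-pixel CTB:
+ *
+ *	HEVC_ALIGN(1920, 64, unsigned int) == 1920
+ *	HEVC_ALIGN(1921, 64, unsigned int) == 1984
+ */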
+
+static const enum pixel_fmt_idc pixelformat_idc[] = {
+ PIXEL_FORMAT_MONO,
+ PIXEL_FORMAT_420,
+ PIXEL_FORMAT_422,
+ PIXEL_FORMAT_444
+};
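+
+/*
+ * Note: chroma_format_idc from the SPS (0..3) indexes this table directly,
+ * e.g. pixelformat_idc[1] is PIXEL_FORMAT_420 for 4:2:0 streams; callers
+ * must range-check the idc before indexing.
+ */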
+
+static enum bspp_error_type bspp_hevc_parse_vps(void *sr_ctx, struct bspp_hevc_vps *vps);
+
+static void bspp_hevc_sublayhrdparams(void *sr_ctx,
+ struct bspp_hevc_hrd_parameters *hrdparams,
+ unsigned char sublayer_id);
+
+static void bspp_hevc_parsehrdparams(void *sr_ctx,
+ struct bspp_hevc_hrd_parameters *hrdparams,
+ unsigned char common_infpresent,
+ unsigned char max_numsublayers_minus1);
+
+static enum bspp_error_type bspp_hevc_parsesps(void *sr_ctx,
+ void *str_res,
+ struct bspp_hevc_sps *sps);
+
+static enum bspp_error_type bspp_hevc_parsepps(void *sr_ctx, void *str_res,
+ struct bspp_hevc_pps *pps);
+
+static int bspp_hevc_reset_ppsinfo(void *secure_ppsinfo);
+
+static void bspp_hevc_dotilecalculations(struct bspp_hevc_sps *sps,
+ struct bspp_hevc_pps *pps);
+
+static enum bspp_error_type bspp_hevc_parse_slicesegmentheader
+ (void *sr_ctx, void *str_res,
+ struct bspp_hevc_slice_segment_header *ssh,
+ unsigned char nalunit_type,
+ struct bspp_vps_info **vpsinfo,
+ struct bspp_sequence_hdr_info **spsinfo,
+ struct bspp_pps_info **ppsinfo);
+
+static enum bspp_error_type bspp_hevc_parse_profiletierlevel
+ (void *sr_ctx,
+ struct bspp_hevc_profile_tierlevel *ptl,
+ unsigned char vps_maxsublayers_minus1);
+
+static void bspp_hevc_getdefault_scalinglist(unsigned char size_id, unsigned char matrix_id,
+ const unsigned char **default_scalinglist,
+ unsigned int *size);
+
+static enum bspp_error_type bspp_hevc_parse_scalinglistdata
+ (void *sr_ctx,
+ struct bspp_hevc_scalinglist_data *scaling_listdata);
+
+static void bspp_hevc_usedefault_scalinglists(struct bspp_hevc_scalinglist_data *scaling_listdata);
+
+static enum bspp_error_type bspp_hevc_parse_shortterm_refpicset
+ (void *sr_ctx,
+ struct bspp_hevc_shortterm_refpicset *st_refpicset,
+ unsigned char st_rps_idx,
+ unsigned char in_slice_header);
+
+static void bspp_hevc_fillcommonseqhdr(struct bspp_hevc_sps *sps,
+ struct vdec_comsequ_hdrinfo *common_seq);
+
+static void bspp_hevc_fillpicturehdr(struct vdec_comsequ_hdrinfo *common_seq,
+ enum hevc_nalunittype nalunit_type,
+ struct bspp_pict_hdr_info *picture_hdr,
+ struct bspp_hevc_sps *sps,
+ struct bspp_hevc_pps *pps,
+ struct bspp_hevc_vps *vps);
+
+static void bspp_hevc_fill_fwsps(struct bspp_hevc_sps *sps,
+ struct hevcfw_sequence_ps *fwsps);
+
+static void bspp_hevc_fill_fwst_rps(struct bspp_hevc_shortterm_refpicset *strps,
+ struct hevcfw_short_term_ref_picset *fwstrps);
+
+static void bspp_hevc_fill_fwpps(struct bspp_hevc_pps *pps,
+ struct hevcfw_picture_ps *fw_pps);
+
+static void bspp_hevc_fill_fw_scaling_lists(struct bspp_hevc_pps *pps,
+ struct bspp_hevc_sps *sps,
+ struct hevcfw_picture_ps *fw_pps);
+
+static unsigned int bspp_ceil_log2(unsigned int linear_val);
+
+static unsigned char bspp_hevc_picture_is_irap(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_cra(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_idr(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_bla(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_getnorasl_outputflag
+ (enum hevc_nalunittype nalunit_type,
+ struct bspp_hevc_inter_pict_ctx *inter_pict_ctx);
+
+static unsigned char bspp_hevc_range_extensions_is_enabled
+ (struct bspp_hevc_profile_tierlevel *profile_tierlevel);
+
+static int bspp_hevc_unitparser(void *swsr_ctx, struct bspp_unit_data *unitdata)
+{
+ void *sr_ctx = swsr_ctx;
+ int result = 0;
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ struct bspp_inter_pict_data *inter_pict_ctx =
+ unitdata->parse_state->inter_pict_ctx;
+ unsigned char forbidden_zero_bit = 0;
+ unsigned char nal_unit_type = 0;
+ unsigned char nuh_layer_id = 0;
+ unsigned char nuh_temporal_id_plus1 = 0;
+
+ HEVC_SWSR_FN("forbidden_zero_bit", &forbidden_zero_bit, 1, 0, &parse_err, sr_ctx);
+ HEVC_SWSR_UN("nal_unit_type", (unsigned int *)&nal_unit_type, 6, sr_ctx);
+ /* for current version of HEVC nuh_layer_id "shall be equal to 0" */
+ HEVC_SWSR_FN("nuh_layer_id", &nuh_layer_id, 6, 0, &parse_err, sr_ctx);
+ HEVC_SWSR_UN("nuh_temporal_id_plus1", (unsigned int *)&nuh_temporal_id_plus1, 3, sr_ctx);
+
+ switch (unitdata->unit_type) {
+ case BSPP_UNIT_VPS:
+ {
+ struct bspp_hevc_vps *vps =
+ (struct bspp_hevc_vps *)unitdata->out.vps_info->secure_vpsinfo;
+
+ unitdata->parse_error |= bspp_hevc_parse_vps(sr_ctx, vps);
+ unitdata->out.vps_info->vps_id =
+ vps->vps_video_parameter_set_id;
+ }
+ break;
+
+ case BSPP_UNIT_SEQUENCE:
+ {
+ struct bspp_ddbuf_array_info *tmp;
+ struct hevcfw_sequence_ps *fwsps;
+ struct vdec_comsequ_hdrinfo *common_seq;
+ struct bspp_hevc_sps *sps =
+ (struct bspp_hevc_sps *)unitdata->out.sequ_hdr_info->secure_sequence_info;
+
+ unitdata->parse_error |= bspp_hevc_parsesps(sr_ctx,
+ unitdata->str_res_handle,
+ sps);
+ unitdata->out.sequ_hdr_info->sequ_hdr_info.sequ_hdr_id =
+ sps->sps_seq_parameter_set_id;
+
+ tmp = &unitdata->out.sequ_hdr_info->fw_sequence;
+ /* handle firmware headers */
+ fwsps =
+ (struct hevcfw_sequence_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr +
+ tmp->buf_offset);
+
+ bspp_hevc_fill_fwsps(sps, fwsps);
+
+ /* handle common sequence header */
+ common_seq =
+ &unitdata->out.sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info;
+
+ bspp_hevc_fillcommonseqhdr(sps, common_seq);
+ }
+ break;
+
+ case BSPP_UNIT_PPS:
+ {
+ struct bspp_ddbuf_array_info *tmp;
+ struct hevcfw_picture_ps *fw_pps;
+ struct bspp_hevc_pps *pps =
+ (struct bspp_hevc_pps *)unitdata->out.pps_info->secure_pps_info;
+
+ unitdata->parse_error |= bspp_hevc_parsepps(sr_ctx,
+ unitdata->str_res_handle,
+ pps);
+ unitdata->out.pps_info->pps_id = pps->pps_pic_parameter_set_id;
+
+ tmp = &unitdata->out.pps_info->fw_pps;
+ /* handle firmware headers */
+ fw_pps =
+ (struct hevcfw_picture_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr +
+ tmp->buf_offset);
+ bspp_hevc_fill_fwpps(pps, fw_pps);
+ }
+ break;
+
+ case BSPP_UNIT_PICTURE:
+ {
+ struct bspp_hevc_slice_segment_header ssh;
+ struct bspp_vps_info *vps_info = NULL;
+ struct bspp_sequence_hdr_info *sequ_hdr_info = NULL;
+ struct bspp_hevc_sps *hevc_sps = NULL;
+ struct bspp_pps_info *ppsinfo = NULL;
+ enum bspp_error_type parse_error;
+ struct bspp_ddbuf_array_info *tmp;
+ struct hevcfw_picture_ps *fw_pps;
+ struct bspp_pict_data *pictdata;
+ struct bspp_hevc_pps *pps;
+
+ /*
+ * EOS has to be attached to picture data, so it can be used
+ * for NoRaslOutputFlag calculation in FW
+ */
+ inter_pict_ctx->hevc_ctx.eos_detected = 0;
+ if (nal_unit_type == HEVC_NALTYPE_EOS) {
+ inter_pict_ctx->hevc_ctx.eos_detected = 1;
+ break;
+ }
+
+ parse_error = bspp_hevc_parse_slicesegmentheader(sr_ctx,
+ unitdata->str_res_handle,
+ &ssh,
+ nal_unit_type,
+ &vps_info,
+ &sequ_hdr_info,
+ &ppsinfo);
+ unitdata->parse_error |= parse_error;
+ unitdata->slice = 1;
+
+ if (parse_error != BSPP_ERROR_NONE &&
+ parse_error != BSPP_ERROR_CORRECTION_VALIDVALUE) {
+ result = IMG_ERROR_CANCELLED;
+ break;
+ }
+
+		/* If we just started a new picture. */
+ if (ssh.first_slice_segment_in_pic_flag) {
+ tmp = &ppsinfo->fw_pps;
+ /* handle firmware headers */
+ fw_pps =
+ (struct hevcfw_picture_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr
+ + tmp->buf_offset);
+
+ inter_pict_ctx->hevc_ctx.first_after_eos = 0;
+ if (inter_pict_ctx->hevc_ctx.eos_detected) {
+ inter_pict_ctx->hevc_ctx.first_after_eos = 1;
+ inter_pict_ctx->hevc_ctx.eos_detected = 0;
+ }
+
+ /* fill common picture header */
+ bspp_hevc_fillpicturehdr(&sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info,
+ (enum hevc_nalunittype)nal_unit_type,
+ unitdata->out.pict_hdr_info,
+ (struct bspp_hevc_sps *)
+ sequ_hdr_info->secure_sequence_info,
+ (struct bspp_hevc_pps *)ppsinfo->secure_pps_info,
+ (struct bspp_hevc_vps *)vps_info->secure_vpsinfo);
+
+ bspp_hevc_fill_fw_scaling_lists(ppsinfo->secure_pps_info,
+ sequ_hdr_info->secure_sequence_info,
+ fw_pps);
+
+ pictdata = &unitdata->out.pict_hdr_info->pict_aux_data;
+			/*
+			 * We have no container for the PPS that is passed
+			 * down to the kernel, so the hevc secure parser
+			 * needs to populate that info into the picture
+			 * header PictAuxData.
+			 */
+ pictdata->bufmap_id = ppsinfo->bufmap_id;
+ pictdata->buf_offset = ppsinfo->buf_offset;
+ pictdata->pic_data = fw_pps;
+ pictdata->id = fw_pps->pps_pic_parameter_set_id;
+ pictdata->size = sizeof(*fw_pps);
+
+ ppsinfo->ref_count++;
+
+ /* new Coded Video Sequence indication */
+ if (nal_unit_type == HEVC_NALTYPE_IDR_W_RADL ||
+ nal_unit_type == HEVC_NALTYPE_IDR_N_LP ||
+ nal_unit_type == HEVC_NALTYPE_BLA_N_LP ||
+ nal_unit_type == HEVC_NALTYPE_BLA_W_RADL ||
+ nal_unit_type == HEVC_NALTYPE_BLA_W_LP ||
+ nal_unit_type == HEVC_NALTYPE_CRA) {
+ unitdata->new_closed_gop = 1;
+ inter_pict_ctx->hevc_ctx.seq_pic_count = 0;
+ }
+
+ /* Attach SEI data to the picture. */
+ if (!inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic) {
+ /*
+ * If there is already a non-empty SEI list
+ * available
+ */
+ if (inter_pict_ctx->hevc_ctx.sei_rawdata_list) {
+ /* attach it to the picture header. */
+ unitdata->out.pict_hdr_info->hevc_pict_hdr_info.raw_sei_datalist_firstfield
+ =
+ (void *)inter_pict_ctx->hevc_ctx.sei_rawdata_list;
+ inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 1;
+ } else {
+					/*
+					 * Otherwise expose a handle to a picture header
+					 * field so the SEI list can be attached later.
+					 */
+ inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list =
+ &unitdata->out.pict_hdr_info->hevc_pict_hdr_info.raw_sei_datalist_firstfield;
+ }
+ }
+
+ /* Attach raw VUI data to the picture header. */
+ hevc_sps = (struct bspp_hevc_sps *)sequ_hdr_info->secure_sequence_info;
+ if (hevc_sps->vui_raw_data) {
+ hevc_sps->vui_raw_data->ref_count++;
+ unitdata->out.pict_hdr_info->hevc_pict_hdr_info.raw_vui_data =
+ (void *)hevc_sps->vui_raw_data;
+ }
+
+ inter_pict_ctx->hevc_ctx.seq_pic_count++;
+
+ /* NoOutputOfPriorPicsFlag */
+ inter_pict_ctx->not_dpb_flush = 0;
+ if (unitdata->new_closed_gop &&
+ bspp_hevc_picture_is_irap((enum hevc_nalunittype)nal_unit_type) &&
+ bspp_hevc_picture_getnorasl_outputflag((enum hevc_nalunittype)
+ nal_unit_type,
+ &inter_pict_ctx->hevc_ctx)) {
+ if (bspp_hevc_picture_is_cra((enum hevc_nalunittype)nal_unit_type))
+ inter_pict_ctx->not_dpb_flush = 1;
+ else
+ inter_pict_ctx->not_dpb_flush =
+ ssh.no_output_of_prior_pics_flag;
+ }
+
+ unitdata->parse_state->next_pic_is_new = 0;
+ }
+
+ pps = (struct bspp_hevc_pps *)ppsinfo->secure_pps_info;
+ unitdata->pict_sequ_hdr_id = pps->pps_seq_parameter_set_id;
+ }
+ break;
+
+ case BSPP_UNIT_UNCLASSIFIED:
+ case BSPP_UNIT_NON_PICTURE:
+ case BSPP_UNIT_UNSUPPORTED:
+ break;
+
+ default:
+ VDEC_ASSERT("Unknown BSPP Unit Type" == NULL);
+ break;
+ }
+
+ return result;
+}
+
+static void bspp_hevc_initialiseparsing(struct bspp_parse_state *parse_state)
+{
+ /* Indicate that SEI info has not yet been attached to this picture. */
+ parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 0;
+}
+
+static void bspp_hevc_finaliseparsing(void *str_alloc, struct bspp_parse_state *parse_state)
+{
+ /*
+ * If SEI info has not yet been attached to the picture and
+ * there is anything to be attached.
+ */
+ if (!parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic &&
+ parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list) {
+ /* attach the SEI list if there is a handle provided for that. */
+ if (parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list) {
+ /* Attach the raw SEI list to the picture. */
+ *parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list =
+ (void *)parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list;
+ /* Reset the inter-picture data. */
+ parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list = NULL;
+ } else {
+ /* Nowhere to attach the raw SEI list, so just free it. */
+ bspp_freeraw_sei_datalist
+ (str_alloc, parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list);
+ }
+ }
+
+ /* Indicate that SEI info has been attached to the picture. */
+ parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 1;
+ /* Reset the inter-picture SEI list. */
+ parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list = NULL;
+}
+
+static enum bspp_error_type bspp_hevc_parse_vps(void *sr_ctx, struct bspp_hevc_vps *vps)
+{
+ unsigned int parse_err = BSPP_ERROR_NONE;
+ unsigned int i, j;
+
+ VDEC_ASSERT(vps);
+ VDEC_ASSERT(sr_ctx);
+
+ memset(vps, 0, sizeof(struct bspp_hevc_vps));
+
+ HEVC_SWSR_UN("vps_video_parameter_set_id",
+ (unsigned int *)&vps->vps_video_parameter_set_id, 4, sr_ctx);
+ HEVC_SWSR_UN("vps_reserved_three_2bits",
+ (unsigned int *)&vps->vps_reserved_three_2bits, 2, sr_ctx);
+ HEVC_SWSR_UN("vps_max_layers_minus1",
+ (unsigned int *)&vps->vps_max_layers_minus1, 6, sr_ctx);
+ HEVC_SWSR_UN("vps_max_sub_layers_minus1",
+ (unsigned int *)&vps->vps_max_sub_layers_minus1, 3, sr_ctx);
+ HEVC_RANGEUCHECK("vps_max_sub_layers_minus1", vps->vps_max_sub_layers_minus1, 0,
+ HEVC_MAX_NUM_SUBLAYERS - 1, &parse_err);
+ HEVC_SWSR_U1("vps_temporal_id_nesting_flag",
+ &vps->vps_temporal_id_nesting_flag, sr_ctx);
+ HEVC_SWSR_UN("vps_reserved_0xffff_16bits",
+ (unsigned int *)&vps->vps_reserved_0xffff_16bits, 16, sr_ctx);
+
+ if (vps->vps_max_sub_layers_minus1 == 0)
+ HEVC_UCHECK("vps_temporal_id_nesting_flag",
+ vps->vps_temporal_id_nesting_flag, 1, &parse_err);
+
+ parse_err |= bspp_hevc_parse_profiletierlevel(sr_ctx, &vps->profiletierlevel,
+ vps->vps_max_sub_layers_minus1);
+
+ HEVC_SWSR_U1("vps_sub_layer_ordering_info_present_flag",
+ &vps->vps_sub_layer_ordering_info_present_flag, sr_ctx);
+ for (i = vps->vps_sub_layer_ordering_info_present_flag ?
+ 0 : vps->vps_max_sub_layers_minus1;
+ i <= vps->vps_max_sub_layers_minus1; ++i) {
+ HEVC_SWSR_UE("vps_max_dec_pic_buffering_minus1",
+ (unsigned int *)&vps->vps_max_dec_pic_buffering_minus1[i], sr_ctx);
+ HEVC_SWSR_UE("vps_max_num_reorder_pics",
+ (unsigned int *)&vps->vps_max_num_reorder_pics[i], sr_ctx);
+ HEVC_SWSR_UE("vps_max_latency_increase_plus1",
+ (unsigned int *)&vps->vps_max_latency_increase_plus1[i], sr_ctx);
+ }
+
+ HEVC_SWSR_UN("vps_max_layer_id", (unsigned int *)&vps->vps_max_layer_id, 6, sr_ctx);
+ HEVC_SWSR_UE("vps_num_layer_sets_minus1",
+ (unsigned int *)&vps->vps_num_layer_sets_minus1, sr_ctx);
+
+ for (i = 1; i <= vps->vps_num_layer_sets_minus1; ++i) {
+ for (j = 0; j <= vps->vps_max_layer_id; ++j) {
+ HEVC_SWSR_U1("layer_id_included_flag",
+ &vps->layer_id_included_flag[i][j], sr_ctx);
+ }
+ }
+
+ HEVC_SWSR_U1("vps_timing_info_present_flag", &vps->vps_timing_info_present_flag, sr_ctx);
+ if (vps->vps_timing_info_present_flag) {
+ HEVC_SWSR_UN("vps_num_units_in_tick",
+ (unsigned int *)&vps->vps_num_units_in_tick, 32, sr_ctx);
+ HEVC_SWSR_UN("vps_time_scale",
+ (unsigned int *)&vps->vps_time_scale, 32, sr_ctx);
+ HEVC_SWSR_U1("vps_poc_proportional_to_timing_flag",
+ &vps->vps_poc_proportional_to_timing_flag, sr_ctx);
+ if (vps->vps_poc_proportional_to_timing_flag)
+ HEVC_SWSR_UE("vps_num_ticks_poc_diff_one_minus1",
+ (unsigned int *)&vps->vps_num_ticks_poc_diff_one_minus1,
+ sr_ctx);
+
+ HEVC_SWSR_UE("vps_num_hrd_parameters",
+ (unsigned int *)&vps->vps_num_hrd_parameters, sr_ctx);
+
+ /* consume hrd_parameters */
+ for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
+ unsigned short hrd_layer_set_idx;
+ unsigned char cprms_present_flag = 1;
+ struct bspp_hevc_hrd_parameters hrdparams;
+
+ HEVC_SWSR_UE("hrd_layer_set_idx",
+ (unsigned int *)&hrd_layer_set_idx, sr_ctx);
+ if (i > 0)
+ HEVC_SWSR_U1("cprms_present_flag", &cprms_present_flag, sr_ctx);
+
+ bspp_hevc_parsehrdparams(sr_ctx, &hrdparams,
+ cprms_present_flag,
+ vps->vps_max_sub_layers_minus1);
+ }
+ }
+ HEVC_SWSR_U1("vps_extension_flag", &vps->vps_extension_flag, sr_ctx);
+
+ return (enum bspp_error_type)parse_err;
+}
+
+static void bspp_hevc_sublayhrdparams(void *sr_ctx,
+ struct bspp_hevc_hrd_parameters *hrdparams,
+ unsigned char sublayer_id)
+{
+ unsigned char i;
+ unsigned char cpb_cnt = hrdparams->cpb_cnt_minus1[sublayer_id];
+ struct bspp_hevc_sublayer_hrd_parameters *sublay_hrdparams =
+ &hrdparams->sublayhrdparams[sublayer_id];
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(hrdparams);
+ VDEC_ASSERT(cpb_cnt < HEVC_MAX_CPB_COUNT);
+ VDEC_ASSERT(sublayer_id < HEVC_MAX_NUM_SUBLAYERS);
+
+ for (i = 0; i <= cpb_cnt; i++) {
+ HEVC_SWSR_UE("bit_rate_value_minus1",
+ (unsigned int *)&sublay_hrdparams->bit_rate_value_minus1[i], sr_ctx);
+ HEVC_SWSR_UE("cpb_size_value_minus1",
+ (unsigned int *)&sublay_hrdparams->cpb_size_value_minus1[i], sr_ctx);
+ if (hrdparams->sub_pic_hrd_params_present_flag) {
+ HEVC_SWSR_UE("cpb_size_du_value_minus1",
+ (unsigned int *)
+ &sublay_hrdparams->cpb_size_du_value_minus1[i],
+ sr_ctx);
+ HEVC_SWSR_UE("bit_rate_du_value_minus1",
+ (unsigned int *)
+ &sublay_hrdparams->bit_rate_du_value_minus1[i],
+ sr_ctx);
+ }
+ HEVC_SWSR_U1("cbr_flag", &sublay_hrdparams->cbr_flag[i], sr_ctx);
+ }
+}
+
+static void bspp_hevc_parsehrdparams(void *sr_ctx,
+ struct bspp_hevc_hrd_parameters *hrdparams,
+ unsigned char common_infpresent,
+ unsigned char max_numsublayers_minus1)
+{
+ unsigned char i;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(hrdparams);
+ VDEC_ASSERT(max_numsublayers_minus1 < HEVC_MAX_NUM_SUBLAYERS);
+
+ memset(hrdparams, 0, sizeof(struct bspp_hevc_hrd_parameters));
+
+ if (common_infpresent) {
+ HEVC_SWSR_U1("nal_hrd_parameters_present_flag",
+ &hrdparams->nal_hrd_parameters_present_flag, sr_ctx);
+ HEVC_SWSR_U1("vcl_hrd_parameters_present_flag",
+ &hrdparams->vcl_hrd_parameters_present_flag, sr_ctx);
+ if (hrdparams->nal_hrd_parameters_present_flag ||
+ hrdparams->vcl_hrd_parameters_present_flag) {
+ HEVC_SWSR_U1("sub_pic_hrd_params_present_flag",
+ &hrdparams->sub_pic_hrd_params_present_flag,
+ sr_ctx);
+ if (hrdparams->sub_pic_hrd_params_present_flag) {
+ HEVC_SWSR_UN("tick_divisor_minus2",
+ (unsigned int *)&hrdparams->tick_divisor_minus2,
+ 8, sr_ctx);
+ HEVC_SWSR_UN
+ ("du_cpb_removal_delay_increment_length_minus1",
+ (unsigned int *)
+ &hrdparams->du_cpb_removal_delay_increment_length_minus1,
+ 5, sr_ctx);
+ HEVC_SWSR_U1("sub_pic_cpb_params_in_pic_timing_sei_flag",
+ &hrdparams->sub_pic_cpb_params_in_pic_timing_sei_flag,
+ sr_ctx);
+ HEVC_SWSR_UN("dpb_output_delay_du_length_minus1",
+ (unsigned int *)
+ &hrdparams->dpb_output_delay_du_length_minus1,
+ 5, sr_ctx);
+ }
+ HEVC_SWSR_UN("bit_rate_scale",
+ (unsigned int *)&hrdparams->bit_rate_scale, 4, sr_ctx);
+ HEVC_SWSR_UN("cpb_size_scale",
+ (unsigned int *)&hrdparams->cpb_size_scale, 4, sr_ctx);
+ if (hrdparams->sub_pic_hrd_params_present_flag)
+ HEVC_SWSR_UN("cpb_size_du_scale",
+ (unsigned int *)&hrdparams->cpb_size_du_scale,
+ 4, sr_ctx);
+
+ HEVC_SWSR_UN("initial_cpb_removal_delay_length_minus1",
+ (unsigned int *)
+ &hrdparams->initial_cpb_removal_delay_length_minus1,
+ 5, sr_ctx);
+ HEVC_SWSR_UN("au_cpb_removal_delay_length_minus1",
+ (unsigned int *)&hrdparams->au_cpb_removal_delay_length_minus1,
+ 5, sr_ctx);
+ HEVC_SWSR_UN("dpb_output_delay_length_minus1",
+ (unsigned int *)&hrdparams->dpb_output_delay_length_minus1,
+ 5, sr_ctx);
+ }
+ }
+ for (i = 0; i <= max_numsublayers_minus1; i++) {
+ HEVC_SWSR_U1("fixed_pic_rate_general_flag",
+ &hrdparams->fixed_pic_rate_general_flag[i], sr_ctx);
+ hrdparams->fixed_pic_rate_within_cvs_flag[i] =
+ hrdparams->fixed_pic_rate_general_flag[i];
+ if (!hrdparams->fixed_pic_rate_general_flag[i])
+ HEVC_SWSR_U1("fixed_pic_rate_within_cvs_flag",
+ &hrdparams->fixed_pic_rate_within_cvs_flag[i],
+ sr_ctx);
+
+ if (hrdparams->fixed_pic_rate_within_cvs_flag[i])
+ HEVC_SWSR_UE("elemental_duration_in_tc_minus1",
+ (unsigned int *)&hrdparams->elemental_duration_in_tc_minus1[i],
+ sr_ctx);
+ else
+ HEVC_SWSR_U1("low_delay_hrd_flag",
+ &hrdparams->low_delay_hrd_flag[i], sr_ctx);
+
+ if (!hrdparams->low_delay_hrd_flag[i])
+ HEVC_SWSR_UE("cpb_cnt_minus1",
+ (unsigned int *)&hrdparams->cpb_cnt_minus1[i], sr_ctx);
+
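+ /*
+ * Note: NAL and VCL HRD parameters are parsed into the same
+ * per-sublayer storage, so when both are present the VCL
+ * values overwrite the NAL ones.
+ */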
+ if (hrdparams->nal_hrd_parameters_present_flag)
+ bspp_hevc_sublayhrdparams(sr_ctx, hrdparams, i);
+
+ if (hrdparams->vcl_hrd_parameters_present_flag)
+ bspp_hevc_sublayhrdparams(sr_ctx, hrdparams, i);
+ }
+}
+
+static enum bspp_error_type bspp_hevc_parsevui_parameters
+ (void *sr_ctx,
+ struct bspp_hevc_vui_params *vui_params,
+ unsigned char sps_max_sub_layers_minus1)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(vui_params);
+
+ memset(vui_params, 0, sizeof(struct bspp_hevc_vui_params));
+
+ HEVC_SWSR_U1("aspect_ratio_info_present_flag",
+ &vui_params->aspect_ratio_info_present_flag, sr_ctx);
+ if (vui_params->aspect_ratio_info_present_flag) {
+ HEVC_SWSR_UN("aspect_ratio_idc",
+ (unsigned int *)&vui_params->aspect_ratio_idc, 8, sr_ctx);
+ if (vui_params->aspect_ratio_idc == HEVC_EXTENDED_SAR) {
+ HEVC_SWSR_UN("sar_width",
+ (unsigned int *)&vui_params->sar_width, 16, sr_ctx);
+ HEVC_SWSR_UN("sar_height",
+ (unsigned int *)&vui_params->sar_height, 16, sr_ctx);
+ }
+ }
+ HEVC_SWSR_U1("overscan_info_present_flag",
+ &vui_params->overscan_info_present_flag, sr_ctx);
+
+ if (vui_params->overscan_info_present_flag)
+ HEVC_SWSR_U1("overscan_appropriate_flag",
+ &vui_params->overscan_appropriate_flag, sr_ctx);
+
+ HEVC_SWSR_U1("video_signal_type_present_flag",
+ &vui_params->video_signal_type_present_flag, sr_ctx);
+
+ if (vui_params->video_signal_type_present_flag) {
+ HEVC_SWSR_UN("video_format",
+ (unsigned int *)&vui_params->video_format, 3, sr_ctx);
+ HEVC_SWSR_U1("video_full_range_flag",
+ &vui_params->video_full_range_flag, sr_ctx);
+ HEVC_SWSR_U1("colour_description_present_flag",
+ &vui_params->colour_description_present_flag,
+ sr_ctx);
+ if (vui_params->colour_description_present_flag) {
+ HEVC_SWSR_UN("colour_primaries",
+ (unsigned int *)&vui_params->colour_primaries, 8, sr_ctx);
+ HEVC_SWSR_UN("transfer_characteristics",
+ (unsigned int *)&vui_params->transfer_characteristics,
+ 8, sr_ctx);
+ HEVC_SWSR_UN("matrix_coeffs",
+ (unsigned int *)&vui_params->matrix_coeffs, 8, sr_ctx);
+ }
+ }
+
+ HEVC_SWSR_U1("chroma_loc_info_present_flag",
+ &vui_params->chroma_loc_info_present_flag, sr_ctx);
+ if (vui_params->chroma_loc_info_present_flag) {
+ HEVC_SWSR_UE("chroma_sample_loc_type_top_field",
+ (unsigned int *)&vui_params->chroma_sample_loc_type_top_field,
+ sr_ctx);
+ HEVC_RANGEUCHECK("chroma_sample_loc_type_top_field",
+ vui_params->chroma_sample_loc_type_top_field,
+ 0, 5, &parse_err);
+ HEVC_SWSR_UE("chroma_sample_loc_type_bottom_field",
+ (unsigned int *)&vui_params->chroma_sample_loc_type_bottom_field,
+ sr_ctx);
+ HEVC_RANGEUCHECK("chroma_sample_loc_type_bottom_field",
+ vui_params->chroma_sample_loc_type_bottom_field,
+ 0, 5, &parse_err);
+ }
+ HEVC_SWSR_U1("neutral_chroma_indication_flag",
+ &vui_params->neutral_chroma_indication_flag, sr_ctx);
+ HEVC_SWSR_U1("field_seq_flag",
+ &vui_params->field_seq_flag, sr_ctx);
+ HEVC_SWSR_U1("frame_field_info_present_flag",
+ &vui_params->frame_field_info_present_flag, sr_ctx);
+ HEVC_SWSR_U1("default_display_window_flag",
+ &vui_params->default_display_window_flag, sr_ctx);
+ if (vui_params->default_display_window_flag) {
+ HEVC_SWSR_UE("def_disp_win_left_offset",
+ (unsigned int *)&vui_params->def_disp_win_left_offset, sr_ctx);
+ HEVC_SWSR_UE("def_disp_win_right_offset",
+ (unsigned int *)&vui_params->def_disp_win_right_offset, sr_ctx);
+ HEVC_SWSR_UE("def_disp_win_top_offset",
+ (unsigned int *)&vui_params->def_disp_win_top_offset, sr_ctx);
+ HEVC_SWSR_UE("def_disp_win_bottom_offset",
+ (unsigned int *)&vui_params->def_disp_win_bottom_offset, sr_ctx);
+ }
+ HEVC_SWSR_U1("vui_timing_info_present_flag",
+ &vui_params->vui_timing_info_present_flag, sr_ctx);
+ if (vui_params->vui_timing_info_present_flag) {
+ HEVC_SWSR_UN("vui_num_units_in_tick",
+ (unsigned int *)&vui_params->vui_num_units_in_tick, 32, sr_ctx);
+ HEVC_SWSR_UN("vui_time_scale",
+ (unsigned int *)&vui_params->vui_time_scale, 32, sr_ctx);
+ HEVC_SWSR_U1("vui_poc_proportional_to_timing_flag",
+ &vui_params->vui_poc_proportional_to_timing_flag,
+ sr_ctx);
+ if (vui_params->vui_poc_proportional_to_timing_flag)
+ HEVC_SWSR_UE("vui_num_ticks_poc_diff_one_minus1",
+ (unsigned int *)&vui_params->vui_num_ticks_poc_diff_one_minus1,
+ sr_ctx);
+
+ HEVC_SWSR_U1("vui_hrd_parameters_present_flag",
+ &vui_params->vui_hrd_parameters_present_flag,
+ sr_ctx);
+ if (vui_params->vui_hrd_parameters_present_flag)
+ bspp_hevc_parsehrdparams(sr_ctx, &vui_params->vui_hrd_params,
+ 1, sps_max_sub_layers_minus1);
+ }
+ HEVC_SWSR_U1("bitstream_restriction_flag",
+ &vui_params->bitstream_restriction_flag, sr_ctx);
+
+ if (vui_params->bitstream_restriction_flag) {
+ HEVC_SWSR_U1("tiles_fixed_structure_flag",
+ &vui_params->tiles_fixed_structure_flag, sr_ctx);
+ HEVC_SWSR_U1("motion_vectors_over_pic_boundaries_flag",
+ &vui_params->motion_vectors_over_pic_boundaries_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("restricted_ref_pic_lists_flag",
+ &vui_params->restricted_ref_pic_lists_flag, sr_ctx);
+
+ HEVC_SWSR_UE("min_spatial_segmentation_idc",
+ (unsigned int *)&vui_params->min_spatial_segmentation_idc, sr_ctx);
+ HEVC_RANGEUCHECK("min_spatial_segmentation_idc",
+ vui_params->min_spatial_segmentation_idc,
+ 0, 4095, &parse_err);
+
+ HEVC_SWSR_UE("max_bytes_per_pic_denom",
+ (unsigned int *)&vui_params->max_bytes_per_pic_denom, sr_ctx);
+ HEVC_RANGEUCHECK("max_bytes_per_pic_denom", vui_params->max_bytes_per_pic_denom,
+ 0, 16, &parse_err);
+
+ HEVC_SWSR_UE("max_bits_per_min_cu_denom",
+ (unsigned int *)&vui_params->max_bits_per_min_cu_denom, sr_ctx);
+ HEVC_RANGEUCHECK("max_bits_per_min_cu_denom", vui_params->max_bits_per_min_cu_denom,
+ 0, 16, &parse_err);
+
+ HEVC_SWSR_UE("log2_max_mv_length_horizontal",
+ (unsigned int *)&vui_params->log2_max_mv_length_horizontal, sr_ctx);
+ HEVC_RANGEUCHECK("log2_max_mv_length_horizontal",
+ vui_params->log2_max_mv_length_horizontal,
+ 0, 16, &parse_err);
+
+ HEVC_SWSR_UE("log2_max_mv_length_vertical",
+ (unsigned int *)&vui_params->log2_max_mv_length_vertical, sr_ctx);
+ HEVC_RANGEUCHECK("log2_max_mv_length_vertical",
+ vui_params->log2_max_mv_length_vertical,
+ 0, 15, &parse_err);
+ }
+
+ return parse_err;
+}
+
+static enum bspp_error_type bspp_hevc_parse_spsrange_extensions
+ (void *sr_ctx,
+ struct bspp_hevc_sps_range_exts *range_exts)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(range_exts);
+
+ memset(range_exts, 0, sizeof(struct bspp_hevc_sps_range_exts));
+
+ HEVC_SWSR_U1("transform_skip_rotation_enabled_flag",
+ &range_exts->transform_skip_rotation_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("transform_skip_context_enabled_flag",
+ &range_exts->transform_skip_context_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("implicit_rdpcm_enabled_flag",
+ &range_exts->implicit_rdpcm_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("explicit_rdpcm_enabled_flag",
+ &range_exts->explicit_rdpcm_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("extended_precision_processing_flag",
+ &range_exts->extended_precision_processing_flag, sr_ctx);
+ HEVC_UCHECK("extended_precision_processing_flag",
+ range_exts->extended_precision_processing_flag,
+ 0, &parse_err);
+ HEVC_SWSR_U1("intra_smoothing_disabled_flag",
+ &range_exts->intra_smoothing_disabled_flag, sr_ctx);
+ HEVC_SWSR_U1("high_precision_offsets_enabled_flag",
+ &range_exts->high_precision_offsets_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("persistent_rice_adaptation_enabled_flag",
+ &range_exts->persistent_rice_adaptation_enabled_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("cabac_bypass_alignment_enabled_flag",
+ &range_exts->cabac_bypass_alignment_enabled_flag, sr_ctx);
+
+ return parse_err;
+}
+
+static unsigned char
+bspp_hevc_checksps_range_extensions(struct bspp_hevc_sps_range_exts *range_exts)
+{
+ VDEC_ASSERT(range_exts);
+
+ if (range_exts->transform_skip_rotation_enabled_flag ||
+ range_exts->transform_skip_context_enabled_flag ||
+ range_exts->implicit_rdpcm_enabled_flag ||
+ range_exts->explicit_rdpcm_enabled_flag ||
+ range_exts->extended_precision_processing_flag ||
+ range_exts->intra_smoothing_disabled_flag ||
+ range_exts->persistent_rice_adaptation_enabled_flag ||
+ range_exts->cabac_bypass_alignment_enabled_flag)
+ return 1;
+ /*
+ * Note: high_precision_offsets_enabled_flag is supported even
+ * if the hw range extensions capability (bHevcRangeExt) is not set
+ */
+ return 0;
+}
+
+static enum bspp_error_type bspp_hevc_parsesps(void *sr_ctx,
+ void *str_res,
+ struct bspp_hevc_sps *sps)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ unsigned char i;
+ unsigned int min_cblog2_size_y;
+
+ if (!sr_ctx || !sps) {
+ VDEC_ASSERT(0);
+ return BSPP_ERROR_INVALID_VALUE;
+ }
+
+ memset(sps, 0, sizeof(struct bspp_hevc_sps));
+
+ HEVC_SWSR_UN("sps_video_parameter_set_id",
+ (unsigned int *)&sps->sps_video_parameter_set_id, 4, sr_ctx);
+ HEVC_SWSR_UN("sps_max_sub_layers_minus1",
+ (unsigned int *)&sps->sps_max_sub_layers_minus1, 3, sr_ctx);
+ HEVC_RANGEUCHECK("sps_max_sub_layers_minus1", sps->sps_max_sub_layers_minus1, 0,
+ HEVC_MAX_NUM_SUBLAYERS - 1, &parse_err);
+ HEVC_SWSR_U1("sps_temporal_id_nesting_flag",
+ &sps->sps_temporal_id_nesting_flag, sr_ctx);
+
+ if (sps->sps_max_sub_layers_minus1 == 0)
+ HEVC_UCHECK("sps_temporal_id_nesting_flag",
+ sps->sps_temporal_id_nesting_flag, 1, &parse_err);
+
+ parse_err |= bspp_hevc_parse_profiletierlevel
+ (sr_ctx, &sps->profile_tier_level,
+ sps->sps_max_sub_layers_minus1);
+
+ HEVC_SWSR_UE("sps_seq_parameter_set_id",
+ (unsigned int *)&sps->sps_seq_parameter_set_id, sr_ctx);
+ HEVC_RANGEUCHECK("sps_seq_parameter_set_id", sps->sps_seq_parameter_set_id, 0,
+ HEVC_MAX_SPS_COUNT - 1, &parse_err);
+
+ HEVC_SWSR_UE("chroma_format_idc", (unsigned int *)&sps->chroma_format_idc, sr_ctx);
+ HEVC_RANGEUCHECK("chroma_format_idc", sps->chroma_format_idc, 0, 3, &parse_err);
+
+ if (sps->chroma_format_idc == 3)
+ HEVC_SWSR_U1("separate_colour_plane_flag",
+ &sps->separate_colour_plane_flag, sr_ctx);
+
+ HEVC_SWSR_UE("pic_width_in_luma_samples",
+ (unsigned int *)&sps->pic_width_in_luma_samples, sr_ctx);
+ HEVC_SWSR_UE("pic_height_in_luma_samples",
+ (unsigned int *)&sps->pic_height_in_luma_samples, sr_ctx);
+
+ HEVC_SWSR_U1("conformance_window_flag", &sps->conformance_window_flag, sr_ctx);
+
+ if (sps->pic_width_in_luma_samples == 0 ||
+ sps->pic_height_in_luma_samples == 0) {
+ pr_warn("Invalid video dimensions (%u, %u)",
+ sps->pic_width_in_luma_samples,
+ sps->pic_height_in_luma_samples);
+ parse_err |= BSPP_ERROR_UNRECOVERABLE;
+ }
+
+ if (sps->conformance_window_flag) {
+ HEVC_SWSR_UE("conf_win_left_offset",
+ (unsigned int *)&sps->conf_win_left_offset, sr_ctx);
+ HEVC_SWSR_UE("conf_win_right_offset",
+ (unsigned int *)&sps->conf_win_right_offset, sr_ctx);
+ HEVC_SWSR_UE("conf_win_top_offset",
+ (unsigned int *)&sps->conf_win_top_offset, sr_ctx);
+ HEVC_SWSR_UE("conf_win_bottom_offset",
+ (unsigned int *)&sps->conf_win_bottom_offset, sr_ctx);
+ }
+
+ HEVC_SWSR_UE("bit_depth_luma_minus8",
+ (unsigned int *)&sps->bit_depth_luma_minus8, sr_ctx);
+ HEVC_RANGEUCHECK("bit_depth_luma_minus8",
+ sps->bit_depth_luma_minus8, 0, 6, &parse_err);
+ HEVC_SWSR_UE("bit_depth_chroma_minus8",
+ (unsigned int *)&sps->bit_depth_chroma_minus8, sr_ctx);
+ HEVC_RANGEUCHECK("bit_depth_chroma_minus8", sps->bit_depth_chroma_minus8,
+ 0, 6, &parse_err);
+
+ HEVC_SWSR_UE("log2_max_pic_order_cnt_lsb_minus4",
+ (unsigned int *)&sps->log2_max_pic_order_cnt_lsb_minus4, sr_ctx);
+ HEVC_RANGEUCHECK("log2_max_pic_order_cnt_lsb_minus4",
+ sps->log2_max_pic_order_cnt_lsb_minus4,
+ 0, 12, &parse_err);
+
+ HEVC_SWSR_U1("sps_sub_layer_ordering_info_present_flag",
+ &sps->sps_sub_layer_ordering_info_present_flag, sr_ctx);
+ for (i = (sps->sps_sub_layer_ordering_info_present_flag ?
+ 0 : sps->sps_max_sub_layers_minus1);
+ i <= sps->sps_max_sub_layers_minus1; ++i) {
+ HEVC_SWSR_UE("sps_max_dec_pic_buffering_minus1",
+ (unsigned int *)&sps->sps_max_dec_pic_buffering_minus1[i], sr_ctx);
+ HEVC_SWSR_UE("sps_max_num_reorder_pics",
+ (unsigned int *)&sps->sps_max_num_reorder_pics[i], sr_ctx);
+ HEVC_SWSR_UE("sps_max_latency_increase_plus1",
+ (unsigned int *)&sps->sps_max_latency_increase_plus1[i], sr_ctx);
+ }
+
+ HEVC_SWSR_UE("log2_min_luma_coding_block_size_minus3",
+ (unsigned int *)&sps->log2_min_luma_coding_block_size_minus3, sr_ctx);
+ HEVC_SWSR_UE("log2_diff_max_min_luma_coding_block_size",
+ (unsigned int *)&sps->log2_diff_max_min_luma_coding_block_size, sr_ctx);
+ HEVC_SWSR_UE("log2_min_transform_block_size_minus2",
+ (unsigned int *)&sps->log2_min_transform_block_size_minus2, sr_ctx);
+ HEVC_SWSR_UE("log2_diff_max_min_transform_block_size",
+ (unsigned int *)&sps->log2_diff_max_min_transform_block_size, sr_ctx);
+ HEVC_SWSR_UE("max_transform_hierarchy_depth_inter",
+ (unsigned int *)&sps->max_transform_hierarchy_depth_inter, sr_ctx);
+ HEVC_SWSR_UE("max_transform_hierarchy_depth_intra",
+ (unsigned int *)&sps->max_transform_hierarchy_depth_intra, sr_ctx);
+
+ HEVC_SWSR_U1("scaling_list_enabled_flag", &sps->scaling_list_enabled_flag, sr_ctx);
+
+ if (sps->scaling_list_enabled_flag) {
+ HEVC_SWSR_U1("sps_scaling_list_data_present_flag",
+ &sps->sps_scaling_list_data_present_flag, sr_ctx);
+ if (sps->sps_scaling_list_data_present_flag)
+ parse_err |= bspp_hevc_parse_scalinglistdata(sr_ctx,
+ &sps->scalinglist_data);
+ else
+ bspp_hevc_usedefault_scalinglists(&sps->scalinglist_data);
+ }
+
+ HEVC_SWSR_U1("amp_enabled_flag", &sps->amp_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("sample_adaptive_offset_enabled_flag",
+ &sps->sample_adaptive_offset_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("pcm_enabled_flag", &sps->pcm_enabled_flag, sr_ctx);
+
+ if (sps->pcm_enabled_flag) {
+ HEVC_SWSR_UN("pcm_sample_bit_depth_luma_minus1",
+ (unsigned int *)&sps->pcm_sample_bit_depth_luma_minus1,
+ 4, sr_ctx);
+ HEVC_SWSR_UN("pcm_sample_bit_depth_chroma_minus1",
+ (unsigned int *)&sps->pcm_sample_bit_depth_chroma_minus1,
+ 4, sr_ctx);
+ HEVC_SWSR_UE("log2_min_pcm_luma_coding_block_size_minus3",
+ (unsigned int *)&sps->log2_min_pcm_luma_coding_block_size_minus3,
+ sr_ctx);
+ HEVC_SWSR_UE("log2_diff_max_min_pcm_luma_coding_block_size",
+ (unsigned int *)&sps->log2_diff_max_min_pcm_luma_coding_block_size,
+ sr_ctx);
+ HEVC_SWSR_U1("pcm_loop_filter_disabled_flag",
+ &sps->pcm_loop_filter_disabled_flag, sr_ctx);
+ } else {
+ sps->pcm_sample_bit_depth_luma_minus1 = 7;
+ sps->pcm_sample_bit_depth_chroma_minus1 = 7;
+ sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ sps->log2_diff_max_min_pcm_luma_coding_block_size = 2;
+ }
+
+ HEVC_SWSR_UE("num_short_term_ref_pic_sets",
+ (unsigned int *)&sps->num_short_term_ref_pic_sets, sr_ctx);
+ HEVC_RANGEUCHECK("num_short_term_ref_pic_sets", sps->num_short_term_ref_pic_sets, 0,
+ HEVC_MAX_NUM_ST_REF_PIC_SETS - 1, &parse_err);
+
+ for (i = 0; i < sps->num_short_term_ref_pic_sets; ++i) {
+ parse_err |= bspp_hevc_parse_shortterm_refpicset(sr_ctx,
+ sps->rps_list,
+ i,
+ 0);
+ }
+
+ HEVC_SWSR_U1("long_term_ref_pics_present_flag",
+ &sps->long_term_ref_pics_present_flag, sr_ctx);
+ if (sps->long_term_ref_pics_present_flag) {
+ HEVC_SWSR_UE("num_long_term_ref_pics_sps",
+ (unsigned int *)&sps->num_long_term_ref_pics_sps, sr_ctx);
+ HEVC_RANGEUCHECK("num_long_term_ref_pics_sps",
+ sps->num_long_term_ref_pics_sps, 0,
+ HEVC_MAX_NUM_LT_REF_PICS, &parse_err);
+ for (i = 0; i < sps->num_long_term_ref_pics_sps; ++i) {
+ HEVC_SWSR_UN("lt_ref_pic_poc_lsb_sps",
+ (unsigned int *)&sps->lt_ref_pic_poc_lsb_sps[i],
+ sps->log2_max_pic_order_cnt_lsb_minus4 + 4,
+ sr_ctx);
+ HEVC_SWSR_U1("used_by_curr_pic_lt_sps_flag",
+ &sps->used_by_curr_pic_lt_sps_flag[i],
+ sr_ctx);
+ }
+ }
+
+ HEVC_SWSR_U1("sps_temporal_mvp_enabled_flag", &sps->sps_temporal_mvp_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("strong_intra_smoothing_enabled_flag",
+ &sps->strong_intra_smoothing_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("vui_parameters_present_flag", &sps->vui_parameters_present_flag, sr_ctx);
+
+ if (sps->vui_parameters_present_flag)
+ bspp_hevc_parsevui_parameters(sr_ctx, &sps->vui_params,
+ sps->sps_max_sub_layers_minus1);
+
+ HEVC_SWSR_U1("sps_extension_present_flag", &sps->sps_extension_present_flag, sr_ctx);
+ if (sps->sps_extension_present_flag &&
+ bspp_hevc_range_extensions_is_enabled(&sps->profile_tier_level)) {
+ HEVC_SWSR_U1("sps_range_extensions_flag", &sps->sps_range_extensions_flag, sr_ctx);
+
+ HEVC_SWSR_UN("sps_extension_7bits", (unsigned int *)&sps->sps_extension_7bits, 7,
+ sr_ctx);
+ /*
+ * Ignore extension data, but report a parse error
+ * if any non-zero data is found.
+ */
+ HEVC_UCHECK("sps_extension_7bits", sps->sps_extension_7bits, 0, &parse_err);
+ /*
+ * TODO ?: the newest HEVC spec (10/2014) splits "sps_extension_7bits"
+ * into sps_multilayer_extension_flag (1) and sps_extension_6bits (6)
+ */
+ if (sps->sps_range_extensions_flag)
+ parse_err |= bspp_hevc_parse_spsrange_extensions
+ (sr_ctx, &sps->range_exts);
+ }
+ /*
+ * calculate "derived" variables needed further in the parsing process
+ * (of other headers) and save them for later use
+ */
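+ /*
+ * SubWidthC/SubHeightC per the HEVC chroma format table:
+ * monochrome (0) and 4:4:4 (3) use (1, 1),
+ * 4:2:2 (2) uses (2, 1), 4:2:0 (1) uses (2, 2).
+ */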
+ sps->sub_width_c = 1;
+ sps->sub_height_c = 1;
+ if (sps->chroma_format_idc == 2) {
+ sps->sub_width_c = 2;
+ } else if (sps->chroma_format_idc == 1) {
+ sps->sub_width_c = 2;
+ sps->sub_height_c = 2;
+ }
+
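+ /* CtbLog2SizeY = MinCbLog2SizeY + log2_diff_max_min_luma_coding_block_size */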
+ min_cblog2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ sps->ctb_log2size_y =
+ min_cblog2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
+ sps->ctb_size_y = 1 << sps->ctb_log2size_y;
+
+ if (sps->ctb_size_y > 0) {
+ /* use integer division with rounding up */
+ sps->pic_width_in_ctbs_y =
+ (sps->pic_width_in_luma_samples + sps->ctb_size_y - 1)
+ / sps->ctb_size_y;
+ sps->pic_height_in_ctbs_y =
+ (sps->pic_height_in_luma_samples + sps->ctb_size_y - 1)
+ / sps->ctb_size_y;
+ } else {
+ parse_err |= BSPP_ERROR_INVALID_VALUE;
+ }
+
+ sps->pic_size_in_ctbs_y =
+ sps->pic_width_in_ctbs_y * sps->pic_height_in_ctbs_y;
+
+ sps->max_pic_order_cnt_lsb =
+ 1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+
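+ /*
+ * SpsMaxLatencyPictures[i] = sps_max_num_reorder_pics[i] +
+ * sps_max_latency_increase_plus1[i] - 1; the value is only
+ * meaningful when sps_max_latency_increase_plus1[i] is non-zero.
+ */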
+ for (i = 0; i <= sps->sps_max_sub_layers_minus1; ++i) {
+ sps->sps_max_latency_pictures[i] =
+ sps->sps_max_num_reorder_pics[i] +
+ sps->sps_max_latency_increase_plus1[i] - 1;
+ }
+
+ BSPP_HEVC_SYNTAX("ctb_size_y: %u", sps->ctb_size_y);
+ BSPP_HEVC_SYNTAX("pic_width_in_ctbs_y: %u", sps->pic_width_in_ctbs_y);
+ BSPP_HEVC_SYNTAX("pic_height_in_ctbs_y: %u", sps->pic_height_in_ctbs_y);
+ BSPP_HEVC_SYNTAX("pic_size_in_ctbs_y: %u", sps->pic_size_in_ctbs_y);
+
+ return parse_err;
+}
+
+static int bspp_hevc_release_sequhdrinfo(void *str_alloc, void *secure_spsinfo)
+{
+ struct bspp_hevc_sps *hevc_sps = (struct bspp_hevc_sps *)secure_spsinfo;
+
+ if (!hevc_sps)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Release the raw VUI data. */
+ bspp_streamrelese_rawbstrdataplain(str_alloc, (void *)hevc_sps->vui_raw_data);
+ return 0;
+}
+
+static int bspp_hevc_releasedata(void *str_alloc, enum bspp_unit_type data_type,
+ void *data_handle)
+{
+ int result = 0;
+
+ if (!data_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ switch (data_type) {
+ case BSPP_UNIT_SEQUENCE:
+ result = bspp_hevc_release_sequhdrinfo(str_alloc, data_handle);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static int bspp_hevc_reset_ppsinfo(void *secure_ppsinfo)
+{
+ struct bspp_hevc_pps *hevc_pps = NULL;
+
+ if (!secure_ppsinfo)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ hevc_pps = (struct bspp_hevc_pps *)secure_ppsinfo;
+
+ memset(hevc_pps, 0, sizeof(*hevc_pps));
+
+ return 0;
+}
+
+static int bspp_hevc_resetdata(enum bspp_unit_type data_type, void *data_handle)
+{
+ int result = 0;
+
+ switch (data_type) {
+ case BSPP_UNIT_PPS:
+ result = bspp_hevc_reset_ppsinfo(data_handle);
+ break;
+ default:
+ break;
+ }
+ return result;
+}
+
+static enum bspp_error_type bspp_hevc_parsepps_range_extensions
+ (void *sr_ctx,
+ struct bspp_hevc_pps_range_exts *range_exts,
+ unsigned char transform_skip_enabled_flag,
+ unsigned char log2_diff_max_min_luma_coding_block_size)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(range_exts);
+
+ memset(range_exts, 0, sizeof(struct bspp_hevc_pps_range_exts));
+
+ if (transform_skip_enabled_flag)
+ HEVC_SWSR_UE("log2_max_transform_skip_block_size_minus2",
+ (unsigned int *)&range_exts->log2_max_transform_skip_block_size_minus2,
+ sr_ctx);
+
+ HEVC_SWSR_U1("cross_component_prediction_enabled_flag",
+ &range_exts->cross_component_prediction_enabled_flag,
+ sr_ctx);
+ HEVC_UCHECK("cross_component_prediction_enabled_flag",
+ range_exts->cross_component_prediction_enabled_flag, 0,
+ &parse_err);
+
+ HEVC_SWSR_U1("chroma_qp_offset_list_enabled_flag",
+ &range_exts->chroma_qp_offset_list_enabled_flag, sr_ctx);
+
+ if (range_exts->chroma_qp_offset_list_enabled_flag) {
+ unsigned char i;
+
+ HEVC_SWSR_UE("diff_cu_chroma_qp_offset_depth",
+ (unsigned int *)&range_exts->diff_cu_chroma_qp_offset_depth,
+ sr_ctx);
+ HEVC_RANGEUCHECK("diff_cu_chroma_qp_offset_depth",
+ range_exts->diff_cu_chroma_qp_offset_depth, 0,
+ log2_diff_max_min_luma_coding_block_size,
+ &parse_err);
+
+ HEVC_SWSR_UE("chroma_qp_offset_list_len_minus1",
+ (unsigned int *)&range_exts->chroma_qp_offset_list_len_minus1,
+ sr_ctx);
+ HEVC_RANGEUCHECK("chroma_qp_offset_list_len_minus1",
+ range_exts->chroma_qp_offset_list_len_minus1,
+ 0, HEVC_MAX_CHROMA_QP - 1, &parse_err);
+ for (i = 0; i <= range_exts->chroma_qp_offset_list_len_minus1; i++) {
+ HEVC_SWSR_SE("cb_qp_offset_list",
+ (int *)&range_exts->cb_qp_offset_list[i], sr_ctx);
+ HEVC_RANGESCHECK("cb_qp_offset_list", range_exts->cb_qp_offset_list[i],
+ -12, 12, &parse_err);
+ HEVC_SWSR_SE("cr_qp_offset_list",
+ (int *)&range_exts->cr_qp_offset_list[i], sr_ctx);
+ HEVC_RANGESCHECK("cr_qp_offset_list", range_exts->cr_qp_offset_list[i],
+ -12, 12, &parse_err);
+ }
+ }
+ HEVC_SWSR_UE("log2_sao_offset_scale_luma",
+ (unsigned int *)&range_exts->log2_sao_offset_scale_luma, sr_ctx);
+ HEVC_UCHECK("log2_sao_offset_scale_luma",
+ range_exts->log2_sao_offset_scale_luma, 0, &parse_err);
+ HEVC_SWSR_UE("log2_sao_offset_scale_chroma",
+ (unsigned int *)&range_exts->log2_sao_offset_scale_chroma, sr_ctx);
+ HEVC_UCHECK("log2_sao_offset_scale_chroma",
+ range_exts->log2_sao_offset_scale_chroma, 0, &parse_err);
+
+ return parse_err;
+}
+
+static unsigned char bspp_hevc_checkppsrangeextensions
+ (struct bspp_hevc_pps_range_exts *range_exts)
+{
+ VDEC_ASSERT(range_exts);
+
+ if (range_exts->log2_max_transform_skip_block_size_minus2 ||
+ range_exts->cross_component_prediction_enabled_flag)
+ return 1;
+ /*
+ * Note: chroma_qp_offset_list_enabled_flag is supported even
+ * if the hw range extensions capability (bHevcRangeExt) is not set
+ */
+ return 0;
+}
+
+static enum bspp_error_type bspp_hevc_parsepps
+ (void *sr_ctx, void *str_res,
+ struct bspp_hevc_pps *pps)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ struct bspp_sequence_hdr_info *spsinfo = NULL;
+ struct bspp_hevc_sps *sps = NULL;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(pps);
+ memset(pps, 0, sizeof(struct bspp_hevc_pps));
+
+ HEVC_SWSR_UE("pps_pic_parameter_set_id",
+ (unsigned int *)&pps->pps_pic_parameter_set_id, sr_ctx);
+ HEVC_RANGEUCHECK("pps_pic_parameter_set_id", pps->pps_pic_parameter_set_id, 0,
+ HEVC_MAX_PPS_COUNT - 1, &parse_err);
+ HEVC_SWSR_UE("pps_seq_parameter_set_id",
+ (unsigned int *)&pps->pps_seq_parameter_set_id, sr_ctx);
+ HEVC_RANGEUCHECK("pps_seq_parameter_set_id", pps->pps_seq_parameter_set_id, 0,
+ HEVC_MAX_SPS_COUNT - 1, &parse_err);
+
+ spsinfo = bspp_get_sequ_hdr(str_res, pps->pps_seq_parameter_set_id);
+ if (!spsinfo) {
+ parse_err |= BSPP_ERROR_NO_SEQUENCE_HDR;
+ } else {
+ sps = (struct bspp_hevc_sps *)spsinfo->secure_sequence_info;
+ VDEC_ASSERT(sps->sps_seq_parameter_set_id ==
+ pps->pps_seq_parameter_set_id);
+ }
+
+ HEVC_SWSR_U1("dependent_slice_segments_enabled_flag",
+ &pps->dependent_slice_segments_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("output_flag_present_flag",
+ &pps->output_flag_present_flag, sr_ctx);
+ HEVC_SWSR_UN("num_extra_slice_header_bits",
+ (unsigned int *)&pps->num_extra_slice_header_bits, 3, sr_ctx);
+ HEVC_SWSR_U1("sign_data_hiding_enabled_flag", &pps->sign_data_hiding_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("cabac_init_present_flag", &pps->cabac_init_present_flag, sr_ctx);
+ HEVC_SWSR_UE("num_ref_idx_l0_default_active_minus1",
+ (unsigned int *)&pps->num_ref_idx_l0_default_active_minus1, sr_ctx);
+ HEVC_RANGEUCHECK("num_ref_idx_l0_default_active_minus1",
+ pps->num_ref_idx_l0_default_active_minus1, 0, 14, &parse_err);
+ HEVC_SWSR_UE("num_ref_idx_l1_default_active_minus1",
+ (unsigned int *)&pps->num_ref_idx_l1_default_active_minus1, sr_ctx);
+ HEVC_RANGEUCHECK("num_ref_idx_l1_default_active_minus1",
+ pps->num_ref_idx_l1_default_active_minus1, 0, 14, &parse_err);
+ HEVC_SWSR_SE("init_qp_minus26", (int *)&pps->init_qp_minus26, sr_ctx);
+
+ if (sps)
+ HEVC_RANGESCHECK("init_qp_minus26", pps->init_qp_minus26,
+ -(26 + (6 * sps->bit_depth_luma_minus8)), 25, &parse_err);
+
+ HEVC_SWSR_U1("constrained_intra_pred_flag", &pps->constrained_intra_pred_flag, sr_ctx);
+ HEVC_SWSR_U1("transform_skip_enabled_flag", &pps->transform_skip_enabled_flag, sr_ctx);
+
+ HEVC_SWSR_U1("cu_qp_delta_enabled_flag", &pps->cu_qp_delta_enabled_flag, sr_ctx);
+
+ if (pps->cu_qp_delta_enabled_flag)
+ HEVC_SWSR_UE("diff_cu_qp_delta_depth",
+ (unsigned int *)&pps->diff_cu_qp_delta_depth, sr_ctx);
+
+ HEVC_SWSR_SE("pps_cb_qp_offset", (int *)&pps->pps_cb_qp_offset, sr_ctx);
+ HEVC_RANGESCHECK("pps_cb_qp_offset", pps->pps_cb_qp_offset, -12, 12, &parse_err);
+ HEVC_SWSR_SE("pps_cr_qp_offset", (int *)&pps->pps_cr_qp_offset, sr_ctx);
+ HEVC_RANGESCHECK("pps_cr_qp_offset", pps->pps_cr_qp_offset, -12, 12, &parse_err);
+ HEVC_SWSR_U1("pps_slice_chroma_qp_offsets_present_flag",
+ &pps->pps_slice_chroma_qp_offsets_present_flag, sr_ctx);
+ HEVC_SWSR_U1("weighted_pred_flag", &pps->weighted_pred_flag, sr_ctx);
+ HEVC_SWSR_U1("weighted_bipred_flag", &pps->weighted_bipred_flag, sr_ctx);
+ HEVC_SWSR_U1("transquant_bypass_enabled_flag",
+ &pps->transquant_bypass_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("tiles_enabled_flag", &pps->tiles_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("entropy_coding_sync_enabled_flag",
+ &pps->entropy_coding_sync_enabled_flag, sr_ctx);
+
+ if (pps->tiles_enabled_flag) {
+ HEVC_SWSR_UE("num_tile_columns_minus1",
+ (unsigned int *)&pps->num_tile_columns_minus1, sr_ctx);
+ HEVC_RANGEUCHECK("num_tile_columns_minus1", pps->num_tile_columns_minus1, 0,
+ HEVC_MAX_TILE_COLS - 1, &parse_err);
+
+ if (pps->num_tile_columns_minus1 >= HEVC_MAX_TILE_COLS)
+ pps->num_tile_columns_minus1 = HEVC_MAX_TILE_COLS - 1;
+
+ HEVC_SWSR_UE("num_tile_rows_minus1", (unsigned int *)&pps->num_tile_rows_minus1,
+ sr_ctx);
+ HEVC_RANGEUCHECK("num_tile_rows_minus1", pps->num_tile_rows_minus1, 0,
+ HEVC_MAX_TILE_ROWS - 1, &parse_err);
+
+ if (pps->num_tile_rows_minus1 >= HEVC_MAX_TILE_ROWS)
+ pps->num_tile_rows_minus1 = HEVC_MAX_TILE_ROWS - 1;
+
+ HEVC_SWSR_U1("uniform_spacing_flag", &pps->uniform_spacing_flag, sr_ctx);
+
+ if (!pps->uniform_spacing_flag) {
+ unsigned char i = 0;
+
+ for (i = 0; i < pps->num_tile_columns_minus1; ++i)
+ HEVC_SWSR_UE("column_width_minus1",
+ (unsigned int *)&pps->column_width_minus1[i],
+ sr_ctx);
+
+ for (i = 0; i < pps->num_tile_rows_minus1; ++i)
+ HEVC_SWSR_UE("row_height_minus1",
+ (unsigned int *)&pps->row_height_minus1[i],
+ sr_ctx);
+ }
+ HEVC_SWSR_U1("loop_filter_across_tiles_enabled_flag",
+ &pps->loop_filter_across_tiles_enabled_flag, sr_ctx);
+ } else {
+ pps->loop_filter_across_tiles_enabled_flag = 1;
+ }
+
+ HEVC_SWSR_U1("pps_loop_filter_across_slices_enabled_flag",
+ &pps->pps_loop_filter_across_slices_enabled_flag, sr_ctx);
+
+ HEVC_SWSR_U1("deblocking_filter_control_present_flag",
+ &pps->deblocking_filter_control_present_flag, sr_ctx);
+
+ if (pps->deblocking_filter_control_present_flag) {
+ HEVC_SWSR_U1("deblocking_filter_override_enabled_flag",
+ &pps->deblocking_filter_override_enabled_flag, sr_ctx);
+ HEVC_SWSR_U1("pps_deblocking_filter_disabled_flag",
+ &pps->pps_deblocking_filter_disabled_flag, sr_ctx);
+ if (!pps->pps_deblocking_filter_disabled_flag) {
+ HEVC_SWSR_SE("pps_beta_offset_div2", (int *)&pps->pps_beta_offset_div2,
+ sr_ctx);
+ HEVC_RANGESCHECK("pps_beta_offset_div2", pps->pps_beta_offset_div2, -6, 6,
+ &parse_err);
+ HEVC_SWSR_SE("pps_tc_offset_div2", (int *)&pps->pps_tc_offset_div2, sr_ctx);
+ HEVC_RANGESCHECK("pps_tc_offset_div2", pps->pps_tc_offset_div2, -6, 6,
+ &parse_err);
+ }
+ }
+
+ HEVC_SWSR_U1("pps_scaling_list_data_present_flag",
+ &pps->pps_scaling_list_data_present_flag, sr_ctx);
+ if (pps->pps_scaling_list_data_present_flag)
+ parse_err |= bspp_hevc_parse_scalinglistdata(sr_ctx, &pps->scaling_list);
+
+ HEVC_SWSR_U1("lists_modification_present_flag",
+ &pps->lists_modification_present_flag, sr_ctx);
+ HEVC_SWSR_UE("log2_parallel_merge_level_minus2",
+ (unsigned int *)&pps->log2_parallel_merge_level_minus2, sr_ctx);
+ HEVC_SWSR_U1("slice_segment_header_extension_present_flag",
+ &pps->slice_segment_header_extension_present_flag, sr_ctx);
+
+ HEVC_SWSR_U1("pps_extension_present_flag", &pps->pps_extension_present_flag, sr_ctx);
+ if (pps->pps_extension_present_flag && sps &&
+ bspp_hevc_range_extensions_is_enabled(&sps->profile_tier_level)) {
+ HEVC_SWSR_U1("pps_range_extensions_flag",
+ &pps->pps_range_extensions_flag, sr_ctx);
+ HEVC_SWSR_UN("pps_extension_7bits",
+ (unsigned int *)&pps->pps_extension_7bits, 7, sr_ctx);
+ /*
+ * Ignore extension data, but report a parse error
+ * if any non-zero data is found.
+ */
+ HEVC_UCHECK("pps_extension_7bits", pps->pps_extension_7bits, 0, &parse_err);
+
+ /*
+ * TODO ?: the newest HEVC spec (10/2014) splits "pps_extension_7bits"
+ * into pps_multilayer_extension_flag (1) and pps_extension_6bits (6)
+ */
+ if (pps->pps_range_extensions_flag && sps) {
+ parse_err |= bspp_hevc_parsepps_range_extensions
+ (sr_ctx,
+ &pps->range_exts,
+ pps->transform_skip_enabled_flag,
+ sps->log2_diff_max_min_luma_coding_block_size);
+ }
+ }
+
+ /* calculate derived elements */
+ if (pps->tiles_enabled_flag && sps)
+ bspp_hevc_dotilecalculations(sps, pps);
+
+ return parse_err;
+}
+
+static void bspp_hevc_dotilecalculations(struct bspp_hevc_sps *sps,
+ struct bspp_hevc_pps *pps)
+{
+ unsigned short colwidth[HEVC_MAX_TILE_COLS];
+ unsigned short rowheight[HEVC_MAX_TILE_ROWS];
+ unsigned char i;
+
+ if (!pps->tiles_enabled_flag) {
+ pps->max_tile_height_in_ctbs_y = sps->pic_height_in_ctbs_y;
+ return;
+ }
+
+ if (pps->uniform_spacing_flag) {
+ for (i = 0; i <= pps->num_tile_columns_minus1; ++i) {
+ colwidth[i] = ((i + 1) * sps->pic_width_in_ctbs_y) /
+ (pps->num_tile_columns_minus1 + 1) -
+ (i * sps->pic_width_in_ctbs_y) /
+ (pps->num_tile_columns_minus1 + 1);
+ }
+
+ for (i = 0; i <= pps->num_tile_rows_minus1; ++i) {
+ rowheight[i] = ((i + 1) * sps->pic_height_in_ctbs_y) /
+ (pps->num_tile_rows_minus1 + 1) -
+ (i * sps->pic_height_in_ctbs_y) /
+ (pps->num_tile_rows_minus1 + 1);
+ }
+
+ pps->max_tile_height_in_ctbs_y = rowheight[0];
+ } else {
+ pps->max_tile_height_in_ctbs_y = 0;
+
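+ /*
+ * Explicit spacing: the first num_tile_columns_minus1 widths come
+ * from the bitstream; the last column/row takes whatever remains
+ * of the picture size in CTBs.
+ */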
+ colwidth[pps->num_tile_columns_minus1] = sps->pic_width_in_ctbs_y;
+ for (i = 0; i < pps->num_tile_columns_minus1; ++i) {
+ colwidth[i] = pps->column_width_minus1[i] + 1;
+ colwidth[pps->num_tile_columns_minus1] -= colwidth[i];
+ }
+
+ rowheight[pps->num_tile_rows_minus1] = sps->pic_height_in_ctbs_y;
+ for (i = 0; i < pps->num_tile_rows_minus1; ++i) {
+ rowheight[i] = pps->row_height_minus1[i] + 1;
+ rowheight[pps->num_tile_rows_minus1] -= rowheight[i];
+
+ if (rowheight[i] > pps->max_tile_height_in_ctbs_y)
+ pps->max_tile_height_in_ctbs_y = rowheight[i];
+ }
+
+ if (rowheight[pps->num_tile_rows_minus1] > pps->max_tile_height_in_ctbs_y)
+ pps->max_tile_height_in_ctbs_y =
+ rowheight[pps->num_tile_rows_minus1];
+ }
+
+ for (i = 0; i <= pps->num_tile_columns_minus1; ++i)
+ pps->col_bd[i + 1] = pps->col_bd[i] + colwidth[i];
+
+ for (i = 0; i <= pps->num_tile_rows_minus1; ++i)
+ pps->row_bd[i + 1] = pps->row_bd[i] + rowheight[i];
+}
+
+static enum bspp_error_type bspp_hevc_parse_slicesegmentheader
+ (void *sr_ctx, void *str_res,
+ struct bspp_hevc_slice_segment_header *ssh,
+ unsigned char nalunit_type,
+ struct bspp_vps_info **vpsinfo,
+ struct bspp_sequence_hdr_info **spsinfo,
+ struct bspp_pps_info **ppsinfo)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ struct bspp_hevc_pps *pps = NULL;
+ struct bspp_hevc_sps *sps = NULL;
+ struct bspp_hevc_vps *vps = NULL;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(ssh);
+ VDEC_ASSERT(vpsinfo);
+ VDEC_ASSERT(spsinfo);
+ VDEC_ASSERT(ppsinfo);
+
+ memset(ssh, 0, sizeof(struct bspp_hevc_slice_segment_header));
+
+ HEVC_SWSR_U1("first_slice_segment_in_pic_flag",
+ &ssh->first_slice_segment_in_pic_flag, sr_ctx);
+
+ if (bspp_hevc_picture_is_irap((enum hevc_nalunittype)nalunit_type))
+ HEVC_SWSR_U1("no_output_of_prior_pics_flag",
+ &ssh->no_output_of_prior_pics_flag, sr_ctx);
+
+ HEVC_SWSR_UE("slice_pic_parameter_set_id", (unsigned int *)&ssh->slice_pic_parameter_set_id,
+ sr_ctx);
+ HEVC_RANGEUCHECK("slice_pic_parameter_set_id", ssh->slice_pic_parameter_set_id, 0,
+ HEVC_MAX_PPS_COUNT - 1, &parse_err);
+
+ if (ssh->slice_pic_parameter_set_id >= HEVC_MAX_PPS_COUNT) {
+ pr_warn("PPS Id invalid (%u), setting to 0",
+ ssh->slice_pic_parameter_set_id);
+ ssh->slice_pic_parameter_set_id = 0;
+ parse_err &= ~BSPP_ERROR_INVALID_VALUE;
+ parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+
+ /* set PPS */
+ *ppsinfo = bspp_get_pps_hdr(str_res, ssh->slice_pic_parameter_set_id);
+ if (!(*ppsinfo)) {
+ parse_err |= BSPP_ERROR_NO_PPS;
+ goto error;
+ }
+ pps = (struct bspp_hevc_pps *)(*ppsinfo)->secure_pps_info;
+ if (!pps) {
+ parse_err |= BSPP_ERROR_NO_PPS;
+ goto error;
+ }
+ VDEC_ASSERT(pps->pps_pic_parameter_set_id == ssh->slice_pic_parameter_set_id);
+
+ *spsinfo = bspp_get_sequ_hdr(str_res, pps->pps_seq_parameter_set_id);
+ if (!(*spsinfo)) {
+ parse_err |= BSPP_ERROR_NO_SEQUENCE_HDR;
+ goto error;
+ }
+ sps = (struct bspp_hevc_sps *)(*spsinfo)->secure_sequence_info;
+ VDEC_ASSERT(sps->sps_seq_parameter_set_id == pps->pps_seq_parameter_set_id);
+
+ *vpsinfo = bspp_get_vpshdr(str_res, sps->sps_video_parameter_set_id);
+ if (!(*vpsinfo)) {
+ parse_err |= BSPP_ERROR_NO_VPS;
+ goto error;
+ }
+ vps = (struct bspp_hevc_vps *)(*vpsinfo)->secure_vpsinfo;
+ VDEC_ASSERT(vps->vps_video_parameter_set_id == sps->sps_video_parameter_set_id);
+
+ if (!ssh->first_slice_segment_in_pic_flag) {
+ if (pps->dependent_slice_segments_enabled_flag)
+ HEVC_SWSR_U1("dependent_slice_segment_flag",
+ &ssh->dependent_slice_segment_flag, sr_ctx);
+
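+ /* slice_segment_address is Ceil(Log2(PicSizeInCtbsY)) bits long */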
+ HEVC_SWSR_UN("slice_segment_address",
+ (unsigned int *)&ssh->slice_segment_address,
+ bspp_ceil_log2(sps->pic_size_in_ctbs_y), sr_ctx);
+ }
+
+error:
+ return parse_err;
+}
+
+static enum bspp_error_type bspp_hevc_parse_profiletierlevel
+ (void *sr_ctx,
+ struct bspp_hevc_profile_tierlevel *ptl,
+ unsigned char vps_maxsublayers_minus1)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ unsigned char i, j;
+ unsigned int res = 0;
+
+ VDEC_ASSERT(sr_ctx);
+ VDEC_ASSERT(ptl);
+ VDEC_ASSERT(vps_maxsublayers_minus1 < HEVC_MAX_NUM_SUBLAYERS);
+
+ memset(ptl, 0, sizeof(struct bspp_hevc_profile_tierlevel));
+
+ HEVC_SWSR_UN("general_profile_space", (unsigned int *)&ptl->general_profile_space, 2,
+ sr_ctx);
+ HEVC_SWSR_U1("general_tier_flag", &ptl->general_tier_flag, sr_ctx);
+ HEVC_SWSR_UN("general_profile_idc", (unsigned int *)&ptl->general_profile_idc, 5, sr_ctx);
+
+ for (j = 0; j < HEVC_MAX_NUM_PROFILE_IDC; ++j) {
+ HEVC_SWSR_U1("general_profile_compatibility_flag",
+ &ptl->general_profile_compatibility_flag[j],
+ sr_ctx);
+ }
+
+ HEVC_SWSR_U1("general_progressive_source_flag",
+ &ptl->general_progressive_source_flag, sr_ctx);
+ HEVC_SWSR_U1("general_interlaced_source_flag",
+ &ptl->general_interlaced_source_flag, sr_ctx);
+ HEVC_SWSR_U1("general_non_packed_constraint_flag",
+ &ptl->general_non_packed_constraint_flag, sr_ctx);
+ HEVC_SWSR_U1("general_frame_only_constraint_flag",
+ &ptl->general_frame_only_constraint_flag, sr_ctx);
+
+ if (ptl->general_profile_idc == 4 ||
+ ptl->general_profile_compatibility_flag[4]) {
+ HEVC_SWSR_U1("general_max_12bit_constraint_flag",
+ &ptl->general_max_12bit_constraint_flag, sr_ctx);
+ HEVC_SWSR_U1("general_max_10bit_constraint_flag",
+ &ptl->general_max_10bit_constraint_flag, sr_ctx);
+ HEVC_SWSR_U1("general_max_8bit_constraint_flag",
+ &ptl->general_max_8bit_constraint_flag, sr_ctx);
+ HEVC_SWSR_U1("general_max_422chroma_constraint_flag",
+ &ptl->general_max_422chroma_constraint_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("general_max_420chroma_constraint_flag",
+ &ptl->general_max_420chroma_constraint_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("general_max_monochrome_constraint_flag",
+ &ptl->general_max_monochrome_constraint_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("general_intra_constraint_flag",
+ &ptl->general_intra_constraint_flag, sr_ctx);
+ HEVC_SWSR_U1("general_one_picture_only_constraint_flag",
+ &ptl->general_one_picture_only_constraint_flag,
+ sr_ctx);
+ HEVC_SWSR_U1("general_lower_bit_rate_constraint_flag",
+ &ptl->general_lower_bit_rate_constraint_flag,
+ sr_ctx);
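+ /*
+ * The reserved fields are wider than the 32-bit destination,
+ * so they are consumed in two reads (32 + 3 bits here,
+ * 32 + 12 bits in the else branch).
+ */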
+ HEVC_SWSR_UN("general_reserved_zero_35bits", &res, 32, sr_ctx);
+ HEVC_UCHECK("general_reserved_zero_35bits", res, 0, &parse_err);
+ HEVC_SWSR_UN("general_reserved_zero_35bits", &res, 3, sr_ctx);
+ HEVC_UCHECK("general_reserved_zero_35bits", res, 0, &parse_err);
+ } else {
+ HEVC_SWSR_UN("general_reserved_zero_44bits (1)", &res, 32, sr_ctx);
+ HEVC_UCHECK("general_reserved_zero_44bits (1)", res, 0, &parse_err);
+ HEVC_SWSR_UN("general_reserved_zero_44bits (2)", &res, 12, sr_ctx);
+ HEVC_UCHECK("general_reserved_zero_44bits (2)", res, 0, &parse_err);
+ }
+
+ HEVC_SWSR_UN("general_level_idc", (unsigned int *)&ptl->general_level_idc, 8, sr_ctx);
+ HEVC_RANGEUCHECK("general_level_idc", ptl->general_level_idc,
+ HEVC_LEVEL_IDC_MIN, HEVC_LEVEL_IDC_MAX, &parse_err);
+
+ for (i = 0; i < vps_maxsublayers_minus1; ++i) {
+ HEVC_SWSR_U1("sub_layer_profile_present_flag",
+ &ptl->sub_layer_profile_present_flag[i], sr_ctx);
+ HEVC_SWSR_U1("sub_layer_level_present_flag",
+ &ptl->sub_layer_level_present_flag[i], sr_ctx);
+ }
+
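+ /* flags for absent sub-layers are padded to 8 entries with reserved_zero_2bits */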
+ if (vps_maxsublayers_minus1 > 0) {
+ for (i = vps_maxsublayers_minus1; i < 8; ++i) {
+ HEVC_SWSR_UN("reserved_zero_2bits", &res, 2, sr_ctx);
+ HEVC_UCHECK("reserved_zero_2bits", res, 0, &parse_err);
+ }
+ }
+
+ for (i = 0; i < vps_maxsublayers_minus1; ++i) {
+ if (ptl->sub_layer_profile_present_flag[i]) {
+ HEVC_SWSR_UN("sub_layer_profile_space",
+ (unsigned int *)&ptl->sub_layer_profile_space[i], 2, sr_ctx);
+ HEVC_SWSR_U1("sub_layer_tier_flag", &ptl->sub_layer_tier_flag[i], sr_ctx);
+ HEVC_SWSR_UN("sub_layer_profile_idc",
+ (unsigned int *)&ptl->sub_layer_profile_idc[i], 5, sr_ctx);
+ for (j = 0; j < HEVC_MAX_NUM_PROFILE_IDC; ++j)
+ HEVC_SWSR_U1("sub_layer_profile_compatibility_flag",
+ &ptl->sub_layer_profile_compatibility_flag[i][j],
+ sr_ctx);
+
+ HEVC_SWSR_U1("sub_layer_progressive_source_flag",
+ &ptl->sub_layer_progressive_source_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_interlaced_source_flag",
+ &ptl->sub_layer_interlaced_source_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_non_packed_constraint_flag",
+ &ptl->sub_layer_non_packed_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_frame_only_constraint_flag",
+ &ptl->sub_layer_frame_only_constraint_flag[i],
+ sr_ctx);
+
+ if (ptl->sub_layer_profile_idc[i] == 4 ||
+ ptl->sub_layer_profile_compatibility_flag[i][4]) {
+ HEVC_SWSR_U1("sub_layer_max_12bit_constraint_flag",
+ &ptl->sub_layer_max_12bit_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_max_10bit_constraint_flag",
+ &ptl->sub_layer_max_10bit_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_max_8bit_constraint_flag",
+ &ptl->sub_layer_max_8bit_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_max_422chroma_constraint_flag",
+ &ptl->sub_layer_max_422chroma_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_max_420chroma_constraint_flag",
+ &ptl->sub_layer_max_420chroma_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_max_monochrome_constraint_flag",
+ &ptl->sub_layer_max_monochrome_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_intra_constraint_flag",
+ &ptl->sub_layer_intra_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_one_picture_only_constraint_flag",
+ &ptl->sub_layer_one_picture_only_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_U1("sub_layer_lower_bit_rate_constraint_flag",
+ &ptl->sub_layer_lower_bit_rate_constraint_flag[i],
+ sr_ctx);
+ HEVC_SWSR_UN("sub_layer_reserved_zero_35bits",
+ &res, 32, sr_ctx);
+ HEVC_UCHECK("sub_layer_reserved_zero_35bits",
+ res, 0, &parse_err);
+ HEVC_SWSR_UN("sub_layer_reserved_zero_35bits",
+ &res, 3, sr_ctx);
+ HEVC_UCHECK("sub_layer_reserved_zero_35bits",
+ res, 0, &parse_err);
+ } else {
+ HEVC_SWSR_UN("sub_layer_reserved_zero_44bits (1)",
+ &res, 32, sr_ctx);
+ HEVC_UCHECK("sub_layer_reserved_zero_44bits (1)",
+ res, 0, &parse_err);
+ HEVC_SWSR_UN("sub_layer_reserved_zero_44bits (2)",
+ &res, 12, sr_ctx);
+ HEVC_UCHECK("sub_layer_reserved_zero_44bits (2)",
+ res, 0, &parse_err);
+ }
+ }
+ if (ptl->sub_layer_level_present_flag[i])
+ HEVC_SWSR_UN("sub_layer_level_idc",
+ (unsigned int *)&ptl->sub_layer_level_idc[i], 8, sr_ctx);
+ }
+ return parse_err;
+}
+
+/* Default scaling lists */
+#define HEVC_SCALING_LIST_0_SIZE 16
+#define HEVC_SCALING_LIST_123_SIZE 64
+
+static const unsigned char def_4x4[HEVC_SCALING_LIST_0_SIZE] = {
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+static const unsigned char def_8x8_intra[HEVC_SCALING_LIST_123_SIZE] = {
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 17, 16, 17, 18,
+ 17, 18, 18, 17, 18, 21, 19, 20, 21, 20, 19, 21, 24, 22, 22, 24,
+ 24, 22, 22, 24, 25, 25, 27, 30, 27, 25, 25, 29, 31, 35, 35, 31,
+ 29, 36, 41, 44, 41, 36, 47, 54, 54, 47, 65, 70, 65, 88, 88, 115
+};
+
+static const unsigned char def_8x8_inter[HEVC_SCALING_LIST_123_SIZE] = {
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 18, 20, 20, 20, 20, 20, 20, 20, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 28, 28, 28, 28, 28,
+ 28, 33, 33, 33, 33, 33, 41, 41, 41, 41, 54, 54, 54, 71, 71, 91
+};
+
+/*
+ * Scan order mapping when translating scaling lists from bitstream order
+ * to PVDEC order
+ */
+static const unsigned char HEVC_INV_ZZ_SCAN4[HEVC_SCALING_LIST_MATRIX_SIZE / 4] = {
+ 0, 1, 2, 4, 3, 6, 7, 10, 5, 8, 9, 12, 11, 13, 14, 15
+};
+
+static const unsigned char HEVC_INV_ZZ_SCAN8[HEVC_SCALING_LIST_MATRIX_SIZE] = {
+ 0, 1, 2, 4, 3, 6, 7, 11, 5, 8, 9, 13, 12, 17, 18, 24,
+ 10, 15, 16, 22, 21, 28, 29, 36, 23, 30, 31, 38, 37, 43, 44, 49,
+ 14, 19, 20, 26, 25, 32, 33, 40, 27, 34, 35, 42, 41, 47, 48, 53,
+ 39, 45, 46, 51, 50, 54, 55, 58, 52, 56, 57, 60, 59, 61, 62, 63
+};
+
+static void bspp_hevc_getdefault_scalinglist
+ (unsigned char size_id, unsigned char matrix_id,
+ const unsigned char **default_scalinglist,
+ unsigned int *size)
+{
+ static const unsigned char *defaultlists
+ [HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES] = {
+ { def_4x4, def_4x4, def_4x4, def_4x4, def_4x4, def_4x4 },
+ { def_8x8_intra, def_8x8_intra, def_8x8_intra,
+ def_8x8_inter, def_8x8_inter, def_8x8_inter },
+ { def_8x8_intra, def_8x8_intra, def_8x8_intra,
+ def_8x8_inter, def_8x8_inter, def_8x8_inter },
+ { def_8x8_intra, def_8x8_inter, NULL, NULL, NULL, NULL }
+ };
+
+ static const unsigned int lists_sizes
+ [HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES] = {
+ { sizeof(def_4x4), sizeof(def_4x4), sizeof(def_4x4),
+ sizeof(def_4x4), sizeof(def_4x4), sizeof(def_4x4) },
+ { sizeof(def_8x8_intra), sizeof(def_8x8_intra),
+ sizeof(def_8x8_intra), sizeof(def_8x8_inter),
+ sizeof(def_8x8_inter), sizeof(def_8x8_inter) },
+ { sizeof(def_8x8_intra), sizeof(def_8x8_intra),
+ sizeof(def_8x8_intra), sizeof(def_8x8_inter),
+ sizeof(def_8x8_inter), sizeof(def_8x8_inter) },
+ { sizeof(def_8x8_intra), sizeof(def_8x8_inter), 0, 0, 0, 0 }
+ };
+
+ /* assert that the input to this function is correct */
+ VDEC_ASSERT(size_id < 4);
+ VDEC_ASSERT(size_id < 3 ? (matrix_id < 6) : (matrix_id < 2));
+
+ *default_scalinglist = defaultlists[size_id][matrix_id];
+ *size = lists_sizes[size_id][matrix_id];
+}
+
+static enum bspp_error_type bspp_hevc_parse_scalinglistdata
+ (void *sr_ctx,
+ struct bspp_hevc_scalinglist_data *scaling_listdata)
+{
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+ unsigned char size_id, matrix_id;
+
+ for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+ for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+ ++matrix_id) {
+ /*
+ * Select scaling list on which we will operate in
+ * the iteration
+ */
+ unsigned char *scalinglist = scaling_listdata->lists[size_id][matrix_id];
+
+ unsigned char scaling_list_pred_mode_flag = 0;
+
+ HEVC_SWSR_U1("scaling_list_pred_mode_flag",
+ &scaling_list_pred_mode_flag, sr_ctx);
+ if (!scaling_list_pred_mode_flag) {
+ unsigned char scaling_list_pred_matrix_id_delta = 0;
+ const unsigned char *defaultlist = NULL;
+ unsigned int listsize = 0;
+
+ HEVC_SWSR_UE("scaling_list_pred_matrixid_delta",
+ (unsigned int *)&scaling_list_pred_matrix_id_delta,
+ sr_ctx);
+
+ bspp_hevc_getdefault_scalinglist(size_id,
+ matrix_id,
+ &defaultlist,
+ &listsize);
+
+ if (scaling_list_pred_matrix_id_delta == 0) {
+ /* use default one */
+ memcpy(scalinglist, defaultlist, listsize);
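+ /* the default DC coefficient is 8 + 8 = 16 (scaling_list_dc_coef_minus8 inferred as 8) */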
+ if (size_id > 1)
+ scaling_listdata->dccoeffs[size_id -
+ 2][matrix_id] = 8 + 8;
+ } else {
+ unsigned char ref_matrix_id =
+ matrix_id - scaling_list_pred_matrix_id_delta;
+ unsigned char *refscalinglist =
+ scaling_listdata->lists[size_id][ref_matrix_id];
+ /*
+ * use reference list given by
+ * scaling_list_pred_matrix_id_delta
+ */
+ memcpy(scalinglist, refscalinglist, listsize);
+ if (size_id > 1)
+ scaling_listdata->dccoeffs[size_id - 2][matrix_id] =
+ scaling_listdata->dccoeffs[size_id -
+ 2][ref_matrix_id];
+ }
+ } else {
+ /*
+ * scaling list coefficients
+ * signalled explicitly
+ */
+ static const short coef_startvalue = 8;
+ static const unsigned char matrix_max_coef_num = 64;
+
+ short next_coef = coef_startvalue;
+ unsigned char coef_num =
+ HEVC_MIN(matrix_max_coef_num,
+ (1 << (4 + (size_id << 1))), unsigned char);
+
+ unsigned char i;
+
+ if (size_id > 1) {
+ short scaling_list_dc_coef_minus8 = 0;
+
+ HEVC_SWSR_SE("scaling_list_dc_coef_minus8",
+ (int *)&scaling_list_dc_coef_minus8,
+ sr_ctx);
+ HEVC_RANGESCHECK("scaling_list_dc_coef_minus8",
+ scaling_list_dc_coef_minus8,
+ -7, 247, &parse_err);
+
+ next_coef = scaling_list_dc_coef_minus8 + 8;
+ scaling_listdata->dccoeffs[size_id - 2][matrix_id] =
+ (unsigned char)next_coef;
+ }
+ for (i = 0; i < coef_num; ++i) {
+ short scaling_list_delta_coef = 0;
+
+ HEVC_SWSR_SE("scaling_list_delta_coef",
+ (int *)&scaling_list_delta_coef, sr_ctx);
+ HEVC_RANGESCHECK("scaling_list_delta_coef",
+ scaling_list_delta_coef, -128, 127,
+ &parse_err);
+
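+ /*
+ * nextCoef = (nextCoef + scaling_list_delta_coef + 256) % 256;
+ * the & 0xFF below implements the mod-256 wrap.
+ */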
+ next_coef = (next_coef + scaling_list_delta_coef + 256) &
+ 0xFF;
+ scalinglist[i] = next_coef;
+ }
+ }
+ }
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ /* print calculated scaling lists */
+ for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+ for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+ ++matrix_id) {
+ unsigned char i = 0;
+ /*
+ * Select scaling list on which we will operate
+ * in the iteration
+ */
+ unsigned char *scalinglist = scaling_listdata->lists[size_id][matrix_id];
+
+ for (; i < ((size_id == 0) ? 16 : 64); ++i) {
+ BSPP_HEVC_SYNTAX("scalinglist[%u][%u][%u] = %u",
+ size_id,
+ matrix_id,
+ i,
+ scalinglist[i]);
+ }
+ }
+ }
+#endif
+
+ return parse_err;
+}
+
+static void
+bspp_hevc_usedefault_scalinglists(struct bspp_hevc_scalinglist_data *scaling_listdata)
+{
+ unsigned char size_id, matrix_id;
+
+ for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+ for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+ ++matrix_id) {
+ unsigned char *list = scaling_listdata->lists[size_id][matrix_id];
+ const unsigned char *defaultlist = NULL;
+ unsigned int listsize = 0;
+
+ bspp_hevc_getdefault_scalinglist(size_id, matrix_id, &defaultlist,
+ &listsize);
+
+ memcpy(list, defaultlist, listsize);
+ }
+ }
+
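+ /* all DC coefficients default to 8 + 8 = 16 */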
+ memset(scaling_listdata->dccoeffs, 8 + 8, sizeof(scaling_listdata->dccoeffs));
+}
+
+static enum bspp_error_type bspp_hevc_parse_shortterm_refpicset
+ (void *sr_ctx,
+ struct bspp_hevc_shortterm_refpicset *st_refpicset,
+ unsigned char st_rps_idx,
+ unsigned char in_slice_header)
+{
+ /*
+ * Note: unfortunately short term ref pic set has to be
+ * "partially-decoded" and parsed at the same time because derived
+ * syntax elements are used for prediction of subsequent
+ * short term ref pic sets.
+ */
+ enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+ struct bspp_hevc_shortterm_refpicset *strps =
+ &st_refpicset[st_rps_idx];
+ unsigned char inter_ref_pic_set_prediction_flag = 0;
+ unsigned int i = 0;
+
+ memset(strps, 0, sizeof(*strps));
+
+ if (st_rps_idx != 0) {
+ HEVC_SWSR_U1("inter_ref_pic_set_prediction_flag",
+ &inter_ref_pic_set_prediction_flag, sr_ctx);
+ }
+
+ if (inter_ref_pic_set_prediction_flag) {
+ signed char j = 0;
+ unsigned char j_8 = 0;
+ unsigned char ref_rps_idx = 0;
+ int delta_rps = 0;
+ unsigned char delta_idx_minus1 = 0;
+ unsigned char delta_rps_sign = 0;
+ unsigned short abs_delta_rps_minus1 = 0;
+ unsigned char used_by_curr_pic_flag[HEVC_MAX_NUM_REF_PICS];
+ unsigned char use_delta_flag[HEVC_MAX_NUM_REF_PICS];
+
+ struct bspp_hevc_shortterm_refpicset *ref_strps = NULL;
+
+ if (in_slice_header) {
+ HEVC_SWSR_UE("delta_idx_minus1", (unsigned int *)&delta_idx_minus1, sr_ctx);
+ HEVC_RANGEUCHECK("delta_idx_minus1", delta_idx_minus1, 0, st_rps_idx - 1,
+ &parse_err);
+ }
+
+ HEVC_SWSR_U1("delta_rps_sign", &delta_rps_sign, sr_ctx);
+ HEVC_SWSR_UE("abs_delta_rps_minus1", (unsigned int *)&abs_delta_rps_minus1, sr_ctx);
+ HEVC_RANGEUCHECK("abs_delta_rps_minus1", abs_delta_rps_minus1, 0, ((1 << 15) - 1),
+ &parse_err);
+
+ ref_rps_idx = st_rps_idx - (delta_idx_minus1 + 1);
+ ref_strps = &st_refpicset[ref_rps_idx];
+
+ memset(use_delta_flag, 1, sizeof(use_delta_flag));
+
+ for (j_8 = 0; j_8 <= ref_strps->num_delta_pocs; ++j_8) {
+ HEVC_SWSR_U1("used_by_curr_pic_flag", &used_by_curr_pic_flag[j_8], sr_ctx);
+ if (!used_by_curr_pic_flag[j_8])
+ HEVC_SWSR_U1("use_delta_flag", &use_delta_flag[j_8], sr_ctx);
+ }
+
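+ /* delta_rps_sign selects the sign: 0 gives a positive deltaRps, 1 a negative one */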
+ delta_rps =
+ (1 - 2 * delta_rps_sign) * (abs_delta_rps_minus1 + 1);
+
+ /*
+ * predict delta POC values of current strps from
+ * reference strps
+ */
+ for (j = ref_strps->num_positive_pics - 1; j >= 0; --j) {
+ int dpoc = ref_strps->delta_poc_s1[j] + delta_rps;
+
+ if (dpoc < 0 && use_delta_flag[ref_strps->num_negative_pics + j]) {
+ strps->delta_poc_s0[i] = dpoc;
+ strps->used_bycurr_pic_s0[i++] =
+ used_by_curr_pic_flag[ref_strps->num_negative_pics + j];
+ }
+ }
+
+ if (delta_rps < 0 && use_delta_flag[ref_strps->num_delta_pocs]) {
+ strps->delta_poc_s0[i] = delta_rps;
+ strps->used_bycurr_pic_s0[i++] =
+ used_by_curr_pic_flag[ref_strps->num_delta_pocs];
+ }
+
+ for (j_8 = 0; j_8 < ref_strps->num_negative_pics; ++j_8) {
+ int dpoc = ref_strps->delta_poc_s0[j_8] + delta_rps;
+
+ if (dpoc < 0 && use_delta_flag[j_8]) {
+ strps->delta_poc_s0[i] = dpoc;
+ strps->used_bycurr_pic_s0[i++] = used_by_curr_pic_flag[j_8];
+ }
+ }
+
+ strps->num_negative_pics = i;
+
+ i = 0;
+ for (j = ref_strps->num_negative_pics - 1; j >= 0; --j) {
+ int dpoc = ref_strps->delta_poc_s0[j] + delta_rps;
+
+ if (dpoc > 0 && use_delta_flag[j]) {
+ strps->delta_poc_s1[i] = dpoc;
+ strps->used_bycurr_pic_s1[i++] =
+ used_by_curr_pic_flag[j];
+ }
+ }
+
+ if (delta_rps > 0 && use_delta_flag[ref_strps->num_delta_pocs]) {
+ strps->delta_poc_s1[i] = delta_rps;
+ strps->used_bycurr_pic_s1[i++] =
+ used_by_curr_pic_flag[ref_strps->num_delta_pocs];
+ }
+
+ for (j_8 = 0; j_8 < ref_strps->num_positive_pics; ++j_8) {
+ int dpoc = ref_strps->delta_poc_s1[j_8] + delta_rps;
+
+ if (dpoc > 0 && use_delta_flag[ref_strps->num_negative_pics + j_8]) {
+ strps->delta_poc_s1[i] = dpoc;
+ strps->used_bycurr_pic_s1[i++] =
+ used_by_curr_pic_flag[ref_strps->num_negative_pics + j_8];
+ }
+ }
+
+ strps->num_positive_pics = i;
+ strps->num_delta_pocs = strps->num_negative_pics + strps->num_positive_pics;
+ if (strps->num_delta_pocs > (HEVC_MAX_NUM_REF_PICS - 1)) {
+ strps->num_delta_pocs = HEVC_MAX_NUM_REF_PICS - 1;
+ parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+ } else {
+ unsigned char num_negative_pics = 0;
+ unsigned char num_positive_pics = 0;
+ unsigned short delta_poc_s0_minus1[HEVC_MAX_NUM_REF_PICS];
+ unsigned char used_by_curr_pic_s0_flag[HEVC_MAX_NUM_REF_PICS];
+ unsigned short delta_poc_s1_minus1[HEVC_MAX_NUM_REF_PICS];
+ unsigned char used_by_curr_pic_s1_flag[HEVC_MAX_NUM_REF_PICS];
+ unsigned char j = 0;
+
+ HEVC_SWSR_UE("num_negative_pics", (unsigned int *)&num_negative_pics, sr_ctx);
+ if (num_negative_pics > HEVC_MAX_NUM_REF_PICS) {
+ num_negative_pics = HEVC_MAX_NUM_REF_PICS;
+ parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+ HEVC_SWSR_UE("num_positive_pics", (unsigned int *)&num_positive_pics, sr_ctx);
+ if (num_positive_pics > HEVC_MAX_NUM_REF_PICS) {
+ num_positive_pics = HEVC_MAX_NUM_REF_PICS;
+ parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+
+ for (j = 0; j < num_negative_pics; ++j) {
+ HEVC_SWSR_UE("delta_poc_s0_minus1",
+ (unsigned int *)&delta_poc_s0_minus1[j], sr_ctx);
+ HEVC_RANGEUCHECK("delta_poc_s0_minus1", delta_poc_s0_minus1[j], 0,
+ ((1 << 15) - 1), &parse_err);
+ HEVC_SWSR_U1("used_by_curr_pic_s0_flag",
+ &used_by_curr_pic_s0_flag[j], sr_ctx);
+
+ if (j == 0)
+ strps->delta_poc_s0[j] =
+ -(delta_poc_s0_minus1[j] + 1);
+ else
+ strps->delta_poc_s0[j] = strps->delta_poc_s0[j - 1] -
+ (delta_poc_s0_minus1[j] + 1);
+
+ strps->used_bycurr_pic_s0[j] = used_by_curr_pic_s0_flag[j];
+ }
+
+ for (j = 0; j < num_positive_pics; j++) {
+ HEVC_SWSR_UE("delta_poc_s1_minus1",
+ (unsigned int *)&delta_poc_s1_minus1[j], sr_ctx);
+ HEVC_RANGEUCHECK("delta_poc_s1_minus1", delta_poc_s1_minus1[j], 0,
+ ((1 << 15) - 1), &parse_err);
+ HEVC_SWSR_U1("used_by_curr_pic_s1_flag",
+ &used_by_curr_pic_s1_flag[j], sr_ctx);
+
+ if (j == 0)
+ strps->delta_poc_s1[j] =
+ (delta_poc_s1_minus1[j] + 1);
+ else
+ strps->delta_poc_s1[j] = strps->delta_poc_s1[j - 1] +
+ (delta_poc_s1_minus1[j] + 1);
+ strps->used_bycurr_pic_s1[j] = used_by_curr_pic_s1_flag[j];
+ }
+
+ strps->num_negative_pics = num_negative_pics;
+ strps->num_positive_pics = num_positive_pics;
+ strps->num_delta_pocs = strps->num_negative_pics + strps->num_positive_pics;
+ if (strps->num_delta_pocs > (HEVC_MAX_NUM_REF_PICS - 1)) {
+ strps->num_delta_pocs = HEVC_MAX_NUM_REF_PICS - 1;
+ parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+ }
+ }
+
+ BSPP_HEVC_SYNTAX
+ ("strps[%u]: num_delta_pocs: %u (%u (num_negative_pics) + %u (num_positive_pics))",
+ st_rps_idx, strps->num_delta_pocs, strps->num_negative_pics,
+ strps->num_positive_pics);
+
+ for (i = 0; i < strps->num_negative_pics; ++i) {
+ BSPP_HEVC_SYNTAX("StRps[%u][%u]: delta_poc_s0: %d, used_bycurr_pic_s0: %u",
+ st_rps_idx, i, strps->delta_poc_s0[i],
+ strps->used_bycurr_pic_s0[i]);
+ }
+
+ for (i = 0; i < strps->num_positive_pics; ++i) {
+ BSPP_HEVC_SYNTAX("StRps[%u][%u]: delta_poc_s1: %d, used_bycurr_pic_s1: %u",
+ st_rps_idx, i, strps->delta_poc_s1[i],
+ strps->used_bycurr_pic_s1[i]);
+ }
+
+ return parse_err;
+}
+
+static void bspp_hevc_fillcommonseqhdr(struct bspp_hevc_sps *sps,
+ struct vdec_comsequ_hdrinfo *common_seq)
+{
+ struct bspp_hevc_vui_params *vui = &sps->vui_params;
+ unsigned char chroma_idc = sps->chroma_format_idc;
+ struct pixel_pixinfo *pixel_info = &common_seq->pixel_info;
+ unsigned int maxsub_layersmin1;
+ unsigned int maxdpb_size;
+ struct vdec_rect *rawdisp_region;
+
+ common_seq->codec_profile = sps->profile_tier_level.general_profile_idc;
+ common_seq->codec_level = sps->profile_tier_level.general_level_idc;
+
+ if (sps->vui_parameters_present_flag &&
+ vui->vui_timing_info_present_flag) {
+ common_seq->frame_rate_num = vui->vui_time_scale;
+ common_seq->frame_rate_den = vui->vui_num_units_in_tick;
+ common_seq->frame_rate =
+ common_seq->frame_rate_num / common_seq->frame_rate_den;
+ }
+
+ if (vui->aspect_ratio_info_present_flag) {
+ common_seq->aspect_ratio_num = vui->sar_width;
+ common_seq->aspect_ratio_den = vui->sar_height;
+ }
+
+ common_seq->interlaced_frames = 0;
+
+ /* handle pixel format definitions */
+ pixel_info->chroma_fmt = chroma_idc == 0 ? 0 : 1;
+ pixel_info->chroma_fmt_idc = pixelformat_idc[chroma_idc];
+ pixel_info->chroma_interleave =
+ chroma_idc == 0 ? PIXEL_INVALID_CI : PIXEL_UV_ORDER;
+ pixel_info->bitdepth_y = sps->bit_depth_luma_minus8 + 8;
+ pixel_info->bitdepth_c = sps->bit_depth_chroma_minus8 + 8;
+
+ pixel_info->mem_pkg = (pixel_info->bitdepth_y > 8 ||
+ (pixel_info->bitdepth_c > 8 && pixel_info->chroma_fmt)) ?
+ PIXEL_BIT10_MSB_MP : PIXEL_BIT8_MP;
+ pixel_info->num_planes =
+ chroma_idc == 0 ? 1 : (sps->separate_colour_plane_flag ? 3 : 2);
+
+ pixel_info->pixfmt = pixel_get_pixfmt(pixel_info->chroma_fmt_idc,
+ pixel_info->chroma_interleave,
+ pixel_info->mem_pkg,
+ pixel_info->bitdepth_y,
+ pixel_info->chroma_fmt ?
+ pixel_info->bitdepth_c : PIXEL_INVALID_BDC,
+ pixel_info->num_planes);
+
+ common_seq->max_frame_size.width = sps->pic_width_in_ctbs_y * sps->ctb_size_y;
+ common_seq->max_frame_size.height = sps->pic_height_in_ctbs_y * sps->ctb_size_y;
+
+ common_seq->frame_size.width = sps->pic_width_in_luma_samples;
+ common_seq->frame_size.height = sps->pic_height_in_luma_samples;
+
+	/* Get HEVC max num ref pictures and pass to bspp info */
+ vdecddutils_ref_pic_hevc_get_maxnum(common_seq, &common_seq->max_ref_frame_num);
+
+ common_seq->field_codec_mblocks = 0;
+
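+	/*
+	 * Derive the minimum DPB size from the highest sub-layer's
+	 * max_dec_pic_buffering, reorder and latency values, then clamp
+	 * it to the maximum number of active reference indices.
+	 */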
+ maxsub_layersmin1 = sps->sps_max_sub_layers_minus1;
+ maxdpb_size =
+ HEVC_MAX(sps->sps_max_dec_pic_buffering_minus1[maxsub_layersmin1] + 1,
+ sps->sps_max_num_reorder_pics[maxsub_layersmin1], unsigned char);
+
+ if (sps->sps_max_latency_increase_plus1[maxsub_layersmin1]) {
+ maxdpb_size =
+ HEVC_MAX(maxdpb_size,
+ sps->sps_max_latency_pictures[maxsub_layersmin1], unsigned int);
+ }
+
+ maxdpb_size = HEVC_MIN(maxdpb_size,
+ HEVC_MAX_NUM_REF_IDX_ACTIVE + 1, unsigned int);
+
+ common_seq->min_pict_buf_num = HEVC_MAX(maxdpb_size, 6, unsigned int);
+
+ common_seq->picture_reordering = 1;
+ common_seq->post_processing = 0;
+
+ /* handle display region calculation */
+ rawdisp_region = &common_seq->raw_display_region;
+
+ rawdisp_region->width = sps->pic_width_in_luma_samples;
+ rawdisp_region->height = sps->pic_height_in_luma_samples;
+ rawdisp_region->top_offset = 0;
+ rawdisp_region->left_offset = 0;
+
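+	/*
+	 * Conformance cropping offsets are signalled in chroma sample
+	 * units, so scale them by SubWidthC/SubHeightC to get luma
+	 * sample offsets.
+	 */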
+ if (sps->conformance_window_flag) {
+ struct vdec_rect *disp_region =
+ &common_seq->orig_display_region;
+
+ disp_region->top_offset =
+ sps->sub_height_c * sps->conf_win_top_offset;
+ disp_region->left_offset =
+ sps->sub_width_c * sps->conf_win_left_offset;
+ disp_region->width =
+ sps->pic_width_in_luma_samples -
+ disp_region->left_offset -
+ sps->sub_width_c * sps->conf_win_right_offset;
+ disp_region->height =
+ sps->pic_height_in_luma_samples -
+ disp_region->top_offset -
+ sps->sub_height_c * sps->conf_win_bottom_offset;
+ } else {
+ common_seq->orig_display_region =
+ common_seq->raw_display_region;
+ }
+}
+
+static void bspp_hevc_fillpicturehdr(struct vdec_comsequ_hdrinfo *common_seq,
+ enum hevc_nalunittype nalunit_type,
+ struct bspp_pict_hdr_info *picture_hdr,
+ struct bspp_hevc_sps *sps,
+ struct bspp_hevc_pps *pps,
+ struct bspp_hevc_vps *vps)
+{
+ picture_hdr->intra_coded = (nalunit_type == HEVC_NALTYPE_IDR_W_RADL ||
+ nalunit_type == HEVC_NALTYPE_IDR_N_LP);
+ picture_hdr->field = 0;
+ picture_hdr->post_processing = 0;
+ picture_hdr->discontinuous_mbs = 0;
+ picture_hdr->pict_aux_data.id = BSPP_INVALID;
+ picture_hdr->second_pict_aux_data.id = BSPP_INVALID;
+ picture_hdr->pict_sgm_data.id = BSPP_INVALID;
+ picture_hdr->coded_frame_size.width =
+ HEVC_ALIGN(sps->pic_width_in_luma_samples, HEVC_MIN_CODED_UNIT_SIZE, unsigned int);
+ picture_hdr->coded_frame_size.height =
+ HEVC_ALIGN(sps->pic_height_in_luma_samples, HEVC_MIN_CODED_UNIT_SIZE, unsigned int);
+ picture_hdr->disp_info.enc_disp_region = common_seq->orig_display_region;
+ picture_hdr->disp_info.disp_region = common_seq->orig_display_region;
+ picture_hdr->disp_info.raw_disp_region = common_seq->raw_display_region;
+ picture_hdr->disp_info.num_pan_scan_windows = 0;
+ picture_hdr->hevc_pict_hdr_info.range_ext_present =
+ (sps->profile_tier_level.general_profile_idc == 4) ||
+ sps->profile_tier_level.general_profile_compatibility_flag[4];
+
+ picture_hdr->hevc_pict_hdr_info.is_full_range_ext = 0;
+ if (picture_hdr->hevc_pict_hdr_info.range_ext_present &&
+ (bspp_hevc_checkppsrangeextensions(&pps->range_exts) ||
+ bspp_hevc_checksps_range_extensions(&sps->range_exts)))
+ picture_hdr->hevc_pict_hdr_info.is_full_range_ext = 1;
+
+ memset(picture_hdr->disp_info.pan_scan_windows, 0,
+ sizeof(picture_hdr->disp_info.pan_scan_windows));
+}
+
+static void bspp_hevc_fill_fwsps(struct bspp_hevc_sps *sps, struct hevcfw_sequence_ps *fwsps)
+{
+ unsigned char i;
+
+ fwsps->pic_width_in_luma_samples = sps->pic_width_in_luma_samples;
+ fwsps->pic_height_in_luma_samples = sps->pic_height_in_luma_samples;
+ fwsps->num_short_term_ref_pic_sets = sps->num_short_term_ref_pic_sets;
+ fwsps->num_long_term_ref_pics_sps = sps->num_long_term_ref_pics_sps;
+ fwsps->sps_max_sub_layers_minus1 = sps->sps_max_sub_layers_minus1;
+ fwsps->max_transform_hierarchy_depth_inter =
+ sps->max_transform_hierarchy_depth_inter;
+ fwsps->max_transform_hierarchy_depth_intra =
+ sps->max_transform_hierarchy_depth_intra;
+ fwsps->log2_diff_max_min_transform_block_size =
+ sps->log2_diff_max_min_transform_block_size;
+ fwsps->log2_min_transform_block_size_minus2 =
+ sps->log2_min_transform_block_size_minus2;
+ fwsps->log2_diff_max_min_luma_coding_block_size =
+ sps->log2_diff_max_min_luma_coding_block_size;
+ fwsps->log2_min_luma_coding_block_size_minus3 =
+ sps->log2_min_luma_coding_block_size_minus3;
+
+ HEVC_STATIC_ASSERT(sizeof(sps->sps_max_dec_pic_buffering_minus1) ==
+ sizeof(fwsps->sps_max_dec_pic_buffering_minus1));
+ memcpy(fwsps->sps_max_dec_pic_buffering_minus1, sps->sps_max_dec_pic_buffering_minus1,
+ sizeof(fwsps->sps_max_dec_pic_buffering_minus1[0]) *
+ (sps->sps_max_sub_layers_minus1 + 1));
+
+ HEVC_STATIC_ASSERT(sizeof(sps->sps_max_num_reorder_pics) ==
+ sizeof(fwsps->sps_max_num_reorder_pics));
+ memcpy(fwsps->sps_max_num_reorder_pics, sps->sps_max_num_reorder_pics,
+ sizeof(fwsps->sps_max_num_reorder_pics[0]) *
+ (sps->sps_max_sub_layers_minus1 + 1));
+
+ HEVC_STATIC_ASSERT(sizeof(sps->sps_max_latency_increase_plus1) ==
+ sizeof(fwsps->sps_max_latency_increase_plus1));
+ memcpy(fwsps->sps_max_latency_increase_plus1, sps->sps_max_latency_increase_plus1,
+ sizeof(fwsps->sps_max_latency_increase_plus1[0]) *
+ (sps->sps_max_sub_layers_minus1 + 1));
+
+ fwsps->chroma_format_idc = sps->chroma_format_idc;
+ fwsps->separate_colour_plane_flag = sps->separate_colour_plane_flag;
+ fwsps->log2_max_pic_order_cnt_lsb_minus4 =
+ sps->log2_max_pic_order_cnt_lsb_minus4;
+ fwsps->long_term_ref_pics_present_flag =
+ sps->long_term_ref_pics_present_flag;
+ fwsps->sample_adaptive_offset_enabled_flag =
+ sps->sample_adaptive_offset_enabled_flag;
+ fwsps->sps_temporal_mvp_enabled_flag =
+ sps->sps_temporal_mvp_enabled_flag;
+ fwsps->bit_depth_luma_minus8 = sps->bit_depth_luma_minus8;
+ fwsps->bit_depth_chroma_minus8 = sps->bit_depth_chroma_minus8;
+ fwsps->pcm_sample_bit_depth_luma_minus1 =
+ sps->pcm_sample_bit_depth_luma_minus1;
+ fwsps->pcm_sample_bit_depth_chroma_minus1 =
+ sps->pcm_sample_bit_depth_chroma_minus1;
+ fwsps->log2_min_pcm_luma_coding_block_size_minus3 =
+ sps->log2_min_pcm_luma_coding_block_size_minus3;
+ fwsps->log2_diff_max_min_pcm_luma_coding_block_size =
+ sps->log2_diff_max_min_pcm_luma_coding_block_size;
+ fwsps->pcm_loop_filter_disabled_flag =
+ sps->pcm_loop_filter_disabled_flag;
+ fwsps->amp_enabled_flag = sps->amp_enabled_flag;
+ fwsps->pcm_enabled_flag = sps->pcm_enabled_flag;
+ fwsps->strong_intra_smoothing_enabled_flag =
+ sps->strong_intra_smoothing_enabled_flag;
+ fwsps->scaling_list_enabled_flag = sps->scaling_list_enabled_flag;
+ fwsps->transform_skip_rotation_enabled_flag =
+ sps->range_exts.transform_skip_rotation_enabled_flag;
+ fwsps->transform_skip_context_enabled_flag =
+ sps->range_exts.transform_skip_context_enabled_flag;
+ fwsps->implicit_rdpcm_enabled_flag =
+ sps->range_exts.implicit_rdpcm_enabled_flag;
+ fwsps->explicit_rdpcm_enabled_flag =
+ sps->range_exts.explicit_rdpcm_enabled_flag;
+ fwsps->extended_precision_processing_flag =
+ sps->range_exts.extended_precision_processing_flag;
+ fwsps->intra_smoothing_disabled_flag =
+ sps->range_exts.intra_smoothing_disabled_flag;
+	/*
+	 * High precision offsets make no sense for 8-bit luma and chroma,
+	 * so forward this parameter only when the bitdepth is > 8.
+	 */
+ if (sps->bit_depth_luma_minus8 || sps->bit_depth_chroma_minus8)
+ fwsps->high_precision_offsets_enabled_flag =
+ sps->range_exts.high_precision_offsets_enabled_flag;
+
+ fwsps->persistent_rice_adaptation_enabled_flag =
+ sps->range_exts.persistent_rice_adaptation_enabled_flag;
+ fwsps->cabac_bypass_alignment_enabled_flag =
+ sps->range_exts.cabac_bypass_alignment_enabled_flag;
+
+ HEVC_STATIC_ASSERT(sizeof(sps->lt_ref_pic_poc_lsb_sps) ==
+ sizeof(fwsps->lt_ref_pic_poc_lsb_sps));
+ HEVC_STATIC_ASSERT(sizeof(sps->used_by_curr_pic_lt_sps_flag) ==
+ sizeof(fwsps->used_by_curr_pic_lt_sps_flag));
+ memcpy(fwsps->lt_ref_pic_poc_lsb_sps, sps->lt_ref_pic_poc_lsb_sps,
+ sizeof(fwsps->lt_ref_pic_poc_lsb_sps[0]) *
+ sps->num_long_term_ref_pics_sps);
+ memcpy(fwsps->used_by_curr_pic_lt_sps_flag, sps->used_by_curr_pic_lt_sps_flag,
+ sizeof(fwsps->used_by_curr_pic_lt_sps_flag[0]) * sps->num_long_term_ref_pics_sps);
+
+ for (i = 0; i < sps->num_short_term_ref_pic_sets; ++i)
+ bspp_hevc_fill_fwst_rps(&sps->rps_list[i], &fwsps->st_rps_list[i]);
+
+ /* derived elements */
+ fwsps->pic_size_in_ctbs_y = sps->pic_size_in_ctbs_y;
+ fwsps->pic_height_in_ctbs_y = sps->pic_height_in_ctbs_y;
+ fwsps->pic_width_in_ctbs_y = sps->pic_width_in_ctbs_y;
+ fwsps->ctb_size_y = sps->ctb_size_y;
+ fwsps->ctb_log2size_y = sps->ctb_log2size_y;
+ fwsps->max_pic_order_cnt_lsb = sps->max_pic_order_cnt_lsb;
+
+ HEVC_STATIC_ASSERT(sizeof(sps->sps_max_latency_pictures) ==
+ sizeof(fwsps->sps_max_latency_pictures));
+ memcpy(fwsps->sps_max_latency_pictures, sps->sps_max_latency_pictures,
+ sizeof(fwsps->sps_max_latency_pictures[0]) *
+ (sps->sps_max_sub_layers_minus1 + 1));
+}
+
+static void bspp_hevc_fill_fwst_rps(struct bspp_hevc_shortterm_refpicset *strps,
+ struct hevcfw_short_term_ref_picset *fwstrps)
+{
+ fwstrps->num_delta_pocs = strps->num_delta_pocs;
+ fwstrps->num_negative_pics = strps->num_negative_pics;
+ fwstrps->num_positive_pics = strps->num_positive_pics;
+
+ HEVC_STATIC_ASSERT(sizeof(strps->delta_poc_s0) ==
+ sizeof(fwstrps->delta_poc_s0));
+ memcpy(fwstrps->delta_poc_s0, strps->delta_poc_s0,
+ sizeof(fwstrps->delta_poc_s0[0]) * strps->num_negative_pics);
+
+ HEVC_STATIC_ASSERT(sizeof(strps->delta_poc_s1) ==
+ sizeof(fwstrps->delta_poc_s1));
+ memcpy(fwstrps->delta_poc_s1, strps->delta_poc_s1,
+ sizeof(fwstrps->delta_poc_s1[0]) * strps->num_positive_pics);
+
+ HEVC_STATIC_ASSERT(sizeof(strps->used_bycurr_pic_s0) ==
+ sizeof(fwstrps->used_bycurr_pic_s0));
+ memcpy(fwstrps->used_bycurr_pic_s0, strps->used_bycurr_pic_s0,
+ sizeof(fwstrps->used_bycurr_pic_s0[0]) * strps->num_negative_pics);
+
+ HEVC_STATIC_ASSERT(sizeof(strps->used_bycurr_pic_s1) ==
+ sizeof(fwstrps->used_bycurr_pic_s1));
+ memcpy(fwstrps->used_bycurr_pic_s1, strps->used_bycurr_pic_s1,
+ sizeof(fwstrps->used_bycurr_pic_s1[0]) * strps->num_positive_pics);
+}
+
+static void bspp_hevc_fill_fwpps(struct bspp_hevc_pps *pps, struct hevcfw_picture_ps *fw_pps)
+{
+ fw_pps->pps_pic_parameter_set_id = pps->pps_pic_parameter_set_id;
+ fw_pps->num_tile_columns_minus1 = pps->num_tile_columns_minus1;
+ fw_pps->num_tile_rows_minus1 = pps->num_tile_rows_minus1;
+ fw_pps->diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth;
+ fw_pps->init_qp_minus26 = pps->init_qp_minus26;
+ fw_pps->pps_beta_offset_div2 = pps->pps_beta_offset_div2;
+ fw_pps->pps_tc_offset_div2 = pps->pps_tc_offset_div2;
+ fw_pps->pps_cb_qp_offset = pps->pps_cb_qp_offset;
+ fw_pps->pps_cr_qp_offset = pps->pps_cr_qp_offset;
+ fw_pps->log2_parallel_merge_level_minus2 =
+ pps->log2_parallel_merge_level_minus2;
+
+ fw_pps->dependent_slice_segments_enabled_flag =
+ pps->dependent_slice_segments_enabled_flag;
+ fw_pps->output_flag_present_flag = pps->output_flag_present_flag;
+ fw_pps->num_extra_slice_header_bits = pps->num_extra_slice_header_bits;
+ fw_pps->lists_modification_present_flag =
+ pps->lists_modification_present_flag;
+ fw_pps->cabac_init_present_flag = pps->cabac_init_present_flag;
+ fw_pps->weighted_pred_flag = pps->weighted_pred_flag;
+ fw_pps->weighted_bipred_flag = pps->weighted_bipred_flag;
+ fw_pps->pps_slice_chroma_qp_offsets_present_flag =
+ pps->pps_slice_chroma_qp_offsets_present_flag;
+ fw_pps->deblocking_filter_override_enabled_flag =
+ pps->deblocking_filter_override_enabled_flag;
+ fw_pps->tiles_enabled_flag = pps->tiles_enabled_flag;
+ fw_pps->entropy_coding_sync_enabled_flag =
+ pps->entropy_coding_sync_enabled_flag;
+ fw_pps->slice_segment_header_extension_present_flag =
+ pps->slice_segment_header_extension_present_flag;
+ fw_pps->transquant_bypass_enabled_flag =
+ pps->transquant_bypass_enabled_flag;
+ fw_pps->cu_qp_delta_enabled_flag = pps->cu_qp_delta_enabled_flag;
+ fw_pps->transform_skip_enabled_flag = pps->transform_skip_enabled_flag;
+ fw_pps->sign_data_hiding_enabled_flag =
+ pps->sign_data_hiding_enabled_flag;
+ fw_pps->num_ref_idx_l0_default_active_minus1 =
+ pps->num_ref_idx_l0_default_active_minus1;
+ fw_pps->num_ref_idx_l1_default_active_minus1 =
+ pps->num_ref_idx_l1_default_active_minus1;
+ fw_pps->constrained_intra_pred_flag = pps->constrained_intra_pred_flag;
+ fw_pps->pps_deblocking_filter_disabled_flag =
+ pps->pps_deblocking_filter_disabled_flag;
+ fw_pps->pps_loop_filter_across_slices_enabled_flag =
+ pps->pps_loop_filter_across_slices_enabled_flag;
+ fw_pps->loop_filter_across_tiles_enabled_flag =
+ pps->loop_filter_across_tiles_enabled_flag;
+ fw_pps->log2_max_transform_skip_block_size_minus2 =
+ pps->range_exts.log2_max_transform_skip_block_size_minus2;
+ fw_pps->cross_component_prediction_enabled_flag =
+ pps->range_exts.cross_component_prediction_enabled_flag;
+ fw_pps->chroma_qp_offset_list_enabled_flag =
+ pps->range_exts.chroma_qp_offset_list_enabled_flag;
+ fw_pps->diff_cu_chroma_qp_offset_depth =
+ pps->range_exts.diff_cu_chroma_qp_offset_depth;
+ fw_pps->chroma_qp_offset_list_len_minus1 =
+ pps->range_exts.chroma_qp_offset_list_len_minus1;
+ memcpy(fw_pps->cb_qp_offset_list, pps->range_exts.cb_qp_offset_list,
+ sizeof(pps->range_exts.cb_qp_offset_list));
+ memcpy(fw_pps->cr_qp_offset_list, pps->range_exts.cr_qp_offset_list,
+ sizeof(pps->range_exts.cr_qp_offset_list));
+
+ /* derived elements */
+ HEVC_STATIC_ASSERT(sizeof(pps->col_bd) == sizeof(fw_pps->col_bd));
+ HEVC_STATIC_ASSERT(sizeof(pps->row_bd) == sizeof(fw_pps->row_bd));
+ memcpy(fw_pps->col_bd, pps->col_bd, sizeof(fw_pps->col_bd));
+ memcpy(fw_pps->row_bd, pps->row_bd, sizeof(fw_pps->row_bd));
+}
+
+static void bspp_hevc_fill_fw_scaling_lists(struct bspp_hevc_pps *pps,
+ struct bspp_hevc_sps *sps,
+ struct hevcfw_picture_ps *fw_pps)
+{
+ signed char size_id, matrix_id;
+ unsigned char *scalinglist;
+	/*
+	 * Start at index 1 to leave space for the addresses, which are
+	 * filled in by the lower layer.
+	 */
+ unsigned int *scaling_lists = &fw_pps->scaling_lists[1];
+ unsigned char i;
+
+ struct bspp_hevc_scalinglist_data *scaling_listdata =
+ pps->pps_scaling_list_data_present_flag ?
+ &pps->scaling_list :
+ &sps->scalinglist_data;
+
+ if (!sps->scaling_list_enabled_flag)
+ return;
+
+ fw_pps->scaling_list_enabled_flag = sps->scaling_list_enabled_flag;
+
+ for (size_id = HEVC_SCALING_LIST_NUM_SIZES - 1;
+ size_id >= 0; --size_id) {
+ const unsigned char *zz =
+ (size_id == 0 ? HEVC_INV_ZZ_SCAN4 : HEVC_INV_ZZ_SCAN8);
+
+ for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+ ++matrix_id) {
+ /*
+ * Select scaling list on which we will operate
+ * in the iteration
+ */
+ scalinglist =
+ scaling_listdata->lists[size_id][matrix_id];
+
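+			/*
+			 * Pack four de-zigzagged coefficients per 32-bit
+			 * word, skipping every other word to leave room for
+			 * the addresses patched in by the lower layer.
+			 */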
+ for (i = 0; i < ((size_id == 0) ? 16 : 64); i += 4) {
+ *scaling_lists =
+ scalinglist[zz[i + 3]] << 24 |
+ scalinglist[zz[i + 2]] << 16 |
+ scalinglist[zz[i + 1]] << 8 |
+ scalinglist[zz[i]];
+ scaling_lists += 2;
+ }
+ }
+ }
+
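+	/*
+	 * Append the DC coefficients: two for the 32x32 matrices, then
+	 * six for the 16x16 ones, again interleaved with address slots.
+	 */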
+ for (i = 0; i < 2; ++i) {
+ *scaling_lists = scaling_listdata->dccoeffs[1][i];
+ scaling_lists += 2;
+ }
+
+ for (i = 0; i < 6; ++i) {
+ *scaling_lists = scaling_listdata->dccoeffs[0][i];
+ scaling_lists += 2;
+ }
+}
+
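+/* Compute ceil(log2(linear_val)), e.g. 8 -> 3, 9 -> 4, 0 -> 0. */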
+static unsigned int bspp_ceil_log2(unsigned int linear_val)
+{
+ unsigned int log_val = 0;
+
+ if (linear_val > 0)
+ --linear_val;
+
+ while (linear_val > 0) {
+ linear_val >>= 1;
+ ++log_val;
+ }
+
+ return log_val;
+}
+
+static unsigned char bspp_hevc_picture_is_irap(enum hevc_nalunittype nalunit_type)
+{
+ return (nalunit_type >= HEVC_NALTYPE_BLA_W_LP) &&
+ (nalunit_type <= HEVC_NALTYPE_RSV_IRAP_VCL23);
+}
+
+static unsigned char bspp_hevc_picture_is_cra(enum hevc_nalunittype nalunit_type)
+{
+ return (nalunit_type == HEVC_NALTYPE_CRA);
+}
+
+static unsigned char bspp_hevc_picture_is_idr(enum hevc_nalunittype nalunit_type)
+{
+ return (nalunit_type == HEVC_NALTYPE_IDR_N_LP) ||
+ (nalunit_type == HEVC_NALTYPE_IDR_W_RADL);
+}
+
+static unsigned char bspp_hevc_picture_is_bla(enum hevc_nalunittype nalunit_type)
+{
+ return (nalunit_type >= HEVC_NALTYPE_BLA_W_LP) &&
+ (nalunit_type <= HEVC_NALTYPE_BLA_N_LP);
+}
+
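+/*
+ * Derive NoRaslOutputFlag: set for IDR and BLA pictures, for the first
+ * picture after an end-of-sequence NAL and for a CRA picture that begins
+ * the coded sequence.
+ */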
+static unsigned char bspp_hevc_picture_getnorasl_outputflag
+ (enum hevc_nalunittype nalunit_type,
+ struct bspp_hevc_inter_pict_ctx *inter_pict_ctx)
+{
+ VDEC_ASSERT(inter_pict_ctx);
+
+ if (bspp_hevc_picture_is_idr(nalunit_type) ||
+ bspp_hevc_picture_is_bla(nalunit_type) ||
+ inter_pict_ctx->first_after_eos ||
+ (bspp_hevc_picture_is_cra(nalunit_type) && inter_pict_ctx->seq_pic_count == 1))
+ return 1;
+
+ return 0;
+}
+
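+/*
+ * Range extensions are signalled by a general_profile_idc of 4 (REXT)
+ * or above, or by the corresponding profile compatibility flag.
+ */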
+static unsigned char bspp_hevc_range_extensions_is_enabled
+ (struct bspp_hevc_profile_tierlevel *profile_tierlevel)
+{
+ unsigned char is_enabled;
+
+ is_enabled = profile_tierlevel->general_profile_idc >= 4 ||
+ profile_tierlevel->general_profile_compatibility_flag[4];
+
+ return is_enabled;
+}
+
+static void bspp_hevc_parse_codec_config(void *hndl_swsr_ctx, unsigned int *unit_count,
+ unsigned int *unit_array_count,
+ unsigned int *delim_length,
+ unsigned int *size_delim_length)
+{
+ unsigned long long value = 23;
+
+ /*
+	 * Set the shift-register up to provide the next 23 bytes
+	 * without emulation prevention detection.
+ */
+ swsr_consume_delim(hndl_swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+ /*
+ * Codec config header must be read for size delimited data (HEVC)
+ * to get to the start of each unit.
+ * This parsing follows section 8.3.3.1.2 of ISO/IEC 14496-15:2013.
+ */
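+	/*
+	 * Skip the first 21 bytes of the HEVCDecoderConfigurationRecord
+	 * (configurationVersion through avgFrameRate), then extract
+	 * lengthSizeMinusOne and numOfArrays.
+	 */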
+ swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+ swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+ swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+ swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+ swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+ swsr_read_bits(hndl_swsr_ctx, 8);
+
+ *delim_length = ((swsr_read_bits(hndl_swsr_ctx, 8) & 0x3) + 1) * 8;
+ *unit_array_count = swsr_read_bits(hndl_swsr_ctx, 8);
+
+ /* Size delimiter is only 2 bytes for HEVC codec configuration. */
+ *size_delim_length = 2 * 8;
+}
+
+static void bspp_hevc_update_unitcounts(void *hndl_swsr_ctx, unsigned int *unit_count,
+ unsigned int *unit_array_count)
+{
+ if (*unit_array_count != 0) {
+ unsigned long long value = 3;
+
+ if (*unit_count == 0) {
+ /*
+			 * Set the shift-register up to provide the next 3 bytes
+ * without emulation prevention detection.
+ */
+ swsr_consume_delim(hndl_swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
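+			/*
+			 * Each unit array starts with a byte carrying
+			 * array_completeness and NAL_unit_type, followed by
+			 * a 16-bit numNalus count.
+			 */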
+ swsr_read_bits(hndl_swsr_ctx, 8);
+ *unit_count = swsr_read_bits(hndl_swsr_ctx, 16);
+
+ (*unit_array_count)--;
+ (*unit_count)--;
+ }
+ }
+}
+
+void bspp_hevc_determine_unittype(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unittype)
+{
+ /* 6 bits for NAL Unit Type in HEVC */
+ unsigned char type = (bitstream_unittype >> 1) & 0x3f;
+
+ switch (type) {
+ case HEVC_NALTYPE_VPS:
+ *bspp_unittype = BSPP_UNIT_VPS;
+ break;
+
+ case HEVC_NALTYPE_SPS:
+ *bspp_unittype = BSPP_UNIT_SEQUENCE;
+ break;
+
+ case HEVC_NALTYPE_PPS:
+ *bspp_unittype = BSPP_UNIT_PPS;
+ break;
+
+ case HEVC_NALTYPE_TRAIL_N:
+ case HEVC_NALTYPE_TRAIL_R:
+ case HEVC_NALTYPE_TSA_N:
+ case HEVC_NALTYPE_TSA_R:
+ case HEVC_NALTYPE_STSA_N:
+ case HEVC_NALTYPE_STSA_R:
+ case HEVC_NALTYPE_RADL_N:
+ case HEVC_NALTYPE_RADL_R:
+ case HEVC_NALTYPE_RASL_N:
+ case HEVC_NALTYPE_RASL_R:
+ case HEVC_NALTYPE_BLA_W_LP:
+ case HEVC_NALTYPE_BLA_W_RADL:
+ case HEVC_NALTYPE_BLA_N_LP:
+ case HEVC_NALTYPE_IDR_W_RADL:
+ case HEVC_NALTYPE_IDR_N_LP:
+ case HEVC_NALTYPE_CRA:
+ case HEVC_NALTYPE_EOS:
+ /* Attach EOS to picture data, so it can be detected in FW */
+ *bspp_unittype = BSPP_UNIT_PICTURE;
+ break;
+
+ case HEVC_NALTYPE_AUD:
+ case HEVC_NALTYPE_PREFIX_SEI:
+ case HEVC_NALTYPE_SUFFIX_SEI:
+ case HEVC_NALTYPE_EOB:
+ case HEVC_NALTYPE_FD:
+ *bspp_unittype = BSPP_UNIT_NON_PICTURE;
+ break;
+
+ default:
+ *bspp_unittype = BSPP_UNIT_UNSUPPORTED;
+ break;
+ }
+}
+
+int bspp_hevc_set_parser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *parser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data)
+{
+	/* Set HEVC parser callbacks. */
+ parser_callbacks->parse_unit_cb = bspp_hevc_unitparser;
+ parser_callbacks->release_data_cb = bspp_hevc_releasedata;
+ parser_callbacks->reset_data_cb = bspp_hevc_resetdata;
+ parser_callbacks->parse_codec_config_cb = bspp_hevc_parse_codec_config;
+ parser_callbacks->update_unit_counts_cb = bspp_hevc_update_unitcounts;
+ parser_callbacks->initialise_parsing_cb = bspp_hevc_initialiseparsing;
+ parser_callbacks->finalise_parsing_cb = bspp_hevc_finaliseparsing;
+
+ /* Set HEVC specific features. */
+ pvidstd_features->seq_size = sizeof(struct bspp_hevc_sequ_hdr_info);
+ pvidstd_features->uses_vps = 1;
+ pvidstd_features->vps_size = sizeof(struct bspp_hevc_vps);
+ pvidstd_features->uses_pps = 1;
+ pvidstd_features->pps_size = sizeof(struct bspp_hevc_pps);
+
+ /* Set HEVC specific shift register config. */
+ pswsr_ctx->emulation_prevention = SWSR_EMPREVENT_00000300;
+
+ if (bstr_format == VDEC_BSTRFORMAT_DEMUX_BYTESTREAM ||
+ bstr_format == VDEC_BSTRFORMAT_ELEMENTARY) {
+ pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SCP;
+ pswsr_ctx->sr_config.delim_length = 3 * 8;
+ pswsr_ctx->sr_config.scp_value = 0x000001;
+ } else if (bstr_format == VDEC_BSTRFORMAT_DEMUX_SIZEDELIMITED) {
+ pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SIZE;
+ pswsr_ctx->sr_config.delim_length = 4 * 8;
+ } else {
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.h b/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.h
new file mode 100644
index 000000000000..72424e8b8041
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hevc_secure_parser.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEVC secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __HEVCSECUREPARSER_H__
+#define __HEVCSECUREPARSER_H__
+
+#include "bspp_int.h"
+
+#define HEVC_MAX_NUM_PROFILE_IDC (32)
+#define HEVC_MAX_NUM_SUBLAYERS (7)
+#define HEVC_MAX_VPS_OP_SETS_PLUS1 (1024)
+#define HEVC_MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1 (1)
+#define HEVC_MAX_NUM_REF_PICS (16)
+#define HEVC_MAX_NUM_ST_REF_PIC_SETS (65)
+#define HEVC_MAX_NUM_LT_REF_PICS (32)
+#define HEVC_MAX_NUM_REF_IDX_ACTIVE (15)
+#define HEVC_LEVEL_IDC_MIN (30)
+#define HEVC_LEVEL_IDC_MAX (186)
+#define HEVC_1_0_PROFILE_IDC_MAX (3)
+#define HEVC_MAX_CPB_COUNT (32)
+#define HEVC_MIN_CODED_UNIT_SIZE (8)
+
+/* HEVC scaling lists (all values are maximum possible ones) */
+#define HEVC_SCALING_LIST_NUM_SIZES (4)
+#define HEVC_SCALING_LIST_NUM_MATRICES (6)
+#define HEVC_SCALING_LIST_MATRIX_SIZE (64)
+
+#define HEVC_MAX_TILE_COLS (20)
+#define HEVC_MAX_TILE_ROWS (22)
+
+#define HEVC_EXTENDED_SAR (255)
+
+#define HEVC_MAX_CHROMA_QP (6)
+
+enum hevc_nalunittype {
+ HEVC_NALTYPE_TRAIL_N = 0,
+ HEVC_NALTYPE_TRAIL_R = 1,
+ HEVC_NALTYPE_TSA_N = 2,
+ HEVC_NALTYPE_TSA_R = 3,
+ HEVC_NALTYPE_STSA_N = 4,
+ HEVC_NALTYPE_STSA_R = 5,
+ HEVC_NALTYPE_RADL_N = 6,
+ HEVC_NALTYPE_RADL_R = 7,
+ HEVC_NALTYPE_RASL_N = 8,
+ HEVC_NALTYPE_RASL_R = 9,
+ HEVC_NALTYPE_RSV_VCL_N10 = 10,
+ HEVC_NALTYPE_RSV_VCL_R11 = 11,
+ HEVC_NALTYPE_RSV_VCL_N12 = 12,
+ HEVC_NALTYPE_RSV_VCL_R13 = 13,
+ HEVC_NALTYPE_RSV_VCL_N14 = 14,
+ HEVC_NALTYPE_RSV_VCL_R15 = 15,
+ HEVC_NALTYPE_BLA_W_LP = 16,
+ HEVC_NALTYPE_BLA_W_RADL = 17,
+ HEVC_NALTYPE_BLA_N_LP = 18,
+ HEVC_NALTYPE_IDR_W_RADL = 19,
+ HEVC_NALTYPE_IDR_N_LP = 20,
+ HEVC_NALTYPE_CRA = 21,
+ HEVC_NALTYPE_RSV_IRAP_VCL22 = 22,
+ HEVC_NALTYPE_RSV_IRAP_VCL23 = 23,
+ HEVC_NALTYPE_VPS = 32,
+ HEVC_NALTYPE_SPS = 33,
+ HEVC_NALTYPE_PPS = 34,
+ HEVC_NALTYPE_AUD = 35,
+ HEVC_NALTYPE_EOS = 36,
+ HEVC_NALTYPE_EOB = 37,
+ HEVC_NALTYPE_FD = 38,
+ HEVC_NALTYPE_PREFIX_SEI = 39,
+ HEVC_NALTYPE_SUFFIX_SEI = 40,
+ HEVC_NALTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum bspp_hevcslicetype {
+ HEVC_SLICE_B = 0,
+ HEVC_SLICE_P = 1,
+ HEVC_SLICE_I = 2,
+ HEVC_SLICE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* HEVC NAL unit header */
+struct bspp_hevcnalheader {
+ unsigned char nal_unit_type;
+ unsigned char nuh_layer_id;
+ unsigned char nuh_temporal_id_plus1;
+};
+
+/* HEVC video profile_tier_level */
+struct bspp_hevc_profile_tierlevel {
+ unsigned char general_profile_space;
+ unsigned char general_tier_flag;
+ unsigned char general_profile_idc;
+ unsigned char general_profile_compatibility_flag[HEVC_MAX_NUM_PROFILE_IDC];
+ unsigned char general_progressive_source_flag;
+ unsigned char general_interlaced_source_flag;
+ unsigned char general_non_packed_constraint_flag;
+ unsigned char general_frame_only_constraint_flag;
+ unsigned char general_max_12bit_constraint_flag;
+ unsigned char general_max_10bit_constraint_flag;
+ unsigned char general_max_8bit_constraint_flag;
+ unsigned char general_max_422chroma_constraint_flag;
+ unsigned char general_max_420chroma_constraint_flag;
+ unsigned char general_max_monochrome_constraint_flag;
+ unsigned char general_intra_constraint_flag;
+ unsigned char general_one_picture_only_constraint_flag;
+ unsigned char general_lower_bit_rate_constraint_flag;
+ unsigned char general_level_idc;
+ unsigned char sub_layer_profile_present_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_level_present_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_space[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_tier_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_idc[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_compatibility_flag[HEVC_MAX_NUM_SUBLAYERS -
+ 1][HEVC_MAX_NUM_PROFILE_IDC];
+ unsigned char sub_layer_progressive_source_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_interlaced_source_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_non_packed_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_frame_only_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_12bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_10bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_8bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_422chroma_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_420chroma_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_monochrome_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_intra_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_one_picture_only_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_lower_bit_rate_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_level_idc[HEVC_MAX_NUM_SUBLAYERS - 1];
+};
+
+/* HEVC sub layer HRD parameters */
+struct bspp_hevc_sublayer_hrd_parameters {
+ unsigned char bit_rate_value_minus1[HEVC_MAX_CPB_COUNT];
+ unsigned char cpb_size_value_minus1[HEVC_MAX_CPB_COUNT];
+ unsigned char cpb_size_du_value_minus1[HEVC_MAX_CPB_COUNT];
+ unsigned char bit_rate_du_value_minus1[HEVC_MAX_CPB_COUNT];
+ unsigned char cbr_flag[HEVC_MAX_CPB_COUNT];
+};
+
+/* HEVC HRD parameters */
+struct bspp_hevc_hrd_parameters {
+ unsigned char nal_hrd_parameters_present_flag;
+ unsigned char vcl_hrd_parameters_present_flag;
+ unsigned char sub_pic_hrd_params_present_flag;
+ unsigned char tick_divisor_minus2;
+ unsigned char du_cpb_removal_delay_increment_length_minus1;
+ unsigned char sub_pic_cpb_params_in_pic_timing_sei_flag;
+ unsigned char dpb_output_delay_du_length_minus1;
+ unsigned char bit_rate_scale;
+ unsigned char cpb_size_scale;
+ unsigned char cpb_size_du_scale;
+ unsigned char initial_cpb_removal_delay_length_minus1;
+ unsigned char au_cpb_removal_delay_length_minus1;
+ unsigned char dpb_output_delay_length_minus1;
+ unsigned char fixed_pic_rate_general_flag[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char fixed_pic_rate_within_cvs_flag[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char elemental_duration_in_tc_minus1[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char low_delay_hrd_flag[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char cpb_cnt_minus1[HEVC_MAX_NUM_SUBLAYERS];
+ struct bspp_hevc_sublayer_hrd_parameters sublayhrdparams[HEVC_MAX_NUM_SUBLAYERS];
+};
+
+/* HEVC video parameter set */
+struct bspp_hevc_vps {
+ unsigned char is_different;
+ unsigned char is_sent;
+ unsigned char is_available;
+ unsigned char vps_video_parameter_set_id;
+ unsigned char vps_reserved_three_2bits;
+ unsigned char vps_max_layers_minus1;
+ unsigned char vps_max_sub_layers_minus1;
+ unsigned char vps_temporal_id_nesting_flag;
+ unsigned short vps_reserved_0xffff_16bits;
+ struct bspp_hevc_profile_tierlevel profiletierlevel;
+ unsigned char vps_max_dec_pic_buffering_minus1[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char vps_max_num_reorder_pics[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char vps_max_latency_increase_plus1[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char vps_sub_layer_ordering_info_present_flag;
+ unsigned char vps_max_layer_id;
+ unsigned char vps_num_layer_sets_minus1;
+ unsigned char layer_id_included_flag[HEVC_MAX_VPS_OP_SETS_PLUS1]
+ [HEVC_MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1];
+ unsigned char vps_timing_info_present_flag;
+ unsigned int vps_num_units_in_tick;
+ unsigned int vps_time_scale;
+ unsigned char vps_poc_proportional_to_timing_flag;
+ unsigned char vps_num_ticks_poc_diff_one_minus1;
+ unsigned char vps_num_hrd_parameters;
+ unsigned char *hrd_layer_set_idx;
+ unsigned char *cprms_present_flag;
+ unsigned char vps_extension_flag;
+ unsigned char vps_extension_data_flag;
+};
+
+/* HEVC scaling lists */
+struct bspp_hevc_scalinglist_data {
+ unsigned char dccoeffs[HEVC_SCALING_LIST_NUM_SIZES - 2][HEVC_SCALING_LIST_NUM_MATRICES];
+ unsigned char lists[HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES]
+ [HEVC_SCALING_LIST_MATRIX_SIZE];
+};
+
+/* HEVC short term reference picture set */
+struct bspp_hevc_shortterm_refpicset {
+ unsigned char num_negative_pics;
+ unsigned char num_positive_pics;
+ short delta_poc_s0[HEVC_MAX_NUM_REF_PICS];
+ short delta_poc_s1[HEVC_MAX_NUM_REF_PICS];
+ unsigned char used_bycurr_pic_s0[HEVC_MAX_NUM_REF_PICS];
+ unsigned char used_bycurr_pic_s1[HEVC_MAX_NUM_REF_PICS];
+ unsigned char num_delta_pocs;
+};
+
+/* HEVC video usability information */
+struct bspp_hevc_vui_params {
+ unsigned char aspect_ratio_info_present_flag;
+ unsigned char aspect_ratio_idc;
+ unsigned short sar_width;
+ unsigned short sar_height;
+ unsigned char overscan_info_present_flag;
+ unsigned char overscan_appropriate_flag;
+ unsigned char video_signal_type_present_flag;
+ unsigned char video_format;
+ unsigned char video_full_range_flag;
+ unsigned char colour_description_present_flag;
+ unsigned char colour_primaries;
+ unsigned char transfer_characteristics;
+ unsigned char matrix_coeffs;
+ unsigned char chroma_loc_info_present_flag;
+ unsigned char chroma_sample_loc_type_top_field;
+ unsigned char chroma_sample_loc_type_bottom_field;
+ unsigned char neutral_chroma_indication_flag;
+ unsigned char field_seq_flag;
+ unsigned char frame_field_info_present_flag;
+ unsigned char default_display_window_flag;
+ unsigned short def_disp_win_left_offset;
+ unsigned short def_disp_win_right_offset;
+ unsigned short def_disp_win_top_offset;
+ unsigned short def_disp_win_bottom_offset;
+ unsigned char vui_timing_info_present_flag;
+ unsigned int vui_num_units_in_tick;
+ unsigned int vui_time_scale;
+ unsigned char vui_poc_proportional_to_timing_flag;
+ unsigned int vui_num_ticks_poc_diff_one_minus1;
+ unsigned char vui_hrd_parameters_present_flag;
+ struct bspp_hevc_hrd_parameters vui_hrd_params;
+ unsigned char bitstream_restriction_flag;
+ unsigned char tiles_fixed_structure_flag;
+ unsigned char motion_vectors_over_pic_boundaries_flag;
+ unsigned char restricted_ref_pic_lists_flag;
+ unsigned short min_spatial_segmentation_idc;
+ unsigned char max_bytes_per_pic_denom;
+ unsigned char max_bits_per_min_cu_denom;
+ unsigned char log2_max_mv_length_horizontal;
+ unsigned char log2_max_mv_length_vertical;
+};
+
+/* HEVC sps range extensions */
+struct bspp_hevc_sps_range_exts {
+ unsigned char transform_skip_rotation_enabled_flag;
+ unsigned char transform_skip_context_enabled_flag;
+ unsigned char implicit_rdpcm_enabled_flag;
+ unsigned char explicit_rdpcm_enabled_flag;
+ unsigned char extended_precision_processing_flag;
+ unsigned char intra_smoothing_disabled_flag;
+ unsigned char high_precision_offsets_enabled_flag;
+ unsigned char persistent_rice_adaptation_enabled_flag;
+ unsigned char cabac_bypass_alignment_enabled_flag;
+};
+
+/* HEVC sequence parameter set */
+struct bspp_hevc_sps {
+ unsigned char is_different;
+ unsigned char is_sent;
+ unsigned char is_available;
+ unsigned char sps_video_parameter_set_id;
+ unsigned char sps_max_sub_layers_minus1;
+ unsigned char sps_temporal_id_nesting_flag;
+ struct bspp_hevc_profile_tierlevel profile_tier_level;
+ unsigned char sps_seq_parameter_set_id;
+ unsigned char chroma_format_idc;
+ unsigned char separate_colour_plane_flag;
+ unsigned int pic_width_in_luma_samples;
+ unsigned int pic_height_in_luma_samples;
+ unsigned char conformance_window_flag;
+ unsigned short conf_win_left_offset;
+ unsigned short conf_win_right_offset;
+ unsigned short conf_win_top_offset;
+ unsigned short conf_win_bottom_offset;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char sps_sub_layer_ordering_info_present_flag;
+ unsigned char sps_max_dec_pic_buffering_minus1[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char sps_max_num_reorder_pics[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned int sps_max_latency_increase_plus1[HEVC_MAX_NUM_SUBLAYERS];
+ unsigned char log2_min_luma_coding_block_size_minus3;
+ unsigned char log2_diff_max_min_luma_coding_block_size;
+ unsigned char log2_min_transform_block_size_minus2;
+ unsigned char log2_diff_max_min_transform_block_size;
+ unsigned char max_transform_hierarchy_depth_inter;
+ unsigned char max_transform_hierarchy_depth_intra;
+ unsigned char scaling_list_enabled_flag;
+ unsigned char sps_scaling_list_data_present_flag;
+ struct bspp_hevc_scalinglist_data scalinglist_data;
+ unsigned char amp_enabled_flag;
+ unsigned char sample_adaptive_offset_enabled_flag;
+ unsigned char pcm_enabled_flag;
+ unsigned char pcm_sample_bit_depth_luma_minus1;
+ unsigned char pcm_sample_bit_depth_chroma_minus1;
+ unsigned char log2_min_pcm_luma_coding_block_size_minus3;
+ unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
+ unsigned char pcm_loop_filter_disabled_flag;
+ unsigned char num_short_term_ref_pic_sets;
+ struct bspp_hevc_shortterm_refpicset rps_list[HEVC_MAX_NUM_ST_REF_PIC_SETS];
+ unsigned char long_term_ref_pics_present_flag;
+ unsigned char num_long_term_ref_pics_sps;
+ unsigned short lt_ref_pic_poc_lsb_sps[HEVC_MAX_NUM_LT_REF_PICS];
+ unsigned char used_by_curr_pic_lt_sps_flag[HEVC_MAX_NUM_LT_REF_PICS];
+ unsigned char sps_temporal_mvp_enabled_flag;
+ unsigned char strong_intra_smoothing_enabled_flag;
+ unsigned char vui_parameters_present_flag;
+ struct bspp_hevc_vui_params vui_params;
+ unsigned char sps_extension_present_flag;
+ unsigned char sps_range_extensions_flag;
+ struct bspp_hevc_sps_range_exts range_exts;
+ unsigned char sps_extension_7bits;
+ unsigned char sps_extension_data_flag;
+ /* derived elements */
+ unsigned char sub_width_c;
+ unsigned char sub_height_c;
+ unsigned char ctb_log2size_y;
+ unsigned char ctb_size_y;
+ unsigned int pic_width_in_ctbs_y;
+ unsigned int pic_height_in_ctbs_y;
+ unsigned int pic_size_in_ctbs_y;
+ int max_pic_order_cnt_lsb;
+ unsigned int sps_max_latency_pictures[HEVC_MAX_NUM_SUBLAYERS];
+ /* raw vui data as extracted from bitstream. */
+ struct bspp_raw_bitstream_data *vui_raw_data;
+};
+
+/**
+ * struct bspp_hevc_sequ_hdr_info - HEVC sequence header information
+ *                                  (VPS, SPS, VUI); contains everything
+ *                                  parsed from the video/sequence headers.
+ * @vps: parsed HEVC video parameter set
+ * @sps: parsed HEVC sequence parameter set
+ */
+struct bspp_hevc_sequ_hdr_info {
+ struct bspp_hevc_vps vps;
+ struct bspp_hevc_sps sps;
+};
+
+/* HEVC pps range extensions */
+struct bspp_hevc_pps_range_exts {
+ unsigned char log2_max_transform_skip_block_size_minus2;
+ unsigned char cross_component_prediction_enabled_flag;
+ unsigned char chroma_qp_offset_list_enabled_flag;
+ unsigned char diff_cu_chroma_qp_offset_depth;
+ unsigned char chroma_qp_offset_list_len_minus1;
+ unsigned char cb_qp_offset_list[HEVC_MAX_CHROMA_QP];
+ unsigned char cr_qp_offset_list[HEVC_MAX_CHROMA_QP];
+ unsigned char log2_sao_offset_scale_luma;
+ unsigned char log2_sao_offset_scale_chroma;
+};
+
+/* HEVC picture parameter set */
+struct bspp_hevc_pps {
+ unsigned char is_available;
+ unsigned char is_param_copied;
+ unsigned char pps_pic_parameter_set_id;
+ unsigned char pps_seq_parameter_set_id;
+ unsigned char dependent_slice_segments_enabled_flag;
+ unsigned char output_flag_present_flag;
+ unsigned char num_extra_slice_header_bits;
+ unsigned char sign_data_hiding_enabled_flag;
+ unsigned char cabac_init_present_flag;
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ unsigned char init_qp_minus26;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char transform_skip_enabled_flag;
+ unsigned char cu_qp_delta_enabled_flag;
+ unsigned char diff_cu_qp_delta_depth;
+ int pps_cb_qp_offset;
+ int pps_cr_qp_offset;
+ unsigned char pps_slice_chroma_qp_offsets_present_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char weighted_bipred_flag;
+ unsigned char transquant_bypass_enabled_flag;
+ unsigned char tiles_enabled_flag;
+ unsigned char entropy_coding_sync_enabled_flag;
+ unsigned char num_tile_columns_minus1;
+ unsigned char num_tile_rows_minus1;
+ unsigned char uniform_spacing_flag;
+ unsigned char column_width_minus1[HEVC_MAX_TILE_COLS];
+ unsigned char row_height_minus1[HEVC_MAX_TILE_ROWS];
+ unsigned char loop_filter_across_tiles_enabled_flag;
+ unsigned char pps_loop_filter_across_slices_enabled_flag;
+ unsigned char deblocking_filter_control_present_flag;
+ unsigned char deblocking_filter_override_enabled_flag;
+ unsigned char pps_deblocking_filter_disabled_flag;
+ unsigned char pps_beta_offset_div2;
+ unsigned char pps_tc_offset_div2;
+ unsigned char pps_scaling_list_data_present_flag;
+ struct bspp_hevc_scalinglist_data scaling_list;
+ unsigned char lists_modification_present_flag;
+ unsigned char log2_parallel_merge_level_minus2;
+ unsigned char slice_segment_header_extension_present_flag;
+ unsigned char pps_extension_present_flag;
+ unsigned char pps_range_extensions_flag;
+ struct bspp_hevc_pps_range_exts range_exts;
+ unsigned char pps_extension_7bits;
+ unsigned char pps_extension_data_flag;
+ /* derived elements */
+ unsigned short col_bd[HEVC_MAX_TILE_COLS + 1];
+ unsigned short row_bd[HEVC_MAX_TILE_ROWS + 1];
+ /* PVDEC derived elements */
+ unsigned int max_tile_height_in_ctbs_y;
+};
+
+/* HEVC slice segment header */
+struct bspp_hevc_slice_segment_header {
+ unsigned char bslice_is_idr;
+ unsigned char first_slice_segment_in_pic_flag;
+ unsigned char no_output_of_prior_pics_flag;
+ unsigned char slice_pic_parameter_set_id;
+ unsigned char dependent_slice_segment_flag;
+ unsigned int slice_segment_address;
+};
+
+/*
+ * @Function bspp_hevc_set_parser_config
+ * Sets the parser configuration.
+ */
+int bspp_hevc_set_parser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *pparser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data);
+
+void bspp_hevc_determine_unittype(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unittype);
+
+#endif /* __HEVCSECUREPARSER_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/hevcfw_data.h b/drivers/media/platform/vxe-vxd/decoder/hevcfw_data.h
new file mode 100644
index 000000000000..cdfe8d067d90
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hevcfw_data.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the HEVC parser firmware module.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+/* Include shared header version here to replace the standard version. */
+#include "hevcfw_data_shared.h"
+
+#ifndef _HEVCFW_DATA_H_
+#define _HEVCFW_DATA_H_
+
+#include "vdecfw_shared.h"
+
+#define HEVC_MAX_SPS_COUNT 16
+#define HEVC_MAX_PPS_COUNT 64
+
+#define HEVCFW_MAX_NUM_PROFILE_IDC 32
+
+#define HEVCFW_MAX_NUM_REF_PICS 16
+#define HEVCFW_MAX_NUM_ST_REF_PIC_SETS 65
+#define HEVCFW_MAX_NUM_LT_REF_PICS 32
+#define HEVCFW_MAX_NUM_SUBLAYERS 7
+#define HEVCFW_SCALING_LISTS_BUFSIZE 256
+#define HEVCFW_MAX_TILE_COLS 20
+#define HEVCFW_MAX_TILE_ROWS 22
+
+#define HEVCFW_MAX_CHROMA_QP 6
+
+#define HEVCFW_MAX_DPB_SIZE HEVCFW_MAX_NUM_REF_PICS
+#define HEVCFW_REF_PIC_LIST0 0
+#define HEVCFW_REF_PIC_LIST1 1
+#define HEVCFW_NUM_REF_PIC_LISTS 2
+#define HEVCFW_NUM_DPB_DIFF_REGS 4
+
+/* non-critical errors */
+#define HEVC_ERR_INVALID_VALUE (20)
+#define HEVC_ERR_CORRECTION_VALIDVALUE (21)
+
+#define HEVC_IS_ERR_CRITICAL(err) \
+ ((err) > HEVC_ERR_CORRECTION_VALIDVALUE ? 1 : 0)
+
+/* critical errors */
+#define HEVC_ERR_INV_VIDEO_DIMENSION (22)
+#define HEVC_ERR_NO_SEQUENCE_HDR (23)
+#define HEVC_ERR_SPS_EXT_UNSUPP (24 | VDECFW_UNSUPPORTED_CODE_BASE)
+#define HEVC_ERR_PPS_EXT_UNSUPP (25 | VDECFW_UNSUPPORTED_CODE_BASE)
+
+#define HEVC_ERR_FAILED_TO_STORE_VPS (100)
+#define HEVC_ERR_FAILED_TO_STORE_SPS (101)
+#define HEVC_ERR_FAILED_TO_STORE_PPS (102)
+
+#define HEVC_ERR_FAILED_TO_FETCH_VPS (103)
+#define HEVC_ERR_FAILED_TO_FETCH_SPS (104)
+#define HEVC_ERR_FAILED_TO_FETCH_PPS (105)
+/* HEVC Scaling Lists (all values are maximum possible ones) */
+#define HEVCFW_SCALING_LIST_NUM_SIZES 4
+#define HEVCFW_SCALING_LIST_NUM_MATRICES 6
+#define HEVCFW_SCALING_LIST_MATRIX_SIZE 64
+
+struct hevcfw_scaling_listdata {
+ unsigned char dc_coeffs
+ [HEVCFW_SCALING_LIST_NUM_SIZES - 2]
+ [HEVCFW_SCALING_LIST_NUM_MATRICES];
+ unsigned char lists
+ [HEVCFW_SCALING_LIST_NUM_SIZES]
+ [HEVCFW_SCALING_LIST_NUM_MATRICES]
+ [HEVCFW_SCALING_LIST_MATRIX_SIZE];
+};
+
+/* HEVC Video Profile_Tier_Level */
+struct hevcfw_profile_tier_level {
+ unsigned char general_profile_space;
+ unsigned char general_tier_flag;
+ unsigned char general_profile_idc;
+ unsigned char general_profile_compatibility_flag[HEVCFW_MAX_NUM_PROFILE_IDC];
+ unsigned char general_progressive_source_flag;
+ unsigned char general_interlaced_source_flag;
+ unsigned char general_non_packed_constraint_flag;
+ unsigned char general_frame_only_constraint_flag;
+ unsigned char general_max_12bit_constraint_flag;
+ unsigned char general_max_10bit_constraint_flag;
+ unsigned char general_max_8bit_constraint_flag;
+ unsigned char general_max_422chroma_constraint_flag;
+ unsigned char general_max_420chroma_constraint_flag;
+ unsigned char general_max_monochrome_constraint_flag;
+ unsigned char general_intra_constraint_flag;
+ unsigned char general_one_picture_only_constraint_flag;
+ unsigned char general_lower_bit_rate_constraint_flag;
+ unsigned char general_level_idc;
+ unsigned char sub_layer_profile_present_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_level_present_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_space[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_tier_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_idc[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_profile_compatibility_flag[HEVCFW_MAX_NUM_SUBLAYERS -
+ 1][HEVCFW_MAX_NUM_PROFILE_IDC];
+ unsigned char sub_layer_progressive_source_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_interlaced_source_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_non_packed_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_frame_only_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_12bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_10bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_8bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_422chroma_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_420chroma_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_max_monochrome_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_intra_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_one_picture_only_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_lower_bit_rate_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+ unsigned char sub_layer_level_idc[HEVCFW_MAX_NUM_SUBLAYERS - 1];
+};
+
+struct hevcfw_video_ps {
+ int is_different;
+ int is_sent;
+ int is_available;
+ unsigned char vps_video_parameter_set_id;
+ unsigned char vps_reserved_three_2bits;
+ unsigned char vps_max_layers_minus1;
+ unsigned char vps_max_sub_layers_minus1;
+ unsigned char vps_temporal_id_nesting_flag;
+ unsigned short vps_reserved_0xffff_16bits;
+ struct hevcfw_profile_tier_level profile_tier_level;
+};
+
+/* HEVC Video Usability Information */
+struct hevcfw_vui_params {
+ unsigned char aspect_ratio_info_present_flag;
+ unsigned char aspect_ratio_idc;
+ unsigned short sar_width;
+ unsigned short sar_height;
+ unsigned char overscan_info_present_flag;
+ unsigned char overscan_appropriate_flag;
+ unsigned char video_signal_type_present_flag;
+ unsigned char video_format;
+ unsigned char video_full_range_flag;
+ unsigned char colour_description_present_flag;
+ unsigned char colour_primaries;
+ unsigned char transfer_characteristics;
+ unsigned char matrix_coeffs;
+ unsigned char chroma_loc_info_present_flag;
+ unsigned char chroma_sample_loc_type_top_field;
+ unsigned char chroma_sample_loc_type_bottom_field;
+ unsigned char neutral_chroma_indication_flag;
+ unsigned char field_seq_flag;
+ unsigned char frame_field_info_present_flag;
+ unsigned char default_display_window_flag;
+ unsigned short def_disp_win_left_offset;
+ unsigned short def_disp_win_right_offset;
+ unsigned short def_disp_win_top_offset;
+ unsigned short def_disp_win_bottom_offset;
+ unsigned char vui_timing_info_present_flag;
+ unsigned int vui_num_units_in_tick;
+ unsigned int vui_time_scale;
+};
+
+/* HEVC Short Term Reference Picture Set */
+struct hevcfw_short_term_ref_picset {
+ unsigned char num_negative_pics;
+ unsigned char num_positive_pics;
+ short delta_poc_s0[HEVCFW_MAX_NUM_REF_PICS];
+ short delta_poc_s1[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char used_bycurr_pic_s0[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char used_bycurr_pic_s1[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char num_delta_pocs;
+};
+
+/*
+ * This describes the SPS header data required by the HEVC firmware that should
+ * be supplied by the Host.
+ */
+struct hevcfw_sequence_ps {
+ /* syntax elements from SPS */
+ unsigned short pic_width_in_luma_samples;
+ unsigned short pic_height_in_luma_samples;
+ unsigned char num_short_term_ref_pic_sets;
+ unsigned char num_long_term_ref_pics_sps;
+ unsigned short lt_ref_pic_poc_lsb_sps[HEVCFW_MAX_NUM_LT_REF_PICS];
+ unsigned char used_by_curr_pic_lt_sps_flag[HEVCFW_MAX_NUM_LT_REF_PICS];
+ struct hevcfw_short_term_ref_picset st_rps_list[HEVCFW_MAX_NUM_ST_REF_PIC_SETS];
+ unsigned char sps_max_sub_layers_minus1;
+ unsigned char sps_max_dec_pic_buffering_minus1[HEVCFW_MAX_NUM_SUBLAYERS];
+ unsigned char sps_max_num_reorder_pics[HEVCFW_MAX_NUM_SUBLAYERS];
+ unsigned int sps_max_latency_increase_plus1[HEVCFW_MAX_NUM_SUBLAYERS];
+ unsigned char max_transform_hierarchy_depth_inter;
+ unsigned char max_transform_hierarchy_depth_intra;
+ unsigned char log2_diff_max_min_transform_block_size;
+ unsigned char log2_min_transform_block_size_minus2;
+ unsigned char log2_diff_max_min_luma_coding_block_size;
+ unsigned char log2_min_luma_coding_block_size_minus3;
+ unsigned char chroma_format_idc;
+ unsigned char separate_colour_plane_flag;
+ unsigned char num_extra_slice_header_bits;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char long_term_ref_pics_present_flag;
+ unsigned char sample_adaptive_offset_enabled_flag;
+ unsigned char sps_temporal_mvp_enabled_flag;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char pcm_sample_bit_depth_luma_minus1;
+ unsigned char pcm_sample_bit_depth_chroma_minus1;
+ unsigned char log2_min_pcm_luma_coding_block_size_minus3;
+ unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
+ unsigned char pcm_loop_filter_disabled_flag;
+ unsigned char amp_enabled_flag;
+ unsigned char pcm_enabled_flag;
+ unsigned char strong_intra_smoothing_enabled_flag;
+ unsigned char scaling_list_enabled_flag;
+ unsigned char transform_skip_rotation_enabled_flag;
+ unsigned char transform_skip_context_enabled_flag;
+ unsigned char implicit_rdpcm_enabled_flag;
+ unsigned char explicit_rdpcm_enabled_flag;
+ unsigned char extended_precision_processing_flag;
+ unsigned char intra_smoothing_disabled_flag;
+ unsigned char high_precision_offsets_enabled_flag;
+ unsigned char persistent_rice_adaptation_enabled_flag;
+ unsigned char cabac_bypass_alignment_enabled_flag;
+ /* derived elements */
+ unsigned int pic_size_in_ctbs_y;
+ unsigned short pic_height_in_ctbs_y;
+ unsigned short pic_width_in_ctbs_y;
+ unsigned char ctb_size_y;
+ unsigned char ctb_log2size_y;
+ int max_pic_order_cnt_lsb;
+ unsigned int sps_max_latency_pictures[HEVCFW_MAX_NUM_SUBLAYERS];
+ unsigned char pps_seq_parameter_set_id;
+ unsigned char sps_video_parameter_set_id;
+ unsigned char sps_temporal_id_nesting_flag;
+ unsigned char sps_seq_parameter_set_id;
+ /* local */
+ unsigned char conformance_window_flag;
+ unsigned short conf_win_left_offset;
+ unsigned short conf_win_right_offset;
+ unsigned short conf_win_top_offset;
+ unsigned short conf_win_bottom_offset;
+ unsigned char sps_sub_layer_ordering_info_present_flag;
+ unsigned char sps_scaling_list_data_present_flag;
+ unsigned char vui_parameters_present_flag;
+ unsigned char sps_extension_present_flag;
+ struct hevcfw_vui_params vui_params;
+ /* derived elements */
+ unsigned char sub_width_c;
+ unsigned char sub_height_c;
+ struct hevcfw_profile_tier_level profile_tier_level;
+ struct hevcfw_scaling_listdata scaling_listdata;
+};
+
+/*
+ * This describes the HEVC parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the HEVC firmware
+ * and should be supplied by the Host.
+ */
+struct hevcfw_headerdata {
+ /* Decode buffers and output control for the current picture */
+ /* Primary decode buffer base addresses */
+ struct vdecfw_image_buffer primary;
+ /* buffer base addresses for alternate output */
+ struct vdecfw_image_buffer alternate;
+ /* address of buffer for temporal mv params */
+ unsigned int temporal_outaddr;
+};
+
+/*
+ * This describes the PPS header data required by the HEVC firmware that should
+ * be supplied by the Host.
+ */
+struct hevcfw_picture_ps {
+ /* syntax elements from the PPS */
+ unsigned char pps_pic_parameter_set_id;
+ unsigned char num_tile_columns_minus1;
+ unsigned char num_tile_rows_minus1;
+ unsigned char diff_cu_qp_delta_depth;
+ unsigned char init_qp_minus26;
+ unsigned char pps_beta_offset_div2;
+ unsigned char pps_tc_offset_div2;
+ unsigned char pps_cb_qp_offset;
+ unsigned char pps_cr_qp_offset;
+ unsigned char log2_parallel_merge_level_minus2;
+ unsigned char dependent_slice_segments_enabled_flag;
+ unsigned char output_flag_present_flag;
+ unsigned char num_extra_slice_header_bits;
+ unsigned char lists_modification_present_flag;
+ unsigned char cabac_init_present_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char weighted_bipred_flag;
+ unsigned char pps_slice_chroma_qp_offsets_present_flag;
+ unsigned char deblocking_filter_override_enabled_flag;
+ unsigned char tiles_enabled_flag;
+ unsigned char entropy_coding_sync_enabled_flag;
+ unsigned char slice_segment_header_extension_present_flag;
+ unsigned char transquant_bypass_enabled_flag;
+ unsigned char cu_qp_delta_enabled_flag;
+ unsigned char transform_skip_enabled_flag;
+ unsigned char sign_data_hiding_enabled_flag;
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char pps_deblocking_filter_disabled_flag;
+ unsigned char pps_loop_filter_across_slices_enabled_flag;
+ unsigned char loop_filter_across_tiles_enabled_flag;
+ /* rewritten from SPS, maybe at some point we could get rid of this */
+ unsigned char scaling_list_enabled_flag;
+ unsigned char log2_max_transform_skip_block_size_minus2;
+ unsigned char cross_component_prediction_enabled_flag;
+ unsigned char chroma_qp_offset_list_enabled_flag;
+ unsigned char diff_cu_chroma_qp_offset_depth;
+ /*
+ * PVDEC derived elements. HEVCFW_SCALING_LISTS_BUFSIZE is
+ * multiplied by 2 to ensure that there will be space for address of
+ * each element. These addresses are completed in lower layer.
+ */
+ unsigned int scaling_lists[HEVCFW_SCALING_LISTS_BUFSIZE * 2];
+ /* derived elements */
+ unsigned short col_bd[HEVCFW_MAX_TILE_COLS + 1];
+ unsigned short row_bd[HEVCFW_MAX_TILE_ROWS + 1];
+
+ unsigned char chroma_qp_offset_list_len_minus1;
+ unsigned char cb_qp_offset_list[HEVCFW_MAX_CHROMA_QP];
+ unsigned char cr_qp_offset_list[HEVCFW_MAX_CHROMA_QP];
+
+ unsigned char uniform_spacing_flag;
+ unsigned char column_width_minus1[HEVCFW_MAX_TILE_COLS];
+ unsigned char row_height_minus1[HEVCFW_MAX_TILE_ROWS];
+
+ unsigned char pps_seq_parameter_set_id;
+ unsigned char deblocking_filter_control_present_flag;
+ unsigned char pps_scaling_list_data_present_flag;
+ unsigned char pps_extension_present_flag;
+
+ struct hevcfw_scaling_listdata scaling_list;
+};
+
+/* This enum determines reference picture status */
+enum hevcfw_reference_type {
+ HEVCFW_REF_UNUSED = 0,
+ HEVCFW_REF_SHORTTERM,
+ HEVCFW_REF_LONGTERM,
+ HEVCFW_REF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* This describes an HEVC picture. It is part of the Context data */
+struct hevcfw_picture {
+ /* Primary (reconstructed) picture buffers */
+ struct vdecfw_image_buffer primary;
+ /* Secondary (alternative) picture buffers */
+ struct vdecfw_image_buffer alternate;
+ /* Unique ID for this picture */
+ unsigned int transaction_id;
+	/* NAL unit type of the first slice segment header; determines the picture type */
+ unsigned char nalunit_type;
+ /* Picture Order Count (frame number) */
+ int pic_order_cnt_val;
+ /* Slice Picture Order Count Lsb */
+ int slice_pic_ordercnt_lsb;
+ unsigned char pic_output_flag;
+ /* information about long-term pictures */
+ unsigned short dpb_longterm_flags;
+ unsigned int dpb_pic_order_diff[HEVCFW_NUM_DPB_DIFF_REGS];
+ /* address of buffer for temporal mv params */
+ unsigned int temporal_outaddr;
+	/* worst-case DPB diff for the current pic */
+ unsigned int dpb_diff;
+};
+
+/*
+ * This is a wrapper for a picture to hold it in a Decoded Picture Buffer
+ * for further reference
+ */
+struct hevcfw_picture_in_dpb {
+ /* DPB data about the picture */
+ enum hevcfw_reference_type ref_type;
+ unsigned char valid;
+ unsigned char needed_for_output;
+ unsigned char pic_latency_count;
+ /* Picture itself */
+ struct hevcfw_picture picture;
+};
+
+/*
+ * This describes the HEVC Decoded Picture Buffer (DPB).
+ * It is part of the Context data.
+ */
+#define HEVCFW_DPB_IDX_INVALID -1
+
+struct hevcfw_decoded_picture_buffer {
+ /* reference pictures */
+ struct hevcfw_picture_in_dpb pictures[HEVCFW_MAX_DPB_SIZE];
+ /* organizational data of DPB */
+ unsigned int fullness;
+};
+
+/*
+ * This describes the HEVC Reference Picture Set (RPS).
+ * It is part of the Context data.
+ */
+struct hevcfw_reference_picture_set {
+ /* sizes of poc lists */
+ unsigned char num_pocst_curr_before;
+ unsigned char num_pocst_curr_after;
+ unsigned char num_pocst_foll;
+ unsigned char num_poclt_curr;
+ unsigned char num_poclt_foll;
+ /* poc lists */
+ int pocst_curr_before[HEVCFW_MAX_NUM_REF_PICS];
+ int pocst_curr_after[HEVCFW_MAX_NUM_REF_PICS];
+ int pocst_foll[HEVCFW_MAX_NUM_REF_PICS];
+ int poclt_curr[HEVCFW_MAX_NUM_REF_PICS];
+ int poclt_foll[HEVCFW_MAX_NUM_REF_PICS];
+ /* derived elements */
+ unsigned char curr_delta_pocmsb_presentflag[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char foll_delta_pocmsb_presentflag[HEVCFW_MAX_NUM_REF_PICS];
+ /* reference picture sets: indices in DPB */
+ unsigned char ref_picsetlt_curr[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char ref_picsetlt_foll[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char ref_picsetst_curr_before[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char ref_picsetst_curr_after[HEVCFW_MAX_NUM_REF_PICS];
+ unsigned char ref_picsetst_foll[HEVCFW_MAX_NUM_REF_PICS];
+};
+
+/*
+ * This describes the HEVC parser component "Context data", shown in the
+ * Firmware Memory Layout diagram. This data is the state preserved across
+ * pictures. It is loaded and saved by the Firmware, but requires the host to
+ * provide buffer(s) for this.
+ */
+struct hevcfw_ctx_data {
+ struct hevcfw_sequence_ps sps;
+ struct hevcfw_picture_ps pps;
+ /*
+ * data from last picture with TemporalId = 0 that is not a RASL, RADL
+ * or sub-layer non-reference picture
+ */
+ int prev_pic_order_cnt_lsb;
+ int prev_pic_order_cnt_msb;
+ unsigned char last_irapnorasl_output_flag;
+ /*
+ * The Decoded Picture Buffer holds information about decoded pictures
+ * needed for further INTER decoding
+ */
+ struct hevcfw_decoded_picture_buffer dpb;
+ /* Reference Picture Set is determined on a per-picture basis */
+ struct hevcfw_reference_picture_set rps;
+ /*
+ * Reference Picture List is determined using data from the Reference
+ * Picture Set and from the Slice (Segment) Header on a per-slice basis
+ */
+ unsigned char ref_pic_list[HEVCFW_NUM_REF_PIC_LISTS][HEVCFW_MAX_NUM_REF_PICS];
+ /*
+ * Reference Picture List used to send the reflist to the host; the only
+ * difference is that missing references are marked
+ * with HEVCFW_DPB_IDX_INVALID
+ */
+ unsigned char ref_pic_listhlp[HEVCFW_NUM_REF_PIC_LISTS][HEVCFW_MAX_NUM_REF_PICS];
+
+ unsigned int pic_count;
+ unsigned int slice_segment_count;
+ /* An EOS NAL was detected and no new picture has started yet */
+ int eos_detected;
+ /* This is the first picture after an EOS NAL */
+ int first_after_eos;
+};
+
+#endif /* _HEVCFW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/hevcfw_data_shared.h b/drivers/media/platform/vxe-vxd/decoder/hevcfw_data_shared.h
new file mode 100644
index 000000000000..d57008fd96f8
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hevcfw_data_shared.h
@@ -0,0 +1,767 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the HEVC parser firmware module
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifdef USE_SHARING
+#endif
+
+#ifndef _HEVCFW_DATA_H_
+#define _HEVCFW_DATA_H_
+
+#include "vdecfw_share.h"
+#include "vdecfw_shared.h"
+
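+/*
+ * This is the shared variant of the HEVC firmware structures: every
+ * field is wrapped in IMG_ALIGN_FIELD() (from vdecfw_share.h), which is
+ * presumed to force a common alignment so that the host and firmware
+ * toolchains agree on the structure layouts.
+ */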
+#define HEVC_MAX_VPS_COUNT 16
+#define HEVC_MAX_SPS_COUNT 16
+#define HEVC_MAX_PPS_COUNT 64
+
+#define HEVCFW_MAX_NUM_PROFILE_IDC 32
+#define HEVCFW_MAX_VPS_OP_SETS_PLUS1 1024
+#define HEVCFW_MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1 1
+
+#define HEVCFW_MAX_NUM_REF_PICS 16
+#define HEVCFW_MAX_NUM_ST_REF_PIC_SETS 65
+#define HEVCFW_MAX_NUM_LT_REF_PICS 32
+#define HEVCFW_MAX_NUM_SUBLAYERS 7
+#define HEVCFW_SCALING_LISTS_BUFSIZE 256
+#define HEVCFW_MAX_TILE_COLS 20
+#define HEVCFW_MAX_TILE_ROWS 22
+
+#define HEVCFW_MAX_CHROMA_QP 6
+
+#define HEVCFW_MAX_DPB_SIZE HEVCFW_MAX_NUM_REF_PICS
+#define HEVCFW_REF_PIC_LIST0 0
+#define HEVCFW_REF_PIC_LIST1 1
+#define HEVCFW_NUM_REF_PIC_LISTS 2
+#define HEVCFW_NUM_DPB_DIFF_REGS 4
+
+/* non-critical errors */
+#define HEVC_ERR_INVALID_VALUE (20)
+#define HEVC_ERR_CORRECTION_VALIDVALUE (21)
+
+#define HEVC_IS_ERR_CRITICAL(err) \
+ ((err) > HEVC_ERR_CORRECTION_VALIDVALUE ? 1 : 0)
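+/*
+ * For example, HEVC_IS_ERR_CRITICAL(HEVC_ERR_INVALID_VALUE) evaluates
+ * to 0, while HEVC_IS_ERR_CRITICAL(HEVC_ERR_NO_SEQUENCE_HDR) below
+ * evaluates to 1.
+ */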
+
+/* critical errors */
+#define HEVC_ERR_INV_VIDEO_DIMENSION (22)
+#define HEVC_ERR_NO_SEQUENCE_HDR (23)
+#define HEVC_ERR_SPS_EXT_UNSUPP (24 | VDECFW_UNSUPPORTED_CODE_BASE)
+#define HEVC_ERR_PPS_EXT_UNSUPP (25 | VDECFW_UNSUPPORTED_CODE_BASE)
+
+#define HEVC_ERR_FAILED_TO_STORE_VPS (100)
+#define HEVC_ERR_FAILED_TO_STORE_SPS (101)
+#define HEVC_ERR_FAILED_TO_STORE_PPS (102)
+
+#define HEVC_ERR_FAILED_TO_FETCH_VPS (103)
+#define HEVC_ERR_FAILED_TO_FETCH_SPS (104)
+#define HEVC_ERR_FAILED_TO_FETCH_PPS (105)
+/* HEVC Scaling Lists (all values are maximum possible ones) */
+#define HEVCFW_SCALING_LIST_NUM_SIZES 4
+#define HEVCFW_SCALING_LIST_NUM_MATRICES 6
+#define HEVCFW_SCALING_LIST_MATRIX_SIZE 64
+
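+/*
+ * In the HEVC spec, DC coefficients are only signalled for the two
+ * largest list sizes (16x16 and 32x32), hence the first dimension of
+ * dc_coeffs is HEVCFW_SCALING_LIST_NUM_SIZES - 2.
+ */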
+struct hevcfw_scaling_listdata {
+ unsigned char dc_coeffs
+ [HEVCFW_SCALING_LIST_NUM_SIZES - 2]
+ [HEVCFW_SCALING_LIST_NUM_MATRICES];
+
+ unsigned char lists
+ [HEVCFW_SCALING_LIST_NUM_SIZES]
+ [HEVCFW_SCALING_LIST_NUM_MATRICES]
+ [HEVCFW_SCALING_LIST_MATRIX_SIZE];
+};
+
+/* HEVC Video Profile_Tier_Level */
+struct hevcfw_profile_tier_level {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_profile_space);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_tier_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_profile_idc);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ general_profile_compatibility_flag
+ [HEVCFW_MAX_NUM_PROFILE_IDC]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_progressive_source_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_interlaced_source_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_non_packed_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_frame_only_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_12bit_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_10bit_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_8bit_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_422chroma_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_420chroma_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_max_monochrome_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_intra_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ general_one_picture_only_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_lower_bit_rate_constraint_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, general_level_idc);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_profile_present_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_level_present_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_profile_space[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_tier_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_profile_idc[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_profile_compatibility_flag
+ [HEVCFW_MAX_NUM_SUBLAYERS - 1][HEVCFW_MAX_NUM_PROFILE_IDC]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_progressive_source_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_interlaced_source_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_non_packed_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_frame_only_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_12bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_10bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_8bit_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_422chroma_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_420chroma_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_max_monochrome_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_intra_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_one_picture_only_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_lower_bit_rate_constraint_flag[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sub_layer_level_idc[HEVCFW_MAX_NUM_SUBLAYERS - 1]);
+};
+
+struct hevcfw_video_ps {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, is_different);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, is_sent);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, is_available);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vps_video_parameter_set_id);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vps_reserved_three_2bits);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vps_max_layers_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vps_max_sub_layers_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vps_temporal_id_nesting_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, vps_reserved_0xffff_16bits);
+ struct hevcfw_profile_tier_level profile_tier_level;
+};
+
+/* HEVC Video Usability Information */
+struct hevcfw_vui_params {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, aspect_ratio_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, aspect_ratio_idc);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, sar_width);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, sar_height);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, overscan_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, overscan_appropriate_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, video_signal_type_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, video_format);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, video_full_range_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, colour_description_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, colour_primaries);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transfer_characteristics);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, matrix_coeffs);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_loc_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_sample_loc_type_top_field);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_sample_loc_type_bottom_field);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, neutral_chroma_indication_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, field_seq_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, frame_field_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, default_display_window_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, def_disp_win_left_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, def_disp_win_right_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, def_disp_win_top_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, def_disp_win_bottom_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vui_timing_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, vui_num_units_in_tick);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, vui_time_scale);
+};
+
+/* HEVC Short Term Reference Picture Set */
+struct hevcfw_short_term_ref_picset {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_negative_pics);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_positive_pics);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ short, delta_poc_s0[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ short, delta_poc_s1[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, used_bycurr_pic_s0[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, used_bycurr_pic_s1[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_delta_pocs);
+};
+
+/*
+ * This describes the SPS header data required by the HEVC firmware that should
+ * be supplied by the Host.
+ */
+struct hevcfw_sequence_ps {
+ /* syntax elements from SPS */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, pic_width_in_luma_samples);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, pic_height_in_luma_samples);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_short_term_ref_pic_sets);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_long_term_ref_pics_sps);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short,
+ lt_ref_pic_poc_lsb_sps[HEVCFW_MAX_NUM_LT_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ used_by_curr_pic_lt_sps_flag[HEVCFW_MAX_NUM_LT_REF_PICS]);
+ struct hevcfw_short_term_ref_picset st_rps_list[HEVCFW_MAX_NUM_ST_REF_PIC_SETS];
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_max_sub_layers_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sps_max_dec_pic_buffering_minus1[HEVCFW_MAX_NUM_SUBLAYERS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ sps_max_num_reorder_pics[HEVCFW_MAX_NUM_SUBLAYERS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ sps_max_latency_increase_plus1[HEVCFW_MAX_NUM_SUBLAYERS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, max_transform_hierarchy_depth_inter);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, max_transform_hierarchy_depth_intra);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_diff_max_min_transform_block_size);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_min_transform_block_size_minus2);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ log2_diff_max_min_luma_coding_block_size);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_min_luma_coding_block_size_minus3);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_format_idc);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, separate_colour_plane_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_extra_slice_header_bits);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_max_pic_order_cnt_lsb_minus4);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, long_term_ref_pics_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sample_adaptive_offset_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_temporal_mvp_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bit_depth_luma_minus8);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, bit_depth_chroma_minus8);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pcm_sample_bit_depth_luma_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pcm_sample_bit_depth_chroma_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ log2_min_pcm_luma_coding_block_size_minus3);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ log2_diff_max_min_pcm_luma_coding_block_size);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pcm_loop_filter_disabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, amp_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pcm_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, strong_intra_smoothing_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, scaling_list_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transform_skip_rotation_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transform_skip_context_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, implicit_rdpcm_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, explicit_rdpcm_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, extended_precision_processing_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, intra_smoothing_disabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, high_precision_offsets_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, persistent_rice_adaptation_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cabac_bypass_alignment_enabled_flag);
+ /* derived elements */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_size_in_ctbs_y);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, pic_height_in_ctbs_y);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, pic_width_in_ctbs_y);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, ctb_size_y);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, ctb_log2size_y);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, max_pic_order_cnt_lsb);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ sps_max_latency_pictures[HEVCFW_MAX_NUM_SUBLAYERS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_seq_parameter_set_id);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_video_parameter_set_id);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_temporal_id_nesting_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_seq_parameter_set_id);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, conformance_window_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, conf_win_left_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, conf_win_right_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, conf_win_top_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, conf_win_bottom_offset);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_sub_layer_ordering_info_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_scaling_list_data_present_flag);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, vui_parameters_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sps_extension_present_flag);
+
+ struct hevcfw_vui_params vui_params;
+ /* derived elements */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sub_width_c);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sub_height_c);
+
+ struct hevcfw_profile_tier_level profile_tier_level;
+ struct hevcfw_scaling_listdata scaling_listdata;
+};
+
+/*
+ * This describes the HEVC parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the HEVC firmware
+ * and should be supplied by the Host.
+ */
+struct hevcfw_headerdata {
+ /* Decode buffers and output control for the current picture */
+ /* Primary decode buffer base addresses */
+ struct vdecfw_image_buffer primary;
+ /* buffer base addresses for alternate output */
+ struct vdecfw_image_buffer alternate;
+ /* address of buffer for temporal mv params */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, temporal_outaddr);
+};
+
+/*
+ * This describes the PPS header data required by the HEVC firmware that should
+ * be supplied by the Host.
+ */
+struct hevcfw_picture_ps {
+ /* syntax elements from the PPS */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_pic_parameter_set_id);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_tile_columns_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_tile_rows_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, diff_cu_qp_delta_depth);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, init_qp_minus26);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_beta_offset_div2);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_tc_offset_div2);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_cb_qp_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_cr_qp_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, log2_parallel_merge_level_minus2);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, dependent_slice_segments_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, output_flag_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_extra_slice_header_bits);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, lists_modification_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cabac_init_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, weighted_pred_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, weighted_bipred_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ pps_slice_chroma_qp_offsets_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ deblocking_filter_override_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, tiles_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, entropy_coding_sync_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ slice_segment_header_extension_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transquant_bypass_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cu_qp_delta_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, transform_skip_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, sign_data_hiding_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_ref_idx_l0_default_active_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_ref_idx_l1_default_active_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, constrained_intra_pred_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_deblocking_filter_disabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ pps_loop_filter_across_slices_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, loop_filter_across_tiles_enabled_flag);
+
+ /* rewritten from SPS, maybe at some point we could get rid of this */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, scaling_list_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ log2_max_transform_skip_block_size_minus2);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ cross_component_prediction_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_qp_offset_list_enabled_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, diff_cu_chroma_qp_offset_depth);
+ /*
+ * PVDEC derived elements. HEVCFW_SCALING_LISTS_BUFSIZE is
+ * multiplied by 2 to ensure that there is space for the address of
+ * each element. These addresses are filled in by the lower layer.
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ scaling_lists[HEVCFW_SCALING_LISTS_BUFSIZE * 2]);
+ /* derived elements */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, col_bd[HEVCFW_MAX_TILE_COLS + 1]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, row_bd[HEVCFW_MAX_TILE_ROWS + 1]);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, chroma_qp_offset_list_len_minus1);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cb_qp_offset_list[HEVCFW_MAX_CHROMA_QP]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, cr_qp_offset_list[HEVCFW_MAX_CHROMA_QP]);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, uniform_spacing_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ column_width_minus1[HEVCFW_MAX_TILE_COLS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ row_height_minus1[HEVCFW_MAX_TILE_ROWS]);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_seq_parameter_set_id);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, deblocking_filter_control_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_scaling_list_data_present_flag);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pps_extension_present_flag);
+
+ struct hevcfw_scaling_listdata scaling_list;
+};
+
+/* This enum determines reference picture status */
+enum hevcfw_reference_type {
+ HEVCFW_REF_UNUSED = 0,
+ HEVCFW_REF_SHORTTERM,
+ HEVCFW_REF_LONGTERM,
+ HEVCFW_REF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* This describes an HEVC picture. It is part of the Context data */
+struct hevcfw_picture {
+ /* Primary (reconstructed) picture buffers */
+ struct vdecfw_image_buffer primary;
+ /* Secondary (alternative) picture buffers */
+ struct vdecfw_image_buffer alternate;
+ /* Unique ID for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, transaction_id);
+ /*
+ * NAL unit type (nut) of the first slice segment header (ssh) of the
+ * picture; determines the picture type
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, nalunit_type);
+ /* Picture Order Count (frame number) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pic_order_cnt_val);
+ /* Slice Picture Order Count Lsb */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, slice_pic_ordercnt_lsb);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pic_output_flag);
+ /* information about long-term pictures */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, dpb_longterm_flags);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ dpb_pic_order_diff[HEVCFW_NUM_DPB_DIFF_REGS]);
+ /* address of buffer for temporal mv params */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, temporal_outaddr);
+ /* worst-case DPB diff for the current picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, dpb_diff);
+};
+
+/*
+ * This is a wrapper for a picture to hold it in a Decoded Picture Buffer
+ * for further reference
+ */
+struct hevcfw_picture_in_dpb {
+ /* DPB data about the picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum hevcfw_reference_type, ref_type);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, valid);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, needed_for_output);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, pic_latency_count);
+ /* Picture itself */
+ struct hevcfw_picture picture;
+};
+
+/*
+ * This describes the HEVC Decoded Picture Buffer (DPB).
+ * It is part of the Context data.
+ */
+#define HEVCFW_DPB_IDX_INVALID -1
+
+struct hevcfw_decoded_picture_buffer {
+ /* reference pictures */
+ struct hevcfw_picture_in_dpb pictures[HEVCFW_MAX_DPB_SIZE];
+ /* organizational data of DPB */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, fullness);
+};
+
+/*
+ * This describes the HEVC Reference Picture Set (RPS).
+ * It is part of the Context data.
+ */
+struct hevcfw_reference_picture_set {
+ /* sizes of poc lists */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_pocst_curr_before);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_pocst_curr_after);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_pocst_foll);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_poclt_curr);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, num_poclt_foll);
+ /* poc lists */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pocst_curr_before[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pocst_curr_after[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pocst_foll[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, poclt_curr[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, poclt_foll[HEVCFW_MAX_NUM_REF_PICS]);
+ /* derived elements */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ curr_delta_pocmsb_presentflag[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ foll_delta_pocmsb_presentflag[HEVCFW_MAX_NUM_REF_PICS]);
+ /* reference picture sets: indices in DPB */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, ref_picsetlt_curr[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, ref_picsetlt_foll[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ ref_picsetst_curr_before[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ ref_picsetst_curr_after[HEVCFW_MAX_NUM_REF_PICS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, ref_picsetst_foll[HEVCFW_MAX_NUM_REF_PICS]);
+};
+
+/*
+ * This describes the HEVC parser component "Context data", shown in the
+ * Firmware Memory Layout diagram. This data is the state preserved across
+ * pictures. It is loaded and saved by the Firmware, but requires the host to
+ * provide buffer(s) for this.
+ */
+struct hevcfw_ctx_data {
+ struct hevcfw_sequence_ps sps;
+ struct hevcfw_picture_ps pps;
+ /*
+ * data from last picture with TemporalId = 0 that is not a RASL, RADL
+ * or sub-layer non-reference picture
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, prev_pic_order_cnt_lsb);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, prev_pic_order_cnt_msb);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, last_irapnorasl_output_flag);
+ /*
+ * The Decoded Picture Buffer holds information about decoded pictures
+ * needed for further INTER decoding
+ */
+ struct hevcfw_decoded_picture_buffer dpb;
+ /* Reference Picture Set is determined on a per-picture basis */
+ struct hevcfw_reference_picture_set rps;
+ /*
+ * Reference Picture List is determined using data from the Reference
+ * Picture Set and from the Slice (Segment) Header on a per-slice basis
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char,
+ ref_pic_list[HEVCFW_NUM_REF_PIC_LISTS][HEVCFW_MAX_NUM_REF_PICS]);
+ /*
+ * Reference Picture List used to send the reflist to the host; the only
+ * difference is that missing references are marked
+ * with HEVCFW_DPB_IDX_INVALID
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char,
+ ref_pic_listhlp[HEVCFW_NUM_REF_PIC_LISTS][HEVCFW_MAX_NUM_REF_PICS]);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_count);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, slice_segment_count);
+ /* An EOS NAL was detected and no new picture has started yet */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, eos_detected);
+ /* This is the first picture after an EOS NAL */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, first_after_eos);
+};
+
+#endif /* _HEVCFW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/hw_control.c b/drivers/media/platform/vxe-vxd/decoder/hw_control.c
new file mode 100644
index 000000000000..7a57b763ec54
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hw_control.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD DEC Hardware control implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "decoder.h"
+#include "hw_control.h"
+#include "img_msvdx_vdmc_regs.h"
+#include "img_pvdec_core_regs.h"
+#include "img_pvdec_pixel_regs.h"
+#include "img_pvdec_test_regs.h"
+#include "img_vdec_fw_msg.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "img_msvdx_core_regs.h"
+#include "reg_io2.h"
+#include "vdecdd_defs.h"
+#include "vxd_dec.h"
+#include "vxd_ext.h"
+#include "vxd_int.h"
+#include "vxd_pvdec_priv.h"
+
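+/*
+ * The upper nibble of a firmware message type identifies its group.
+ * vdeckm_process_msg() below uses (msg_type & MSG_GROUP_MASK) to route
+ * each message to the MTX-to-host or host-to-MTX handler.
+ */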
+#define MSG_GROUP_MASK 0xf0
+
+struct hwctrl_ctx {
+ unsigned int is_initialised;
+ unsigned int is_on_seq_replay;
+ unsigned int replay_tid;
+ unsigned int num_pipes;
+ struct vdecdd_dd_devconfig devconfig;
+ void *hndl_vxd;
+ void *dec_core;
+ void *comp_init_userdata;
+ struct vidio_ddbufinfo dev_ptd_bufinfo;
+ struct lst_t pend_pict_list;
+ struct hwctrl_msgstatus host_msg_status;
+ void *hmsg_task_event;
+ void *hmsg_task_kick;
+ void *hmsg_task;
+ unsigned int is_msg_task_active;
+ struct hwctrl_state state;
+ struct hwctrl_state prev_state;
+ unsigned int is_prev_hw_state_set;
+ unsigned int is_fatal_state;
+};
+
+struct vdeckm_context {
+ unsigned int core_num;
+ struct vxd_coreprops props;
+ unsigned short current_msgid;
+ unsigned char reader_active;
+ void *comms_ram_addr;
+ unsigned int state_offset;
+ unsigned int state_size;
+};
+
+/*
+ * Panic reason identifier.
+ */
+enum pvdec_panic_reason {
+ PANIC_REASON_OTHER = 0,
+ PANIC_REASON_WDT,
+ PANIC_REASON_READ_TIMEOUT,
+ PANIC_REASON_CMD_TIMEOUT,
+ PANIC_REASON_MMU_FAULT,
+ PANIC_REASON_MAX,
+ PANIC_REASON_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Panic reason strings. Each entry ends with a ", " separator; the
+ * trailing separator is trimmed when the reason string is assembled in
+ * vdeckm_handle_mtxtohost_msg().
+ * NOTE: Should match the pvdec_panic_reason ids.
+ */
+static unsigned char *apanic_reason[PANIC_REASON_MAX] = {
+ [PANIC_REASON_OTHER] = "Other, ",
+ [PANIC_REASON_WDT] = "Watch Dog Timeout, ",
+ [PANIC_REASON_READ_TIMEOUT] = "Read Timeout, ",
+ [PANIC_REASON_CMD_TIMEOUT] = "Command Timeout, ",
+ [PANIC_REASON_MMU_FAULT] = "MMU Page Fault, "
+};
+
+/*
+ * Maximum length of the panic reason string.
+ */
+#define PANIC_REASON_LEN (255)
+
+static struct vdeckm_context acore_ctx[VXD_MAX_CORES] = {0};
+
+static int vdeckm_getregsoffsets(const void *hndl_vxd,
+ struct decoder_regsoffsets *regs_offsets)
+{
+ struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+
+ if (!core_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ regs_offsets->vdmc_cmd_offset = MSVDX_CMD_OFFSET;
+ regs_offsets->vec_offset = MSVDX_VEC_OFFSET;
+ regs_offsets->entropy_offset = PVDEC_ENTROPY_OFFSET;
+ regs_offsets->vec_be_regs_offset = PVDEC_VEC_BE_OFFSET;
+ regs_offsets->vdec_be_codec_regs_offset = PVDEC_VEC_BE_CODEC_OFFSET;
+
+ return IMG_SUCCESS;
+}
+
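+/*
+ * Wrap a prepared message in the layout vxd_send_msg() consumes: word 0
+ * carries the message flags, word 1 the payload size in bytes, followed
+ * by a copy of the message header itself.
+ */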
+static int vdeckm_send_message(const void *hndl_vxd,
+ struct hwctrl_to_kernel_msg *to_kernelmsg,
+ void *vxd_dec_ctx)
+{
+ struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+ unsigned int count = 0;
+ unsigned int *msg;
+
+ if (!core_ctx || !to_kernelmsg)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ msg = kzalloc(VXD_SIZE_MSG_BUFFER, GFP_KERNEL);
+ if (!msg)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* validate the header pointer before it is dereferenced below */
+ if (!to_kernelmsg->msg_hdr) {
+ kfree(msg);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ msg[count++] = to_kernelmsg->flags;
+ msg[count++] = to_kernelmsg->msg_size;
+
+ memcpy(&msg[count], to_kernelmsg->msg_hdr, to_kernelmsg->msg_size);
+
+ core_ctx->reader_active = 1;
+
+ pr_debug("[HWCTRL] adding message to vxd queue\n");
+ vxd_send_msg(vxd_dec_ctx, (struct vxd_fw_msg *)msg);
+
+ kfree(msg);
+
+ return 0;
+}
+
+static void vdeckm_return_msg(const void *hndl_vxd,
+ struct hwctrl_to_kernel_msg *to_kernelmsg)
+{
+ if (to_kernelmsg)
+ kfree(to_kernelmsg->msg_hdr);
+}
+
+static int vdeckm_handle_mtxtohost_msg(unsigned int *msg, struct lst_t *pend_pict_list,
+ enum vxd_msg_attr *msg_attr,
+ struct dec_decpict **decpict,
+ unsigned char msg_type,
+ unsigned int trans_id)
+{
+ struct dec_decpict *pdec_pict;
+ int ret = 0;
+
+ switch (msg_type) {
+ case FW_DEVA_COMPLETED:
+ {
+ struct dec_pict_attrs *pict_attrs = NULL;
+ unsigned short error_flags = 0;
+ unsigned int no_bewdts = 0;
+ unsigned int mbs_dropped = 0;
+ unsigned int mbs_recovered = 0;
+ unsigned char flag = 0;
+
+ pr_debug("Received message from firmware\n");
+ error_flags = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_ERROR_FLAGS);
+
+ no_bewdts = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_BEWDTS);
+
+ mbs_dropped = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_MBSDROPPED);
+
+ mbs_recovered = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_MBSRECOVERED);
+
+ pdec_pict = lst_first(pend_pict_list);
+ while (pdec_pict) {
+ if (pdec_pict->transaction_id == trans_id)
+ break;
+ pdec_pict = lst_next(pdec_pict);
+ }
+ /*
+ * We must have a picture in the list that matches
+ * the transaction id
+ */
+ if (!pdec_pict)
+ return IMG_ERROR_FATAL;
+
+ if (!(pdec_pict->first_fld_fwmsg) || !(pdec_pict->second_fld_fwmsg))
+ return IMG_ERROR_FATAL;
+
+ flag = pdec_pict->first_fld_fwmsg->pict_attrs.first_fld_rcvd;
+ if (flag) {
+ pict_attrs = &pdec_pict->second_fld_fwmsg->pict_attrs;
+ } else {
+ pict_attrs = &pdec_pict->first_fld_fwmsg->pict_attrs;
+ flag = 1;
+ }
+
+ pict_attrs->fe_err = (unsigned int)error_flags;
+ pict_attrs->no_be_wdt = no_bewdts;
+ pict_attrs->mbs_dropped = mbs_dropped;
+ pict_attrs->mbs_recovered = mbs_recovered;
+ /*
+ * We may have successfully replayed the picture,
+ * so reset the error flags
+ */
+ pict_attrs->pict_attrs.dwrfired = 0;
+ pict_attrs->pict_attrs.mmufault = 0;
+ pict_attrs->pict_attrs.deverror = 0;
+
+ *msg_attr = VXD_MSG_ATTR_DECODED;
+ *decpict = pdec_pict;
+ break;
+ }
+
+ case FW_DEVA_PANIC:
+ {
+ unsigned int panic_info = MEMIO_READ_FIELD(msg, FW_DEVA_PANIC_ERROR_INT);
+ unsigned char panic_reason[PANIC_REASON_LEN] = "Reason(s): ";
+ unsigned char is_panic_reason_identified = 0;
+ /*
+ * Create panic reason string.
+ */
+ if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+ CR_HOST_SYS_WDT)) {
+ strncat(panic_reason, apanic_reason[PANIC_REASON_WDT],
+ PANIC_REASON_LEN - 1);
+ is_panic_reason_identified = 1;
+ }
+ if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+ CR_HOST_READ_TIMEOUT_PROC_IRQ)) {
+ strncat(panic_reason, apanic_reason[PANIC_REASON_READ_TIMEOUT],
+ PANIC_REASON_LEN - 1);
+ is_panic_reason_identified = 1;
+ }
+ if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+ CR_HOST_COMMAND_TIMEOUT_PROC_IRQ)) {
+ strncat(panic_reason, apanic_reason[PANIC_REASON_CMD_TIMEOUT],
+ PANIC_REASON_LEN - 1);
+ is_panic_reason_identified = 1;
+ }
+ if (!is_panic_reason_identified) {
+ strncat(panic_reason, apanic_reason[PANIC_REASON_OTHER],
+ PANIC_REASON_LEN - 1);
+ }
+ panic_reason[strlen(panic_reason) - 2] = 0;
+ if (trans_id != 0)
+ pr_err("TID=0x%08X [FIRMWARE PANIC %s]\n", trans_id, panic_reason);
+ else
+ pr_err("TID=NULL [GENERAL FIRMWARE PANIC %s]\n", panic_reason);
+ ret = IMG_ERROR_FATAL;
+
+ break;
+ }
+
+ case FW_ASSERT:
+ {
+ unsigned int fwfile_namehash = MEMIO_READ_FIELD(msg, FW_ASSERT_FILE_NAME_HASH);
+ unsigned int fwfile_line = MEMIO_READ_FIELD(msg, FW_ASSERT_FILE_LINE);
+
+ pr_err("ASSERT file name hash:0x%08X line number:%d\n",
+ fwfile_namehash, fwfile_line);
+ ret = IMG_ERROR_FATAL;
+ break;
+ }
+
+ case FW_SO:
+ {
+ unsigned int task_name = MEMIO_READ_FIELD(msg, FW_SO_TASK_NAME);
+ unsigned char sztaskname[sizeof(unsigned int) + 1];
+
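+ /*
+ * The task name arrives packed big-endian in a single 32-bit
+ * word; unpack it into a NUL-terminated C string.
+ */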
+ sztaskname[0] = task_name >> 24;
+ sztaskname[1] = (task_name >> 16) & 0xff;
+ sztaskname[2] = (task_name >> 8) & 0xff;
+ sztaskname[3] = task_name & 0xff;
+ if (sztaskname[3] != 0)
+ sztaskname[4] = 0;
+ pr_warn("STACK OVERFLOW for %s task\n", sztaskname);
+ break;
+ }
+
+ case FW_VXD_EMPTY_COMPL:
+ /*
+ * Empty completion message sent as a response to init,
+ * configure, etc. The architecture of the vxd.ko module
+ * requires the firmware to send a reply for every
+ * message submitted by the user space.
+ */
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int vdeckm_handle_hosttomtx_msg(unsigned int *msg, struct lst_t *pend_pict_list,
+ enum vxd_msg_attr *msg_attr,
+ struct dec_decpict **decpict,
+ unsigned char msg_type,
+ unsigned int trans_id,
+ unsigned int msg_flags)
+{
+ struct dec_decpict *pdec_pict;
+
+ pr_debug("Received message from HOST\n");
+
+ switch (msg_type) {
+ case FW_DEVA_PARSE:
+ {
+ struct dec_pict_attrs *pict_attrs = NULL;
+ unsigned char flag = 0;
+
+ pdec_pict = lst_first(pend_pict_list);
+ while (pdec_pict) {
+ if (pdec_pict->transaction_id == trans_id)
+ break;
+
+ pdec_pict = lst_next(pdec_pict);
+ }
+
+ /*
+ * We must have a picture in the list that matches
+ * the transaction id
+ */
+ if (!pdec_pict) {
+ pr_err("Firmware decoded message received\n");
+ pr_err("no pending picture\n");
+ return IMG_ERROR_FATAL;
+ }
+
+ if (!(pdec_pict->first_fld_fwmsg) || !(pdec_pict->second_fld_fwmsg)) {
+ pr_err("invalid pending picture struct\n");
+ return IMG_ERROR_FATAL;
+ }
+
+ flag = pdec_pict->first_fld_fwmsg->pict_attrs.first_fld_rcvd;
+ if (flag) {
+ pict_attrs = &pdec_pict->second_fld_fwmsg->pict_attrs;
+ } else {
+ pict_attrs = &pdec_pict->first_fld_fwmsg->pict_attrs;
+ flag = 1;
+ }
+
+ /*
+ * The below info is fetched from firmware state
+ * afterwards, so just set this to zero for now.
+ */
+ pict_attrs->fe_err = 0;
+ pict_attrs->no_be_wdt = 0;
+ pict_attrs->mbs_dropped = 0;
+ pict_attrs->mbs_recovered = 0;
+
+ vxd_get_pictattrs(msg_flags, &pict_attrs->pict_attrs);
+ vxd_get_msgerrattr(msg_flags, msg_attr);
+
+ if (*msg_attr == VXD_MSG_ATTR_FATAL)
+ pr_err("[TID=0x%08X] [DECODE_FAILED]\n", trans_id);
+ if (*msg_attr == VXD_MSG_ATTR_CANCELED)
+ pr_err("[TID=0x%08X] [DECODE_CANCELED]\n", trans_id);
+
+ *decpict = pdec_pict;
+ break;
+ }
+
+ case FW_DEVA_PARSE_FRAGMENT:
+ /*
+ * Do nothing - Picture holds the list of fragments.
+ * So, in case of any error those would be replayed
+ * anyway.
+ */
+ break;
+ default:
+ pr_warn("Unknown message received 0x%02x\n", msg_type);
+ break;
+ }
+
+ return 0;
+}
+
+static int vdeckm_process_msg(const void *hndl_vxd, unsigned int *msg,
+ struct lst_t *pend_pict_list,
+ unsigned int msg_flags,
+ enum vxd_msg_attr *msg_attr,
+ struct dec_decpict **decpict)
+{
+ struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+ unsigned char msg_type;
+ unsigned char msg_group;
+ unsigned int trans_id = 0;
+ int ret = 0;
+ struct vdec_pict_hwcrc *pict_hwcrc = NULL;
+ struct dec_decpict *pdec_pict;
+
+ if (!core_ctx || !msg || !msg_attr || !pend_pict_list || !decpict)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ *msg_attr = VXD_MSG_ATTR_NONE;
+ *decpict = NULL;
+
+ trans_id = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_TRANS_ID);
+ msg_type = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_MSG_TYPE);
+ msg_group = msg_type & MSG_GROUP_MASK;
+
+ switch (msg_group) {
+ case MSG_TYPE_START_PSR_MTXHOST_MSG:
+ ret = vdeckm_handle_mtxtohost_msg(msg, pend_pict_list, msg_attr,
+ decpict, msg_type, trans_id);
+ break;
+ /*
+ * Picture decode has been returned as unprocessed.
+ * Locate the picture with corresponding TID and mark
+ * it as decoded with errors.
+ */
+ case MSG_TYPE_START_PSR_HOSTMTX_MSG:
+ ret = vdeckm_handle_hosttomtx_msg(msg, pend_pict_list, msg_attr,
+ decpict, msg_type, trans_id,
+ msg_flags);
+ break;
+
+ case FW_DEVA_SIGNATURES_HEVC:
+ case FW_DEVA_SIGNATURES_LEGACY:
+ {
+ unsigned int *signatures = msg + (FW_DEVA_SIGNATURES_SIGNATURES_OFFSET /
+ sizeof(unsigned int));
+ unsigned char sigcount = MEMIO_READ_FIELD(msg, FW_DEVA_SIGNATURES_MSG_SIZE) -
+ ((FW_DEVA_SIGNATURES_SIZE / sizeof(unsigned int)) - 1);
+ unsigned int selected = MEMIO_READ_FIELD(msg, FW_DEVA_SIGNATURES_SIGNATURE_SELECT);
+ unsigned char i, j = 0;
+
+ pdec_pict = lst_first(pend_pict_list);
+ while (pdec_pict) {
+ if (pdec_pict->transaction_id == trans_id)
+ break;
+ pdec_pict = lst_next(pdec_pict);
+ }
+
+ /* We must have a picture in the list that matches the tid */
+ VDEC_ASSERT(pdec_pict);
+ if (!pdec_pict) {
+ pr_err("Firmware signatures message received with no pending picture\n");
+ return IMG_ERROR_FATAL;
+ }
+
+ VDEC_ASSERT(pdec_pict->first_fld_fwmsg);
+ VDEC_ASSERT(pdec_pict->second_fld_fwmsg);
+ if (!pdec_pict->first_fld_fwmsg || !pdec_pict->second_fld_fwmsg) {
+ pr_err("Invalid pending picture struct\n");
+ return IMG_ERROR_FATAL;
+ }
+ if (pdec_pict->first_fld_fwmsg->pict_hwcrc.first_fld_rcvd) {
+ pict_hwcrc = &pdec_pict->second_fld_fwmsg->pict_hwcrc;
+ } else {
+ pict_hwcrc = &pdec_pict->first_fld_fwmsg->pict_hwcrc;
+ if (selected & (PVDEC_SIGNATURE_GROUP_20 | PVDEC_SIGNATURE_GROUP_24))
+ pdec_pict->first_fld_fwmsg->pict_hwcrc.first_fld_rcvd = TRUE;
+ }
+
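+ /*
+ * Walk the 32-bit selection mask; each set bit that is handled
+ * below consumes one signature word from the payload, in
+ * ascending bit order.
+ */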
+ for (i = 0; i < 32; i++) {
+ unsigned int group = selected & (1 << i);
+
+ switch (group) {
+ case PVDEC_SIGNATURE_GROUP_20:
+ pict_hwcrc->crc_vdmc_pix_recon = signatures[j++];
+ break;
+
+ case PVDEC_SIGNATURE_GROUP_24:
+ pict_hwcrc->vdeb_sysmem_wrdata = signatures[j++];
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* sanity check */
+ sigcount -= j;
+ VDEC_ASSERT(sigcount == 0);
+
+ /*
+ * suppress PVDEC_SIGNATURE_GROUP_1 and notify
+ * only about groups used for verification
+ */
+#ifdef DEBUG_DECODER_DRIVER
+ if (selected & (PVDEC_SIGNATURE_GROUP_20 | PVDEC_SIGNATURE_GROUP_24))
+ pr_info("[TID=0x%08X] [SIGNATURES]\n", trans_id);
+#endif
+
+ *decpict = pdec_pict;
+
+ break;
+ }
+
+ default: {
+#ifdef DEBUG_DECODER_DRIVER
+ unsigned short msg_size, i;
+
+ pr_warn("Unknown message type received: 0x%x", msg_type);
+
+ msg_size = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_MSG_SIZE);
+
+ for (i = 0; i < msg_size; i++)
+ pr_info("0x%04x: 0x%08x\n", i, msg[i]);
+#endif
+ break;
+ }
+ }
+
+ return ret;
+}
+
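+/*
+ * Copy to/from VEC Local RAM in whole 32-bit words; size is expected to
+ * be a multiple of 4, as byte accesses to this memory are presumably
+ * not supported.
+ */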
+static void vdeckm_vlr_copy(void *dst, void *src, unsigned int size)
+{
+ unsigned int *pdst = (unsigned int *)dst;
+ unsigned int *psrc = (unsigned int *)src;
+
+ size /= 4;
+ while (size--)
+ *pdst++ = *psrc++;
+}
+
+static int vdeckm_get_core_state(const void *hndl_vxd, struct vxd_states *state)
+{
+ struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+ struct vdecfw_pvdecfirmwarestate firmware_state;
+ unsigned char pipe = 0;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ /*
+ * If disable_fw_irq_value is non-zero, return an error: the interrupt
+ * has been deliberately ignored, so continuing would access
+ * comms_ram_addr and crash the kernel.
+ */
+ if (disable_fw_irq_value != 0)
+ return IMG_ERROR_INVALID_PARAMETERS;
+#endif
+
+ if (!core_ctx || !state)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /*
+ * If state is requested for the first time.
+ */
+ if (core_ctx->state_size == 0) {
+ unsigned int regval;
+ /*
+ * get the state buffer info.
+ */
+ regval = *((unsigned int *)core_ctx->comms_ram_addr +
+ (PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_OFFSET / sizeof(unsigned int)));
+ core_ctx->state_size = PVDEC_COM_RAM_BUF_GET_SIZE(regval, STATE);
+ core_ctx->state_offset = PVDEC_COM_RAM_BUF_GET_OFFSET(regval, STATE);
+ }
+
+ /*
+ * If state buffer is available.
+ */
+ if (core_ctx->state_size) {
+ /*
+ * Determine the latest transaction to have passed each
+ * checkpoint in the firmware.
+ * Read the firmware state from VEC Local RAM
+ */
+ vdeckm_vlr_copy(&firmware_state, (unsigned char *)core_ctx->comms_ram_addr +
+ core_ctx->state_offset, core_ctx->state_size);
+
+ for (pipe = 0; pipe < core_ctx->props.num_pixel_pipes; pipe++) {
+ /*
+ * Set pipe presence.
+ */
+ state->fw_state.pipe_state[pipe].is_pipe_present = 1;
+
+ /*
+ * For checkpoints copy message ids here. These will
+ * be translated into transaction ids later.
+ */
+ memcpy(state->fw_state.pipe_state[pipe].acheck_point,
+ firmware_state.pipestate[pipe].check_point,
+ sizeof(state->fw_state.pipe_state[pipe].acheck_point));
+ state->fw_state.pipe_state[pipe].firmware_action =
+ firmware_state.pipestate[pipe].firmware_action;
+ state->fw_state.pipe_state[pipe].cur_codec =
+ firmware_state.pipestate[pipe].curr_codec;
+ state->fw_state.pipe_state[pipe].fe_slices =
+ firmware_state.pipestate[pipe].fe_slices;
+ state->fw_state.pipe_state[pipe].be_slices =
+ firmware_state.pipestate[pipe].be_slices;
+ state->fw_state.pipe_state[pipe].fe_errored_slices =
+ firmware_state.pipestate[pipe].fe_errored_slices;
+ state->fw_state.pipe_state[pipe].be_errored_slices =
+ firmware_state.pipestate[pipe].be_errored_slices;
+ state->fw_state.pipe_state[pipe].be_mbs_dropped =
+ firmware_state.pipestate[pipe].be_mbs_dropped;
+ state->fw_state.pipe_state[pipe].be_mbs_recovered =
+ firmware_state.pipestate[pipe].be_mbs_recovered;
+ state->fw_state.pipe_state[pipe].fe_mb.x =
+ firmware_state.pipestate[pipe].last_fe_mb_xy & 0xFF;
+ state->fw_state.pipe_state[pipe].fe_mb.y =
+ (firmware_state.pipestate[pipe].last_fe_mb_xy >> 16) & 0xFF;
+ state->fw_state.pipe_state[pipe].be_mb.x =
+ REGIO_READ_FIELD(firmware_state.pipestate[pipe].last_be_mb_xy,
+ MSVDX_VDMC,
+ CR_VDMC_MACROBLOCK_NUMBER,
+ CR_VDMC_MACROBLOCK_X_OFFSET);
+ state->fw_state.pipe_state[pipe].be_mb.y =
+ REGIO_READ_FIELD(firmware_state.pipestate[pipe].last_be_mb_xy,
+ MSVDX_VDMC,
+ CR_VDMC_MACROBLOCK_NUMBER,
+ CR_VDMC_MACROBLOCK_Y_OFFSET);
+ }
+ }
+
+ return 0;
+}
+
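+/*
+ * Build a FW_DEVA_PARSE (batch) message: it carries the device-virtual
+ * address and size of the control allocation, the operating mode and
+ * decode flags, the stream/transaction identifiers and a fresh message
+ * id taken from core_ctx->current_msgid.
+ */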
+static int vdeckm_prepare_batch(struct vdeckm_context *core_ctx,
+ const struct hwctrl_batch_msgdata *batch_msgdata,
+ unsigned char **msg)
+{
+ unsigned char vdec_flags = 0;
+ unsigned short flags = 0;
+ unsigned char *pmsg = kzalloc(FW_DEVA_DECODE_SIZE, GFP_KERNEL);
+ struct vidio_ddbufinfo *pbatch_msg_bufinfo = batch_msgdata->batchmsg_bufinfo;
+
+ if (!pmsg)
+ return IMG_ERROR_MALLOC_FAILED;
+
+ if (batch_msgdata->size_delimited_mode)
+ vdec_flags |= FW_VDEC_NAL_SIZE_DELIM;
+
+ flags |= FW_DEVA_RENDER_HOST_INT;
+
+ /*
+ * Message type and stream ID
+ */
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_TYPE, FW_DEVA_PARSE, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_CTRL_ALLOC_ADDR,
+ (unsigned int)pbatch_msg_bufinfo->dev_virt, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_BUFFER_SIZE,
+ batch_msgdata->ctrl_alloc_bytes / sizeof(unsigned int), unsigned char*);
+
+ /*
+ * Operating mode and decode flags
+ */
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_OPERATING_MODE, batch_msgdata->operating_mode,
+ unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FLAGS, flags, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_VDEC_FLAGS, vdec_flags, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_GENC_ID, batch_msgdata->genc_id, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MB_LOAD, batch_msgdata->mb_load, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_STREAMID,
+ GET_STREAM_ID(batch_msgdata->transaction_id), unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_EXT_STATE_BUFFER,
+ (unsigned int)batch_msgdata->pvdec_fwctx->dev_virt, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MSG_ID, ++core_ctx->current_msgid,
+ unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_TRANS_ID, batch_msgdata->transaction_id,
+ unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_TILE_CFG, batch_msgdata->tile_cfg, unsigned char*);
+
+ /*
+ * size of message
+ */
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_SIZE,
+ FW_DEVA_DECODE_SIZE / sizeof(unsigned int), unsigned char*);
+
+ *msg = pmsg;
+
+ return 0;
+}
+
+static int vdeckm_prepare_fragment(struct vdeckm_context *core_ctx,
+ const struct hwctrl_fragment_msgdata
+ *fragment_msgdata,
+ unsigned char **msg)
+{
+ struct vidio_ddbufinfo *pbatch_msg_bufinfo = NULL;
+ unsigned char *pmsg = NULL;
+
+ if (!fragment_msgdata->batchmsg_bufinfo) {
+ pr_err("Batch message info missing!\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ pbatch_msg_bufinfo = fragment_msgdata->batchmsg_bufinfo;
+
+ pmsg = kzalloc(FW_DEVA_DECODE_FRAGMENT_SIZE, GFP_KERNEL);
+ if (!pmsg)
+ return IMG_ERROR_MALLOC_FAILED;
+ /*
+ * message type and stream id
+ */
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_TYPE,
+ FW_DEVA_PARSE_FRAGMENT, unsigned char*);
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MSG_ID, ++core_ctx->current_msgid, unsigned char*);
+
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR,
+ (unsigned int)pbatch_msg_bufinfo->dev_virt
+ + fragment_msgdata->ctrl_alloc_offset, unsigned char*);
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE,
+ fragment_msgdata->ctrl_alloc_bytes / sizeof(unsigned int),
+ unsigned char*);
+
+ /*
+ * size of message
+ */
+ MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_SIZE,
+ FW_DEVA_DECODE_FRAGMENT_SIZE / sizeof(unsigned int), unsigned char*);
+
+ *msg = pmsg;
+
+ return 0;
+}
+
+static int vdeckm_get_message(const void *hndl_vxd, const enum hwctrl_msgid msgid,
+ const struct hwctrl_msgdata *msgdata,
+ struct hwctrl_to_kernel_msg *to_kernelmsg)
+{
+ unsigned int result = 0;
+ struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+
+ if (!core_ctx || !to_kernelmsg || !msgdata)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ switch (msgid) {
+ case HWCTRL_MSGID_BATCH:
+ result = vdeckm_prepare_batch(core_ctx, &msgdata->batch_msgdata,
+ &to_kernelmsg->msg_hdr);
+ break;
+
+ case HWCTRL_MSGID_FRAGMENT:
+ result = vdeckm_prepare_fragment(core_ctx, &msgdata->fragment_msgdata,
+ &to_kernelmsg->msg_hdr);
+ vxd_set_msgflag(VXD_MSG_FLAG_DROP, &to_kernelmsg->flags);
+ break;
+
+ default:
+ result = IMG_ERROR_GENERIC_FAILURE;
+ pr_err("got a message that is not supported by PVDEC");
+ break;
+ }
+
+ if (result == 0) {
+ /* Set the stream ID for the next message to be sent. */
+ to_kernelmsg->km_str_id = msgdata->km_str_id;
+ to_kernelmsg->msg_size = MEMIO_READ_FIELD(to_kernelmsg->msg_hdr,
+ FW_DEVA_GENMSG_MSG_SIZE) *
+ sizeof(unsigned int);
+ }
+
+ return result;
+}
+
+static void hwctrl_dump_state(struct vxd_states *prev_state,
+ struct vxd_states *cur_state,
+ unsigned char pipe_minus1)
+{
+ pr_info("Back-End MbX [% 10d]",
+ prev_state->fw_state.pipe_state[pipe_minus1].be_mb.x);
+ pr_info("Back-End MbY [% 10d]",
+ prev_state->fw_state.pipe_state[pipe_minus1].be_mb.y);
+ pr_info("Front-End MbX [% 10d]",
+ prev_state->fw_state.pipe_state[pipe_minus1].fe_mb.x);
+ pr_info("Front-End MbY [% 10d]",
+ prev_state->fw_state.pipe_state[pipe_minus1].fe_mb.y);
+ pr_info("VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE]);
+ pr_info("VDECFW_CHECKPOINT_BE_1SLICE_DONE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_BE_1SLICE_DONE]);
+ pr_info("VDECFW_CHECKPOINT_BE_PICTURE_STARTED [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_BE_PICTURE_STARTED]);
+ pr_info("VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE]);
+ pr_info("VDECFW_CHECKPOINT_FE_PARSE_DONE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_FE_PARSE_DONE]);
+ pr_info("VDECFW_CHECKPOINT_FE_1SLICE_DONE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_FE_1SLICE_DONE]);
+ pr_info("VDECFW_CHECKPOINT_ENTDEC_STARTED [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_ENTDEC_STARTED]);
+ pr_info("VDECFW_CHECKPOINT_FIRMWARE_SAVED [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_FIRMWARE_SAVED]);
+ pr_info("VDECFW_CHECKPOINT_PICMAN_COMPLETE [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_PICMAN_COMPLETE]);
+ pr_info("VDECFW_CHECKPOINT_FIRMWARE_READY [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_FIRMWARE_READY]);
+ pr_info("VDECFW_CHECKPOINT_PICTURE_STARTED [0x%08X]",
+ cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+ [VDECFW_CHECKPOINT_PICTURE_STARTED]);
+}
+#endif
+
+static unsigned int hwctrl_calculate_load(struct bspp_pict_hdr_info *pict_hdr_info)
+{
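+ /*
+ * Load is the coded picture size in 16x16 macroblocks, rounded up:
+ * a 1920x1080 frame, for example, gives 120 * 68 = 8160 macroblocks.
+ */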
+ return (((pict_hdr_info->coded_frame_size.width + 15) / 16)
+ * ((pict_hdr_info->coded_frame_size.height + 15) / 16));
+}
+
+static int hwctrl_send_batch_message(struct hwctrl_ctx *hwctx,
+ struct dec_decpict *decpict,
+ void *vxd_dec_ctx)
+{
+ int result;
+ struct hwctrl_to_kernel_msg to_kernelmsg = {0};
+ struct vidio_ddbufinfo *batchmsg_bufinfo =
+ decpict->batch_msginfo->ddbuf_info;
+ struct hwctrl_msgdata msg_data;
+ struct hwctrl_batch_msgdata *batch_msgdata = &msg_data.batch_msgdata;
+
+ memset(&msg_data, 0, sizeof(msg_data));
+
+ msg_data.km_str_id = GET_STREAM_ID(decpict->transaction_id);
+
+ batch_msgdata->batchmsg_bufinfo = batchmsg_bufinfo;
+
+ batch_msgdata->transaction_id = decpict->transaction_id;
+ batch_msgdata->pvdec_fwctx = decpict->str_pvdec_fw_ctxbuf;
+ batch_msgdata->ctrl_alloc_bytes = decpict->ctrl_alloc_bytes;
+ batch_msgdata->operating_mode = decpict->operating_op;
+ batch_msgdata->genc_id = decpict->genc_id;
+ batch_msgdata->mb_load = hwctrl_calculate_load(decpict->pict_hdr_info);
+ batch_msgdata->size_delimited_mode =
+ (decpict->pict_hdr_info->parser_mode != VDECFW_SCP_ONLY) ?
+ (1) : (0);
+
+ result = vdeckm_get_message(hwctx->hndl_vxd, HWCTRL_MSGID_BATCH,
+ &msg_data, &to_kernelmsg);
+ if (result != 0) {
+ pr_err("failed to get decode message\n");
+ return result;
+ }
+
+ pr_debug("[HWCTRL] send batch message\n");
+ result = vdeckm_send_message(hwctx->hndl_vxd, &to_kernelmsg,
+ vxd_dec_ctx);
+ if (result != 0)
+ return result;
+
+ vdeckm_return_msg(hwctx->hndl_vxd, &to_kernelmsg);
+
+ return 0;
+}
+
+int hwctrl_process_msg(void *hndl_hwctx, unsigned int msg_flags, unsigned int *msg,
+ struct dec_decpict **decpict)
+{
+ int result;
+ struct hwctrl_ctx *hwctx;
+ enum vxd_msg_attr msg_attr = VXD_MSG_ATTR_NONE;
+ struct dec_decpict *pdecpict = NULL;
+ unsigned int val_first = 0;
+ unsigned int val_sec = 0;
+
+ if (!hndl_hwctx || !msg || !decpict) {
+ VDEC_ASSERT(0);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ *decpict = NULL;
+
+ pr_debug("[HWCTRL] : process message\n");
+ result = vdeckm_process_msg(hwctx->hndl_vxd, msg, &hwctx->pend_pict_list, msg_flags,
+ &msg_attr, &pdecpict);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* validate pointers before using them */
+ if (!pdecpict || !pdecpict->first_fld_fwmsg || !pdecpict->second_fld_fwmsg) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ val_first = pdecpict->first_fld_fwmsg->pict_attrs.pict_attrs.deverror;
+ val_sec = pdecpict->second_fld_fwmsg->pict_attrs.pict_attrs.deverror;
+
+ if (val_first || val_sec)
+ pr_err("device signaled critical error!!!\n");
+
+ if (msg_attr == VXD_MSG_ATTR_DECODED) {
+ pdecpict->state = DECODER_PICTURE_STATE_DECODED;
+ /*
+ * We have successfully decoded a picture, either normally or
+ * after a replay.
+ * Mark the HW as being in a good state.
+ */
+ hwctx->is_fatal_state = 0;
+ } else if (msg_attr == VXD_MSG_ATTR_FATAL) {
+ struct hwctrl_state state;
+ unsigned char pipe_minus1 = 0;
+
+ memset(&state, 0, sizeof(state));
+
+ result = hwctrl_get_core_status(hwctx, &state);
+ if (result == 0) {
+ hwctx->is_prev_hw_state_set = 1;
+ memcpy(&hwctx->prev_state, &state, sizeof(struct hwctrl_state));
+
+ for (pipe_minus1 = 0; pipe_minus1 < hwctx->num_pipes;
+ pipe_minus1++) {
+#ifdef DEBUG_DECODER_DRIVER
+ hwctrl_dump_state(&state.core_state, &state.core_state,
+ pipe_minus1);
+#endif
+ }
+ }
+ pdecpict->state = DECODER_PICTURE_STATE_TO_DISCARD;
+ }
+ *decpict = pdecpict;
+
+ return 0;
+}
+
+int hwctrl_getcore_cached_status(void *hndl_hwctx, struct hwctrl_state *state)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (!hwctx->is_prev_hw_state_set)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ memcpy(state, &hwctx->prev_state, sizeof(struct hwctrl_state));
+
+ return 0;
+}
+
+int hwctrl_get_core_status(void *hndl_hwctx, struct hwctrl_state *state)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+ unsigned int result = IMG_ERROR_GENERIC_FAILURE;
+
+ if (!hwctx->is_fatal_state && state) {
+ struct vxd_states *pcorestate = NULL;
+
+ pcorestate = &state->core_state;
+
+ memset(pcorestate, 0, sizeof(*(pcorestate)));
+
+ result = vdeckm_get_core_state(hwctx->hndl_vxd, pcorestate);
+ }
+
+ return result;
+}
+
+int hwctrl_is_on_seq_replay(void *hndl_hwctx)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ return hwctx->is_on_seq_replay;
+}
+
+int hwctrl_picture_submitbatch(void *hndl_hwctx, struct dec_decpict *decpict, void *vxd_dec_ctx)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (hwctx->is_initialised) {
+ lst_add(&hwctx->pend_pict_list, decpict);
+ if (!hwctx->is_on_seq_replay)
+ return hwctrl_send_batch_message(hwctx, decpict, vxd_dec_ctx);
+ }
+
+ return 0;
+}
+
+int hwctrl_getpicpend_pictlist(void *hndl_hwctx, unsigned int transaction_id,
+ struct dec_decpict **decpict)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+ struct dec_decpict *dec_pic;
+
+ dec_pic = lst_first(&hwctx->pend_pict_list);
+ while (dec_pic) {
+ if (dec_pic->transaction_id == transaction_id) {
+ *decpict = dec_pic;
+ break;
+ }
+ dec_pic = lst_next(dec_pic);
+ }
+
+ if (!dec_pic)
+ return IMG_ERROR_INVALID_ID;
+
+ return 0;
+}
+
+int hwctrl_peekheadpiclist(void *hndl_hwctx, struct dec_decpict **decpict)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (!hwctx)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ *decpict = lst_first(&hwctx->pend_pict_list);
+ if (*decpict)
+ return 0;
+
+ return IMG_ERROR_GENERIC_FAILURE;
+}
+
+int hwctrl_getdecodedpicture(void *hndl_hwctx, struct dec_decpict **decpict)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (hwctx) {
+ struct dec_decpict *cur_decpict;
+ /* Find the first picture in the list that has been decoded. */
+ cur_decpict = lst_first(&hwctx->pend_pict_list);
+ while (cur_decpict) {
+ if (cur_decpict->state == DECODER_PICTURE_STATE_DECODED) {
+ *decpict = cur_decpict;
+ return 0;
+ }
+
+ cur_decpict = lst_next(cur_decpict);
+ }
+ }
+
+ return IMG_ERROR_VALUE_OUT_OF_RANGE;
+}
+
+void hwctrl_removefrom_piclist(void *hndl_hwctx, struct dec_decpict *decpict)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (hwctx) {
+ struct dec_decpict *cur_decpict;
+ /* Ensure that this picture is in the list. */
+ cur_decpict = lst_first(&hwctx->pend_pict_list);
+ while (cur_decpict) {
+ if (cur_decpict == decpict) {
+ lst_remove(&hwctx->pend_pict_list, decpict);
+ break;
+ }
+
+ cur_decpict = lst_next(cur_decpict);
+ }
+ }
+}
+
+int hwctrl_getregsoffset(void *hndl_hwctx, struct decoder_regsoffsets *regs_offsets)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ return vdeckm_getregsoffsets(hwctx->hndl_vxd, regs_offsets);
+}
+
+static int pvdec_create(struct vxd_dev *vxd, struct vxd_coreprops *core_props,
+ void **hndl_vdeckm_context)
+{
+ struct vdeckm_context *corectx;
+ struct vxd_core_props hndl_core_props;
+ int result;
+ unsigned long map_size;
+ phys_addr_t phy_addr;
+
+ if (!hndl_vdeckm_context || !core_props)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /*
+ * Obtain core context.
+ */
+ corectx = &acore_ctx[0];
+
+ memset(corectx, 0, sizeof(*corectx));
+
+ corectx->core_num = 0;
+
+ result = vxd_pvdec_get_props(vxd->dev, vxd->reg_base, &hndl_core_props);
+ if (result != 0)
+ return result;
+
+ vxd_get_coreproperties(&hndl_core_props, &corectx->props);
+
+ memcpy(core_props, &corectx->props, sizeof(*core_props));
+
+ /* end aligned to page (ceiling), in pages */
+ map_size = (PVDEC_COMMS_RAM_OFFSET + PVDEC_COMMS_RAM_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* subtract start aligned to page (floor), in pages */
+ map_size -= PVDEC_COMMS_RAM_OFFSET / PAGE_SIZE;
+ /* convert to bytes */
+ map_size *= PAGE_SIZE;
+ phy_addr = 0x4300000 + PVDEC_COMMS_RAM_OFFSET;
+ corectx->comms_ram_addr = ioremap(phy_addr, map_size);
+ if (!corectx->comms_ram_addr)
+ return IMG_ERROR_OUT_OF_MEMORY;
+ *hndl_vdeckm_context = corectx;
+
+ return 0;
+}
+
+int hwctrl_deinitialise(void *hndl_hwctx)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+ if (hwctx->is_initialised)
+ kfree(hwctx);
+
+ return 0;
+}
+
+int hwctrl_initialise(void *dec_core, void *comp_int_userdata,
+ const struct vdecdd_dd_devconfig *dd_devconfig,
+ struct vxd_coreprops *core_props, void **hndl_hwctx)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)*hndl_hwctx;
+ int result;
+
+ if (!hwctx) {
+ hwctx = kzalloc(sizeof(*(hwctx)), GFP_KERNEL);
+ if (!hwctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ *hndl_hwctx = hwctx;
+ }
+
+ if (!hwctx->is_initialised) {
+ hwctx->hndl_vxd = ((struct dec_core_ctx *)dec_core)->dec_ctx->dev_handle;
+ result = pvdec_create(hwctx->hndl_vxd, core_props, &hwctx->hndl_vxd);
+ if (result != 0)
+ goto error;
+
+ lst_init(&hwctx->pend_pict_list);
+
+ hwctx->devconfig = *dd_devconfig;
+ hwctx->num_pipes = core_props->num_pixel_pipes;
+ hwctx->comp_init_userdata = comp_int_userdata;
+ hwctx->dec_core = dec_core;
+ hwctx->is_initialised = 1;
+ hwctx->is_on_seq_replay = 0;
+ hwctx->is_fatal_state = 0;
+ }
+
+ return 0;
+error:
+ hwctrl_deinitialise(*hndl_hwctx);
+
+ return result;
+}
+
+static int hwctrl_send_fragment_message(struct hwctrl_ctx *hwctx,
+ struct dec_pict_fragment *pict_fragment,
+ struct dec_decpict *decpict,
+ void *vxd_dec_ctx)
+{
+ int result;
+ struct hwctrl_to_kernel_msg to_kernelmsg = {0};
+ struct hwctrl_msgdata msg_data;
+ struct hwctrl_fragment_msgdata *pfragment_msgdata =
+ &msg_data.fragment_msgdata;
+
+ /* Zero all message data first, as the batch path does. */
+ memset(&msg_data, 0, sizeof(msg_data));
+
+ msg_data.km_str_id = GET_STREAM_ID(decpict->transaction_id);
+
+ pfragment_msgdata->ctrl_alloc_bytes = pict_fragment->ctrl_alloc_bytes;
+
+ pfragment_msgdata->ctrl_alloc_offset = pict_fragment->ctrl_alloc_offset;
+
+ pfragment_msgdata->batchmsg_bufinfo = decpict->batch_msginfo->ddbuf_info;
+
+ result = vdeckm_get_message(hwctx->hndl_vxd, HWCTRL_MSGID_FRAGMENT, &msg_data,
+ &to_kernelmsg);
+ if (result != 0) {
+ pr_err("Failed to get decode message\n");
+ return result;
+ }
+
+ result = vdeckm_send_message(hwctx->hndl_vxd, &to_kernelmsg, vxd_dec_ctx);
+ if (result != 0)
+ return result;
+
+ vdeckm_return_msg(hwctx->hndl_vxd, &to_kernelmsg);
+
+ return 0;
+}
+
+int hwctrl_picture_submit_fragment(void *hndl_hwctx,
+ struct dec_pict_fragment *pict_fragment,
+ struct dec_decpict *decpict,
+ void *vxd_dec_ctx)
+{
+ struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+ unsigned int result = 0;
+
+ if (hwctx->is_initialised) {
+ result = hwctrl_send_fragment_message(hwctx, pict_fragment,
+ decpict, vxd_dec_ctx);
+ if (result != 0)
+ pr_err("Failed to send fragment message to firmware !");
+ }
+
+ return result;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/hw_control.h b/drivers/media/platform/vxe-vxd/decoder/hw_control.h
new file mode 100644
index 000000000000..3f430969b998
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/hw_control.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Hardware control implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _HW_CONTROL_H
+#define _HW_CONTROL_H
+
+#include "bspp.h"
+#include "decoder.h"
+#include "fw_interface.h"
+#include "img_dec_common.h"
+#include "img_errors.h"
+#include "lst.h"
+#include "mem_io.h"
+#include "vdecdd_defs.h"
+#include "vdecfw_shared.h"
+#include "vid_buf.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+
+/* Size of additional buffers needed for each HEVC picture */
+#ifdef HAS_HEVC
+
+/* Empirically defined */
+#define MEM_TO_REG_BUF_SIZE 0x2000
+
+/*
+ * Max. no. of slices found in stream db: approx. 2200,
+ * set MAX_SLICES to 2368 to get buffer size page aligned
+ */
+#define MAX_SLICES 2368
+#define SLICE_PARAMS_SIZE 64
+#define SLICE_PARAMS_BUF_SIZE (MAX_SLICES * SLICE_PARAMS_SIZE)
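+/* 2368 * 64 = 151552 bytes (0x25000), a whole number of 4 KiB pages */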
+
+/*
+ * Size of buffer for "above params" structure, sufficient for stream of width 8192
+ * 192 * (8192/64) == 0x6000, see "above_param_size" in TRM
+ */
+#define ABOVE_PARAMS_BUF_SIZE 0x6000
+#endif
+
+enum hwctrl_msgid {
+ HWCTRL_MSGID_BATCH = 0,
+ HWCTRL_MSGID_FRAGMENT = 1,
+ CORE_MSGID_MAX,
+ CORE_MSGID_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct hwctrl_to_kernel_msg {
+ unsigned int msg_size;
+ unsigned int km_str_id;
+ unsigned int flags;
+ unsigned char *msg_hdr;
+};
+
+struct hwctrl_batch_msgdata {
+ struct vidio_ddbufinfo *batchmsg_bufinfo;
+ struct vidio_ddbufinfo *pvdec_fwctx;
+ unsigned int ctrl_alloc_bytes;
+ unsigned int operating_mode;
+ unsigned int transaction_id;
+ unsigned int tile_cfg;
+ unsigned int genc_id;
+ unsigned int mb_load;
+ unsigned int size_delimited_mode;
+};
+
+struct hwctrl_fragment_msgdata {
+ struct vidio_ddbufinfo *batchmsg_bufinfo;
+ unsigned int ctrl_alloc_offset;
+ unsigned int ctrl_alloc_bytes;
+};
+
+struct hwctrl_msgdata {
+ unsigned int km_str_id;
+ struct hwctrl_batch_msgdata batch_msgdata;
+ struct hwctrl_fragment_msgdata fragment_msgdata;
+};
+
+/*
+ * This structure contains MSVDX Message information.
+ */
+struct hwctrl_msgstatus {
+ unsigned char control_fence_id[VDECFW_MSGID_CONTROL_TYPES];
+ unsigned char decode_fence_id[VDECFW_MSGID_DECODE_TYPES];
+ unsigned char completion_fence_id[VDECFW_MSGID_COMPLETION_TYPES];
+};
+
+/*
+ * This structure contains the HWCTRL core state.
+ */
+struct hwctrl_state {
+ struct vxd_states core_state;
+ struct hwctrl_msgstatus fwmsg_status;
+ struct hwctrl_msgstatus hostmsg_status;
+};
+
+int hwctrl_picture_submit_fragment(void *hndl_hwctx,
+ struct dec_pict_fragment *pict_fragment,
+ struct dec_decpict *decpict,
+ void *vxd_dec_ctx);
+
+int hwctrl_process_msg(void *hndl_hwctx, unsigned int msg_flags, unsigned int *msg,
+ struct dec_decpict **decpict);
+
+int hwctrl_getcore_cached_status(void *hndl_hwctx, struct hwctrl_state *state);
+
+int hwctrl_get_core_status(void *hndl_hwctx, struct hwctrl_state *state);
+
+int hwctrl_is_on_seq_replay(void *hndl_hwctx);
+
+int hwctrl_picture_submitbatch(void *hndl_hwctx, struct dec_decpict *decpict,
+ void *vxd_dec_ctx);
+
+int hwctrl_getpicpend_pictlist(void *hndl_hwctx, unsigned int transaction_id,
+ struct dec_decpict **decpict);
+
+int hwctrl_peekheadpiclist(void *hndl_hwctx, struct dec_decpict **decpict);
+
+int hwctrl_getdecodedpicture(void *hndl_hwctx, struct dec_decpict **decpict);
+
+void hwctrl_removefrom_piclist(void *hndl_hwctx, struct dec_decpict *decpict);
+
+int hwctrl_getregsoffset(void *hndl_hwctx,
+ struct decoder_regsoffsets *regs_offsets);
+
+int hwctrl_initialise(void *dec_core, void *comp_int_userdata,
+ const struct vdecdd_dd_devconfig *dd_devconfig,
+ struct vxd_coreprops *core_props, void **hndl_hwctx);
+
+int hwctrl_deinitialise(void *hndl_hwctx);
+
+#endif /* _HW_CONTROL_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_dec_common.h b/drivers/media/platform/vxe-vxd/decoder/img_dec_common.h
new file mode 100644
index 000000000000..80ee51c644ba
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_dec_common.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC common header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _IMG_DEC_COMMON_H
+#define _IMG_DEC_COMMON_H
+
+#include <linux/types.h>
+
+#define VXD_MAX_PIPES 2
+#define MAX_DST_BUFFERS 32
+
+/* Helpers for parsing core properties. Based on HW registers layout. */
+#define VXD_GET_BITS(v, lb, rb, type) \
+ ({ \
+ type __rb = (rb); \
+ (((v) >> (__rb)) & ((1 << ((lb) - __rb + 1)) - 1)); })
+#define VXD_GET_BIT(v, b) (((v) >> (b)) & 1)
+
+/* Get major core revision. */
+#define VXD_MAJ_REV(props) (VXD_GET_BITS((props).core_rev, 23, 16, unsigned int))
+/* Get minor core revision. */
+#define VXD_MIN_REV(props) (VXD_GET_BITS((props).core_rev, 15, 8, unsigned int))
+/* Get maint core revision. */
+#define VXD_MAINT_REV(props) (VXD_GET_BITS((props).core_rev, 7, 0, unsigned int))
+/* Get number of entropy pipes available (HEVC). */
+#define VXD_NUM_ENT_PIPES(props) ((props).pvdec_core_id & 0xF)
+/* Get number of pixel pipes available (other standards). */
+#define VXD_NUM_PIX_PIPES(props) (((props).pvdec_core_id & 0xF0) >> 4)
+/* Get number of bits used by external memory interface. */
+#define VXD_EXTRN_ADDR_WIDTH(props) ((((props).mmu_config0 & 0xF0) >> 4) + 32)
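+
+/*
+ * Example (illustrative register value): for core_rev = 0x00030201,
+ * VXD_MAJ_REV() extracts bits 23:16 -> 3, VXD_MIN_REV() bits 15:8 -> 2
+ * and VXD_MAINT_REV() bits 7:0 -> 1.
+ */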
+
+/* Check whether specific standard is supported by the pixel pipe. */
+#define VXD_HAS_MPEG2(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 0)
+#define VXD_HAS_MPEG4(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 1)
+#define VXD_HAS_H264(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 2)
+#define VXD_HAS_VC1(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 3)
+#define VXD_HAS_WMV9(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 4)
+#define VXD_HAS_JPEG(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 5)
+#define VXD_HAS_MPEG4_DATA_PART(props, pipe) \
+ VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 6)
+#define VXD_HAS_AVS(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 7)
+#define VXD_HAS_REAL(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 8)
+#define VXD_HAS_VP6(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 9)
+#define VXD_HAS_VP8(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 10)
+#define VXD_HAS_SORENSON(props, pipe) \
+ VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 11)
+#define VXD_HAS_HEVC(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 22)
+
+/* Check whether specific feature is supported by the pixel pipe */
+
+/*
+ * Max picture size for HEVC still picture profile is 64k wide and/or 64k
+ * high.
+ */
+#define VXD_HAS_HEVC_64K_STILL(props, pipe) \
+ (VXD_GET_BIT((props).pixel_misc_cfg[pipe], 24))
+
+/* Pixel processing pipe index. */
+#define VXD_PIX_PIPE_ID(props, pipe) \
+ (VXD_GET_BITS((props).pixel_misc_cfg[pipe], 18, 16, unsigned int))
+
+/* Number of stream supported by the pixel pipe DMAC and shift register. */
+#define VXD_PIX_NUM_STRS(props, pipe) \
+ (VXD_GET_BITS((props).pixel_misc_cfg[pipe], 13, 12, unsigned int) + 1)
+
+/* Is scaling supported. */
+#define VXD_HAS_SCALING(props, pipe) \
+ (VXD_GET_BIT((props).pixel_misc_cfg[pipe], 9))
+
+/* Is rotation supported. */
+#define VXD_HAS_ROTATION(props, pipe) \
+ (VXD_GET_BIT((props).pixel_misc_cfg[pipe], 8))
+
+/* Are HEVC range extensions supported. */
+#define VXD_HAS_HEVC_REXT(props, pipe) \
+ (VXD_GET_BIT((props).pixel_misc_cfg[pipe], 7))
+
+/* Maximum bit depth supported by the pipe. */
+#define VXD_MAX_BIT_DEPTH(props, pipe) \
+ (VXD_GET_BITS((props).pixel_misc_cfg[pipe], 6, 4, unsigned int) + 8)
+
+/*
+ * Maximum chroma format supported by the pipe in HEVC mode.
+ * 0x1 - 4:2:0
+ * 0x2 - 4:2:2
+ * 0x3 - 4:4:4
+ */
+#define VXD_MAX_HEVC_CHROMA_FMT(props, pipe) \
+ (VXD_GET_BITS((props).pixel_misc_cfg[pipe], 3, 2, unsigned int))
+
+/*
+ * Maximum chroma format supported by the pipe in H264 mode.
+ * 0x1 - 4:2:0
+ * 0x2 - 4:2:2
+ * 0x3 - 4:4:4
+ */
+#define VXD_MAX_H264_CHROMA_FMT(props, pipe) \
+ (VXD_GET_BITS((props).pixel_misc_cfg[pipe], 1, 0, unsigned int))
+
+/*
+ * Maximum frame width and height supported in MSVDX pipeline.
+ */
+#define VXD_MAX_WIDTH_MSVDX(props) \
+ (2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 4, 0, unsigned int)))
+#define VXD_MAX_HEIGHT_MSVDX(props) \
+ (2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 12, 8, unsigned int)))
+
+/*
+ * Maximum frame width and height supported in PVDEC pipeline.
+ */
+#define VXD_MAX_WIDTH_PVDEC(props) \
+ (2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 20, 16, unsigned int)))
+#define VXD_MAX_HEIGHT_PVDEC(props) \
+ (2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 28, 24, unsigned int)))
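+
+/*
+ * Worked example (illustrative field value): a width field of 11 gives
+ * a maximum of 2 << 11 = 4096 pixels.
+ */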
+
+#define PVDEC_COMMS_RAM_OFFSET 0x00002000
+#define PVDEC_COMMS_RAM_SIZE 0x00001000
+#define PVDEC_ENTROPY_OFFSET 0x00003000
+#define PVDEC_ENTROPY_SIZE 0x1FF
+#define PVDEC_VEC_BE_OFFSET 0x00005000
+#define PVDEC_VEC_BE_SIZE 0x3FF
+#define PVDEC_VEC_BE_CODEC_OFFSET 0x00005400
+#define MSVDX_VEC_OFFSET 0x00006000
+#define MSVDX_VEC_SIZE 0x7FF
+#define MSVDX_CMD_OFFSET 0x00007000
+
+/*
+ * Virtual memory heap address ranges for tiled
+ * and non-tiled buffers. Addresses within each
+ * range should be assigned to the appropriate
+ * buffers by the UM driver and mapped into the
+ * device using the corresponding KM driver ioctl.
+ */
+#define PVDEC_HEAP_UNTILED_START 0x00400000ul
+#define PVDEC_HEAP_UNTILED_SIZE 0x3FC00000ul
+#define PVDEC_HEAP_TILE512_START 0x40000000ul
+#define PVDEC_HEAP_TILE512_SIZE 0x10000000ul
+#define PVDEC_HEAP_TILE1024_START 0x50000000ul
+#define PVDEC_HEAP_TILE1024_SIZE 0x20000000ul
+#define PVDEC_HEAP_TILE2048_START 0x70000000ul
+#define PVDEC_HEAP_TILE2048_SIZE 0x30000000ul
+#define PVDEC_HEAP_TILE4096_START 0xA0000000ul
+#define PVDEC_HEAP_TILE4096_SIZE 0x30000000ul
+#define PVDEC_HEAP_BITSTREAM_START 0xD2000000ul
+#define PVDEC_HEAP_BITSTREAM_SIZE 0x0A000000ul
+#define PVDEC_HEAP_STREAM_START 0xE4000000ul
+#define PVDEC_HEAP_STREAM_SIZE 0x1C000000ul
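+
+/*
+ * The heaps are laid out back to back: e.g. the untiled heap ends at
+ * 0x00400000 + 0x3FC00000 = 0x40000000, exactly where the 512-byte
+ * tile heap starts.
+ */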
+
+/*
+ * Max size of the message payload, in bytes. There are 7 bits used to encode
+ * the message size in the firmware interface.
+ */
+#define VXD_MAX_PAYLOAD_SIZE (127 * sizeof(unsigned int))
+/* Max size of the input message in bytes. */
+#define VXD_MAX_INPUT_SIZE (VXD_MAX_PAYLOAD_SIZE + sizeof(struct vxd_fw_msg))
+/*
+ * Min size of the input message. Two words needed for message header and
+ * stream PTD
+ */
+#define VXD_MIN_INPUT_SIZE 2
+/*
+ * Offset of the stream PTD within message. This word has to be left null in
+ * submitted message, driver will fill it in with an appropriate value.
+ */
+#define VXD_PTD_MSG_OFFSET 1
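+
+/*
+ * Worked example: with 32-bit words, VXD_MAX_PAYLOAD_SIZE is
+ * 127 * 4 = 508 bytes, so VXD_MAX_INPUT_SIZE is 508 bytes plus the
+ * size of struct vxd_fw_msg. The minimum message is 2 words: the
+ * header and the stream PTD at word offset VXD_PTD_MSG_OFFSET (1).
+ */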
+
+/* Read flags */
+#define VXD_FW_MSG_RD_FLAGS_MASK 0xffff
+/* Driver watchdog interrupted processing of the message. */
+#define VXD_FW_MSG_FLAG_DWR 0x1
+/* VXD MMU fault occurred when the message was processed. */
+#define VXD_FW_MSG_FLAG_MMU_FAULT 0x2
+/* Invalid input message, e.g. the message was too large. */
+#define VXD_FW_MSG_FLAG_INV 0x4
+/* I/O error occurred when the message was processed. */
+#define VXD_FW_MSG_FLAG_DEV_ERR 0x8
+/*
+ * Driver error occurred when the message was processed, e.g. failed to
+ * allocate memory.
+ */
+#define VXD_FW_MSG_FLAG_DRV_ERR 0x10
+/*
+ * Item was canceled, without being fully processed
+ * i.e. corresponding stream was destroyed.
+ */
+#define VXD_FW_MSG_FLAG_CANCELED 0x20
+/* Firmware internal error occurred when the message was processed */
+#define VXD_FW_MSG_FLAG_FATAL 0x40
+
+/* Write flags */
+#define VXD_FW_MSG_WR_FLAGS_MASK 0xffff0000
+/* Indicates that message shall be dropped after sending it to the firmware. */
+#define VXD_FW_MSG_FLAG_DROP 0x10000
+/*
+ * Indicates that message shall be exclusively handled by
+ * the firmware/hardware. Any other pending messages are
+ * blocked until such message is handled.
+ */
+#define VXD_FW_MSG_FLAG_EXCL 0x20000
+
+#define VXD_MSG_SIZE(msg) (sizeof(struct vxd_fw_msg) + ((msg).payload_size))
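+
+/*
+ * Usage sketch (illustrative): a message flagged for dropping that also
+ * hit an MMU fault satisfies both (flags & VXD_FW_MSG_FLAG_DROP) and
+ * (flags & VXD_FW_MSG_FLAG_MMU_FAULT); read and write flags occupy
+ * disjoint halves of the same word, per the masks above.
+ */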
+
+/* Header included at the beginning of firmware binary */
+struct vxd_fw_hdr {
+ unsigned int core_size;
+ unsigned int blob_size;
+ unsigned int firmware_id;
+ unsigned int timestamp;
+};
+
+/*
+ * struct vxd_dev_fw - Core component will allocate a buffer for firmware.
+ * This structure holds the information about the firmware
+ * binary.
+ * @buf_id: The buffer id allocation
+ * @hdr: firmware header information
+ * @fw_size: The size of the fw. Set after successful firmware request.
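+ * @ready: non-zero once the firmware is loaded and ready for use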
+ */
+struct vxd_dev_fw {
+ int buf_id;
+ struct vxd_fw_hdr *hdr;
+ unsigned int fw_size;
+ unsigned char ready;
+};
+
+/*
+ * struct vxd_core_props - contains HW core properties
+ * @core_rev: Core revision based on register CR_PVDEC_CORE_REV
+ * @pvdec_core_id: PVDEC Core id based on register CR_PVDEC_CORE_ID
+ * @mmu_config0: MMU configuration 0 based on register MMU_CONFIG0
+ * @mmu_config1: MMU configuration 1 based on register MMU_CONFIG1
+ * @mtx_ram_size: size of the MTX RAM based on register CR_PROC_DEBUG
+ * @pixel_max_frame_cfg: indicates the max frame height and width for
+ * PVDEC pipeline and MSVDX pipeline based on register
+ * MAX_FRAME_CONFIG
+ * @pixel_pipe_cfg: pipe configuration which codecs are supported in a
+ * Pixel Processing Pipe, based on register
+ * PIXEL_PIPE_CONFIG
+ * @pixel_misc_cfg: Additional pipe configuration eg. supported scaling
+ * or rotation, based on register PIXEL_MISC_CONFIG
+ * @dbg_fifo_size: contains the depth of the Debug FIFO, based on
+ * register CR_PROC_DEBUG_FIFO_SIZE
+ */
+struct vxd_core_props {
+ unsigned int core_rev;
+ unsigned int pvdec_core_id;
+ unsigned int mmu_config0;
+ unsigned int mmu_config1;
+ unsigned int mtx_ram_size;
+ unsigned int pixel_max_frame_cfg;
+ unsigned int pixel_pipe_cfg[VXD_MAX_PIPES];
+ unsigned int pixel_misc_cfg[VXD_MAX_PIPES];
+ unsigned int dbg_fifo_size;
+};
+
+struct vxd_alloc_data {
+ unsigned int heap_id; /* [IN] Heap ID of allocator */
+ unsigned int size; /* [IN] Size of device memory (in bytes) */
+ unsigned int attributes; /* [IN] Attributes of buffer */
+ unsigned int buf_id; /* [OUT] Generated buffer ID */
+};
+
+struct vxd_free_data {
+ unsigned int buf_id; /* [IN] ID of device buffer to free */
+};
+#endif /* _IMG_DEC_COMMON_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_msvdx_cmds.h b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_cmds.h
new file mode 100644
index 000000000000..2748ff44624e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_cmds.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG MSVDX command registers
+ * This file contains the MSVDX_CMDS register definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_MSVDX_CMDS_H
+#define _IMG_MSVDX_CMDS_H
+
+#define MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET (0x0060)
+#define MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET (0x0070)
+/* MSVDX_CMDS, VERTICAL_LUMA_COEFFICIENTS, VER_LUMA_COEFF_0 */
+#define MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET (0x0080)
+/* MSVDX_CMDS, HORIZONTAL_CHROMA_COEFFICIENTS, HOR_CHROMA_COEFF_0 */
+#define MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET (0x0090)
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_HEIGHT */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_SHIFT (12)
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_WIDTH */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_SHIFT (0)
+#define MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_OFFSET (0x00B0)
+#define MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_PVDEC_DISPLAY_PICTURE_HEIGHT_MIN1_LSBMASK \
+ (0x0000FFFF)
+#define MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_PVDEC_DISPLAY_PICTURE_HEIGHT_MIN1_SHIFT (16)
+/* MSVDX_CMDS, PVDEC_DISPLAY_PICTURE_SIZE, PVDEC_DISPLAY_PICTURE_WIDTH_MIN1 */
+#define MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_PVDEC_DISPLAY_PICTURE_WIDTH_MIN1_LSBMASK \
+ (0x0000FFFF)
+#define MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_PVDEC_DISPLAY_PICTURE_WIDTH_MIN1_SHIFT (0)
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_HEIGHT */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_SHIFT (12)
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_WIDTH */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_SHIFT (0)
+#define MSVDX_CMDS_PVDEC_CODED_PICTURE_SIZE_OFFSET (0x00B4)
+/* MSVDX_CMDS, OPERATING_MODE, USE_EXT_ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_MASK (0x10000000)
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_LSBMASK (0x00000001)
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_SHIFT (28)
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_INTERLEAVED */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_MASK (0x08000000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_LSBMASK (0x00000001)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_SHIFT (27)
+/* MSVDX_CMDS, OPERATING_MODE, ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_MASK (0x07000000)
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_LSBMASK (0x00000007)
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_SHIFT (24)
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_PROFILE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_MASK (0x00300000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_LSBMASK (0x00000003)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_SHIFT (20)
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_MASK (0x000F0000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_LSBMASK (0x0000000F)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_SHIFT (16)
+/* MSVDX_CMDS, OPERATING_MODE, ASYNC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_MASK (0x00006000)
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_LSBMASK (0x00000003)
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_SHIFT (13)
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_FORMAT */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_MASK (0x00001000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_LSBMASK (0x00000001)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_SHIFT (12)
+/* MSVDX_CMDS, OPERATING_MODE, PIC_QUANT */
+#define MSVDX_CMDS_PVDEC_OPERATING_MODE_OFFSET (0x00A0)
+/* MSVDX_CMDS, EXT_OP_MODE, BIT_DEPTH_CHROMA_MINUS8 */
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_CHROMA_MINUS8_MASK (0x00003000)
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_CHROMA_MINUS8_LSBMASK (0x00000003)
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_CHROMA_MINUS8_SHIFT (12)
+/* MSVDX_CMDS, EXT_OP_MODE, BIT_DEPTH_LUMA_MINUS8 */
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_LUMA_MINUS8_MASK (0x00000300)
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_LUMA_MINUS8_LSBMASK (0x00000003)
+#define MSVDX_CMDS_EXT_OP_MODE_BIT_DEPTH_LUMA_MINUS8_SHIFT (8)
+/* MSVDX_CMDS, EXT_OP_MODE, MEMORY_PACKING */
+#define MSVDX_CMDS_EXT_OP_MODE_MEMORY_PACKING_MASK (0x00000008)
+#define MSVDX_CMDS_EXT_OP_MODE_MEMORY_PACKING_LSBMASK (0x00000001)
+#define MSVDX_CMDS_EXT_OP_MODE_MEMORY_PACKING_SHIFT (3)
+/* MSVDX_CMDS, EXT_OP_MODE, CHROMA_FORMAT_IDC */
+#define MSVDX_CMDS_EXT_OP_MODE_CHROMA_FORMAT_IDC_MASK (0x00000003)
+#define MSVDX_CMDS_EXT_OP_MODE_CHROMA_FORMAT_IDC_LSBMASK (0x00000003)
+#define MSVDX_CMDS_EXT_OP_MODE_CHROMA_FORMAT_IDC_SHIFT (0)
+#define MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x000C)
+/*
+ * MSVDX_CMDS, LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES,
+ * LUMA_RECON_BASE_ADDR
+ */
+#define MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x0010)
+/* MSVDX_CMDS, AUX_MSB_BUFFER_BASE_ADDRESSES, AUX_MSB_BUFFER_BASE_ADDR */
+#define MSVDX_CMDS_INTRA_BUFFER_BASE_ADDRESS_OFFSET (0x0018)
+/* MSVDX_CMDS, INTRA_BUFFER_BASE_ADDRESS, INTRA_BASE_ADDR */
+
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_OFFSET (0x001C)
+
+/* MSVDX_CMDS, MC_CACHE_CONFIGURATION, CONFIG_REF_CHROMA_ADJUST */
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_CHROMA_ADJUST_MASK (0x01000000)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_CHROMA_ADJUST_LSBMASK (0x00000001)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_CHROMA_ADJUST_SHIFT (24)
+/* MSVDX_CMDS, MC_CACHE_CONFIGURATION, CONFIG_REF_OFFSET */
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_OFFSET_MASK (0x00FFF000)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_OFFSET_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_REF_OFFSET_SHIFT (12)
+/* MSVDX_CMDS, MC_CACHE_CONFIGURATION, CONFIG_ROW_OFFSET */
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_ROW_OFFSET_MASK (0x0000003F)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_ROW_OFFSET_LSBMASK (0x0000003F)
+#define MSVDX_CMDS_MC_CACHE_CONFIGURATION_CONFIG_ROW_OFFSET_SHIFT (0)
+/* MSVDX_CMDS, H264_WEIGHTED_FACTOR_DENOMINATOR, Y_LOG2_WEIGHT_DENOM */
+#define MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x0028)
+/* MSVDX_CMDS, VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS, LUMA_RANGE_BASE_ADDR */
+#define MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x002C)
+/* MSVDX_CMDS, VC1_RANGE_MAPPING_FLAGS, LUMA_RANGE_MAP */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET (0x003C)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, EXT_ROT_ROW_STRIDE */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_EXT_ROT_ROW_STRIDE_MASK (0xFFC00000)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_EXT_ROT_ROW_STRIDE_LSBMASK \
+ (0x000003FF)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_EXT_ROT_ROW_STRIDE_SHIFT (22)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, PACKED_422_OUTPUT */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_PACKED_422_OUTPUT_MASK (0x00000800)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_PACKED_422_OUTPUT_LSBMASK \
+ (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_PACKED_422_OUTPUT_SHIFT (11)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, USE_AUX_LINE_BUF */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_AUX_LINE_BUF_MASK (0x00000400)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_AUX_LINE_BUF_LSBMASK (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_AUX_LINE_BUF_SHIFT (10)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, SCALE_INPUT_SIZE_SEL */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_SCALE_INPUT_SIZE_SEL_MASK \
+ (0x00000200)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_SCALE_INPUT_SIZE_SEL_LSBMASK \
+ (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_SCALE_INPUT_SIZE_SEL_SHIFT (9)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, USE_EXT_ROT_ROW_STRIDE */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_EXT_ROT_ROW_STRIDE_MASK \
+ (0x00000100)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_EXT_ROT_ROW_STRIDE_LSBMASK \
+ (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_USE_EXT_ROT_ROW_STRIDE_SHIFT (8)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, ROTATION_ROW_STRIDE */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_ROTATION_ROW_STRIDE_MASK (0x00000070)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_ROTATION_ROW_STRIDE_LSBMASK \
+ (0x00000007)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_ROTATION_ROW_STRIDE_SHIFT (4)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION, ROTATION_MODE */
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET (0x0040)
+
+/* MSVDX_CMDS, EXTENDED_ROW_STRIDE, EXT_ROW_STRIDE */
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_EXT_ROW_STRIDE_MASK (0x0003FFC0)
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_EXT_ROW_STRIDE_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_EXT_ROW_STRIDE_SHIFT (6)
+/* MSVDX_CMDS, EXTENDED_ROW_STRIDE, REF_PIC_MMU_TILED */
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_OFFSET (0x01AC)
+/* MSVDX_CMDS, CHROMA_ROW_STRIDE, ALT_CHROMA_ROW_STRIDE */
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_ALT_CHROMA_ROW_STRIDE_MASK (0xFFC00000)
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_ALT_CHROMA_ROW_STRIDE_LSBMASK (0x000003FF)
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_ALT_CHROMA_ROW_STRIDE_SHIFT (22)
+/* MSVDX_CMDS, CHROMA_ROW_STRIDE, CHROMA_ROW_STRIDE */
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_CHROMA_ROW_STRIDE_MASK (0x0003FFC0)
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_CHROMA_ROW_STRIDE_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_CHROMA_ROW_STRIDE_CHROMA_ROW_STRIDE_SHIFT (6)
+/* MSVDX_CMDS, RPR_PICTURE_SIZE, RPR_PICTURE_WIDTH */
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_OFFSET (0x0050)
+/* MSVDX_CMDS, SCALED_DISPLAY_SIZE, SCALE_DISPLAY_HEIGHT */
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_HEIGHT_MASK (0x00FFF000)
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_HEIGHT_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_HEIGHT_SHIFT (12)
+/* MSVDX_CMDS, SCALED_DISPLAY_SIZE, SCALE_DISPLAY_WIDTH */
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_WIDTH_MASK (0x00000FFF)
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_WIDTH_LSBMASK (0x00000FFF)
+#define MSVDX_CMDS_SCALED_DISPLAY_SIZE_SCALE_DISPLAY_WIDTH_SHIFT (0)
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_OFFSET (0x00B8)
+/* MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE, PVDEC_SCALE_DISPLAY_HEIGHT */
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_HEIGHT_MASK (0xFFFF0000)
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_HEIGHT_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_HEIGHT_SHIFT (16)
+/* MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE, PVDEC_SCALE_DISPLAY_WIDTH */
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_WIDTH_MASK (0x0000FFFF)
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_WIDTH_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_PVDEC_SCALE_DISPLAY_WIDTH_SHIFT (0)
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_OFFSET (0x0054)
+/* MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL, HORIZONTAL_INITIAL_POS */
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_INITIAL_POS_MASK (0xFFFF0000)
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_INITIAL_POS_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_INITIAL_POS_SHIFT (16)
+/* MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL, HORIZONTAL_SCALE_PITCH */
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_SCALE_PITCH_MASK (0x0000FFFF)
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_SCALE_PITCH_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_HORIZONTAL_SCALE_PITCH_SHIFT (0)
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_OFFSET (0x0058)
+/* MSVDX_CMDS, VERTICAL_SCALE_CONTROL, VERTICAL_INITIAL_POS */
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_INITIAL_POS_MASK (0xFFFF0000)
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_INITIAL_POS_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_INITIAL_POS_SHIFT (16)
+/* MSVDX_CMDS, VERTICAL_SCALE_CONTROL, VERTICAL_SCALE_PITCH */
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_SCALE_PITCH_MASK (0x0000FFFF)
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_SCALE_PITCH_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_VERTICAL_SCALE_CONTROL_VERTICAL_SCALE_PITCH_SHIFT (0)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_OFFSET (0x01B4)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_BIT_DEPTH_CHROMA_MINUS8 */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_CHROMA_MINUS8_MASK (0x00007000)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_CHROMA_MINUS8_LSBMASK \
+ (0x00000007)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_CHROMA_MINUS8_SHIFT (12)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_BIT_DEPTH_LUMA_MINUS8 */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_LUMA_MINUS8_MASK (0x00000700)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_LUMA_MINUS8_LSBMASK (0x00000007)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_BIT_DEPTH_LUMA_MINUS8_SHIFT (8)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, SCALE_LUMA_BIFILTER_HORIZ */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_HORIZ_MASK (0x00000080)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_HORIZ_LSBMASK (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_HORIZ_SHIFT (7)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, SCALE_LUMA_BIFILTER_VERT */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_VERT_MASK (0x00000040)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_VERT_LSBMASK (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_LUMA_BIFILTER_VERT_SHIFT (6)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, SCALE_CHROMA_BIFILTER_HORIZ */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_HORIZ_MASK (0x00000020)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_HORIZ_LSBMASK \
+ (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_HORIZ_SHIFT (5)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, SCALE_CHROMA_BIFILTER_VERT */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_VERT_MASK (0x00000010)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_VERT_LSBMASK \
+ (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_BIFILTER_VERT_SHIFT (4)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_MEMORY_PACKING */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_MEMORY_PACKING_MASK (0x00000008)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_MEMORY_PACKING_LSBMASK (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_MEMORY_PACKING_SHIFT (3)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, SCALE_CHROMA_RESAMP_ONLY */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_RESAMP_ONLY_MASK (0x00000004)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_RESAMP_ONLY_LSBMASK (0x00000001)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_SCALE_CHROMA_RESAMP_ONLY_SHIFT (2)
+/* MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_OUTPUT_FORMAT */
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_OUTPUT_FORMAT_MASK (0x00000003)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_OUTPUT_FORMAT_LSBMASK (0x00000003)
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_ALT_OUTPUT_FORMAT_SHIFT (0)
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_OFFSET (0x01B8)
+/* MSVDX_CMDS, SCALE_OUTPUT_SIZE, SCALE_OUTPUT_HEIGHT_MIN1 */
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_HEIGHT_MIN1_MASK (0xFFFF0000)
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_HEIGHT_MIN1_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_HEIGHT_MIN1_SHIFT (16)
+/* MSVDX_CMDS, SCALE_OUTPUT_SIZE, SCALE_OUTPUT_WIDTH_MIN1 */
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_WIDTH_MIN1_MASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_WIDTH_MIN1_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_OUTPUT_SIZE_SCALE_OUTPUT_WIDTH_MIN1_SHIFT (0)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_OFFSET (0x01BC)
+/* MSVDX_CMDS, SCALE_HORIZONTAL_CHROMA, CHROMA_HORIZONTAL_INITIAL */
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_INITIAL_MASK (0xFFFF0000)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_INITIAL_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_INITIAL_SHIFT (16)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_PITCH_MASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_PITCH_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_CHROMA_HORIZONTAL_PITCH_SHIFT (0)
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_OFFSET (0x01C0)
+/* MSVDX_CMDS, SCALE_VERTICAL_CHROMA, CHROMA_VERTICAL_INITIAL */
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_INITIAL_MASK (0xFFFF0000)
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_INITIAL_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_INITIAL_SHIFT (16)
+/* MSVDX_CMDS, SCALE_VERTICAL_CHROMA, CHROMA_VERTICAL_PITCH */
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_PITCH_MASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_PITCH_LSBMASK (0x0000FFFF)
+#define MSVDX_CMDS_SCALE_VERTICAL_CHROMA_CHROMA_VERTICAL_PITCH_SHIFT (0)
+/* MSVDX_CMDS, MULTICORE_OPERATING_MODE, MBLK_ROW_OFFSET */
+#define MSVDX_CMDS_AUX_LINE_BUFFER_BASE_ADDRESS_OFFSET (0x01EC)
+
+#endif /* _IMG_MSVDX_CMDS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_msvdx_core_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_core_regs.h
new file mode 100644
index 000000000000..d46d5e04e826
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_core_regs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG MSVDX core Registers
+ * This file contains the MSVDX core register definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_MSVDX_CORE_REGS_H
+#define _IMG_MSVDX_CORE_REGS_H
+
+#define MSVDX_CORE_CR_MMU_TILE_NO_ENTRIES (4)
+#define MSVDX_CORE_CR_MMU_TILE_EXT_NO_ENTRIES (4)
+
+#endif /* _IMG_MSVDX_CORE_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vdmc_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vdmc_regs.h
new file mode 100644
index 000000000000..493eb7114ad7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vdmc_regs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG MSVDX VDMC Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_MSVDX_VDMC_REGS_H
+#define _IMG_MSVDX_VDMC_REGS_H
+
+/* MSVDX_VDMC, CR_VDMC_MACROBLOCK_NUMBER, CR_VDMC_MACROBLOCK_X_OFFSET */
+#define MSVDX_VDMC_CR_VDMC_MACROBLOCK_NUMBER_CR_VDMC_MACROBLOCK_X_OFFSET_MASK (0x0000FFFF)
+#define MSVDX_VDMC_CR_VDMC_MACROBLOCK_NUMBER_CR_VDMC_MACROBLOCK_X_OFFSET_SHIFT (0)
+
+/* MSVDX_VDMC, CR_VDMC_MACROBLOCK_NUMBER, CR_VDMC_MACROBLOCK_Y_OFFSET */
+#define MSVDX_VDMC_CR_VDMC_MACROBLOCK_NUMBER_CR_VDMC_MACROBLOCK_Y_OFFSET_MASK (0xFFFF0000)
+#define MSVDX_VDMC_CR_VDMC_MACROBLOCK_NUMBER_CR_VDMC_MACROBLOCK_Y_OFFSET_SHIFT (16)
+
+#endif /* _IMG_MSVDX_VDMC_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vec_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vec_regs.h
new file mode 100644
index 000000000000..58840d501b53
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_msvdx_vec_regs.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG MSVDX VEC Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __MSVDX_VEC_REGS_H__
+#define __MSVDX_VEC_REGS_H__
+
+ /* MSVDX_VEC, CR_VEC_VLR_COMMANDS_NUM, VLR_COMMANDS_STORE_NUMBER_OF_CMDS */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_OFFSET (0x00EC)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_ADDR0, VLC_TABLE_ADDR0 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_VLC_TABLE_ADDR0_MASK (0x000007FF)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_ADDR15, VLC_TABLE_ADDR31 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR16_OFFSET (0x01C0)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_ADDR18, VLC_TABLE_ADDR37 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_OFFSET (0x012C)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_ADDR0, VLC_TABLE_ADDR1 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_VLC_TABLE_ADDR1_SHIFT (11)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_INITIAL_WIDTH0, VLC_TABLE_INITIAL_WIDTH0 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_VLC_TABLE_INITIAL_WIDTH0_MASK (0x00000007)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_INITIAL_WIDTH0, VLC_TABLE_INITIAL_WIDTH1 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_VLC_TABLE_INITIAL_WIDTH1_SHIFT (3)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_INITIAL_OPCODE0, VLC_TABLE_INITIAL_OPCODE0 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_VLC_TABLE_INITIAL_OPCODE0_MASK \
+ (0x00000003)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_INITIAL_WIDTH3, VLC_TABLE_INITIAL_WIDTH37 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_OFFSET (0x013C)
+
+ /* MSVDX_VEC, CR_VEC_VLC_TABLE_INITIAL_OPCODE0, VLC_TABLE_INITIAL_OPCODE1 */
+#define MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_VLC_TABLE_INITIAL_OPCODE1_SHIFT (2)
+
+
+#endif /* __MSVDX_VEC_REGS_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_pixfmts.h b/drivers/media/platform/vxe-vxd/decoder/img_pixfmts.h
new file mode 100644
index 000000000000..f21c8f9da4b5
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_pixfmts.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC pixel format definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ */
+
+#ifndef __IMG_PIXFMTS_H__
+#define __IMG_PIXFMTS_H__
+/*
+ * Old pixel format definitions.
+ *
+ * Note: some names differ from the HW documentation (driver name vs. HW doc):
+ * - PL8 is defined as PL111
+ * - PL12 is sometimes used wrongly for monochrome formats instead of PL_Y
+ */
+enum img_pixfmt {
+ IMG_PIXFMT_CLUT1 = 0,
+ IMG_PIXFMT_CLUT2 = 1,
+ IMG_PIXFMT_CLUT4 = 2,
+ IMG_PIXFMT_I4A4 = 3,
+ IMG_PIXFMT_I8A8 = 4,
+ IMG_PIXFMT_A8I8 = 51,
+ IMG_PIXFMT_RGB8 = 5,
+ IMG_PIXFMT_RGB332 = 6,
+ IMG_PIXFMT_RGB555 = 7,
+ IMG_PIXFMT_ARGB4444 = 8,
+ IMG_PIXFMT_ABGR4444 = 57,
+ IMG_PIXFMT_RGBA4444 = 58,
+ IMG_PIXFMT_BGRA4444 = 59,
+ IMG_PIXFMT_ARGB1555 = 9,
+ IMG_PIXFMT_ABGR1555 = 60,
+ IMG_PIXFMT_RGBA5551 = 61,
+ IMG_PIXFMT_BGRA5551 = 62,
+ IMG_PIXFMT_RGB565 = 10,
+ IMG_PIXFMT_BGR565 = 63,
+ IMG_PIXFMT_RGB888 = 11,
+ IMG_PIXFMT_RSGSBS888 = 68,
+ IMG_PIXFMT_ARGB8888 = 12,
+ IMG_PIXFMT_ABGR8888 = 41,
+ IMG_PIXFMT_BGRA8888 = 42,
+ IMG_PIXFMT_RGBA8888 = 56,
+ IMG_PIXFMT_ARGB8332 = 43,
+ IMG_PIXFMT_ARGB8161616 = 64,
+ IMG_PIXFMT_ARGB2101010 = 67,
+ IMG_PIXFMT_UYVY8888 = 13,
+ IMG_PIXFMT_VYUY8888 = 14,
+ IMG_PIXFMT_YVYU8888 = 15,
+ IMG_PIXFMT_YUYV8888 = 16,
+ IMG_PIXFMT_UYVY10101010 = 17,
+ IMG_PIXFMT_VYAUYA8888 = 18,
+ IMG_PIXFMT_YUV101010 = 19,
+ IMG_PIXFMT_AYUV4444 = 20,
+ IMG_PIXFMT_YUV888 = 21,
+ IMG_PIXFMT_AYUV8888 = 22,
+ IMG_PIXFMT_AYUV2101010 = 23,
+ IMG_PIXFMT_411PL111YUV8 = 120,
+ IMG_PIXFMT_411PL12YUV8 = 121,
+ IMG_PIXFMT_411PL12YVU8 = 24,
+ IMG_PIXFMT_420PL12YUV8 = 25,
+ IMG_PIXFMT_420PL12YVU8 = 26,
+ IMG_PIXFMT_422PL12YUV8 = 27,
+ IMG_PIXFMT_422PL12YVU8 = 28,
+ IMG_PIXFMT_420PL8YUV8 = 47,
+ IMG_PIXFMT_422PL8YUV8 = 48,
+ IMG_PIXFMT_420PL12YUV8_A8 = 31,
+ IMG_PIXFMT_422PL12YUV8_A8 = 32,
+ IMG_PIXFMT_PL12Y8 = 33,
+ IMG_PIXFMT_PL12YV8 = 35,
+ IMG_PIXFMT_PL12IMC2 = 36,
+ IMG_PIXFMT_A4 = 37,
+ IMG_PIXFMT_A8 = 38,
+ IMG_PIXFMT_YUV8 = 39,
+ IMG_PIXFMT_CVBS10 = 40,
+ IMG_PIXFMT_PL12YV12 = 44,
+#if ((!defined(METAG) && !defined(MTXG)) || defined(__linux__))
+ IMG_PIXFMT_F16 = 52,
+ IMG_PIXFMT_F32 = 53,
+ IMG_PIXFMT_F16F16F16F16 = 65,
+#endif
+ IMG_PIXFMT_L16 = 54,
+ IMG_PIXFMT_L32 = 55,
+ IMG_PIXFMT_Y1 = 66,
+ IMG_PIXFMT_444PL111YUV8 = 69,
+ IMG_PIXFMT_444PL12YUV8 = 137,
+ IMG_PIXFMT_444PL12YVU8 = 138,
+ IMG_PIXFMT_PL12Y10 = 34,
+ IMG_PIXFMT_PL12Y10_LSB = 96,
+ IMG_PIXFMT_PL12Y10_MSB = 97,
+ IMG_PIXFMT_420PL8YUV10 = 49,
+ IMG_PIXFMT_420PL111YUV10_LSB = 71,
+ IMG_PIXFMT_420PL111YUV10_MSB = 72,
+ IMG_PIXFMT_420PL12YUV10 = 29,
+ IMG_PIXFMT_420PL12YUV10_LSB = 74,
+ IMG_PIXFMT_420PL12YUV10_MSB = 75,
+ IMG_PIXFMT_420PL12YVU10 = 45,
+ IMG_PIXFMT_420PL12YVU10_LSB = 77,
+ IMG_PIXFMT_420PL12YVU10_MSB = 78,
+ IMG_PIXFMT_422PL8YUV10 = 50,
+ IMG_PIXFMT_422PL111YUV10_LSB = 122,
+ IMG_PIXFMT_422PL111YUV10_MSB = 123,
+ IMG_PIXFMT_422PL12YUV10 = 30,
+ IMG_PIXFMT_422PL12YUV10_LSB = 80,
+ IMG_PIXFMT_422PL12YUV10_MSB = 81,
+ IMG_PIXFMT_422PL12YVU10 = 46,
+ IMG_PIXFMT_422PL12YVU10_LSB = 83,
+ IMG_PIXFMT_422PL12YVU10_MSB = 84,
+ IMG_PIXFMT_444PL111YUV10 = 85,
+ IMG_PIXFMT_444PL111YUV10_LSB = 86,
+ IMG_PIXFMT_444PL111YUV10_MSB = 87,
+ IMG_PIXFMT_444PL12YUV10 = 139,
+ IMG_PIXFMT_444PL12YUV10_LSB = 141,
+ IMG_PIXFMT_444PL12YUV10_MSB = 142,
+ IMG_PIXFMT_444PL12YVU10 = 140,
+ IMG_PIXFMT_444PL12YVU10_LSB = 143,
+ IMG_PIXFMT_444PL12YVU10_MSB = 144,
+ IMG_PIXFMT_420PL12Y8UV10 = 88,
+ IMG_PIXFMT_420PL12Y8UV10_LSB = 98,
+ IMG_PIXFMT_420PL12Y8UV10_MSB = 99,
+ IMG_PIXFMT_420PL12Y8VU10 = 89,
+ IMG_PIXFMT_420PL12Y8VU10_LSB = 100,
+ IMG_PIXFMT_420PL12Y8VU10_MSB = 101,
+ IMG_PIXFMT_420PL111Y8UV10 = 70,
+ IMG_PIXFMT_420PL111Y8UV10_LSB = 127,
+ IMG_PIXFMT_420PL111Y8UV10_MSB = 125,
+ IMG_PIXFMT_422PL12Y8UV10 = 90,
+ IMG_PIXFMT_422PL12Y8UV10_LSB = 102,
+ IMG_PIXFMT_422PL12Y8UV10_MSB = 103,
+ IMG_PIXFMT_422PL12Y8VU10 = 91,
+ IMG_PIXFMT_422PL12Y8VU10_LSB = 104,
+ IMG_PIXFMT_422PL12Y8VU10_MSB = 105,
+ IMG_PIXFMT_444PL12Y8UV10 = 151,
+ IMG_PIXFMT_444PL12Y8UV10_LSB = 153,
+ IMG_PIXFMT_444PL12Y8UV10_MSB = 154,
+ IMG_PIXFMT_444PL12Y8VU10 = 152,
+ IMG_PIXFMT_444PL12Y8VU10_LSB = 155,
+ IMG_PIXFMT_444PL12Y8VU10_MSB = 156,
+ IMG_PIXFMT_420PL12Y10UV8 = 92,
+ IMG_PIXFMT_420PL12Y10UV8_LSB = 106,
+ IMG_PIXFMT_420PL12Y10UV8_MSB = 107,
+
+ IMG_PIXFMT_420PL12Y10VU8 = 93,
+ IMG_PIXFMT_420PL12Y10VU8_LSB = 108,
+ IMG_PIXFMT_420PL12Y10VU8_MSB = 109,
+
+ IMG_PIXFMT_420PL111Y10UV8 = 129,
+ IMG_PIXFMT_420PL111Y10UV8_LSB = 133,
+ IMG_PIXFMT_420PL111Y10UV8_MSB = 131,
+ IMG_PIXFMT_422PL12Y10UV8 = 94,
+ IMG_PIXFMT_422PL12Y10UV8_LSB = 110,
+ IMG_PIXFMT_422PL12Y10UV8_MSB = 111,
+ IMG_PIXFMT_422PL12Y10VU8 = 95,
+ IMG_PIXFMT_422PL12Y10VU8_LSB = 112,
+ IMG_PIXFMT_422PL12Y10VU8_MSB = 113,
+
+ IMG_PIXFMT_444PL111Y10UV8 = 114,
+ IMG_PIXFMT_444PL111Y10UV8_LSB = 115,
+ IMG_PIXFMT_444PL111Y10UV8_MSB = 116,
+ IMG_PIXFMT_444PL111Y8UV10 = 117,
+ IMG_PIXFMT_444PL111Y8UV10_LSB = 118,
+ IMG_PIXFMT_444PL111Y8UV10_MSB = 119,
+ IMG_PIXFMT_444PL12Y10UV8 = 145,
+ IMG_PIXFMT_444PL12Y10UV8_LSB = 147,
+ IMG_PIXFMT_444PL12Y10UV8_MSB = 148,
+ IMG_PIXFMT_444PL12Y10VU8 = 146,
+ IMG_PIXFMT_444PL12Y10VU8_LSB = 149,
+ IMG_PIXFMT_444PL12Y10VU8_MSB = 150,
+ IMG_PIXFMT_422PL111Y8UV10 = 124,
+ IMG_PIXFMT_422PL111Y8UV10_MSB = 126,
+ IMG_PIXFMT_422PL111Y8UV10_LSB = 128,
+
+ IMG_PIXFMT_422PL111Y10UV8 = 130,
+ IMG_PIXFMT_422PL111Y10UV8_LSB = 134,
+ IMG_PIXFMT_422PL111Y10UV8_MSB = 132,
+ IMG_PIXFMT_420PL8YUV12 = 160,
+ IMG_PIXFMT_422PL8YUV12 = 161,
+ IMG_PIXFMT_444PL8YUV12 = 162,
+ IMG_PIXFMT_420PL8YUV14 = 163,
+ IMG_PIXFMT_422PL8YUV14 = 164,
+ IMG_PIXFMT_444PL8YUV14 = 165,
+ IMG_PIXFMT_420PL8YUV16 = 166,
+ IMG_PIXFMT_422PL8YUV16 = 167,
+ IMG_PIXFMT_444PL8YUV16 = 168,
+ IMG_PIXFMT_UNDEFINED = 255,
+
+ IMG_PIXFMT_ARBPLANAR8 = 65536,
+ IMG_PIXFMT_ARBPLANAR8_LAST = IMG_PIXFMT_ARBPLANAR8 + 0xffff,
+ IMG_PIXFMT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_profiles_levels.h b/drivers/media/platform/vxe-vxd/decoder/img_profiles_levels.h
new file mode 100644
index 000000000000..710b429f7f3e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_profiles_levels.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC SYSDEV and UI Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __IMG_PROFILES_LEVELS_H
+#define __IMG_PROFILES_LEVELS_H
+
+#include "vdecdd_utils.h"
+
+/* Minimum level value for h.264 */
+#define H264_LEVEL_MIN (9)
+/* Maximum level value for h.264 */
+#define H264_LEVEL_MAX (52)
+/* Number of major levels for h.264 (5 + 1 for special levels) */
+#define H264_LEVEL_MAJOR_NUM (6)
+/* Number of minor levels for h.264 */
+#define H264_LEVEL_MINOR_NUM (4)
+/* Number of major levels for HEVC */
+#define HEVC_LEVEL_MAJOR_NUM (6)
+/* Number of minor levels for HEVC */
+#define HEVC_LEVEL_MINOR_NUM (3)
+
+#endif /*__IMG_PROFILES_LEVELS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_pvdec_core_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_core_regs.h
new file mode 100644
index 000000000000..70bb68a3154f
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_core_regs.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG PVDEC CORE Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_PVDEC_CORE_REGS_H
+#define _IMG_PVDEC_CORE_REGS_H
+
+/* PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS, CR_HOST_SYS_WDT */
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_SYS_WDT_MASK (0x10000000)
+
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_SYS_WDT_SHIFT (28)
+
+/* PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS, CR_HOST_READ_TIMEOUT_PROC_IRQ */
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_READ_TIMEOUT_PROC_IRQ_MASK \
+ (0x08000000)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_REV, CR_PVDEC_MAJOR_REV */
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MAJOR_REV_MASK (0x00FF0000)
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MAJOR_REV_SHIFT (16)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_REV, CR_PVDEC_MINOR_REV */
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MINOR_REV_MASK (0x0000FF00)
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MINOR_REV_SHIFT (8)
+
+/* PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS, CR_HOST_READ_TIMEOUT_PROC_IRQ */
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_READ_TIMEOUT_PROC_IRQ_SHIFT (27)
+
+/* PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS, CR_HOST_COMMAND_TIMEOUT_PROC_IRQ */
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_COMMAND_TIMEOUT_PROC_IRQ_MASK \
+ (0x04000000)
+#define PVDEC_CORE_CR_PVDEC_HOST_INTERRUPT_STATUS_CR_HOST_COMMAND_TIMEOUT_PROC_IRQ_SHIFT \
+ (26)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_ID, CR_GROUP_ID */
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_GROUP_ID_MASK (0xFF000000)
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_GROUP_ID_SHIFT (24)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_REV, CR_PVDEC_MAINT_REV */
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MAINT_REV_MASK (0x000000FF)
+#define PVDEC_CORE_CR_PVDEC_CORE_REV_CR_PVDEC_MAINT_REV_SHIFT (0)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_ID, CR_CORE_ID */
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_CORE_ID_MASK (0x00FF0000)
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_CORE_ID_SHIFT (16)
+
+/* PVDEC_CORE, CR_PVDEC_CORE_ID, CR_PVDEC_CORE_CONFIG */
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_PVDEC_CORE_CONFIG_MASK (0x0000FFFF)
+#define PVDEC_CORE_CR_PVDEC_CORE_ID_CR_PVDEC_CORE_CONFIG_SHIFT (0)
+
+#endif /* _IMG_PVDEC_CORE_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_pvdec_pixel_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_pixel_regs.h
new file mode 100644
index 000000000000..be122c41d4b9
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_pixel_regs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG PVDEC pixel Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_PVDEC_PIXEL_REGS_H
+#define _IMG_PVDEC_PIXEL_REGS_H
+
+/* PVDEC_PIXEL, CR_MAX_FRAME_CONFIG, CR_PVDEC_HOR_MSB */
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_PVDEC_HOR_MSB_MASK (0x001F0000)
+
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_PVDEC_HOR_MSB_SHIFT (16)
+
+/* PVDEC_PIXEL, CR_MAX_FRAME_CONFIG, CR_PVDEC_VER_MSB */
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_PVDEC_VER_MSB_MASK (0x1F000000)
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_PVDEC_VER_MSB_SHIFT (24)
+
+/* PVDEC_PIXEL, CR_MAX_FRAME_CONFIG, CR_MSVDX_HOR_MSB */
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_MSVDX_HOR_MSB_MASK (0x0000001F)
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_MSVDX_HOR_MSB_SHIFT (0)
+
+/* PVDEC_PIXEL, CR_MAX_FRAME_CONFIG, CR_MSVDX_VER_MSB */
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_MSVDX_VER_MSB_MASK (0x00001F00)
+#define PVDEC_PIXEL_CR_MAX_FRAME_CONFIG_CR_MSVDX_VER_MSB_SHIFT (8)
+
+#endif /* _IMG_PVDEC_PIXEL_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_pvdec_test_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_test_regs.h
new file mode 100644
index 000000000000..7cf2f2ded360
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_pvdec_test_regs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG PVDEC test Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_PVDEC_TEST_REGS_H
+#define _IMG_PVDEC_TEST_REGS_H
+
+/* PVDEC_TEST, RAND_STL_MEM_RDATA_CONFIG, STALL_ENABLE_MEM_RDATA */
+#define PVDEC_TEST_MEM_READ_LATENCY_OFFSET (0x00F0)
+
+/* PVDEC_TEST, MEM_READ_LATENCY, READ_RESPONSE_RAND_LATENCY */
+#define PVDEC_TEST_MEM_WRITE_RESPONSE_LATENCY_OFFSET (0x00F4)
+
+/* PVDEC_TEST, MEM_WRITE_RESPONSE_LATENCY, WRITE_RESPONSE_RAND_LATENCY */
+#define PVDEC_TEST_MEM_CTRL_OFFSET (0x00F8)
+
+/* PVDEC_TEST, RAND_STL_MEM_WDATA_CONFIG, STALL_ENABLE_MEM_WDATA */
+#define PVDEC_TEST_RAND_STL_MEM_WRESP_CONFIG_OFFSET (0x00E8)
+
+/* PVDEC_TEST, RAND_STL_MEM_WRESP_CONFIG, STALL_ENABLE_MEM_WRESP */
+#define PVDEC_TEST_RAND_STL_MEM_RDATA_CONFIG_OFFSET (0x00EC)
+
+/* PVDEC_TEST, MEMORY_BUS2_MONITOR_2, BUS2_ADDR */
+#define PVDEC_TEST_RAND_STL_MEM_CMD_CONFIG_OFFSET (0x00E0)
+
+/* PVDEC_TEST, RAND_STL_MEM_CMD_CONFIG, STALL_ENABLE_MEM_CMD */
+#define PVDEC_TEST_RAND_STL_MEM_WDATA_CONFIG_OFFSET (0x00E4)
+
+#endif /* _IMG_PVDEC_TEST_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_vdec_fw_msg.h b/drivers/media/platform/vxe-vxd/decoder/img_vdec_fw_msg.h
new file mode 100644
index 000000000000..5a655b552f14
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_vdec_fw_msg.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG VDEC firmware messages
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_VDEC_FW_MSG_H
+#define _IMG_VDEC_FW_MSG_H
+
+#include <linux/types.h>
+
+/* FW_DEVA_COMPLETED ERROR_FLAGS */
+#define FW_DEVA_COMPLETED_ERROR_FLAGS_TYPE unsigned short
+#define FW_DEVA_COMPLETED_ERROR_FLAGS_MASK (0xFFFF)
+#define FW_DEVA_COMPLETED_ERROR_FLAGS_SHIFT (0)
+#define FW_DEVA_COMPLETED_ERROR_FLAGS_OFFSET (0x000C)
+
+/* FW_DEVA_COMPLETED NUM_BEWDTS */
+#define FW_DEVA_COMPLETED_NUM_BEWDTS_TYPE unsigned int
+#define FW_DEVA_COMPLETED_NUM_BEWDTS_MASK (0xFFFFFFFF)
+#define FW_DEVA_COMPLETED_NUM_BEWDTS_SHIFT (0)
+#define FW_DEVA_COMPLETED_NUM_BEWDTS_OFFSET (0x0010)
+
+/* FW_DEVA_COMPLETED NUM_MBSDROPPED */
+#define FW_DEVA_COMPLETED_NUM_MBSDROPPED_TYPE unsigned int
+#define FW_DEVA_COMPLETED_NUM_MBSDROPPED_MASK (0xFFFFFFFF)
+#define FW_DEVA_COMPLETED_NUM_MBSDROPPED_SHIFT (0)
+#define FW_DEVA_COMPLETED_NUM_MBSDROPPED_OFFSET (0x0014)
+
+/* FW_DEVA_COMPLETED NUM_MBSRECOVERED */
+#define FW_DEVA_COMPLETED_NUM_MBSRECOVERED_TYPE unsigned int
+#define FW_DEVA_COMPLETED_NUM_MBSRECOVERED_MASK (0xFFFFFFFF)
+#define FW_DEVA_COMPLETED_NUM_MBSRECOVERED_SHIFT (0)
+#define FW_DEVA_COMPLETED_NUM_MBSRECOVERED_OFFSET (0x0018)
+
+/* FW_DEVA_PANIC ERROR_INT */
+#define FW_DEVA_PANIC_ERROR_INT_TYPE unsigned int
+#define FW_DEVA_PANIC_ERROR_INT_MASK (0xFFFFFFFF)
+#define FW_DEVA_PANIC_ERROR_INT_SHIFT (0)
+#define FW_DEVA_PANIC_ERROR_INT_OFFSET (0x000C)
+
+/* FW_ASSERT FILE_NAME_HASH */
+#define FW_ASSERT_FILE_NAME_HASH_TYPE unsigned int
+#define FW_ASSERT_FILE_NAME_HASH_MASK (0xFFFFFFFF)
+#define FW_ASSERT_FILE_NAME_HASH_SHIFT (0)
+#define FW_ASSERT_FILE_NAME_HASH_OFFSET (0x0004)
+
+/* FW_ASSERT FILE_LINE */
+#define FW_ASSERT_FILE_LINE_TYPE unsigned int
+#define FW_ASSERT_FILE_LINE_MASK (0xFFFFFFFE)
+#define FW_ASSERT_FILE_LINE_SHIFT (1)
+#define FW_ASSERT_FILE_LINE_OFFSET (0x0008)
+
+/* FW_SO TASK_NAME */
+#define FW_SO_TASK_NAME_TYPE unsigned int
+#define FW_SO_TASK_NAME_MASK (0xFFFFFFFF)
+#define FW_SO_TASK_NAME_SHIFT (0)
+#define FW_SO_TASK_NAME_OFFSET (0x0004)
+
+/* FW_DEVA_GENMSG TRANS_ID */
+#define FW_DEVA_GENMSG_TRANS_ID_TYPE unsigned int
+#define FW_DEVA_GENMSG_TRANS_ID_MASK (0xFFFFFFFF)
+#define FW_DEVA_GENMSG_TRANS_ID_SHIFT (0)
+#define FW_DEVA_GENMSG_TRANS_ID_OFFSET (0x0008)
+
+/* FW_DEVA_GENMSG MSG_TYPE */
+#define FW_DEVA_GENMSG_MSG_TYPE_TYPE unsigned char
+#define FW_DEVA_GENMSG_MSG_TYPE_MASK (0xFF)
+#define FW_DEVA_GENMSG_MSG_TYPE_SHIFT (0)
+#define FW_DEVA_GENMSG_MSG_TYPE_OFFSET (0x0001)
+
+/* FW_DEVA_SIGNATURES SIGNATURES */
+#define FW_DEVA_SIGNATURES_SIGNATURES_OFFSET (0x0010)
+
+/* FW_DEVA_SIGNATURES MSG_SIZE */
+#define FW_DEVA_SIGNATURES_MSG_SIZE_TYPE unsigned char
+#define FW_DEVA_SIGNATURES_MSG_SIZE_MASK (0x7F)
+#define FW_DEVA_SIGNATURES_MSG_SIZE_SHIFT (0)
+#define FW_DEVA_SIGNATURES_MSG_SIZE_OFFSET (0x0000)
+
+/* FW_DEVA_CONTIGUITY_WARNING BEGIN_MB_NUM */
+#define FW_DEVA_SIGNATURES_SIZE (20)
+
+/* FW_DEVA_SIGNATURES SIGNATURE_SELECT */
+#define FW_DEVA_SIGNATURES_SIGNATURE_SELECT_TYPE unsigned int
+#define FW_DEVA_SIGNATURES_SIGNATURE_SELECT_MASK (0xFFFFFFFF)
+#define FW_DEVA_SIGNATURES_SIGNATURE_SELECT_SHIFT (0)
+#define FW_DEVA_SIGNATURES_SIGNATURE_SELECT_OFFSET (0x000C)
+
+/* FW_DEVA_GENMSG TRANS_ID */
+#define FW_DEVA_DECODE_SIZE (52)
+
+/* FW_DEVA_DECODE CTRL_ALLOC_ADDR */
+#define FW_DEVA_DECODE_CTRL_ALLOC_ADDR_TYPE unsigned int
+#define FW_DEVA_DECODE_CTRL_ALLOC_ADDR_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_CTRL_ALLOC_ADDR_SHIFT (0)
+#define FW_DEVA_DECODE_CTRL_ALLOC_ADDR_OFFSET (0x0010)
+
+/* FW_DEVA_DECODE BUFFER_SIZE */
+#define FW_DEVA_DECODE_BUFFER_SIZE_TYPE unsigned short
+#define FW_DEVA_DECODE_BUFFER_SIZE_MASK (0xFFFF)
+#define FW_DEVA_DECODE_BUFFER_SIZE_SHIFT (0)
+#define FW_DEVA_DECODE_BUFFER_SIZE_OFFSET (0x000E)
+
+/* FW_DEVA_DECODE OPERATING_MODE */
+#define FW_DEVA_DECODE_OPERATING_MODE_TYPE unsigned int
+#define FW_DEVA_DECODE_OPERATING_MODE_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_OPERATING_MODE_OFFSET (0x0018)
+#define FW_DEVA_DECODE_OPERATING_MODE_SHIFT (0)
+
+/* FW_DEVA_DECODE FLAGS */
+#define FW_DEVA_DECODE_FLAGS_TYPE unsigned short
+#define FW_DEVA_DECODE_FLAGS_MASK (0xFFFF)
+#define FW_DEVA_DECODE_FLAGS_SHIFT (0)
+#define FW_DEVA_DECODE_FLAGS_OFFSET (0x000C)
+
+/* FW_DEVA_DECODE VDEC_FLAGS */
+#define FW_DEVA_DECODE_VDEC_FLAGS_TYPE unsigned char
+#define FW_DEVA_DECODE_VDEC_FLAGS_MASK (0xFF)
+#define FW_DEVA_DECODE_VDEC_FLAGS_SHIFT (0)
+#define FW_DEVA_DECODE_VDEC_FLAGS_OFFSET (0x001E)
+
+/* FW_DEVA_DECODE GENC_ID */
+#define FW_DEVA_DECODE_GENC_ID_TYPE unsigned int
+#define FW_DEVA_DECODE_GENC_ID_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_GENC_ID_SHIFT (0)
+#define FW_DEVA_DECODE_GENC_ID_OFFSET (0x0028)
+
+/* FW_DEVA_DECODE MB_LOAD */
+#define FW_DEVA_DECODE_MB_LOAD_TYPE unsigned int
+#define FW_DEVA_DECODE_MB_LOAD_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_MB_LOAD_OFFSET (0x0030)
+#define FW_DEVA_DECODE_MB_LOAD_SHIFT (0)
+#define FW_DEVA_DECODE_FRAGMENT_SIZE (16)
+
+/* FW_DEVA_DECODE STREAMID */
+#define FW_DEVA_DECODE_STREAMID_TYPE unsigned char
+#define FW_DEVA_DECODE_STREAMID_MASK (0xFF)
+#define FW_DEVA_DECODE_STREAMID_OFFSET (0x001F)
+#define FW_DEVA_DECODE_STREAMID_SHIFT (0)
+
+/* FW_DEVA_DECODE EXT_STATE_BUFFER */
+#define FW_DEVA_DECODE_EXT_STATE_BUFFER_TYPE unsigned int
+#define FW_DEVA_DECODE_EXT_STATE_BUFFER_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_EXT_STATE_BUFFER_OFFSET (0x0020)
+#define FW_DEVA_DECODE_EXT_STATE_BUFFER_SHIFT (0)
+
+/* FW_DEVA_DECODE MSG_ID */
+#define FW_DEVA_DECODE_MSG_ID_TYPE unsigned short
+#define FW_DEVA_DECODE_MSG_ID_MASK (0xFFFF)
+#define FW_DEVA_DECODE_MSG_ID_OFFSET (0x0002)
+#define FW_DEVA_DECODE_MSG_ID_SHIFT (0)
+
+/* FW_DEVA_DECODE TRANS_ID */
+#define FW_DEVA_DECODE_TRANS_ID_TYPE unsigned int
+#define FW_DEVA_DECODE_TRANS_ID_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_TRANS_ID_OFFSET (0x0008)
+#define FW_DEVA_DECODE_TRANS_ID_SHIFT (0)
+
+/* FW_DEVA_DECODE TILE_CFG */
+#define FW_DEVA_DECODE_TILE_CFG_TYPE unsigned int
+#define FW_DEVA_DECODE_TILE_CFG_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_TILE_CFG_OFFSET (0x0024)
+#define FW_DEVA_DECODE_TILE_CFG_SHIFT (0)
+
+/* FW_DEVA_GENMSG MSG_SIZE */
+#define FW_DEVA_GENMSG_MSG_SIZE_TYPE unsigned char
+#define FW_DEVA_GENMSG_MSG_SIZE_MASK (0x7F)
+#define FW_DEVA_GENMSG_MSG_SIZE_OFFSET (0x0000)
+#define FW_DEVA_GENMSG_MSG_SIZE_SHIFT (0)
+
+/* FW_DEVA_DECODE_FRAGMENT CTRL_ALLOC_ADDR */
+#define FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR_TYPE unsigned int
+#define FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR_MASK (0xFFFFFFFF)
+#define FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR_OFFSET (0x000C)
+#define FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR_SHIFT (0)
+
+/* FW_DEVA_DECODE_FRAGMENT BUFFER_SIZE */
+#define FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE_TYPE unsigned short
+#define FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE_MASK (0xFFFF)
+#define FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE_OFFSET (0x000A)
+#define FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE_SHIFT (0)
+
+#endif /* _IMG_VDEC_FW_MSG_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/img_video_bus4_mmu_regs.h b/drivers/media/platform/vxe-vxd/decoder/img_video_bus4_mmu_regs.h
new file mode 100644
index 000000000000..34c1cf4e55ec
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/img_video_bus4_mmu_regs.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG video bus4 mmu registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_VIDEO_BUS4_MMU_REGS_H
+#define _IMG_VIDEO_BUS4_MMU_REGS_H
+
+#define IMG_VIDEO_BUS4_MMU_MMU_DIR_BASE_ADDR_OFFSET (0x0020)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, MMU_BYPASS */
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_BYPASS_MASK (0x00000001)
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_BYPASS_SHIFT (0)
+
+/* IMG_VIDEO_BUS4_MMU, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP */
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_OFFSET (0x0070)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET */
+#define IMG_VIDEO_BUS4_MMU_MMU_BANK_INDEX_OFFSET (0x0010)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_SOFT_RESET_SHIFT (28)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR, TILE_MAX_ADDR */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_OFFSET (0x0000)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL0, MMU_TILING_SCHEME */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_MMU_TILING_SCHEME_MASK (0x00000001)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_MMU_TILING_SCHEME_SHIFT (0)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_TILE_CFG, TILE_STRIDE */
+#define IMG_VIDEO_BUS4_MMU_MMU_TILE_MIN_ADDR_STRIDE (4)
+#define IMG_VIDEO_BUS4_MMU_MMU_TILE_MIN_ADDR_OFFSET (0x0050)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR, TILE_MIN_ADDR */
+#define IMG_VIDEO_BUS4_MMU_MMU_TILE_MAX_ADDR_OFFSET (0x0060)
+#define IMG_VIDEO_BUS4_MMU_MMU_TILE_MAX_ADDR_STRIDE (4)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS1, MMU_FAULT_RNW */
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_REQ_OFFSET (0x0090)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS1_MMU_FAULT_RNW_MASK (0x10000000)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS1_MMU_FAULT_RNW_SHIFT (28)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ, TAG_OUTSTANDING */
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_REQ_TAG_OUTSTANDING_MASK (0x000003FF)
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_REQ_TAG_OUTSTANDING_SHIFT (0)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL0, USE_TILE_STRIDE_PER_CONTEXT */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_OFFSET (0x0008)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_SOFT_RESET_MASK (0x10000000)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_SET */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_PAUSE_SET_MASK (0x01000000)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_PAUSE_SET_SHIFT (24)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_CLEAR */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_PAUSE_CLEAR_MASK (0x02000000)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_PAUSE_CLEAR_SHIFT (25)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, UPPER_ADDRESS_FIXED */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG0_OFFSET (0x0080)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ, INT_PROTOCOL_FAULT */
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_EXT_OUTSTANDING_OFFSET (0x0094)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG0, TAGS_SUPPORTED */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG1_OFFSET (0x0084)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_INVALDC */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_INVALDC_MASK (0x00000F00)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL1_MMU_INVALDC_SHIFT (8)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG1, SUPPORT_SECURE */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_OFFSET (0x0088)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS0, MMU_FAULT_ADDR */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS1_OFFSET (0x008C)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_FAULT_ADDR_SHIFT (12)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS0, MMU_FAULT_ADDR */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_FAULT_ADDR_MASK (0xFFFFF000)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS0, MMU_PF_N_RW */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_PF_N_RW_MASK (0x00000001)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_PF_N_RW_SHIFT (0)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS1, MMU_FAULT_REQ_ID */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS1_MMU_FAULT_REQ_ID_MASK (0x003F0000)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS1_MMU_FAULT_REQ_ID_SHIFT (16)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_STATUS0, MMU_SECURE_FAULT */
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_SECURE_FAULT_MASK (0x00000002)
+#define IMG_VIDEO_BUS4_MMU_MMU_STATUS0_MMU_SECURE_FAULT_SHIFT (1)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG1, SUPPORT_STRIDE_PER_CONTEXT */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG1_SUPPORT_STRIDE_PER_CONTEXT_MASK (0x20000000)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG1_SUPPORT_STRIDE_PER_CONTEXT_SHIFT (29)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG1, SUPPORT_SECURE */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG1_SUPPORT_SECURE_MASK (0x80000000)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG1_SUPPORT_SECURE_SHIFT (31)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG0, EXTENDED_ADDR_RANGE */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG0_EXTENDED_ADDR_RANGE_MASK (0x000000F0)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT (4)
+
+/* IMG_VIDEO_BUS4_MMU, MMU_CONFIG0, GROUP_OVERRIDE_SIZE */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG0_GROUP_OVERRIDE_SIZE_MASK (0x00000700)
+#define IMG_VIDEO_BUS4_MMU_MMU_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT (8)
+
+#endif /* _IMG_VIDEO_BUS4_MMU_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.c b/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.c
new file mode 100644
index 000000000000..7effd67034be
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * h.264 secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp_int.h"
+#include "jpeg_secure_parser.h"
+#include "jpegfw_data.h"
+#include "swsr.h"
+
+#define JPEG_MCU_SIZE 8
+
+#define JPEG_MAX_COMPONENTS 4
+#define MAX_SETS_HUFFMAN_TABLES 2
+#define MAX_QUANT_TABLES 4
+
+#define TABLE_CLASS_DC 0
+#define TABLE_CLASS_AC 1
+#define TABLE_CLASS_NUM 2
+
+/* Marker Codes */
+#define CODE_SOF_BASELINE 0xC0
+#define CODE_SOF1 0xC1
+#define CODE_SOF2 0xC2
+#define CODE_SOF3 0xC3
+#define CODE_SOF5 0xC5
+#define CODE_SOF6 0xC6
+#define CODE_SOF7 0xC7
+#define CODE_SOF8 0xC8
+#define CODE_SOF9 0xC9
+#define CODE_SOF10 0xCA
+#define CODE_SOF11 0xCB
+#define CODE_SOF13 0xCD
+#define CODE_SOF14 0xCE
+#define CODE_SOF15 0xCF
+#define CODE_DHT 0xC4
+#define CODE_RST0 0xD0
+#define CODE_RST1 0xD1
+#define CODE_RST2 0xD2
+#define CODE_RST3 0xD3
+#define CODE_RST4 0xD4
+#define CODE_RST5 0xD5
+#define CODE_RST6 0xD6
+#define CODE_RST7 0xD7
+#define CODE_SOI 0xD8
+#define CODE_EOI 0xD9
+#define CODE_SOS 0xDA
+#define CODE_DQT 0xDB
+#define CODE_DRI 0xDD
+#define CODE_APP0 0xE0
+#define CODE_APP1 0xE1
+#define CODE_APP2 0xE2
+#define CODE_APP3 0xE3
+#define CODE_APP4 0xE4
+#define CODE_APP5 0xE5
+#define CODE_APP6 0xE6
+#define CODE_APP7 0xE7
+#define CODE_APP8 0xE8
+#define CODE_APP9 0xE9
+#define CODE_APP10 0xEA
+#define CODE_APP11 0xEB
+#define CODE_APP12 0xEC
+#define CODE_APP13 0xED
+#define CODE_APP14 0xEE
+#define CODE_APP15 0xEF
+#define CODE_M_DAC 0xCC
+#define CODE_COMMENT 0xFE
+
+enum bspp_exception_handler {
+ /* BSPP parse exception handler */
+ BSPP_EXCEPTION_HANDLER_NONE = 0x00,
+ /* Jump at exception (external use) */
+ BSPP_EXCEPTION_HANDLER_JUMP,
+ BSPP_EXCEPTION_HANDLER_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct components {
+ unsigned char identifier;
+ unsigned char horz_factor;
+ unsigned char vert_factor;
+ unsigned char quant_table;
+};
+
+struct jpeg_segment_sof {
+ unsigned char precision;
+ unsigned short height;
+ unsigned short width;
+ unsigned char component;
+ struct components components[JPEG_VDEC_MAX_COMPONENTS];
+};
+
+struct jpeg_segment_header {
+ unsigned char type;
+ unsigned short payload_size;
+};
+
+/*
+ * Read bitstream data that may LOOK like SCP
+ * (but in fact is regular data and should be read as such)
+ * @return 8bits read from the bitstream
+ */
+static unsigned char bspp_jpeg_readbyte_asdata(void *swsr_ctx)
+{
+ if (swsr_check_delim_or_eod(swsr_ctx) == SWSR_FOUND_DELIM) {
+ swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 8, NULL);
+ return 0xFF;
+ } else {
+ return swsr_read_bits(swsr_ctx, 8);
+ }
+}
+
+/*
+ * Read bitstream data that may LOOK like SCP
+ * (but in fact be regular data should be read as such)
+ * @return 16bits read from the bitstream
+ */
+static unsigned short bspp_jpeg_readword_asdata(void *swsr_ctx)
+{
+ unsigned short byte1 = bspp_jpeg_readbyte_asdata(swsr_ctx);
+ unsigned short byte2 = bspp_jpeg_readbyte_asdata(swsr_ctx);
+
+ return (byte1 << 8 | byte2);
+}
+
+/*
+ * Access regular bitstream data that may LOOK like SCP
+ * (but in fact be regular data)
+ */
+static void bspp_jpeg_consume_asdata(void *swsr_ctx, int len)
+{
+ while (len > 0) {
+ bspp_jpeg_readbyte_asdata(swsr_ctx);
+ len--;
+ }
+}
+
+/*
+ * Parse SOF segment
+ */
+static enum bspp_error_type bspp_jpeg_segment_parse_sof(void *swsr_ctx,
+ struct jpeg_segment_sof *sof_header)
+{
+ unsigned char comp_ind;
+
+ sof_header->precision = swsr_read_bits(swsr_ctx, 8);
+ if (sof_header->precision != 8) {
+ pr_warn("Sample precision has invalid value %d\n",
+ sof_header->precision);
+ return BSPP_ERROR_INVALID_VALUE;
+ }
+
+ sof_header->height = bspp_jpeg_readword_asdata(swsr_ctx);
+ sof_header->width = bspp_jpeg_readword_asdata(swsr_ctx);
+ if (sof_header->height < JPEG_MCU_SIZE || sof_header->width < JPEG_MCU_SIZE) {
+ pr_warn("Sample X/Y smaller than macroblock\n");
+ return BSPP_ERROR_INVALID_VALUE;
+ }
+ sof_header->component = swsr_read_bits(swsr_ctx, 8);
+ if (sof_header->component > JPEG_MAX_COMPONENTS) {
+ pr_warn("Number of components (%d) is greater than max allowed\n",
+ sof_header->component);
+ return BSPP_ERROR_INVALID_VALUE;
+ }
+ /* parse the component */
+ for (comp_ind = 0; comp_ind < sof_header->component; comp_ind++) {
+ sof_header->components[comp_ind].identifier = swsr_read_bits(swsr_ctx, 8);
+ sof_header->components[comp_ind].horz_factor = swsr_read_bits(swsr_ctx, 4);
+ sof_header->components[comp_ind].vert_factor = swsr_read_bits(swsr_ctx, 4);
+ sof_header->components[comp_ind].quant_table = swsr_read_bits(swsr_ctx, 8);
+
+ pr_debug("components[%d]=(identifier=%d; horz_factor=%d; vert_factor=%d; quant_table=%d)",
+ comp_ind,
+ sof_header->components[comp_ind].identifier,
+ sof_header->components[comp_ind].horz_factor,
+ sof_header->components[comp_ind].vert_factor,
+ sof_header->components[comp_ind].quant_table);
+ }
+
+ return BSPP_ERROR_NONE;
+}
+
+/*
+ * Seeks to delimeter if we're not already on one
+ */
+static enum swsr_found bspp_jpeg_tryseek_delimeter(void *swsr_ctx)
+{
+ enum swsr_found was_delim_or_eod = swsr_check_delim_or_eod(swsr_ctx);
+
+ if (was_delim_or_eod != SWSR_FOUND_DELIM)
+ was_delim_or_eod = swsr_seek_delim_or_eod(swsr_ctx);
+
+ return was_delim_or_eod;
+}
+
+static enum swsr_found bspp_jpeg_tryconsume_delimeters(void *swsr_ctx)
+{
+ enum swsr_found is_delim_or_eod = swsr_check_delim_or_eod(swsr_ctx);
+
+ while (is_delim_or_eod == SWSR_FOUND_DELIM) {
+ swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 8, NULL);
+ is_delim_or_eod = swsr_check_delim_or_eod(swsr_ctx);
+ }
+ return is_delim_or_eod;
+}
+
+static enum swsr_found bspp_jpeg_tryseek_and_consume_delimeters(void *swsr_ctx)
+{
+ enum swsr_found is_delim_or_eod;
+
+ bspp_jpeg_tryseek_delimeter(swsr_ctx);
+ is_delim_or_eod = bspp_jpeg_tryconsume_delimeters(swsr_ctx);
+ return is_delim_or_eod;
+}
+
+/*
+ * Read segment type and size
+ * @return IMG_TRUE when header is found,
+ * IMG_FALSE if it has to be called again
+ */
+static unsigned char bspp_jpeg_segment_read_header(void *swsr_ctx,
+ struct bspp_unit_data *unit_data,
+ struct jpeg_segment_header *jpeg_segment_header)
+{
+ bspp_jpeg_tryconsume_delimeters(swsr_ctx);
+ jpeg_segment_header->type = swsr_read_bits(swsr_ctx, 8);
+
+ if (jpeg_segment_header->type != 0)
+ pr_debug("NAL=0x%x\n", jpeg_segment_header->type);
+
+ jpeg_segment_header->payload_size = 0;
+
+ switch (jpeg_segment_header->type) {
+ case CODE_SOS:
+ case CODE_DRI:
+ case CODE_SOF_BASELINE:
+ case CODE_SOF1:
+ case CODE_SOF2:
+ case CODE_SOF3:
+ case CODE_SOF5:
+ case CODE_SOF6:
+ case CODE_SOF7:
+ case CODE_SOF8:
+ case CODE_SOF9:
+ case CODE_SOF10:
+ case CODE_SOF11:
+ case CODE_SOF13:
+ case CODE_SOF14:
+ case CODE_SOF15:
+ case CODE_APP0:
+ case CODE_APP1:
+ case CODE_APP2:
+ case CODE_APP3:
+ case CODE_APP4:
+ case CODE_APP5:
+ case CODE_APP6:
+ case CODE_APP7:
+ case CODE_APP8:
+ case CODE_APP9:
+ case CODE_APP10:
+ case CODE_APP11:
+ case CODE_APP12:
+ case CODE_APP13:
+ case CODE_APP14:
+ case CODE_APP15:
+ case CODE_DHT:
+ case CODE_DQT:
+ case CODE_COMMENT:
+ {
+ jpeg_segment_header->payload_size =
+ bspp_jpeg_readword_asdata(swsr_ctx) - 2;
+ }
+ break;
+ case CODE_EOI:
+ case CODE_SOI:
+ case CODE_RST0:
+ case CODE_RST1:
+ case CODE_RST2:
+ case CODE_RST3:
+ case CODE_RST4:
+ case CODE_RST5:
+ case CODE_RST6:
+ case CODE_RST7:
+ /*
+ * jpeg_segment_header->payload_size reset to 0 previously,
+ * so just break.
+ */
+ break;
+ case 0:
+ {
+ /*
+ * Emulation prevention is OFF which means that 0 after
+ * 0xff will not be swallowed
+ * and has to be treated as data
+ */
+ bspp_jpeg_tryseek_and_consume_delimeters(swsr_ctx);
+ return 0;
+ }
+ default:
+ {
+ pr_err("BAD NAL=%#x\n", jpeg_segment_header->type);
+ unit_data->parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ }
+ }
+
+ pr_debug("payloadSize=%#x\n", jpeg_segment_header->payload_size);
+ return 1;
+}
+
+static void bspp_jpeg_calculate_mcus(struct jpeg_segment_sof *data_sof,
+ unsigned char *alignment_width,
+ unsigned char *alignment_height)
+{
+ unsigned char i;
+ unsigned char max_horz_factor = 0;
+ unsigned char max_vert_factor = 0;
+ unsigned short mcu_width = 0;
+ unsigned short mcu_height = 0;
+
+ /* Determine maximum scale factors */
+ for (i = 0; i < data_sof->component; i++) {
+ unsigned char horz_factor = data_sof->components[i].horz_factor;
+ unsigned char vert_factor = data_sof->components[i].vert_factor;
+
+ max_horz_factor = horz_factor > max_horz_factor ? horz_factor : max_horz_factor;
+ max_vert_factor = vert_factor > max_vert_factor ? vert_factor : max_vert_factor;
+ }
+ /*
+ * Alignment we want to have must be:
+ * - mutliple of VDEC_MB_DIMENSION
+ * - at least of the size that will fit whole MCUs
+ */
+ *alignment_width =
+ VDEC_ALIGN_SIZE((8 * max_horz_factor), VDEC_MB_DIMENSION,
+ unsigned int, unsigned int);
+ *alignment_height =
+ VDEC_ALIGN_SIZE((8 * max_vert_factor), VDEC_MB_DIMENSION,
+ unsigned int, unsigned int);
+
+ /* Calculate dimensions in MCUs */
+ mcu_width += (data_sof->width + (8 * max_horz_factor) - 1) / (8 * max_horz_factor);
+ mcu_height += (data_sof->height + (8 * max_vert_factor) - 1) / (8 * max_vert_factor);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s; w=%d; w[MCU]=%d\n", __func__, data_sof->width, mcu_width);
+ pr_info("%s; h=%d; h[MCU]=%d\n", __func__, data_sof->height, mcu_height);
+#endif
+}
+
+static int bspp_jpeg_common_seq_hdr_populate(struct jpeg_segment_sof *sof_header,
+ struct vdec_comsequ_hdrinfo *com_sequ_hdr_info,
+ unsigned char alignment_width,
+ unsigned char alignment_height)
+{
+ unsigned short i;
+ int res;
+ struct img_pixfmt_desc format_desc;
+
+ memset(&format_desc, 0, sizeof(struct img_pixfmt_desc));
+ memset(com_sequ_hdr_info, 0, sizeof(*com_sequ_hdr_info));
+
+ com_sequ_hdr_info->max_frame_size.width = VDEC_ALIGN_SIZE(sof_header->width,
+ alignment_width,
+ unsigned int, unsigned int);
+ com_sequ_hdr_info->max_frame_size.height = VDEC_ALIGN_SIZE(sof_header->height,
+ alignment_height, unsigned int,
+ unsigned int);
+ com_sequ_hdr_info->frame_size.width = sof_header->width;
+ com_sequ_hdr_info->frame_size.height = sof_header->height;
+ com_sequ_hdr_info->orig_display_region.width = sof_header->width;
+ com_sequ_hdr_info->orig_display_region.height = sof_header->height;
+
+ com_sequ_hdr_info->pixel_info.bitdepth_y = 8;
+ com_sequ_hdr_info->pixel_info.bitdepth_c = 8;
+ com_sequ_hdr_info->pixel_info.num_planes = sof_header->component;
+ /* actually we have to set foramt accroding to the following table
+ * H1 V1 H2 V2 H3 V3 J:a:b h/v
+ * 1 1 1 1 1 1 4:4:4 1/1
+ * 1 2 1 1 1 1 4:4:0 1/2
+ * 1 4 1 1 1 1 4:4:1* 1/4
+ * 1 4 1 2 1 2 4:4:0 1/2
+ * 2 1 1 1 1 1 4:2:2 2/1
+ * 2 2 1 1 1 1 4:2:0 2/2
+ * 2 2 2 1 2 1 4:4:0 1/2
+ * 2 4 1 1 1 1 4:2:1* 2/4
+ * 4 1 1 1 1 1 4:1:1 4/1
+ * 4 1 2 1 2 1 4:2:2 2/1
+ * 4 2 1 1 1 1 4:1:0 4/2
+ * 4 4 2 2 2 2 4:2:0 2/2
+ */
+ if (sof_header->component == (JPEG_MAX_COMPONENTS - 1)) {
+ com_sequ_hdr_info->pixel_info.chroma_fmt = PIXEL_MULTICHROME;
+ if ((sof_header->components[1].horz_factor == 1 &&
+ sof_header->components[1].vert_factor == 1) &&
+ (sof_header->components[2].horz_factor == 1 &&
+ sof_header->components[2].vert_factor == 1)) {
+ if (sof_header->components[0].horz_factor == 1 &&
+ sof_header->components[0].vert_factor == 1) {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc = PIXEL_FORMAT_444;
+ } else if (sof_header->components[0].horz_factor == 2) {
+ if (sof_header->components[0].vert_factor == 1) {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc =
+ PIXEL_FORMAT_422;
+ } else if (sof_header->components[0].vert_factor == 2) {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc =
+ PIXEL_FORMAT_420;
+ } else {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc =
+ PIXEL_FORMAT_444;
+ }
+ } else if ((sof_header->components[0].horz_factor == 4) &&
+ (sof_header->components[0].vert_factor == 1)) {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc = PIXEL_FORMAT_411;
+ } else {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc = PIXEL_FORMAT_444;
+ }
+ } else {
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc = PIXEL_FORMAT_444;
+ }
+ } else {
+ com_sequ_hdr_info->pixel_info.chroma_fmt = PIXEL_MONOCHROME;
+ com_sequ_hdr_info->pixel_info.chroma_fmt_idc = PIXEL_FORMAT_MONO;
+ }
+
+ for (i = 0; (i < sof_header->component) && (i < IMG_MAX_NUM_PLANES); i++) {
+ format_desc.planes[i] = 1;
+ format_desc.h_numer[i] = sof_header->components[i].horz_factor;
+ format_desc.v_numer[i] = sof_header->components[i].vert_factor;
+ }
+
+ res = pixel_gen_pixfmt(&com_sequ_hdr_info->pixel_info.pixfmt, &format_desc);
+ if (res != 0) {
+ pr_err("Failed to generate pixel format.\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static void bspp_jpeg_pict_hdr_populate(struct jpeg_segment_sof *sof_header,
+ struct bspp_pict_hdr_info *pict_hdr_info)
+{
+ memset(pict_hdr_info, 0, sizeof(*pict_hdr_info));
+
+ pict_hdr_info->intra_coded = 1;
+ pict_hdr_info->ref = 0;
+
+ pict_hdr_info->coded_frame_size.width = (unsigned int)sof_header->width;
+ pict_hdr_info->coded_frame_size.height = (unsigned int)sof_header->height;
+ pict_hdr_info->disp_info.enc_disp_region.width = (unsigned int)sof_header->width;
+ pict_hdr_info->disp_info.enc_disp_region.height = (unsigned int)sof_header->height;
+
+ pict_hdr_info->pict_aux_data.id = BSPP_INVALID;
+ pict_hdr_info->second_pict_aux_data.id = BSPP_INVALID;
+ pict_hdr_info->pict_sgm_data.id = BSPP_INVALID;
+}
+
+static int bspp_jpeg_parse_picture_unit(void *swsr_ctx,
+ struct bspp_unit_data *unit_data)
+{
+ /* assume we'll be fine */
+ unit_data->parse_error = BSPP_ERROR_NONE;
+
+ while ((unit_data->parse_error == BSPP_ERROR_NONE) &&
+ !(unit_data->slice || unit_data->extracted_all_data)) {
+ struct jpeg_segment_header segment_header;
+ /*
+ * Try hard to read segment header. The only limit we set here is EOD-
+ * if it happens, we will get an exception, to stop this madness.
+ */
+ while (!bspp_jpeg_segment_read_header(swsr_ctx, unit_data, &segment_header) &&
+ unit_data->parse_error == BSPP_ERROR_NONE)
+ ;
+
+ switch (segment_header.type) {
+ case CODE_SOF1:
+ case CODE_SOF2:
+ case CODE_SOF3:
+ case CODE_SOF5:
+ case CODE_SOF6:
+ case CODE_SOF8:
+ case CODE_SOF9:
+ case CODE_SOF10:
+ case CODE_SOF11:
+ case CODE_SOF13:
+ case CODE_SOF14:
+ case CODE_SOF15:
+ {
+ bspp_jpeg_consume_asdata(swsr_ctx, segment_header.payload_size);
+ bspp_jpeg_tryseek_delimeter(swsr_ctx);
+ unit_data->extracted_all_data = 1;
+ unit_data->slice = 1;
+ unit_data->parse_error |= BSPP_ERROR_UNSUPPORTED;
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+ case CODE_SOI:
+ {
+ /*
+ * Reinitialize context at the beginning of each image
+ */
+ }
+ break;
+ case CODE_EOI:
+ {
+ /*
+ * Some more frames can be concatenated after SOI,
+ * but we'll discard it for now
+ */
+ while (bspp_jpeg_tryseek_and_consume_delimeters(swsr_ctx) != SWSR_FOUND_EOD)
+ ;
+ unit_data->extracted_all_data = 1;
+ return 0;
+ }
+ case CODE_SOF_BASELINE:
+ {
+ int res;
+ unsigned char alignment_width = 0;
+ unsigned char alignment_height = 0;
+ struct jpeg_segment_sof sof_data;
+
+ struct bspp_sequ_hdr_info *sequ_hdr_info =
+ &unit_data->impl_sequ_hdr_info->sequ_hdr_info;
+
+ memset(&sof_data, 0, sizeof(*&sof_data));
+
+ /* SOF is the only segment we are interested in- parse it */
+ unit_data->parse_error |= bspp_jpeg_segment_parse_sof(swsr_ctx, &sof_data);
+ /*
+ * to correctly allocate size for frame we need to have correct MCUs to
+ * get alignment info
+ */
+ bspp_jpeg_calculate_mcus(&sof_data, &alignment_width, &alignment_height);
+
+ /* fill in headers expected by BSPP framework */
+ res = bspp_jpeg_common_seq_hdr_populate(&sof_data,
+ &sequ_hdr_info->com_sequ_hdr_info,
+ alignment_width,
+ alignment_height);
+ if (res != 0) {
+ unit_data->parse_error |= BSPP_ERROR_UNRECOVERABLE;
+ return res;
+ }
+
+ bspp_jpeg_pict_hdr_populate(&sof_data, unit_data->out.pict_hdr_info);
+
+ /* fill in sequence IDs for header and picture */
+ sequ_hdr_info->sequ_hdr_id = BSPP_DEFAULT_SEQUENCE_ID;
+ unit_data->pict_sequ_hdr_id = BSPP_DEFAULT_SEQUENCE_ID;
+
+ /* reset SOS fields counter value */
+ unit_data->out.pict_hdr_info->sos_count = 0;
+ }
+ break;
+ case CODE_SOS:
+ {
+ /* increment the SOS fields counter */
+ unit_data->out.pict_hdr_info->sos_count++;
+
+ unit_data->slice = 1;
+ bspp_jpeg_consume_asdata(swsr_ctx, segment_header.payload_size);
+ return 0;
+ }
+ case CODE_DRI:
+ break;
+ default:
+ {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("Skipping over 0x%x bytes\n", segment_header.payload_size);
+#endif
+ bspp_jpeg_consume_asdata(swsr_ctx, segment_header.payload_size);
+ }
+ break;
+ }
+ /*
+ * After parsing segment we should already be on delimeter.
+ * Consume it, so header parsing can be started.
+ */
+ bspp_jpeg_tryseek_and_consume_delimeters(swsr_ctx);
+ }
+ return 0;
+}
+
+int bspp_jpeg_unit_parser(void *swsr_ctx, struct bspp_unit_data *unit_data)
+{
+ int retval = 0;
+
+ switch (unit_data->unit_type) {
+ case BSPP_UNIT_PICTURE:
+ {
+ retval = bspp_jpeg_parse_picture_unit(swsr_ctx, unit_data);
+ unit_data->new_closed_gop = 1;
+ }
+ break;
+ default:
+ {
+ unit_data->parse_error = BSPP_ERROR_INVALID_VALUE;
+ }
+ break;
+ }
+
+ return retval;
+}
+
+int bspp_jpeg_setparser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *pparser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data)
+{
+ /* Set JPEG parser callbacks. */
+ pparser_callbacks->parse_unit_cb = bspp_jpeg_unit_parser;
+
+ /* Set JPEG specific features. */
+ pvidstd_features->seq_size = sizeof(struct bspp_jpeg_sequ_hdr_info);
+ pvidstd_features->uses_vps = 0;
+ pvidstd_features->uses_pps = 0;
+
+ /* Set JPEG specific shift register config. */
+ pswsr_ctx->emulation_prevention = SWSR_EMPREVENT_NONE;
+ pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SCP;
+ pswsr_ctx->sr_config.delim_length = 8;
+ pswsr_ctx->sr_config.scp_value = 0xFF;
+
+ return 0;
+}
+
+void bspp_jpeg_determine_unit_type(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unittype)
+{
+ *bspp_unittype = BSPP_UNIT_PICTURE;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.h b/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.h
new file mode 100644
index 000000000000..439a38504b96
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/jpeg_secure_parser.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * JPEG secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __JPEGSECUREPARSER_H__
+#define __JPEGSECUREPARSER_H__
+
+#include "bspp_int.h"
+
+/**
+ * struct bspp_jpeg_sequ_hdr_info - bspp_jpeg_sequ_hdr_info dummu structure
+ * @dummy: dummy structure
+ */
+struct bspp_jpeg_sequ_hdr_info {
+ unsigned int dummy;
+};
+
+int bspp_jpeg_setparser_config(enum vdec_bstr_format bstr_format,
+ struct bspp_vid_std_features *pvidstd_features,
+ struct bspp_swsr_ctx *pswsr_ctx,
+ struct bspp_parser_callbacks *pparser_callbacks,
+ struct bspp_inter_pict_data *pinterpict_data);
+
+void bspp_jpeg_determine_unit_type(unsigned char bitstream_unittype,
+ int disable_mvc,
+ enum bspp_unit_type *bspp_unittype);
+
+#endif /*__JPEGSECUREPARSER_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/jpegfw_data.h b/drivers/media/platform/vxe-vxd/decoder/jpegfw_data.h
new file mode 100644
index 000000000000..d84e5d73f844
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/jpegfw_data.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the h264 parser firmware module.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include "jpegfw_data_shared.h"
+
+#ifndef _JPEGFW_DATA_H_
+#define _JPEGFW_DATA_H_
+
+#define JPEG_VDEC_8x8_DCT_SIZE 64 //!< Number of elements in 8x8 DCT
+#define JPEG_VDEC_MAX_COMPONENTS 4 //!< Maximum number of component in JPEG
+#define JPEG_VDEC_MAX_SETS_HUFFMAN_TABLES 2 //!< Maximum set of huffman table in JPEG
+#define JPEG_VDEC_MAX_QUANT_TABLES 4 //!< Maximum set of quantisation table in JPEG
+#define JPEG_VDEC_TABLE_CLASS_NUM 2 //!< Maximum set of class of huffman table in JPEG
+#define JPEG_VDEC_PLANE_MAX 4 //!< Maximum number of planes
+
+struct hentry {
+ unsigned short code;
+ unsigned char codelen;
+ unsigned char value;
+};
+
+/**
+ * struct vdec_jpeg_huffman_tableinfo - This structure contains JPEG huffmant table
+ * @bits: number of bits
+ * @values: codeword value
+ *
+ * NOTE: Should only contain JPEG specific information.
+ * JPEG Huffman Table Information
+ */
+struct vdec_jpeg_huffman_tableinfo {
+ /* number of bits */
+ unsigned char bits[16];
+ /* codeword value */
+ unsigned char values[256];
+};
+
+/*
+ * This structure contains JPEG DeQunatisation table
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG Dequantisation Table Information
+ */
+struct vdec_jpeg_de_quant_tableinfo {
+ /* Qunatisation precision */
+ unsigned char precision;
+ /* Qunatisation Value for 8x8 DCT */
+ unsigned short elements[64];
+};
+
+/*
+ * This describes the JPEG parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the JPEG firmware
+ * and should be supplied by the Host.
+ */
+struct jpegfw_header_data {
+ /* Primary decode buffer base addresses */
+ struct vdecfw_image_buffer primary;
+ /* Reference (output) picture base addresses */
+ unsigned int plane_offsets[JPEG_VDEC_PLANE_MAX];
+ /* SOS fields count value */
+ unsigned char hdr_sos_count;
+};
+
+/*
+ * This describes the JPEG parser component "Context data".
+ * JPEG does not need any data to be saved between pictures, this structure
+ * is needed only to fit in firmware framework.
+ */
+struct jpegfw_context_data {
+ unsigned int dummy;
+};
+
+#endif /* _JPEGFW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/jpegfw_data_shared.h b/drivers/media/platform/vxe-vxd/decoder/jpegfw_data_shared.h
new file mode 100644
index 000000000000..2448fde864fb
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/jpegfw_data_shared.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures for the hevc parser firmware module
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifdef USE_SHARING
+#endif
+
+#ifndef _JPEGFW_DATA_H_
+#define _JPEGFW_DATA_H_
+
+#include "vdecfw_share.h"
+#include "vdecfw_shared.h"
+
+#define JPEG_VDEC_8x8_DCT_SIZE 64 //!< Number of elements in 8x8 DCT
+#define JPEG_VDEC_MAX_COMPONENTS 4 //!< Maximum number of component in JPEG
+#define JPEG_VDEC_MAX_SETS_HUFFMAN_TABLES 2 //!< Maximum set of huffman table in JPEG
+#define JPEG_VDEC_MAX_QUANT_TABLES 4 //!< Maximum set of quantisation table in JPEG
+#define JPEG_VDEC_TABLE_CLASS_NUM 2 //!< Maximum set of class of huffman table in JPEG
+#define JPEG_VDEC_PLANE_MAX 4 //!< Maximum number of planes
+
+struct hentry {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short, code);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, codelen);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, value);
+};
+
+/*
+ * This structure contains JPEG huffmant table
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG Huffman Table Information
+ */
+struct vdec_jpeg_huffman_tableinfo {
+ /* number of bits */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, bits[16]);
+ /* codeword value */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, values[256]);
+};
+
+/*
+ * This structure contains JPEG DeQunatisation table
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG Dequantisation Table Information
+ */
+struct vdec_jpeg_de_quant_tableinfo {
+ /* Qunatisation precision */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, precision);
+ /* Qunatisation Value for 8x8 DCT */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned short, elements[64]);
+};
+
+/*
+ * This describes the JPEG parser component "Header data", shown in the
+ * Firmware Memory Layout diagram. This data is required by the JPEG firmware
+ * and should be supplied by the Host.
+ */
+struct jpegfw_header_data {
+ /* Primary decode buffer base addresses */
+ struct vdecfw_image_buffer primary;
+ /* Reference (output) picture base addresses */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ plane_offsets[JPEG_VDEC_PLANE_MAX]);
+ /* SOS fields count value */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned char, hdr_sos_count);
+};
+
+/*
+ * This describes the JPEG parser component "Context data".
+ * JPEG does not need any data to be saved between pictures, this structure
+ * is needed only to fit in firmware framework.
+ */
+struct jpegfw_context_data {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, dummy);
+};
+
+#endif /* _JPEGFW_DATA_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/mem_io.h b/drivers/media/platform/vxe-vxd/decoder/mem_io.h
new file mode 100644
index 000000000000..1e63f889f258
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/mem_io.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG PVDEC pixel Registers
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreamimg
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _MEM_IO_H
+#define _MEM_IO_H
+
+#include <linux/types.h>
+
+#include "reg_io2.h"
+
+#define MEMIO_CHECK_ALIGNMENT(vpmem) \
+ IMG_ASSERT((vpmem))
+
+#define MEMIO_READ_FIELD(vpmem, field) \
+ ((((*((field ## _TYPE *)(((unsigned long)(vpmem)) + field ## _OFFSET))) & \
+ field ## _MASK) >> field ## _SHIFT))
+
+#define MEMIO_WRITE_FIELD(vpmem, field, value, type) \
+ do { \
+ type __vpmem = vpmem; \
+ MEMIO_CHECK_ALIGNMENT(__vpmem); \
+ (*((field ## _TYPE *)(((unsigned long)(__vpmem)) + \
+ field ## _OFFSET))) = \
+ (field ## _TYPE)(((*((field ## _TYPE *)(((unsigned long)(__vpmem)) + \
+ field ## _OFFSET))) & \
+ ~(field ## _TYPE)field ## _MASK) | \
+ (field ## _TYPE)(((value) << field ## _SHIFT) & \
+ field ## _MASK)); \
+ } while (0) \
+
+#endif /* _MEM_IO_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/mmu_defs.h b/drivers/media/platform/vxe-vxd/decoder/mmu_defs.h
new file mode 100644
index 000000000000..0ea65509071d
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/mmu_defs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * V-DEC MMU Definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ */
+
+#ifndef _VXD_MMU_DEF_H_
+#define _VXD_MMU_DEF_H_
+
+/*
+ * This type defines MMU variant.
+ */
+enum mmu_etype {
+ MMU_TYPE_NONE = 0,
+ MMU_TYPE_32BIT,
+ MMU_TYPE_36BIT,
+ MMU_TYPE_40BIT,
+ MMU_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/**
+ * enum mmu_eheap_id - This type defines the MMU heaps.
+ * @MMU_HEAP_IMAGE_BUFFERS_UNTILED: Heap for untiled video buffers
+ * @MMU_HEAP_BITSTREAM_BUFFERS : Heap for bitstream buffers
+ * @MMU_HEAP_STREAM_BUFFERS : Heap for Stream buffers
+ * @MMU_HEAP_MAX : Number of heaps
+ * @MMU_HEAP_FORCE32BITS: MMU_HEAP_FORCE32BITS
+ */
+enum mmu_eheap_id {
+ MMU_HEAP_IMAGE_BUFFERS_UNTILED = 0x00,
+ MMU_HEAP_BITSTREAM_BUFFERS,
+ MMU_HEAP_STREAM_BUFFERS,
+ MMU_HEAP_MAX,
+ MMU_HEAP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif /* _VXD_MMU_DEFS_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/pixel_api.c b/drivers/media/platform/vxe-vxd/decoder/pixel_api.c
new file mode 100644
index 000000000000..a0620662a68e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/pixel_api.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pixel processing function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "img_pixfmts.h"
+#include "pixel_api.h"
+#include "vdec_defs.h"
+
+#define NUM_OF_FORMATS 17
+#define PIXNAME(x) /* Pixel name support not enabled */
+#define FACT_SPEC_FORMAT_NUM_PLANES 4
+#define FACT_SPEC_FORMAT_PLANE_UNUSED 0xf
+#define FACT_SPEC_FORMAT_PLANE_CODE_BITS 4
+#define FACT_SPEC_FORMAT_PLANE_CODE_MASK 3
+#define FACT_SPEC_FORMAT_MIN_FACT_VAL 1
+
+/*
+ * @brief Pointer to the default format in the asPixelFormats array
+ * default format is an invalid format
+ * @note pointer set by initSearch()
+ * This pointer is also used to know if the arrays were sorted
+ */
+static struct pixel_pixinfo *def_fmt;
+
+/*
+ * @brief Actual array storing the pixel formats information.
+ */
+static struct pixel_pixinfo pix_fmts[NUM_OF_FORMATS] = {
+ {
+ IMG_PIXFMT_420PL12YUV8,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT8_MP,
+ PIXEL_FORMAT_420,
+ 8,
+ 8,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU8,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT8_MP,
+ PIXEL_FORMAT_420,
+ 8,
+ 8,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV10,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU10,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV10_MSB,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MSB_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU10_MSB,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MSB_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV10_LSB,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_LSB_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU10_LSB,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_LSB_MP,
+ PIXEL_FORMAT_420,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV8,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT8_MP,
+ PIXEL_FORMAT_422,
+ 8,
+ 8,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU8,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT8_MP,
+ PIXEL_FORMAT_422,
+ 8,
+ 8,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV10,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU10,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV10_MSB,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MSB_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU10_MSB,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_MSB_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV10_LSB,
+ PIXEL_UV_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_LSB_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU10_LSB,
+ PIXEL_VU_ORDER,
+ PIXEL_MULTICHROME,
+ PIXEL_BIT10_LSB_MP,
+ PIXEL_FORMAT_422,
+ 10,
+ 10,
+ 2
+ },
+
+ {
+ IMG_PIXFMT_UNDEFINED,
+ PIXEL_INVALID_CI,
+ 0,
+ (enum pixel_mem_packing)0,
+ PIXEL_FORMAT_INVALID,
+ 0,
+ 0,
+ 0
+ }
+};
+
+static struct pixel_pixinfo_table pixinfo_table[] = {
+ {
+ IMG_PIXFMT_420PL12YUV8_A8,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YUV8_A8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 16,
+ TRUE,
+ TRUE,
+ 4,
+ TRUE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV8_A8,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YUV8_A8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 16,
+ TRUE,
+ FALSE,
+ 4,
+ TRUE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV8,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YUV8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU8,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YVU8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV10,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YUV10)
+ 12,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU10,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YVU10)
+ 12,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YUV10_MSB,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YUV10_MSB)
+ 8,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_420PL12YVU10_MSB,
+ {
+ PIXNAME(IMG_PIXFMT_420PL12YVU10_MSB)
+ 8,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ TRUE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV8,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YUV8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU8,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YVU8)
+ 16,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV10,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YUV10)
+ 12,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU10,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YVU10)
+ 12,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YUV10_MSB,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YUV10_MSB)
+ 8,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+
+ {
+ IMG_PIXFMT_422PL12YVU10_MSB,
+ {
+ PIXNAME(IMG_PIXFMT_422PL12YVU10_MSB)
+ 8,
+ 16,
+ 16,
+ 0,
+ 0,
+ TRUE,
+ FALSE,
+ 4,
+ FALSE
+ }
+ },
+};
+
+static struct pixel_pixinfo_table*
+pixel_get_pixelinfo_from_pixfmt(enum img_pixfmt pix_fmt)
+{
+ unsigned int i;
+ unsigned char found = FALSE;
+ struct pixel_pixinfo_table *this_pixinfo_table_entry = NULL;
+
+ for (i = 0;
+ i < (sizeof(pixinfo_table) / sizeof(struct pixel_pixinfo_table));
+ i++) {
+ if (pix_fmt == pixinfo_table[i].pix_color_fmt) {
+ /*
+ * There must only be one entry per pixel colour format
+ * in the table
+ */
+ VDEC_ASSERT(!found);
+ found = TRUE;
+ this_pixinfo_table_entry = &pixinfo_table[i];
+
+ /*
+ * We deliberately do NOT break here - scan rest of
+ * table to ensure there are not duplicate entries
+ */
+ }
+ }
+ return this_pixinfo_table_entry;
+}
+
+/*
+ * @brief Array containing string lookup of pixel format IDC.
+ * @warning this must be kept in step with PIXEL_FormatIdc.
+ */
+unsigned char pix_fmt_idc_names[6][16] = {
+ "Monochrome",
+ "4:1:1",
+ "4:2:0",
+ "4:2:2",
+ "4:4:4",
+ "Invalid",
+};
+
+static int pixel_compare_pixfmts(const void *a, const void *b)
+{
+ return ((struct pixel_pixinfo *)a)->pixfmt -
+ ((struct pixel_pixinfo *)b)->pixfmt;
+}
+
+static struct pixel_info*
+pixel_get_bufinfo_from_pixfmt(enum img_pixfmt pix_fmt)
+{
+ struct pixel_pixinfo_table *pixinfo_table_entry = NULL;
+ struct pixel_info *pix_info = NULL;
+
+ pixinfo_table_entry = pixel_get_pixelinfo_from_pixfmt(pix_fmt);
+ VDEC_ASSERT(pixinfo_table_entry);
+ if (pixinfo_table_entry)
+ pix_info = &pixinfo_table_entry->info;
+
+ return pix_info;
+}
+
+/*
+ * @brief Search a pixel format based on its attributes rather than its format
+ * enum.
+ * @warning use PIXEL_Comparpix_fmts to search by enum
+ */
+static int pixel_compare_pixinfo(const void *a, const void *b)
+{
+ int result = 0;
+ const struct pixel_pixinfo *fmt_a = (struct pixel_pixinfo *)a;
+ const struct pixel_pixinfo *fmt_b = (struct pixel_pixinfo *)b;
+
+ result = fmt_a->chroma_fmt_idc - fmt_b->chroma_fmt_idc;
+ if (result != 0)
+ return result;
+
+ result = fmt_a->mem_pkg - fmt_b->mem_pkg;
+ if (result != 0)
+ return result;
+
+ result = fmt_a->chroma_interleave - fmt_b->chroma_interleave;
+ if (result != 0)
+ return result;
+
+ result = fmt_a->bitdepth_y - fmt_b->bitdepth_y;
+ if (result != 0)
+ return result;
+
+ result = fmt_a->bitdepth_c - fmt_b->bitdepth_c;
+ if (result != 0)
+ return result;
+
+ result = fmt_a->num_planes - fmt_b->num_planes;
+ if (result != 0)
+ return result;
+
+ return result;
+}
+
+static void pixel_init_search(void)
+{
+ static unsigned int search_inited;
+
+ search_inited++;
+ if (search_inited == 1) {
+ if (!def_fmt) {
+ int i = 0;
+
+ i = NUM_OF_FORMATS - 1;
+ while (i >= 0) {
+ if (IMG_PIXFMT_UNDEFINED ==
+ pix_fmts[i].pixfmt) {
+ def_fmt = &pix_fmts[i];
+ break;
+ }
+ i--;
+ }
+ VDEC_ASSERT(def_fmt);
+ }
+ } else {
+ search_inited--;
+ }
+}
+
+static struct pixel_pixinfo *pixel_search_fmt(const struct pixel_pixinfo *key,
+ unsigned char enum_only)
+{
+ struct pixel_pixinfo *fmt_found = NULL;
+ int (*compar)(const void *pixfmt1, const void *pixfmt2);
+
+ if (enum_only)
+ compar = &pixel_compare_pixfmts;
+ else
+ compar = &pixel_compare_pixinfo;
+
+ {
+ unsigned int i;
+
+ for (i = 0; i < NUM_OF_FORMATS; i++) {
+ if (compar(key, &pix_fmts[i]) == 0) {
+ fmt_found = &pix_fmts[i];
+ break;
+ }
+ }
+ }
+ return fmt_found;
+}
+
+/*
+ * @brief Set a pixel format info structure to the default.
+ * @warning This MODIFIES the structure it is given, therefore you
+ * shouldn't call it on a pointer you got from the library!
+ */
+static void pixel_pixinfo_defaults(struct pixel_pixinfo *to_def)
+{
+ if (!def_fmt)
+ pixel_init_search();
+
+ memcpy(to_def, def_fmt, sizeof(struct pixel_pixinfo));
+}
+
+enum img_pixfmt pixel_get_pixfmt(enum pixel_fmt_idc chroma_fmt_idc,
+ enum pixel_chroma_interleaved
+ chroma_interleaved,
+ enum pixel_mem_packing mem_pkg,
+ unsigned int bitdepth_y, unsigned int bitdepth_c,
+ unsigned int num_planes)
+{
+ unsigned int internal_num_planes = (num_planes == 0 || num_planes > 4) ? 2 :
+ num_planes;
+ struct pixel_pixinfo key;
+ struct pixel_pixinfo *fmt_found = NULL;
+
+ if (chroma_fmt_idc != PIXEL_FORMAT_MONO &&
+ chroma_fmt_idc != PIXEL_FORMAT_411 &&
+ chroma_fmt_idc != PIXEL_FORMAT_420 &&
+ chroma_fmt_idc != PIXEL_FORMAT_422 &&
+ chroma_fmt_idc != PIXEL_FORMAT_444)
+ return IMG_PIXFMT_UNDEFINED;
+
+ /* valid bit depths are 8, 9 or 10 */
+ if (bitdepth_y < 8 || bitdepth_y > 10)
+ return IMG_PIXFMT_UNDEFINED;
+
+ /* valid bit depths are 8, 9 or 10 */
+ if (bitdepth_c < 8 || bitdepth_c > 10)
+ return IMG_PIXFMT_UNDEFINED;
+
+ key.pixfmt = IMG_PIXFMT_UNDEFINED;
+ key.chroma_fmt_idc = chroma_fmt_idc;
+ key.chroma_interleave = chroma_interleaved;
+ key.mem_pkg = mem_pkg;
+ key.bitdepth_y = bitdepth_y;
+ key.bitdepth_c = bitdepth_c;
+ key.num_planes = internal_num_planes;
+
+ /*
+ * 9 and 10 bits formats are handled in the same way, and there is only
+ * one entry in the PixelFormat table
+ */
+ if (key.bitdepth_y == 9)
+ key.bitdepth_y = 10;
+
+ /*
+ * 9 and 10 bits formats are handled in the same way, and there is only
+ * one entry in the PixelFormat table
+ */
+ if (key.bitdepth_c == 9)
+ key.bitdepth_c = 10;
+
+ pixel_init_search();
+
+ /* search by attributes rather than by format enum */
+ fmt_found = pixel_search_fmt(&key, FALSE);
+ if (!fmt_found)
+ return IMG_PIXFMT_UNDEFINED;
+
+ return fmt_found->pixfmt;
+}
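+
+/*
+ * Usage sketch (illustrative only, not part of the driver): looking up the
+ * format enum for an NV12-style buffer. The argument values below are
+ * assumptions chosen for the example.
+ *
+ *   enum img_pixfmt fmt;
+ *
+ *   fmt = pixel_get_pixfmt(PIXEL_FORMAT_420, PIXEL_UV_ORDER,
+ *                          PIXEL_BIT8_MP, 8, 8, 2);
+ *   if (fmt == IMG_PIXFMT_UNDEFINED)
+ *           return IMG_ERROR_INVALID_PARAMETERS;
+ */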
+
+static void pixel_get_internal_pixelinfo(struct pixel_pixinfo *pixinfo,
+ struct pixel_info *pix_bufinfo)
+{
+ if (pixinfo->bitdepth_y == 8 && pixinfo->bitdepth_c == 8)
+ pix_bufinfo->pixels_in_bop = 16;
+ else if (pixinfo->mem_pkg == PIXEL_BIT10_MP)
+ pix_bufinfo->pixels_in_bop = 12;
+ else
+ pix_bufinfo->pixels_in_bop = 8;
+
+ if (pixinfo->bitdepth_y == 8)
+ pix_bufinfo->ybytes_in_bop = pix_bufinfo->pixels_in_bop;
+ else
+ pix_bufinfo->ybytes_in_bop = 16;
+
+ if (pixinfo->chroma_fmt_idc == PIXEL_FORMAT_MONO) {
+ pix_bufinfo->uvbytes_in_bop = 0;
+ } else if (pixinfo->bitdepth_c == 8) {
+ pix_bufinfo->uvbytes_in_bop = pix_bufinfo->pixels_in_bop;
+ if (pixinfo->chroma_fmt_idc == PIXEL_FORMAT_422 && pixinfo->num_planes == 1) {
+ pix_bufinfo->uvbytes_in_bop = 0;
+ pix_bufinfo->pixels_in_bop = 8;
+ }
+ } else {
+ pix_bufinfo->uvbytes_in_bop = 16;
+ }
+
+ if (pixinfo->chroma_fmt_idc == PIXEL_FORMAT_444)
+ pix_bufinfo->uvbytes_in_bop *= 2;
+
+ if (pixinfo->chroma_interleave == PIXEL_INVALID_CI) {
+ pix_bufinfo->uvbytes_in_bop /= 2;
+ pix_bufinfo->vbytes_in_bop = pix_bufinfo->uvbytes_in_bop;
+ } else {
+ pix_bufinfo->vbytes_in_bop = 0;
+ }
+
+ pix_bufinfo->alphabytes_in_bop = 0;
+
+ if (pixinfo->num_planes == 1)
+ pix_bufinfo->is_planar = FALSE;
+ else
+ pix_bufinfo->is_planar = TRUE;
+
+ if (pixinfo->chroma_fmt_idc == PIXEL_FORMAT_420)
+ pix_bufinfo->uv_height_halved = TRUE;
+ else
+ pix_bufinfo->uv_height_halved = FALSE;
+
+ if (pixinfo->chroma_fmt_idc == PIXEL_FORMAT_444)
+ pix_bufinfo->uv_stride_ratio_times4 = 8;
+ else
+ pix_bufinfo->uv_stride_ratio_times4 = 4;
+
+ if (pixinfo->chroma_interleave == PIXEL_INVALID_CI)
+ pix_bufinfo->uv_stride_ratio_times4 /= 2;
+
+ pix_bufinfo->has_alpha = FALSE;
+}
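+
+/*
+ * Worked example: for a 4:2:0, 8-bit, two-plane, UV-interleaved format the
+ * code above yields pixels_in_bop = 16, ybytes_in_bop = 16,
+ * uvbytes_in_bop = 16, vbytes_in_bop = 0, is_planar = TRUE,
+ * uv_height_halved = TRUE and uv_stride_ratio_times4 = 4, matching the
+ * IMG_PIXFMT_420PL12YUV8 entry in pixinfo_table.
+ */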
+
+static void pixel_yuv_get_descriptor_int(struct pixel_info *pixinfo,
+ struct img_pixfmt_desc *pix_desc)
+{
+ pix_desc->bop_denom = pixinfo->pixels_in_bop;
+ pix_desc->h_denom = (pixinfo->uv_stride_ratio_times4 == 2 ||
+ !pixinfo->is_planar) ? 2 : 1;
+ pix_desc->v_denom = (pixinfo->uv_height_halved || !pixinfo->is_planar)
+ ? 2 : 1;
+
+ pix_desc->planes[0] = TRUE;
+ pix_desc->bop_numer[0] = pixinfo->ybytes_in_bop;
+ pix_desc->h_numer[0] = pix_desc->h_denom;
+ pix_desc->v_numer[0] = pix_desc->v_denom;
+
+ pix_desc->planes[1] = pixinfo->is_planar;
+ pix_desc->bop_numer[1] = pixinfo->uvbytes_in_bop;
+ pix_desc->h_numer[1] = (pix_desc->h_denom * pixinfo->uv_stride_ratio_times4) / 4;
+ pix_desc->v_numer[1] = 1;
+
+ pix_desc->planes[2] = (pixinfo->vbytes_in_bop > 0) ? TRUE : FALSE;
+ pix_desc->bop_numer[2] = pixinfo->vbytes_in_bop;
+ pix_desc->h_numer[2] = (pixinfo->vbytes_in_bop > 0) ? 1 : 0;
+ pix_desc->v_numer[2] = (pixinfo->vbytes_in_bop > 0) ? 1 : 0;
+
+ pix_desc->planes[3] = pixinfo->has_alpha;
+ pix_desc->bop_numer[3] = pixinfo->alphabytes_in_bop;
+ pix_desc->h_numer[3] = pix_desc->h_denom;
+ pix_desc->v_numer[3] = pix_desc->v_denom;
+}
+
+int pixel_yuv_get_desc(struct pixel_pixinfo *pix_info, struct img_pixfmt_desc *pix_desc)
+{
+ struct pixel_info int_pix_info;
+
+ struct pixel_info *int_pix_info_old = NULL;
+ enum img_pixfmt pix_fmt = pixel_get_pixfmt(pix_info->chroma_fmt_idc,
+ pix_info->chroma_interleave,
+ pix_info->mem_pkg,
+ pix_info->bitdepth_y,
+ pix_info->bitdepth_c,
+ pix_info->num_planes);
+
+ /* Validate the output from new function. */
+ if (pix_fmt != IMG_PIXFMT_UNDEFINED)
+ int_pix_info_old = pixel_get_bufinfo_from_pixfmt(pix_fmt);
+
+ pixel_get_internal_pixelinfo(pix_info, &int_pix_info);
+
+ if (int_pix_info_old) {
+ VDEC_ASSERT(int_pix_info_old->has_alpha ==
+ int_pix_info.has_alpha);
+ VDEC_ASSERT(int_pix_info_old->is_planar ==
+ int_pix_info.is_planar);
+ VDEC_ASSERT(int_pix_info_old->uv_height_halved ==
+ int_pix_info.uv_height_halved);
+ VDEC_ASSERT(int_pix_info_old->alphabytes_in_bop ==
+ int_pix_info.alphabytes_in_bop);
+ VDEC_ASSERT(int_pix_info_old->pixels_in_bop ==
+ int_pix_info.pixels_in_bop);
+ VDEC_ASSERT(int_pix_info_old->uvbytes_in_bop ==
+ int_pix_info.uvbytes_in_bop);
+ VDEC_ASSERT(int_pix_info_old->uv_stride_ratio_times4 ==
+ int_pix_info.uv_stride_ratio_times4);
+ VDEC_ASSERT(int_pix_info_old->vbytes_in_bop ==
+ int_pix_info.vbytes_in_bop);
+ VDEC_ASSERT(int_pix_info_old->ybytes_in_bop ==
+ int_pix_info.ybytes_in_bop);
+ }
+
+ pixel_yuv_get_descriptor_int(&int_pix_info, pix_desc);
+
+ return IMG_SUCCESS;
+}
+
+struct pixel_pixinfo *pixel_get_pixinfo(const enum img_pixfmt pix_fmt)
+{
+ struct pixel_pixinfo key;
+ struct pixel_pixinfo *fmt_found = NULL;
+
+ pixel_init_search();
+ pixel_pixinfo_defaults(&key);
+ key.pixfmt = pix_fmt;
+
+ fmt_found = pixel_search_fmt(&key, TRUE);
+ if (!fmt_found)
+ return def_fmt;
+ return fmt_found;
+}
+
+int pixel_get_fmt_desc(enum img_pixfmt pix_fmt, struct img_pixfmt_desc *pix_desc)
+{
+ if (pix_fmt >= IMG_PIXFMT_ARBPLANAR8 && pix_fmt <= IMG_PIXFMT_ARBPLANAR8_LAST) {
+ unsigned int i;
+ unsigned short spec;
+
+ pix_desc->bop_denom = 1;
+ pix_desc->h_denom = 1;
+ pix_desc->v_denom = 1;
+
+ spec = (pix_fmt - IMG_PIXFMT_ARBPLANAR8) & 0xffff;
+ for (i = 0; i < FACT_SPEC_FORMAT_NUM_PLANES; i++) {
+ unsigned char code = (spec >> FACT_SPEC_FORMAT_PLANE_CODE_BITS *
+ (FACT_SPEC_FORMAT_NUM_PLANES - 1 - i)) & 0xf;
+ pix_desc->bop_numer[i] = 1;
+ pix_desc->h_numer[i] = ((code >> 2) & FACT_SPEC_FORMAT_PLANE_CODE_MASK) +
+ FACT_SPEC_FORMAT_MIN_FACT_VAL;
+ pix_desc->v_numer[i] = (code & FACT_SPEC_FORMAT_PLANE_CODE_MASK) +
+ FACT_SPEC_FORMAT_MIN_FACT_VAL;
+ if (i == 0 || code != FACT_SPEC_FORMAT_PLANE_UNUSED) {
+ pix_desc->planes[i] = TRUE;
+
+ pix_desc->h_denom =
+ pix_desc->h_denom > pix_desc->h_numer[i] ?
+ pix_desc->h_denom : pix_desc->h_numer[i];
+
+ pix_desc->v_denom =
+ pix_desc->v_denom > pix_desc->v_numer[i] ?
+ pix_desc->v_denom : pix_desc->v_numer[i];
+ } else {
+ pix_desc->planes[i] = FALSE;
+ }
+ }
+ } else {
+ struct pixel_info *info =
+ pixel_get_bufinfo_from_pixfmt(pix_fmt);
+ if (!info) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ pixel_yuv_get_descriptor_int(info, pix_desc);
+ }
+
+ return IMG_SUCCESS;
+}
+
+int pixel_gen_pixfmt(enum img_pixfmt *pix_fmt, struct img_pixfmt_desc *pix_desc)
+{
+ unsigned short spec = 0, i;
+ unsigned char code;
+
+ for (i = 0; i < FACT_SPEC_FORMAT_NUM_PLANES; i++) {
+ if (pix_desc->planes[i] != 1) {
+ code = FACT_SPEC_FORMAT_PLANE_UNUSED;
+ } else {
+ code = (((pix_desc->h_numer[i] - FACT_SPEC_FORMAT_MIN_FACT_VAL) &
+ FACT_SPEC_FORMAT_PLANE_CODE_MASK) << 2) |
+ ((pix_desc->v_numer[i] - FACT_SPEC_FORMAT_MIN_FACT_VAL) &
+ FACT_SPEC_FORMAT_PLANE_CODE_MASK);
+ }
+ spec |= (code << FACT_SPEC_FORMAT_PLANE_CODE_BITS *
+ (FACT_SPEC_FORMAT_NUM_PLANES - 1 - i));
+ }
+
+ *pix_fmt = (enum img_pixfmt)(IMG_PIXFMT_ARBPLANAR8 | spec);
+
+ return 0;
+}
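+
+/*
+ * Worked example (under assumed constant values, since the FACT_SPEC_*
+ * constants are defined elsewhere): with FACT_SPEC_FORMAT_MIN_FACT_VAL == 1,
+ * FACT_SPEC_FORMAT_PLANE_CODE_MASK == 0x3 and
+ * FACT_SPEC_FORMAT_PLANE_CODE_BITS == 4, a used plane with h_numer == 2 and
+ * v_numer == 2 encodes as code = ((2 - 1) << 2) | (2 - 1) == 0x5, and
+ * pixel_get_fmt_desc() recovers the same numerators from that nibble.
+ */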
diff --git a/drivers/media/platform/vxe-vxd/decoder/pixel_api.h b/drivers/media/platform/vxe-vxd/decoder/pixel_api.h
new file mode 100644
index 000000000000..3648c1b32ea7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/pixel_api.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Pixel processing functions header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __PIXEL_API_H__
+#define __PIXEL_API_H__
+
+#include <linux/types.h>
+
+#include "img_errors.h"
+#include "img_pixfmts.h"
+
+#define PIXEL_MULTICHROME TRUE
+#define PIXEL_MONOCHROME FALSE
+#define IMG_MAX_NUM_PLANES 4
+#define PIXEL_INVALID_BDC 8
+
+extern unsigned char pix_fmt_idc_names[6][16];
+
+struct img_pixfmt_desc {
+ unsigned char planes[IMG_MAX_NUM_PLANES];
+ unsigned int bop_denom;
+ unsigned int bop_numer[IMG_MAX_NUM_PLANES];
+ unsigned int h_denom;
+ unsigned int v_denom;
+ unsigned int h_numer[IMG_MAX_NUM_PLANES];
+ unsigned int v_numer[IMG_MAX_NUM_PLANES];
+};
+
+/*
+ * @brief This type defines memory chroma interleaved order
+ */
+enum pixel_chroma_interleaved {
+ PIXEL_INVALID_CI = 0,
+ PIXEL_UV_ORDER = 1,
+ PIXEL_VU_ORDER = 2,
+ PIXEL_YAYB_ORDER = 4,
+ PIXEL_AYBY_ORDER = 8,
+ PIXEL_ORDER_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * @brief This macro translates enum pixel_chroma_interleaved values into
+ * value that can be used to write HW registers directly.
+ */
+#define PIXEL_GET_HW_CHROMA_INTERLEAVED(value) \
+ ((value) & PIXEL_VU_ORDER ? TRUE : FALSE)
+
+/*
+ * @brief This type defines memory packing types
+ */
+enum pixel_mem_packing {
+ PIXEL_BIT8_MP = 0,
+ PIXEL_BIT10_MSB_MP = 1,
+ PIXEL_BIT10_LSB_MP = 2,
+ PIXEL_BIT10_MP = 3,
+ PIXEL_DEFAULT_MP = 0xff,
+ PIXEL_DEFAULT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+static inline unsigned char pixel_get_hw_memory_packing(enum pixel_mem_packing value)
+{
+ return value == PIXEL_BIT8_MP ? FALSE :
+ value == PIXEL_BIT10_MSB_MP ? FALSE :
+ value == PIXEL_BIT10_LSB_MP ? FALSE :
+ value == PIXEL_BIT10_MP ? TRUE : FALSE;
+}
+
+/*
+ * @brief This type defines chroma formats
+ */
+enum pixel_fmt_idc {
+ PIXEL_FORMAT_MONO = 0,
+ PIXEL_FORMAT_411 = 1,
+ PIXEL_FORMAT_420 = 2,
+ PIXEL_FORMAT_422 = 3,
+ PIXEL_FORMAT_444 = 4,
+ PIXEL_FORMAT_INVALID = 0xFF,
+ PIXEL_FORMAT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+static inline int pixel_get_hw_chroma_format_idc(enum pixel_fmt_idc value)
+{
+ return value == PIXEL_FORMAT_MONO ? 0 :
+ value == PIXEL_FORMAT_420 ? 1 :
+ value == PIXEL_FORMAT_422 ? 2 :
+ value == PIXEL_FORMAT_444 ? 3 :
+ PIXEL_FORMAT_INVALID;
+}
+
+/*
+ * @brief This structure contains information about the pixel formats
+ */
+struct pixel_pixinfo {
+ enum img_pixfmt pixfmt;
+ enum pixel_chroma_interleaved chroma_interleave;
+ unsigned char chroma_fmt;
+ enum pixel_mem_packing mem_pkg;
+ enum pixel_fmt_idc chroma_fmt_idc;
+ unsigned int bitdepth_y;
+ unsigned int bitdepth_c;
+ unsigned int num_planes;
+};
+
+/*
+ * @brief This type defines the image in memory
+ */
+struct pixel_info {
+ unsigned int pixels_in_bop;
+ unsigned int ybytes_in_bop;
+ unsigned int uvbytes_in_bop;
+ unsigned int vbytes_in_bop;
+ unsigned int alphabytes_in_bop;
+ unsigned char is_planar;
+ unsigned char uv_height_halved;
+ unsigned int uv_stride_ratio_times4;
+ unsigned char has_alpha;
+};
+
+struct pixel_pixinfo_table {
+ enum img_pixfmt pix_color_fmt;
+ struct pixel_info info;
+};
+
+struct pixel_pixinfo *pixel_get_pixinfo(const enum img_pixfmt pixfmt);
+
+enum img_pixfmt pixel_get_pixfmt(enum pixel_fmt_idc chroma_fmt_idc,
+ enum pixel_chroma_interleaved
+ chroma_interleaved,
+ enum pixel_mem_packing mem_packing,
+ unsigned int bitdepth_y, unsigned int bitdepth_c,
+ unsigned int num_planes);
+
+int pixel_yuv_get_desc(struct pixel_pixinfo *pix_info,
+ struct img_pixfmt_desc *desc);
+
+int pixel_get_fmt_desc(enum img_pixfmt pixfmt,
+ struct img_pixfmt_desc *fmt_desc);
+
+int pixel_gen_pixfmt(enum img_pixfmt *pix_fmt, struct img_pixfmt_desc *pix_desc);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/pvdec_entropy_regs.h b/drivers/media/platform/vxe-vxd/decoder/pvdec_entropy_regs.h
new file mode 100644
index 000000000000..3c495c198853
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/pvdec_entropy_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __PVDEC_ENTROPY_REGS_H__
+#define __PVDEC_ENTROPY_REGS_H__
+
+/*
+ * PVDEC_ENTROPY, CR_GENC_BUFFER_SIZE, GENC_BUFFER_SIZE
+ */
+#define PVDEC_ENTROPY_CR_GENC_BUFFER_SIZE_OFFSET (0x0100)
+
+/*
+ * PVDEC_ENTROPY, CR_GENC_BUFFER_BASE_ADDRESS, GENC_BUFFER_BASE_ADDRESS
+ */
+#define PVDEC_ENTROPY_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET (0x0110)
+
+/*
+ * PVDEC_ENTROPY, CR_GENC_FRAGMENT_BASE_ADDRESS, GENC_FRAGMENT_BASE_ADDRESS
+ */
+#define PVDEC_ENTROPY_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET (0x0098)
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/pvdec_int.h b/drivers/media/platform/vxe-vxd/decoder/pvdec_int.h
new file mode 100644
index 000000000000..01f5a038e69f
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/pvdec_int.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Low-level PVDEC interface component.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __PVDEC_INT_H__
+#define __PVDEC_INT_H__
+
+#include "hw_control.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+
+/* How many VLC IDX addresses fit in a single address register */
+#define PVDECIO_VLC_IDX_ADDR_PARTS 2
+
+/* How many VLC IDX initial widths fit in a single width register */
+#define PVDECIO_VLC_IDX_WIDTH_PARTS 10
+
+/* How many VLC IDX initial opcodes fit in a single opcode register */
+#define PVDECIO_VLC_IDX_OPCODE_PARTS 16
+
+#define PVDECIO_VLC_IDX_ADDR_ID 2
+
+/*
+ * Mask for VLC IDX address field. We're taking [0][0] here, as it corresponds
+ * to unshifted mask
+ */
+#define PVDECIO_VLC_IDX_ADDR_MASK MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_VLC_TABLE_ADDR0_MASK
+
+/*
+ * Length (shift) of VLC IDX address field. We're taking [0][1] here, as it
+ * corresponds to shift of one element
+ */
+#define PVDECIO_VLC_IDX_ADDR_SHIFT MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_VLC_TABLE_ADDR1_SHIFT
+#define PVDECIO_VLC_IDX_WIDTH_ID 1
+
+/*
+ * Mask for VLC IDX width field. We're taking [0][0] here, as it corresponds
+ * to unshifted mask
+ */
+#define PVDECIO_VLC_IDX_WIDTH_MASK \
+ MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_VLC_TABLE_INITIAL_WIDTH0_MASK
+
+/*
+ * Length (shift) of VLC IDX width field. We're taking [0][1] here, as it
+ * corresponds to shift of one element
+ */
+#define PVDECIO_VLC_IDX_WIDTH_SHIFT \
+ MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_VLC_TABLE_INITIAL_WIDTH1_SHIFT
+
+#define PVDECIO_VLC_IDX_OPCODE_ID 0
+
+/*
+ * Length (shift) of VLC IDX opcode field. We're taking [0][1] here, as it
+ * corresponds to shift of one element
+ */
+#define PVDECIO_VLC_IDX_OPCODE_SHIFT \
+ MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_VLC_TABLE_INITIAL_OPCODE1_SHIFT
+
+/* This comes from DEVA PVDEC FW */
+#define CTRL_ALLOC_MAX_SEGMENT_SIZE 1024
+
+/*
+ * Mask for VLC IDX opcode field. We're taking [0][0] here, as it corresponds
+ * to unshifted mask
+ */
+#define PVDECIO_VLC_IDX_OPCODE_MASK \
+ MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_VLC_TABLE_INITIAL_OPCODE0_MASK
+
+#endif /* __PVDEC_INT_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/pvdec_vec_be_regs.h b/drivers/media/platform/vxe-vxd/decoder/pvdec_vec_be_regs.h
new file mode 100644
index 000000000000..06593f050a97
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/pvdec_vec_be_regs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __PVDEC_VEC_BE_REGS_H__
+#define __PVDEC_VEC_BE_REGS_H__
+
+#define PVDEC_VEC_BE_CR_GENC_BUFFER_SIZE_OFFSET (0x0040)
+
+/*
+ * PVDEC_VEC_BE, CR_GENC_BUFFER_BASE_ADDRESS, GENC_BUFFER_BASE_ADDRESS
+ */
+#define PVDEC_VEC_BE_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET (0x0050)
+
+/*
+ * PVDEC_VEC_BE, CR_GENC_FRAGMENT_BASE_ADDRESS, GENC_FRAGMENT_BASE_ADDRESS
+ */
+#define PVDEC_VEC_BE_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET (0x0030)
+
+/*
+ * PVDEC_VEC_BE, CR_ABOVE_PARAM_BASE_ADDRESS, ABOVE_PARAM_BASE_ADDRESS
+ */
+#define PVDEC_VEC_BE_CR_ABOVE_PARAM_BASE_ADDRESS_OFFSET (0x00C0)
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/reg_io2.h b/drivers/media/platform/vxe-vxd/decoder/reg_io2.h
new file mode 100644
index 000000000000..a18ffda4efcb
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/reg_io2.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG MSVDX core Registers
+ * This file contains the MSVDX_CORE_REGS_H Definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef REG_IO2_H_
+#define REG_IO2_H_
+
+#define IMG_ASSERT(expected) \
+ ((void)((expected) || \
+ (pr_err("Assertion failed: %s, file %s, line %d\n", \
+ #expected, __FILE__, __LINE__), dump_stack(), 0)))
+
+/* This macro is used to extract a field from a register. */
+#define REGIO_READ_FIELD(regval, group, reg, field) \
+ (((regval) & group ## _ ## reg ## _ ## field ## _MASK) >> \
+ group ## _ ## reg ## _ ## field ## _SHIFT)
+
+#if (defined WIN32 || defined __linux__) && !defined NO_REGIO_CHECK_FIELD_VALUE
+/*
+ * Only provide register field range checking for Windows and
+ * Linux builds.
+ * Simple range check that ensures that, if bits outside the valid field
+ * range are set, the provided value is at least consistent with a
+ * negative value (i.e. all top bits are set to 1).
+ * Cannot perform more comprehensive testing without knowing whether the
+ * field should be interpreted as signed or unsigned.
+ */
+#define REGIO_CHECK_VALUE_FITS_WITHIN_FIELD(group, reg, field, value, type) \
+ { \
+ type __value = value; \
+ unsigned int temp = (unsigned int)(__value); \
+ if (temp > group ## _ ## reg ## _ ## field ## _LSBMASK) { \
+ IMG_ASSERT((((unsigned int)__value) & \
+ (unsigned int)~(group ## _ ## reg ## _ ## field ## _LSBMASK)) == \
+ (unsigned int)~(group ## _ ## reg ## _ ## field ## _LSBMASK)); \
+ } \
+ }
+#else
+#define REGIO_CHECK_VALUE_FITS_WITHIN_FIELD(group, reg, field, value, type)
+#endif
+
+/* This macro is used to update the value of a field in a register. */
+#define REGIO_WRITE_FIELD(regval, group, reg, field, value, reg_type, val_type) \
+ { \
+ reg_type __regval = regval; \
+ val_type __value = value; \
+ REGIO_CHECK_VALUE_FITS_WITHIN_FIELD(group, reg, field, __value, val_type); \
+ (regval) = \
+ ((__regval) & ~(group ## _ ## reg ## _ ## field ## _MASK)) | \
+ (((unsigned int)(__value) << (group ## _ ## reg ## _ ## field ## _SHIFT)) & \
+ (group ## _ ## reg ## _ ## field ## _MASK)); \
+ }
+
+/* This macro is used to update the value of a field in a register. */
+#define REGIO_WRITE_FIELD_LITE(regval, group, reg, field, value, type) \
+{ \
+ type __value = value; \
+ REGIO_CHECK_VALUE_FITS_WITHIN_FIELD(group, reg, field, __value, type); \
+ (regval) |= ((unsigned int)(__value) << (group ## _ ## reg ## _ ## field ## _SHIFT)); \
+}
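+
+/*
+ * Usage sketch (illustrative only; FOO, BAR and BAZ are hypothetical names,
+ * assuming the FOO_BAR_BAZ_MASK/_SHIFT/_LSBMASK macros exist):
+ *
+ *   unsigned int reg = 0, val;
+ *
+ *   REGIO_WRITE_FIELD(reg, FOO, BAR, BAZ, 5, unsigned int, unsigned int);
+ *   val = REGIO_READ_FIELD(reg, FOO, BAR, BAZ);
+ *
+ * after which val == 5.
+ */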
+
+#endif /* REG_IO2_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/scaler_setup.h b/drivers/media/platform/vxe-vxd/decoder/scaler_setup.h
new file mode 100644
index 000000000000..55dc000e07a2
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/scaler_setup.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC constants calculation and scaling coefficients
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ */
+
+#ifndef _SCALER_SETUP_H
+#define _SCALER_SETUP_H
+
+#define LOWP 11
+#define HIGHP 14
+
+#define FIXED(a, digits) ((int)((a) * (1 << (digits))))
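+
+/*
+ * Worked example: with LOWP == 11, FIXED(1.5, LOWP) evaluates to
+ * (int)(1.5 * 2048) == 3072, i.e. 1.5 in Q11 fixed point.
+ */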
+
+struct scaler_params {
+ unsigned int vert_pitch;
+ unsigned int vert_startpos;
+ unsigned int vert_pitch_chroma;
+ unsigned int vert_startpos_chroma;
+ unsigned int horz_pitch;
+ unsigned int horz_startpos;
+ unsigned int horz_pitch_chroma;
+ unsigned int horz_startpos_chroma;
+ unsigned char fixed_point_shift;
+};
+
+struct scaler_filter {
+ unsigned char bhoriz_bilinear;
+ unsigned char bvert_bilinear;
+};
+
+struct scaler_pitch {
+ int horiz_luma;
+ int vert_luma;
+ int horiz_chroma;
+ int vert_chroma;
+};
+
+struct scaler_config {
+ enum vdec_vid_std vidstd;
+ const struct vxd_coreprops *coreprops;
+ struct pixel_pixinfo *in_pixel_info;
+ const struct pixel_pixinfo *out_pixel_info;
+ unsigned char bfield_coded;
+ unsigned char bseparate_chroma_planes;
+ unsigned int recon_width;
+ unsigned int recon_height;
+ unsigned int mb_width;
+ unsigned int mb_height;
+ unsigned int scale_width;
+ unsigned int scale_height;
+};
+
+#endif /* _SCALER_SETUP_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/swsr.c b/drivers/media/platform/vxe-vxd/decoder/swsr.c
new file mode 100644
index 000000000000..d59f8b06b397
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/swsr.c
@@ -0,0 +1,1657 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Software Shift Register Access functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "swsr.h"
+#include "vdec_defs.h"
+
+#define NBIT_8BYTE_MASK(n) ((1ULL << (n)) - 1)
+
+/* Input FIFO length (in bytes). */
+#define SWSR_INPUT_FIFO_LENGTH 8
+
+/* Output FIFO length (in bits). */
+#define SWSR_OUTPUT_FIFO_LENGTH 64
+
+#define SWSR_NALTYPE_LENGTH 8
+
+#define SWSR_MAX_SYNTAX_LENGTH 32
+
+#define SWSR_ASSERT(expected) ({WARN_ON(!(expected)); 0; })
+
+struct swsr_buffer {
+ void **lst_link;
+ /* Pointer to bitstream data. */
+ unsigned char *data;
+ /* Number of bytes of bitstream */
+ unsigned long long num_bytes;
+ /* Index (in bytes) to next data within the buffer */
+ unsigned long long byte_offset;
+ /* Number of bytes read from input FIFO */
+ unsigned long long num_bytes_read;
+};
+
+struct swsr_input {
+ /* Bitstream data (byte-based, before emulation prevention removal) - left aligned. */
+ unsigned long long fifo;
+ /* Number of *bytes* in Input FIFO */
+ unsigned int num_bytes;
+ struct swsr_config config;
+ /* Emulation prevention mode used to process data in Input FIFO */
+ enum swsr_emprevent emprevent;
+ /* Number of bytes in emulation prevention sequence */
+ unsigned int emprev_seq_len;
+ /* Size of bitstream declared at initialisation */
+ unsigned long long bitstream_size;
+ /*
+ * Number of bytes required from input buffer before checking
+ * next emulation prevention sequence.
+ */
+ unsigned int bytes_for_next_sequ;
+ /* Byte count read from size delimiter */
+ unsigned long long byte_count;
+ unsigned long long bytes_read_since_delim;
+ /* Cumulative offset (in bytes) into input buffer data */
+ unsigned long long bitstream_offset;
+ /* Bitstream delimiter found (see enum swsr_delim_type) */
+ unsigned char delim_found;
+ /*
+ * No More Valid Data before next delimiter.
+ * Set only for SWSR_EMPREVENT_00000300.
+ */
+ unsigned char no_moredata;
+ /* Pointer to current input buffer in the context of Input FIFO */
+ struct swsr_buffer *buf;
+ /* Start offset within buffer of current delimited unit */
+ long delimited_unit_start_offset;
+ /* Size of current delimited unit (if already calculated) */
+ unsigned int delimited_unit_size;
+ /* Current bit offset within the current delimited unit */
+ unsigned int delimunit_bitofst;
+};
+
+struct swsr_output {
+ /*
+ * Bitstream data (post emulation prevention removal
+ * delimiter checking) - left aligned.
+ */
+ unsigned long long fifo;
+ /* Number of *bits* in Output FIFO */
+ unsigned int num_bits;
+ unsigned long long totalbits_consumed;
+};
+
+struct swsr_buffer_ctx {
+ /*
+ * Callback function to notify event and provide/request data.
+ * See the SWSR_EVENT_* values for event types and a description
+ * of callback argument usage.
+ */
+ swsr_callback_fxn cb_fxn;
+ /* Caller supplied pointer for callback */
+ void *cb_param;
+ /* List of buffers */
+ struct lst_t free_buffer_list;
+ /*
+ * List of buffers (struct swsr_buffer) whose data reside
+ * in the Input/Output FIFOs.
+ */
+ struct lst_t used_buffer_list;
+};
+
+struct swsr_context {
+ /* IMG_TRUE if the context is initialised */
+ unsigned char initialised;
+ /* A pointer to an exception handler */
+ swsr_except_handler_fxn exception_handler_fxn;
+ /* Caller supplied pointer */
+ void *pexception_param;
+ /* Last recorded exception */
+ enum swsr_exception exception;
+ /* Buffer context data */
+ struct swsr_buffer_ctx buffer_ctx;
+ /* Context of shift register input. */
+ struct swsr_input input;
+ /* Context of shift register output */
+ struct swsr_output output;
+};
+
+static unsigned long long left_aligned_nbit_8byte_mask(unsigned int mask, unsigned int nbits)
+{
+ return (((unsigned long long)mask << (64 - nbits)) |
+ (unsigned long long)NBIT_8BYTE_MASK(64 - nbits));
+}
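+
+/*
+ * Worked example: left_aligned_nbit_8byte_mask(0xffff00ff, 32) returns
+ * 0xffff00ffffffffffULL - the given mask occupies the top nbits and all
+ * lower bits are set.
+ */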
+
+/*
+ * Extract the next byte from the input, requesting a new buffer via the
+ * callback when the current buffer has been exhausted and there are still
+ * more bytes declared in the bitstream.
+ */
+static int swsr_extractbyte(struct swsr_context *ctx, unsigned char *byte_ext)
+{
+ struct swsr_input *input;
+ struct swsr_buffer_ctx *buf_ctx;
+ unsigned char byte = 0;
+ unsigned long long cur_byte_offset;
+ unsigned int result = 0;
+
+ if (!ctx || !byte_ext)
+ return IMG_ERROR_FATAL;
+
+ input = &ctx->input;
+ buf_ctx = &ctx->buffer_ctx;
+
+ cur_byte_offset = input->bitstream_offset;
+
+ if (input->buf && input->buf->byte_offset < input->buf->num_bytes) {
+ input->bitstream_offset++;
+ byte = input->buf->data[input->buf->byte_offset++];
+ } else if (input->bitstream_offset < input->bitstream_size) {
+ struct swsr_buffer *buffer;
+
+ buffer = lst_removehead(&buf_ctx->free_buffer_list);
+ if (!buffer)
+ return IMG_ERROR_FATAL;
+
+ buffer->num_bytes_read = 0;
+ buffer->byte_offset = 0;
+
+ buf_ctx->cb_fxn(SWSR_EVENT_INPUT_BUFFER_START,
+ buf_ctx->cb_param, 0,
+ &buffer->data, &buffer->num_bytes);
+ SWSR_ASSERT(buffer->data && buffer->num_bytes > 0);
+
+ if (buffer->data && buffer->num_bytes > 0) {
+ input->buf = buffer;
+
+ /* Add input buffer to output buffer list. */
+ lst_add(&buf_ctx->used_buffer_list, input->buf);
+
+ input->bitstream_offset++;
+ byte = input->buf->data[input->buf->byte_offset++];
+ }
+ }
+
+ {
+ struct swsr_buffer *buffer = input->buf;
+
+ if (!buffer)
+ buffer = lst_first(&buf_ctx->used_buffer_list);
+
+ if (!buffer || buffer->num_bytes_read > buffer->num_bytes) {
+ input->delimited_unit_start_offset = -1;
+ input->delimited_unit_size = 0;
+ }
+ }
+ /* If the bitstream offset hasn't increased we failed to read a byte. */
+ if (cur_byte_offset == input->bitstream_offset) {
+ input->buf = NULL;
+ result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ *byte_ext = byte;
+
+ return result;
+}
+
+static unsigned char swsr_checkfor_delimiter(struct swsr_context *ctx)
+{
+ struct swsr_input *input;
+ unsigned char delim_found = 0;
+
+ input = &ctx->input;
+
+ /* Check for delimiter. */
+ if (input->config.delim_type == SWSR_DELIM_SCP) {
+ unsigned int shift = (SWSR_INPUT_FIFO_LENGTH * 8)
+ - input->config.delim_length;
+ unsigned long long sequ = input->fifo >> shift;
+
+ /*
+ * Check if the SCP value is matched outside of
+ * emulation prevention data.
+ */
+ if (sequ == input->config.scp_value && input->bytes_for_next_sequ == 0)
+ delim_found = 1;
+
+ } else if (input->config.delim_type == SWSR_DELIM_SIZE) {
+ delim_found = (input->bytes_read_since_delim >= input->byte_count) ? 1 : 0;
+ }
+
+ return delim_found;
+}
+
+static int swsr_increment_cur_bufoffset(struct swsr_context *ctx)
+{
+ struct swsr_buffer_ctx *buf_ctx;
+ struct swsr_buffer *cur_buf;
+
+ buf_ctx = &ctx->buffer_ctx;
+
+ /* Update the number of bytes read from input FIFO for current buffer */
+ cur_buf = lst_first(&buf_ctx->used_buffer_list);
+ if (cur_buf->num_bytes_read >= cur_buf->num_bytes) {
+ /* Mark current bitstream buffer as fully consumed */
+ cur_buf->num_bytes_read = cur_buf->num_bytes;
+
+ /* Notify the application that the old buffer is exhausted. */
+ buf_ctx->cb_fxn(SWSR_EVENT_OUTPUT_BUFFER_END,
+ buf_ctx->cb_param, 0,
+ NULL, NULL);
+
+ /*
+ * Discard the buffer whose data was at the head of
+ * the input FIFO.
+ */
+ cur_buf = lst_removehead(&buf_ctx->used_buffer_list);
+ /* Add the buffer container to free list. */
+ lst_add(&buf_ctx->free_buffer_list, cur_buf);
+
+ /*
+ * Since the byte that we read was actually from the next
+ * buffer, increment its counter.
+ */
+ cur_buf = lst_first(&buf_ctx->used_buffer_list);
+ cur_buf->num_bytes_read++;
+ } else {
+ cur_buf->num_bytes_read++;
+ }
+
+ return 0;
+}
+
+static enum swsr_found swsr_readbyte_from_inputfifo(struct swsr_context *ctx,
+ unsigned char *byte)
+{
+ struct swsr_input *input;
+ enum swsr_found found = SWSR_FOUND_NONE;
+ unsigned int result = 0;
+
+ input = &ctx->input;
+
+ input->delim_found |= swsr_checkfor_delimiter(ctx);
+
+ /*
+ * Refill the input FIFO before checking for emulation prevention etc.
+ * The only exception is when there are no more bytes left to extract
+ * from input buffer.
+ */
+ while (input->num_bytes < SWSR_INPUT_FIFO_LENGTH && result == 0) {
+ unsigned char byte;
+
+ result = swsr_extractbyte(ctx, &byte);
+ if (result == 0) {
+ input->fifo |= ((unsigned long long)byte <<
+ ((SWSR_INPUT_FIFO_LENGTH - 1 - input->num_bytes) * 8));
+ input->num_bytes += 1;
+ }
+ }
+
+ if (input->num_bytes == 0) {
+ found = SWSR_FOUND_EOD;
+ } else if (!input->delim_found) {
+ /*
+ * Check for emulation prevention when enabled and enough
+ * bytes are remaining in input FIFO.
+ */
+ if (input->emprevent != SWSR_EMPREVENT_NONE &&
+ /*
+ * Ensure you have enough bytes to check for emulation
+ * prevention.
+ */
+ input->num_bytes >= input->emprev_seq_len &&
+ (input->config.delim_type != SWSR_DELIM_SIZE ||
+ /*
+ * Ensure that you don't remove emu bytes beyond current
+ * delimited unit.
+ */
+ ((input->bytes_read_since_delim + input->emprev_seq_len) <
+ input->byte_count)) && input->bytes_for_next_sequ == 0) {
+ unsigned char emprev_removed = 0;
+ unsigned int shift = (SWSR_INPUT_FIFO_LENGTH - input->emprev_seq_len) * 8;
+ unsigned long long sequ = input->fifo >> shift;
+
+ if (input->emprevent == SWSR_EMPREVENT_00000300) {
+ if ((sequ & 0xffffff00) == 0x00000300) {
+ if ((sequ & 0x000000ff) > 0x03)
+ pr_err("Invalid start code emulation preventionbytes found\n");
+
+ /*
+ * Instead of trying to remove the emulation prevention
+ * byte from the middle of the FIFO simply make it zero
+ * and drop the next byte from the FIFO which will
+ * also be zero.
+ */
+ input->fifo &= left_aligned_nbit_8byte_mask
+ (0xffff00ff,
+ input->emprev_seq_len * 8);
+ input->fifo <<= 8;
+
+ emprev_removed = 1;
+ } else if ((sequ & 0xffffffff) == 0x00000000 ||
+ (sequ & 0xffffffff) == 0x00000001) {
+ input->no_moredata = 1;
+ }
+ } else if (input->emprevent == SWSR_EMPREVENT_ff00) {
+ if (sequ == 0xff00) {
+ /* Remove the zero byte. */
+ input->fifo <<= 8;
+ input->fifo |= (0xff00ULL << shift);
+ emprev_removed = 1;
+ }
+ } else if (input->emprevent == SWSR_EMPREVENT_000002) {
+ /*
+ * Remove the emulation prevention bytes
+ * if we find 22 consecutive 0 bits
+ * (from a byte-aligned position?!)
+ */
+ if (sequ == 0x000002) {
+ /*
+ * Appear to "remove" the 0x02 byte by clearing
+ * it and then dropping the top (zero) byte.
+ */
+ input->fifo &= left_aligned_nbit_8byte_mask
+ (0xffff00,
+ input->emprev_seq_len * 8);
+ input->fifo <<= 8;
+ emprev_removed = 1;
+ }
+ }
+
+ if (emprev_removed) {
+ input->num_bytes--;
+ input->bytes_read_since_delim++;
+
+ /* Increment the buffer offset for the
+ * byte that has been removed.
+ */
+ swsr_increment_cur_bufoffset(ctx);
+
+ /*
+ * Signal that two more new bytes in the emulation
+ * prevention sequence are required before another match
+ * can be made.
+ */
+ input->bytes_for_next_sequ = input->emprev_seq_len - 2;
+ }
+ }
+
+ if (input->bytes_for_next_sequ > 0)
+ input->bytes_for_next_sequ--;
+
+ /* return the first byte of the read data */
+ *byte = (unsigned char)(input->fifo >> ((SWSR_INPUT_FIFO_LENGTH - 1) * 8));
+ input->fifo <<= 8;
+
+ input->num_bytes--;
+ input->bytes_read_since_delim++;
+
+ /* Increment the buffer offset for byte that has been read. */
+ swsr_increment_cur_bufoffset(ctx);
+
+ found = SWSR_FOUND_DATA;
+ } else {
+ found = SWSR_FOUND_DELIM;
+ }
+
+ return found;
+}
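+
+/*
+ * Worked example for SWSR_EMPREVENT_00000300 (the H.264/HEVC
+ * emulation_prevention_three_byte): the raw byte run 00 00 03 01 in the
+ * input FIFO is collapsed to 00 00 01 by the code above before the reader
+ * sees it, while a four-byte window of 00 00 00 00 or 00 00 00 01 sets the
+ * no_moredata flag instead.
+ */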
+
+static enum swsr_found swsr_consumebyte_from_inputfifo
+ (struct swsr_context *ctx, unsigned char *byte)
+{
+ enum swsr_found found;
+
+ found = swsr_readbyte_from_inputfifo(ctx, byte);
+
+ if (found == SWSR_FOUND_DATA) {
+ /* Only whole bytes can be read from Input FIFO. */
+ ctx->output.totalbits_consumed += 8;
+ ctx->input.delimunit_bitofst += 8;
+ }
+
+ return found;
+}
+
+static int swsr_fill_outputfifo(struct swsr_context *ctx)
+{
+ unsigned char byte;
+ enum swsr_found found = SWSR_FOUND_DATA;
+
+ /* Fill output FIFO with whole bytes up to (but not over) max length */
+ while (ctx->output.num_bits <= (SWSR_OUTPUT_FIFO_LENGTH - 8) && found == SWSR_FOUND_DATA) {
+ found = swsr_readbyte_from_inputfifo(ctx, &byte);
+ if (found == SWSR_FOUND_DATA) {
+ ctx->output.fifo |= ((unsigned long long)byte <<
+ (SWSR_OUTPUT_FIFO_LENGTH - 8 - ctx->output.num_bits));
+ ctx->output.num_bits += 8;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int swsr_getbits_from_outputfifo(struct swsr_context *ctx,
+ unsigned int numbits,
+ unsigned char bconsume)
+{
+ unsigned int bitsread;
+
+ /*
+ * Fetch more bits from the input FIFO if the output FIFO
+ * doesn't have enough bits to satisfy the request on its own.
+ */
+ if (numbits > ctx->output.num_bits)
+ swsr_fill_outputfifo(ctx);
+
+ /* Ensure that there are now enough bits in the output FIFO. */
+ if (numbits > ctx->output.num_bits) {
+ /* Tried to access into an SCP or other delimiter. */
+ if (ctx->input.delim_found) {
+ ctx->exception = SWSR_EXCEPT_ACCESS_INTO_SCP;
+ } else {
+ /*
+ * Data has been exhausted if after extracting bits
+ * there are still not enough bits in the internal
+ * storage to fulfil the number requested.
+ */
+ ctx->exception = SWSR_EXCEPT_ACCESS_BEYOND_EOD;
+ }
+
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ /* Return zero if the bits couldn't be obtained */
+ bitsread = 0;
+ } else {
+ unsigned int shift;
+
+ /* Extract all the bits from the output FIFO */
+ shift = (SWSR_OUTPUT_FIFO_LENGTH - numbits);
+ bitsread = (unsigned int)(ctx->output.fifo >> shift);
+
+ if (bconsume) {
+ /* Update output FIFO. */
+ ctx->output.fifo <<= numbits;
+ ctx->output.num_bits -= numbits;
+ }
+ }
+
+ if (bconsume && ctx->exception == SWSR_EXCEPT_NO_EXCEPTION) {
+ ctx->output.totalbits_consumed += numbits;
+ ctx->input.delimunit_bitofst += numbits;
+ }
+
+ /* Return the bits */
+ return bitsread;
+}
+
+int swsr_read_signed_expgoulomb(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ unsigned int exp_goulomb;
+ unsigned char unsign;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /* Read unsigned value then convert to signed value */
+ exp_goulomb = swsr_read_unsigned_expgoulomb(ctx);
+
+ unsign = exp_goulomb & 1;
+ exp_goulomb >>= 1;
+ exp_goulomb = (unsign) ? exp_goulomb + 1 : -(int)exp_goulomb;
+
+ if (ctx->exception != SWSR_EXCEPT_NO_EXCEPTION)
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ /* Return the signed value */
+ return exp_goulomb;
+}
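+
+/*
+ * Worked example: the unsigned codes 0, 1, 2, 3, 4 map to the signed
+ * values 0, 1, -1, 2, -2 respectively under the conversion above.
+ */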
+
+static unsigned int swsr_readunsigned_expgoulomb(struct swsr_context *ctx)
+{
+ unsigned int numbits = 0;
+ unsigned int bitpeeked;
+ unsigned int bitread;
+ unsigned int setbits;
+ unsigned int expgoulomb;
+
+ /* Loop until we have found a non-zero bit or peeked 31 zero bits */
+ /* the peek grows one bit at a time to avoid an illegal 32-bit peek */
+ numbits = 1;
+ do {
+ bitpeeked = swsr_peekbits(ctx, numbits);
+ /* Stop once a non-zero bit appears within the peeked bits */
+ if (bitpeeked != 0)
+ break;
+
+ numbits++;
+
+ } while (numbits < 32);
+
+ /* Correct the number of leading zero bits */
+ numbits--;
+
+ if (bitpeeked) {
+ /* read leading zeros and 1-bit */
+ bitread = swsr_read_bits(ctx, numbits + 1);
+ if (bitread != 1)
+ ctx->exception = SWSR_EXCEPT_EXPGOULOMB_ERROR;
+ } else {
+ /*
+ * read 31 zero bits - special case to deal with 31 or 32
+ * leading zeros
+ */
+ bitread = swsr_read_bits(ctx, 31);
+ if (bitread != 0)
+ ctx->exception = SWSR_EXCEPT_EXPGOULOMB_ERROR;
+
+ /*
+ * next 3 bits make either 31 0-bit code:'1xx',
+ * or 32 0-bit code:'010'
+ */
+ /*
+ * only valid 32 0-bit code is:'0..010..0'
+ * and results in 0xffffffff
+ */
+ bitpeeked = swsr_peekbits(ctx, 3);
+
+ if (ctx->exception == SWSR_EXCEPT_NO_EXCEPTION) {
+ if (0x4 & bitpeeked) {
+ bitread = swsr_read_bits(ctx, 1);
+ numbits = 31;
+ } else {
+ if (bitpeeked != 2)
+ ctx->exception = SWSR_EXCEPT_EXPGOULOMB_ERROR;
+
+ bitread = swsr_read_bits(ctx, 3);
+ bitread = swsr_read_bits(ctx, 31);
+ if (bitread != 0)
+ ctx->exception = SWSR_EXCEPT_EXPGOULOMB_ERROR;
+
+ return 0xffffffff;
+ }
+ } else {
+ /* encountered an exception while reading code */
+ /* just return a valid value */
+ return 0;
+ }
+ }
+
+ /* read data bits */
+ bitread = 0;
+ if (numbits)
+ bitread = swsr_read_bits(ctx, numbits);
+
+ /* convert the Exp-Golomb code to a value */
+ setbits = (1 << numbits) - 1;
+ expgoulomb = setbits + bitread;
+ /* Return the value */
+ return expgoulomb;
+}
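+
+/*
+ * Worked example: for the bit string 00101 the loop above counts
+ * numbits = 2 leading zeros, reads the zeros plus the marker bit (value 1)
+ * in one call, then reads the two info bits 01 (== 1);
+ * setbits = (1 << 2) - 1 = 3, so ue(v) = 3 + 1 = 4.
+ */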
+
+unsigned int swsr_read_unsigned_expgoulomb(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ unsigned int value;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ value = swsr_readunsigned_expgoulomb(ctx);
+
+ if (ctx->exception != SWSR_EXCEPT_NO_EXCEPTION)
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return value;
+}
+
+enum swsr_exception swsr_check_exception(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ enum swsr_exception exception;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return (enum swsr_exception)IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ exception = ctx->exception;
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return (enum swsr_exception)IMG_ERROR_NOT_INITIALISED;
+ }
+
+ ctx->exception = SWSR_EXCEPT_NO_EXCEPTION;
+ return exception;
+}
+
+int swsr_check_more_rbsp_data(void *ctx_hndl, unsigned char *more_rbsp_data)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+
+ int rembitsinbyte;
+ unsigned char currentbyte;
+ int numof_aligned_rembits;
+ unsigned long long rest_alignedbytes;
+ unsigned char moredata = 0;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (ctx->input.emprevent != SWSR_EMPREVENT_00000300) {
+ pr_err("SWSR cannot determine More RBSP data for a stream without SWSR_EMPREVENT_00000300: %s\n",
+ __func__);
+ return IMG_ERROR_OPERATION_PROHIBITED;
+ }
+
+ /*
+ * Always fill the output FIFO to ensure the no_moredata flag is set
+ * when there are enough remaining bytes
+ */
+
+ swsr_fill_outputfifo(ctx);
+
+ if (ctx->output.num_bits != 0) {
+ /* Calculate the number of bits in the MS byte */
+ rembitsinbyte = (ctx->output.num_bits & 0x7);
+ if (rembitsinbyte == 0)
+ rembitsinbyte = 8;
+
+ numof_aligned_rembits = (ctx->output.num_bits - rembitsinbyte);
+
+ /* Peek the value of last byte. */
+ currentbyte = swsr_peekbits(ctx, rembitsinbyte);
+ rest_alignedbytes = (ctx->output.fifo >>
+ (64 - ctx->output.num_bits)) &
+ ((1ULL << numof_aligned_rembits) - 1);
+
+ if ((currentbyte == (1 << (rembitsinbyte - 1))) &&
+ (numof_aligned_rembits == 0 || (rest_alignedbytes == 0 &&
+ ((((((unsigned int)numof_aligned_rembits >> 3)) <
+ ctx->input.emprev_seq_len) &&
+ ctx->input.num_bytes == 0) || ctx->input.no_moredata))))
+ moredata = 0;
+ else
+ moredata = 1;
+ }
+
+ *more_rbsp_data = moredata;
+
+ return 0;
+}
+
+unsigned int swsr_read_onebit(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ unsigned int bitread;
+
+ /* Validate input arguments. */
+ if (!ctx_hndl) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ ctx = (struct swsr_context *)ctx_hndl;
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /* Could be optimized with an inlined single-bit version of the call below. */
+ bitread = swsr_read_bits(ctx, 1);
+
+ return bitread;
+}
+
+unsigned int swsr_read_bits(void *ctx_hndl, unsigned int no_bits)
+{
+ struct swsr_context *ctx;
+
+ /* Validate input arguments. */
+ if (!ctx_hndl) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Validate input arguments. */
+ if (!ctx->initialised) {
+ pr_err("%s: Invalid SWSR context\n", __func__);
+ ctx->exception = SWSR_EXCEPT_INVALID_CONTEXT;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+
+ if (no_bits > SWSR_MAX_SYNTAX_LENGTH) {
+ pr_err("Maximum symbol length exceeded\n");
+ ctx->exception = SWSR_EXCEPT_WRONG_CODEWORD_ERROR;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+
+ return swsr_getbits_from_outputfifo(ctx, no_bits, 1);
+}
+
+int swsr_read_signedbits(void *ctx_hndl, unsigned int no_bits)
+{
+ struct swsr_context *ctx;
+ int outbits = 0;
+
+ /* Validate input arguments. */
+ if (!ctx_hndl) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Check if the context has been initialized. */
+ if (!ctx->initialised) {
+ pr_err("%s: Invalid SWSR context\n", __func__);
+ ctx->exception = SWSR_EXCEPT_INVALID_CONTEXT;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+
+ if ((no_bits + 1) > SWSR_MAX_SYNTAX_LENGTH) {
+ pr_err("Maximum symbol length exceeded\n");
+ ctx->exception = SWSR_EXCEPT_WRONG_CODEWORD_ERROR;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+ outbits = swsr_getbits_from_outputfifo(ctx, no_bits, 1);
+
+ return (swsr_getbits_from_outputfifo(ctx, 1, 1)) ? -outbits : outbits;
+}
+
+unsigned int swsr_peekbits(void *ctx_hndl, unsigned int no_bits)
+{
+ struct swsr_context *ctx;
+
+ /* validate input parameters */
+ if (!ctx_hndl) {
+ VDEC_ASSERT(0);
+ return -EIO;
+ }
+
+ ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Validate input arguments. */
+ if (!ctx->initialised) {
+ pr_err("%s: Invalid SWSR context\n", __func__);
+ ctx->exception = SWSR_EXCEPT_INVALID_CONTEXT;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+
+ if (no_bits > SWSR_MAX_SYNTAX_LENGTH) {
+ pr_err("Maximum symbol length exceeded\n");
+ ctx->exception = SWSR_EXCEPT_WRONG_CODEWORD_ERROR;
+ ctx->exception_handler_fxn(ctx->exception, ctx->pexception_param);
+
+ return 0;
+ }
+
+ return swsr_getbits_from_outputfifo(ctx, no_bits, 0);
+}
+
+int swsr_byte_align(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ unsigned int numbits;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ numbits = (ctx->output.num_bits & 0x7);
+ /* Read the required number of bits if not already byte-aligned. */
+ if (numbits != 0)
+ swsr_read_bits(ctx, numbits);
+
+ SWSR_ASSERT((ctx->output.num_bits & 0x7) == 0);
+
+ return 0;
+}
+
+int swsr_get_total_bitsconsumed(void *ctx_hndl, unsigned long long *total_bitsconsumed)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Validate input arguments. */
+ if (!ctx || !total_bitsconsumed) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ *total_bitsconsumed = ctx->output.totalbits_consumed;
+
+ return 0;
+}
+
+int swsr_get_byte_offset_curbuf(void *ctx_hndl, unsigned long long *byte_offset)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ struct swsr_buffer *outbuf;
+
+ /* Validate input arguments. */
+ if (!ctx || !byte_offset) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (ctx->output.num_bits != 0) {
+ pr_err("SWSR output FIFO not empty. First seek to next delimiter: %s\n",
+ __func__);
+ return IMG_ERROR_OPERATION_PROHIBITED;
+ }
+
+ outbuf = lst_first(&ctx->buffer_ctx.used_buffer_list);
+ if (outbuf)
+ *byte_offset = outbuf->num_bytes_read;
+ else
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+ return 0;
+}
+
+static int swsr_update_emprevent(enum swsr_emprevent emprevent,
+ struct swsr_context *ctx)
+{
+ struct swsr_input *input;
+
+ input = &ctx->input;
+
+ input->emprevent = emprevent;
+ switch (input->emprevent) {
+ case SWSR_EMPREVENT_00000300:
+ input->emprev_seq_len = 4;
+ break;
+
+ case SWSR_EMPREVENT_ff00:
+ input->emprev_seq_len = 2;
+ break;
+
+ case SWSR_EMPREVENT_000002:
+ input->emprev_seq_len = 3;
+ break;
+
+ default:
+ input->emprev_seq_len = 0;
+ break;
+ }
+
+ return 0;
+}
+
+int swsr_consume_delim(void *ctx_hndl, enum swsr_emprevent emprevent,
+ unsigned int size_delim_length, unsigned long long *byte_count)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ struct swsr_input *input;
+ unsigned long long delimiter = 0;
+
+ /* Validate input arguments. */
+ if (!ctx || emprevent >= SWSR_EMPREVENT_MAX ||
+ (ctx->input.config.delim_type == SWSR_DELIM_SIZE &&
+ size_delim_length > SWSR_MAX_DELIM_LENGTH)) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (ctx->input.config.delim_type == SWSR_DELIM_SIZE &&
+ size_delim_length == 0 && !byte_count) {
+ pr_err("Byte count value must be provided when size delimiter is zero length: %s\n",
+ __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ input = &ctx->input;
+
+ /*
+ * Ensure that the input is at a delimiter since emulation prevention
+ * removal will not have spanned into this next unit.
+ * This allows emulation prevention detection modes to be changed.
+ * Now check for delimiter.
+ */
+ input->delim_found = swsr_checkfor_delimiter(ctx);
+
+ if (!input->delim_found)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ /* Output bitstream FIFOs should be empty. */
+ /* NOTE: flush output queue using seek function. */
+ SWSR_ASSERT(ctx->output.num_bits == 0);
+
+ /* Only update the delimiter length for size delimiters. */
+ if (input->config.delim_type == SWSR_DELIM_SIZE)
+ input->config.delim_length = size_delim_length;
+
+ /* Update the emulation prevention detection/removal scheme */
+ swsr_update_emprevent(emprevent, ctx);
+
+ /*
+ * Peek at the NAL type and return in callback only
+ * when delimiter is in bitstream.
+ */
+ if (input->config.delim_length) {
+ unsigned int shift;
+ unsigned char naltype;
+
+ /*
+ * Peek at the next 8-bits after the delimiter that
+ * resides in internal FIFO.
+ */
+ shift = SWSR_OUTPUT_FIFO_LENGTH -
+ (input->config.delim_length + SWSR_NALTYPE_LENGTH);
+ naltype = (input->fifo >> shift) & NBIT_8BYTE_MASK(SWSR_NALTYPE_LENGTH);
+
+ /*
+ * Notify caller of NAL type so that bitstream segmentation
+ * can take place before the delimiter is consumed
+ */
+ ctx->buffer_ctx.cb_fxn(SWSR_EVENT_DELIMITER_NAL_TYPE, ctx->buffer_ctx.cb_param,
+ naltype, NULL, NULL);
+ }
+
+ /*
+ * Clear the delimiter found flag and reset bytes read to allow
+ * reading of data from input FIFO.
+ */
+ input->delim_found = 0;
+
+ if (input->config.delim_length != 0) {
+ unsigned long long scpvalue = input->config.scp_value;
+ unsigned int i;
+ unsigned char byte = 0;
+
+ /*
+ * Ensure that delimiter is not detected while delimiter
+ * is read.
+ */
+ if (input->config.delim_type == SWSR_DELIM_SIZE) {
+ input->bytes_read_since_delim = 0;
+ input->byte_count = (input->config.delim_length + 7) / 8;
+ } else if (input->config.delim_type == SWSR_DELIM_SCP) {
+ input->config.scp_value = 0xdeadbeefdeadbeefUL;
+ }
+
+ /*
+ * Fill output FIFO only with bytes at least partially
+ * used for delimiter.
+ */
+ for (i = 0; i < ((input->config.delim_length + 7) / 8); i++) {
+ swsr_readbyte_from_inputfifo(ctx, &byte);
+
+ ctx->output.fifo |= ((unsigned long long)byte <<
+ (SWSR_OUTPUT_FIFO_LENGTH - 8 - ctx->output.num_bits));
+ ctx->output.num_bits += 8;
+ }
+
+ /*
+ * Read delimiter from output FIFO leaving any remaining
+ * non-byte-aligned bits behind.
+ */
+ delimiter = swsr_getbits_from_outputfifo(ctx, input->config.delim_length, 1);
+
+ /* Restore SCP value. */
+ if (input->config.delim_type == SWSR_DELIM_SCP)
+ input->config.scp_value = scpvalue;
+ } else {
+ /*
+ * For size delimited bitstreams without a delimiter use
+ * the byte count provided.
+ */
+ SWSR_ASSERT(*byte_count > 0);
+ delimiter = *byte_count;
+ SWSR_ASSERT(input->config.delim_type == SWSR_DELIM_SIZE);
+ }
+
+ if (input->config.delim_type == SWSR_DELIM_SCP)
+ SWSR_ASSERT((delimiter & NBIT_8BYTE_MASK(input->config.delim_length)) ==
+ input->config.scp_value);
+ else if (input->config.delim_type == SWSR_DELIM_SIZE) {
+ input->byte_count = delimiter;
+
+ /* Return byte count if argument provided. */
+ if (byte_count)
+ *byte_count = input->byte_count;
+ }
+
+ input->bytes_read_since_delim = 0;
+ {
+ struct swsr_buffer *buffer = input->buf;
+
+ if (!buffer)
+ buffer = lst_first(&ctx->buffer_ctx.used_buffer_list);
+ if (buffer)
+ input->delimited_unit_start_offset = (long)buffer->num_bytes_read;
+ else
+ input->delimited_unit_start_offset = 0;
+ }
+ input->delimited_unit_size = 0;
+ input->delimunit_bitofst = 0;
+
+ input->no_moredata = 0;
+
+ return 0;
+}
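+
+/*
+ * Usage sketch (illustrative only): a typical unit-parsing loop built from
+ * the calls in this file, assuming an SCP-delimited (Annex B style) stream
+ * whose shift register was configured elsewhere.
+ *
+ *   while (swsr_check_delim_or_eod(ctx) == SWSR_FOUND_DELIM) {
+ *           swsr_consume_delim(ctx, SWSR_EMPREVENT_00000300, 0, NULL);
+ *           ... parse the unit with swsr_read_bits() and friends ...
+ *           swsr_seek_delim_or_eod(ctx);
+ *   }
+ */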
+
+enum swsr_found swsr_seek_delim_or_eod(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ enum swsr_found found = SWSR_FOUND_DATA;
+ unsigned char byte;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return (enum swsr_found)IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return (enum swsr_found)IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /* Read the residual contents of the output FIFO */
+ swsr_byte_align(ctx);
+ while (ctx->output.num_bits > 0) {
+ SWSR_ASSERT((ctx->output.num_bits & 0x7) == 0);
+ swsr_read_bits(ctx, 8);
+ }
+ SWSR_ASSERT(ctx->output.num_bits == 0);
+ if (ctx->input.config.delim_type == SWSR_DELIM_SCP) {
+ struct swsr_input *input = &ctx->input;
+ struct swsr_output *output = &ctx->output;
+
+ while (found == SWSR_FOUND_DATA) {
+ unsigned char *offset;
+ unsigned int delimlength_inbytes;
+ unsigned char *startoffset;
+ unsigned long long mask;
+ unsigned long long scp;
+ unsigned char scpfirstbyte;
+
+ /*
+ * ensure that all the data in the input FIFO comes
+ * from the current buffer
+ */
+ if (input->buf && input->buf->byte_offset <= input->num_bytes) {
+ found = swsr_consumebyte_from_inputfifo(ctx, &byte);
+ continue;
+ }
+
+ /* consume remaining bytes from the FIFO */
+ if (!input->buf) {
+ found = swsr_consumebyte_from_inputfifo(ctx, &byte);
+ continue;
+ }
+
+ delimlength_inbytes = (input->config.delim_length + 7) / 8;
+
+ /*
+ * Make the mask and the SCP value byte-aligned to
+ * speed things up
+ */
+ mask = ((1UL << input->config.delim_length) - 1) <<
+ (8 * delimlength_inbytes - input->config.delim_length);
+ scp = input->config.scp_value <<
+ (8 * delimlength_inbytes - input->config.delim_length);
+ scpfirstbyte = (scp >> 8 * (delimlength_inbytes - 1)) & 0xFF;
+
+ /* rollback the input FIFO */
+ input->buf->byte_offset -= input->num_bytes;
+ input->buf->num_bytes_read -= input->num_bytes;
+ input->bitstream_offset -= input->num_bytes;
+ input->num_bytes = 0;
+ input->fifo = 0;
+
+ startoffset = input->buf->data + input->buf->byte_offset;
+
+ while (found == SWSR_FOUND_DATA) {
+ offset = memchr(input->buf->data + input->buf->byte_offset,
+ scpfirstbyte,
+ input->buf->num_bytes -
+ (input->buf->byte_offset + delimlength_inbytes -
+ 1));
+
+ if (offset) {
+ unsigned int i;
+
+ /*
+ * load bytes that might be SCP into
+ * the FIFO
+ */
+ for (i = 0; i < delimlength_inbytes; i++) {
+ input->fifo <<= 8;
+ input->fifo |= offset[i];
+ }
+
+ input->buf->byte_offset = offset - input->buf->data;
+
+ if ((input->fifo & mask) == scp) {
+ unsigned long long bytesread = offset
+ - startoffset;
+
+ /*
+ * SCP found; fill the rest of
+ * the FIFO.
+ */
+ for (i = delimlength_inbytes;
+ i < SWSR_INPUT_FIFO_LENGTH &&
+ input->buf->byte_offset + i <
+ input->buf->num_bytes;
+ i++) {
+ input->fifo <<= 8;
+ input->fifo |= offset[i];
+ }
+
+ input->fifo <<= (SWSR_INPUT_FIFO_LENGTH - i) * 8;
+
+ input->bytes_for_next_sequ = 0;
+ input->num_bytes = i;
+
+ input->buf->byte_offset += i;
+
+ input->buf->num_bytes_read = offset -
+ input->buf->data;
+ input->bitstream_offset += bytesread + i;
+
+ output->totalbits_consumed += bytesread * 8;
+
+ input->delimunit_bitofst += bytesread * 8;
+
+ output->num_bits = 0;
+ output->fifo = 0;
+
+ SWSR_ASSERT(swsr_checkfor_delimiter(ctx));
+
+ found = SWSR_FOUND_DELIM;
+ } else {
+ input->buf->byte_offset++;
+ }
+ } else {
+ /* End of the current buffer */
+ unsigned int bytesread = input->buf->num_bytes -
+ (startoffset - input->buf->data);
+ unsigned int i;
+
+ /* update offsets */
+ input->bitstream_offset += bytesread;
+ output->totalbits_consumed += bytesread * 8;
+ input->delimunit_bitofst += bytesread * 8;
+
+ input->buf->byte_offset = input->buf->num_bytes;
+ input->buf->num_bytes_read = input->buf->num_bytes -
+ (delimlength_inbytes - 1);
+
+ /* load remaining bytes to FIFO */
+ offset = input->buf->data +
+ input->buf->num_bytes -
+ (delimlength_inbytes - 1);
+ for (i = 0; i < delimlength_inbytes - 1;
+ i++) {
+ input->fifo <<= 8;
+ input->fifo |= offset[i];
+ }
+
+ input->fifo <<= (SWSR_INPUT_FIFO_LENGTH - i) * 8;
+
+ input->bytes_for_next_sequ = 0;
+ input->num_bytes = delimlength_inbytes - 1;
+
+ output->num_bits = 0;
+ output->fifo = 0;
+
+ /*
+ * Consume a few bytes from the next
+ * buffer to check whether there is an
+ * SCP on the buffer boundary.
+ */
+ for (i = 0;
+ i < delimlength_inbytes && found == SWSR_FOUND_DATA;
+ i++) {
+ found = swsr_consumebyte_from_inputfifo(ctx, &byte);
+ SWSR_ASSERT(found != SWSR_FOUND_NONE);
+ }
+
+ break;
+ }
+ }
+ }
+ } else {
+ /*
+ * Extract data from the input FIFO until no more data is found,
+ * either because we have run out or an SCP has been detected.
+ */
+ while (found == SWSR_FOUND_DATA) {
+ found = swsr_consumebyte_from_inputfifo(ctx, &byte);
+ SWSR_ASSERT(found != SWSR_FOUND_NONE);
+ }
+ }
+
+ /*
+ * When the end of data has been reached there should be no
+ * more data in the input FIFO.
+ */
+ if (found == SWSR_FOUND_EOD)
+ SWSR_ASSERT(ctx->input.num_bytes == 0);
+
+ SWSR_ASSERT(found != SWSR_FOUND_DATA);
+ return found;
+}
+
+enum swsr_found swsr_check_delim_or_eod(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ enum swsr_found found = SWSR_FOUND_DATA;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+
+ return (enum swsr_found)IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+
+ return (enum swsr_found)IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /*
+ * End of data when all FIFOs are empty and there is nothing left to
+ * read from the input buffers.
+ */
+ if (ctx->output.num_bits == 0 && ctx->input.num_bytes == 0 &&
+ ctx->input.bitstream_offset >= ctx->input.bitstream_size)
+ found = SWSR_FOUND_EOD;
+ else if (ctx->output.num_bits == 0 && swsr_checkfor_delimiter(ctx)) {
+ /*
+ * Output queue is empty and delimiter is at the head of
+ * input queue.
+ */
+ found = SWSR_FOUND_DELIM;
+ }
+
+ return found;
+}
+
+int swsr_start_bitstream(void *ctx_hndl, const struct swsr_config *config,
+ unsigned long long bitstream_size, enum swsr_emprevent emprevent)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ struct swsr_buffer *buffer;
+ unsigned int result;
+
+ /* Validate input arguments. */
+ if (!ctx || !config || config->delim_type >= SWSR_DELIM_MAX ||
+ config->delim_length > SWSR_MAX_DELIM_LENGTH ||
+ config->scp_value > NBIT_8BYTE_MASK(config->delim_length) ||
+ emprevent >= SWSR_EMPREVENT_MAX) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /* Move all used buffers into free list */
+ buffer = lst_removehead(&ctx->buffer_ctx.used_buffer_list);
+ while (buffer) {
+ lst_add(&ctx->buffer_ctx.free_buffer_list, buffer);
+ buffer = lst_removehead(&ctx->buffer_ctx.used_buffer_list);
+ }
+
+ /* Clear all the shift-register state (except config) */
+ memset(&ctx->input, 0, sizeof(ctx->input));
+ memset(&ctx->output, 0, sizeof(ctx->output));
+
+ /* Update input FIFO configuration */
+ ctx->input.bitstream_size = bitstream_size;
+ ctx->input.config = *config;
+ result = swsr_update_emprevent(emprevent, ctx);
+ SWSR_ASSERT(result == 0);
+
+ /*
+ * Signal delimiter found to ensure that no data is read out of
+ * the input FIFO while fetching the first bitstream data into
+ * the input FIFO.
+ */
+ ctx->input.delim_found = 1;
+ result = swsr_fill_outputfifo(ctx);
+ SWSR_ASSERT(result == 0);
+
+ /* Now check for delimiter. */
+ ctx->input.delim_found = swsr_checkfor_delimiter(ctx);
+
+ return 0;
+}
+
+int swsr_deinitialise(void *ctx_hndl)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ struct swsr_buffer *buffer;
+
+ /* Validate input arguments. */
+ if (!ctx) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ /* Free all used buffer containers */
+ buffer = lst_removehead(&ctx->buffer_ctx.used_buffer_list);
+ while (buffer) {
+ kfree(buffer);
+ buffer = lst_removehead(&ctx->buffer_ctx.used_buffer_list);
+ }
+
+ /* Free all free buffer containers. */
+ buffer = lst_removehead(&ctx->buffer_ctx.free_buffer_list);
+ while (buffer) {
+ kfree(buffer);
+ buffer = lst_removehead(&ctx->buffer_ctx.free_buffer_list);
+ }
+
+ ctx->initialised = 0;
+ kfree(ctx);
+
+ return 0;
+}
+
+int swsr_initialise(swsr_except_handler_fxn exception_handler_fxn,
+ void *exception_cbparam, swsr_callback_fxn callback_fxn,
+ void *cb_param, void **ctx_hndl)
+{
+ struct swsr_context *ctx;
+ struct swsr_buffer *buffer;
+ unsigned int i;
+ unsigned int result;
+
+ /* Validate input arguments. */
+ if (!exception_handler_fxn || !exception_cbparam || !callback_fxn ||
+ !cb_param || !ctx_hndl) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Allocate and initialise shift-register context */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ VDEC_ASSERT(0);
+ return -EINVAL;
+ }
+
+ /* Setup shift-register context */
+ ctx->exception_handler_fxn = exception_handler_fxn;
+ ctx->pexception_param = exception_cbparam;
+
+ ctx->buffer_ctx.cb_fxn = callback_fxn;
+ ctx->buffer_ctx.cb_param = cb_param;
+
+ /*
+ * Allocate a new buffer container for each byte in internal storage.
+ * This is the theoretical maximum number of buffers in the SWSR at
+ * any one time.
+ */
+ for (i = 0; i < SWSR_INPUT_FIFO_LENGTH + (SWSR_OUTPUT_FIFO_LENGTH / 8);
+ i++) {
+ /* Allocate a buffer container */
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ SWSR_ASSERT(buffer);
+ if (!buffer) {
+ result = IMG_ERROR_OUT_OF_MEMORY;
+ goto error;
+ }
+
+ /* Add container to free list */
+ lst_add(&ctx->buffer_ctx.free_buffer_list, buffer);
+ }
+
+ SWSR_ASSERT(SWSR_MAX_SYNTAX_LENGTH <= (sizeof(unsigned int) * 8));
+
+ ctx->initialised = 1;
+ *ctx_hndl = ctx;
+
+ return 0;
+error:
+ buffer = lst_removehead(&ctx->buffer_ctx.free_buffer_list);
+ while (buffer) {
+ kfree(buffer);
+ buffer = lst_removehead(&ctx->buffer_ctx.free_buffer_list);
+ }
+ kfree(ctx);
+
+ return result;
+}
+
+static unsigned char swsr_israwdata_extraction_supported(struct swsr_context *ctx)
+{
+ /*
+ * For now only H.264/HEVC-like 0x000001 SCP-delimited
+ * bitstreams are supported.
+ */
+ if (ctx->input.config.delim_type == SWSR_DELIM_SCP &&
+ ctx->input.config.delim_length == (3 * 8) &&
+ ctx->input.config.scp_value == 0x000001)
+ return 1;
+
+ return 0;
+}
+
+static int swsr_getcurrent_delimited_unitsize(struct swsr_context *ctx, unsigned int *size)
+{
+ struct swsr_buffer *buf;
+
+ buf = ctx->input.buf;
+ if (!buf)
+ buf = lst_first(&ctx->buffer_ctx.used_buffer_list);
+
+ if (buf && ctx->input.delimited_unit_start_offset >= 0 &&
+ ctx->input.delimited_unit_start_offset < buf->num_bytes) {
+ unsigned long long bufptr =
+ (unsigned long long)ctx->input.delimited_unit_start_offset;
+ unsigned int zeros = 0;
+
+ /* Scan the current buffer for the next SCP. */
+ while (1) {
+ /* Look for two consecutive 0 bytes. */
+ while ((bufptr < buf->num_bytes) && (zeros < 2)) {
+ if (buf->data[bufptr++] == 0)
+ zeros++;
+ else
+ zeros = 0;
+ }
+ /*
+ * If we're not at the end of the buffer already and
+ * the next byte is 1, we've got it. If we're at the
+ * end of the buffer, just assume we've got it too,
+ * as we do not support buffer-spanning units.
+ */
+ if (bufptr < buf->num_bytes && buf->data[bufptr] == 1) {
+ break;
+ } else if (bufptr == buf->num_bytes) {
+ zeros = 0;
+ break;
+ } else {
+ /*
+ * Finally just decrease the number of 0s found
+ * already and go on scanning.
+ */
+ zeros = 1;
+ }
+ }
+ /* Calculate the unit size. */
+ ctx->input.delimited_unit_size = (unsigned int)(bufptr -
+ (unsigned long long)ctx->input.delimited_unit_start_offset) - zeros;
+ *size = ctx->input.delimited_unit_size;
+ } else {
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ return 0;
+}
+
+int swsr_get_current_delimited_unitsize(void *ctx_hndl, unsigned int *size)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Validate input arguments. */
+ if (!ctx || !size) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (!swsr_israwdata_extraction_supported(ctx))
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ return swsr_getcurrent_delimited_unitsize(ctx, size);
+}
+
+int swsr_get_current_delimited_unit(void *ctx_hndl, unsigned char *data, unsigned int *size)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+ struct swsr_buffer *buf;
+ unsigned int copysize;
+
+ /* Validate input arguments. */
+ if (!ctx || !data || !size || *size == 0) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (!swsr_israwdata_extraction_supported(ctx))
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ buf = ctx->input.buf;
+ if (!buf)
+ buf = lst_first(&ctx->buffer_ctx.used_buffer_list);
+
+ if (buf && ctx->input.delimited_unit_start_offset >= 0) {
+ if (ctx->input.delimited_unit_size == 0)
+ swsr_getcurrent_delimited_unitsize(ctx, &copysize);
+
+ if (ctx->input.delimited_unit_size < *size)
+ *size = ctx->input.delimited_unit_size;
+
+ memcpy(data, buf->data + ctx->input.delimited_unit_start_offset, *size);
+ } else {
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ return 0;
+}
+
+int swsr_get_current_delimited_unit_bit_offset(void *ctx_hndl, unsigned int *bit_offset)
+{
+ struct swsr_context *ctx = (struct swsr_context *)ctx_hndl;
+
+ /* Validate input arguments. */
+ if (!ctx || !bit_offset) {
+ pr_err("Invalid arguments to function: %s\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!ctx->initialised) {
+ pr_err("SWSR not yet initialised: %s\n", __func__);
+ return IMG_ERROR_NOT_INITIALISED;
+ }
+
+ if (!swsr_israwdata_extraction_supported(ctx))
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ if (ctx->input.delimited_unit_start_offset >= 0)
+ *bit_offset = ctx->input.delimunit_bitofst;
+
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/swsr.h b/drivers/media/platform/vxe-vxd/decoder/swsr.h
new file mode 100644
index 000000000000..5c27e8c41240
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/swsr.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Software Shift Register Access functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _SWSR_H
+#define _SWSR_H
+
+#include <linux/types.h>
+
+#include "img_errors.h"
+#include "lst.h"
+
+#define SWSR_MAX_DELIM_LENGTH (8 * 8)
+
+enum swsr_exception {
+ SWSR_EXCEPT_NO_EXCEPTION = 0x00,
+ SWSR_EXCEPT_ENCAPULATION_ERROR1,
+ SWSR_EXCEPT_ENCAPULATION_ERROR2,
+ SWSR_EXCEPT_ACCESS_INTO_SCP,
+ SWSR_EXCEPT_ACCESS_BEYOND_EOD,
+ SWSR_EXCEPT_EXPGOULOMB_ERROR,
+ SWSR_EXCEPT_WRONG_CODEWORD_ERROR,
+ SWSR_EXCEPT_NO_SCP,
+ SWSR_EXCEPT_INVALID_CONTEXT,
+ SWSR_EXCEPT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum swsr_cbevent {
+ SWSR_EVENT_INPUT_BUFFER_START = 0,
+ SWSR_EVENT_OUTPUT_BUFFER_END,
+ SWSR_EVENT_DELIMITER_NAL_TYPE,
+ SWSR_EVENT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum swsr_found {
+ SWSR_FOUND_NONE = 0,
+ SWSR_FOUND_EOD,
+ SWSR_FOUND_DELIM,
+ SWSR_FOUND_DATA,
+ SWSR_FOUND_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum swsr_delim_type {
+ SWSR_DELIM_NONE = 0,
+ SWSR_DELIM_SCP,
+ SWSR_DELIM_SIZE,
+ SWSR_DELIM_MAX,
+ SWSR_DELIM_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum swsr_emprevent {
+ SWSR_EMPREVENT_NONE = 0x00,
+ SWSR_EMPREVENT_00000300,
+ SWSR_EMPREVENT_ff00,
+ SWSR_EMPREVENT_000002,
+ SWSR_EMPREVENT_MAX,
+ SWSR_EMPREVENT_FORCE32BITS = 0x7FFFFFFFU
+};
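+
+/*
+ * Note: the SWSR_EMPREVENT_* values name the byte patterns that the
+ * emulation prevention schemes detect; SWSR_EMPREVENT_00000300, for
+ * instance, corresponds to the H.264/HEVC scheme in which an emulation
+ * prevention byte (0x03) following two zero bytes is stripped.
+ */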
+
+struct swsr_config {
+ enum swsr_delim_type delim_type;
+ unsigned int delim_length;
+ unsigned long long scp_value;
+};
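+
+/*
+ * Example (illustrative only): a configuration for H.264/HEVC Annex-B
+ * style bitstreams, matching the values accepted by the raw-data
+ * extraction support check in swsr.c:
+ *
+ *   static const struct swsr_config annexb_config = {
+ *           .delim_type   = SWSR_DELIM_SCP,
+ *           .delim_length = 3 * 8,
+ *           .scp_value    = 0x000001,
+ *   };
+ */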
+
+/*
+ * This is the function prototype for the caller-supplied exception handler.
+ *
+ * NOTE: The internally recorded exception is reset to #SWSR_EXCEPT_NO_EXCEPTION
+ * on return from SWSR_CheckException() or a call to the caller-supplied
+ * exception handler (see #SWSR_pfnExceptHandler).
+ *
+ * NOTE: By defining an exception handler the caller can handle Shift Register
+ * errors as they occur - for example, using a structured exception mechanism
+ * such as setjmp/longjmp.
+ */
+typedef void (*swsr_except_handler_fxn)(enum swsr_exception exception,
+ void *callback_param);
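+
+/*
+ * Example (illustrative sketch only, not part of this API): a handler
+ * that unwinds to a caller-defined setjmp() point, as suggested above.
+ * The jmp_buf name is hypothetical.
+ *
+ *   static jmp_buf parse_env;
+ *
+ *   static void example_except_handler(enum swsr_exception exception,
+ *                                      void *callback_param)
+ *   {
+ *           longjmp(parse_env, (int)exception);
+ *   }
+ */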
+
+/*
+ * This is the function prototype for the caller-supplied callback used to
+ * retrieve the data from the application.
+ */
+typedef void (*swsr_callback_fxn)(enum swsr_cbevent event,
+ void *priv_data,
+ unsigned char nal_type, unsigned char **data_buffer,
+ unsigned long long *data_size);
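+
+/*
+ * Example (illustrative sketch only): a callback that hands the shift
+ * register a single caller-owned buffer on request; the stream context
+ * type and field names are hypothetical.
+ *
+ *   static void example_callback(enum swsr_cbevent event, void *priv_data,
+ *                                unsigned char nal_type,
+ *                                unsigned char **data_buffer,
+ *                                unsigned long long *data_size)
+ *   {
+ *           struct example_stream *strm = priv_data;
+ *
+ *           if (event == SWSR_EVENT_INPUT_BUFFER_START) {
+ *                   *data_buffer = strm->buf;
+ *                   *data_size = strm->buf_size;
+ *           }
+ *   }
+ */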
+
+int swsr_get_total_bitsconsumed(void *context, unsigned long long *total_bitsconsumed);
+
+/*
+ * This function is used to return the offset into the current bitstream buffer
+ * on the shift-register output FIFO. Call after #SWSR_SeekDelimOrEOD to
+ * determine the offset of a delimiter.
+ */
+int swsr_get_byte_offset_curbuf(void *context, unsigned long long *byte_offset);
+
+/*
+ * This function is used to read a signed Exp-Golomb value from the Shift
+ * Register.
+ *
+ * NOTE: If this function is used to attempt to read into a Start-Code-Prefix
+ * or beyond the End-Of-Data then an exception is generated which can be
+ * handled by the caller-supplied exception handler (see
+ * #SWSR_pfnExceptionHandler). If no exception handler has been supplied (or the
+ * exception handler returns) then the exception is recorded and can be obtained
+ * using SWSR_CheckException(). In this event the function returns 0.
+ */
+int swsr_read_signed_expgoulomb(void *context);
+
+/*
+ * This function is used to read an unsigned Exp-Golomb value from the Shift
+ * Register.
+ *
+ * NOTE: If this function is used to attempt to read into a Start-Code-Prefix
+ * or beyond the End-Of-Data then an exception is generated which can be
+ * handled by the caller-supplied exception handler (see
+ * #SWSR_pfnExceptionHandler). If no exception handler has been supplied (or the
+ * exception handler returns) then the exception is recorded and can be obtained
+ * using SWSR_CheckException(). In this event the function returns 0.
+ */
+unsigned int swsr_read_unsigned_expgoulomb(void *context);
+
+/*
+ * This function is used to check for exceptions.
+ *
+ * NOTE: The internally recorded exception is reset to #SWSR_EXCEPT_NO_EXCEPTION
+ * on return from SWSR_CheckException() or a call to the caller supplied
+ * exception handler see #SWSR_pfnExceptionHandler.
+ */
+enum swsr_exception swsr_check_exception(void *context);
+
+/*
+ * This function is used to check for bitstream data with
+ * SWSR_EMPREVENT_00000300 whether more RBSP data is present.
+ */
+int swsr_check_more_rbsp_data(void *context, unsigned char *more_rbsp_data);
+
+/*
+ * This function is used to read a single bit from the Shift Register.
+ *
+ * NOTE: If this function is used to attempt to read into a Start-Code-Prefix
+ * or beyond the End-Of-Data then an exception is generated which can be
+ * handled by the caller-supplied exception handler (see
+ * #SWSR_pfnExceptionHandler). If no exception handler has been supplied (or the
+ * exception handler returns) then the exception is recorded and can be obtained
+ * using SWSR_CheckException(). In this event the function returns 0.
+ */
+unsigned int swsr_read_onebit(void *context);
+
+/*
+ * This function is used to consume a number of bits from the Shift Register.
+ *
+ * NOTE: If this function is used to attempt to read into a Start-Code-Prefix
+ * or beyond the End-Of-Data then an exception is generated which can be
+ * handled by the caller-supplied exception handler (see
+ * #SWSR_pfnExceptionHandler). If no exception handler has been supplied (or the
+ * exception handler returns) then the exception is recorded and can be obtained
+ * using SWSR_CheckException(). In this event the function returns 0.
+ */
+unsigned int swsr_read_bits(void *context, unsigned int no_bits);
+
+int swsr_read_signedbits(void *context, unsigned int no_bits);
+
+/*
+ * This function is used to peek at a number of bits from the Shift Register.
+ * The bits are not consumed.
+ *
+ * NOTE: If this function is used to attempt to read into a Start-Code-Prefix
+ * or beyond the End-Of-Data then an exception is generated which can be
+ * handled by the caller-supplied exception handler (see
+ * #SWSR_pfnExceptionHandler). If no exception handler has been supplied (or
+ * the exception handler returns) then the exception is recorded and can be
+ * obtained using SWSR_CheckException(). In this event the function returns 0.
+ */
+unsigned int swsr_peekbits(void *context, unsigned int no_bits);
+
+/*
+ * Makes the shift-register output byte-aligned by consuming the remainder of
+ * the current partially read byte.
+ */
+int swsr_byte_align(void *context);
+
+/*
+ * Consume the next delimiter whose length should be specified if delimiter type
+ * is #SWSR_DELIM_SIZE. The emulation prevention detection/removal scheme can
+ * also be specified for this and subsequent units.
+ *
+ * Consumes the unit delimiter from the bitstream buffer. The delimiter type
+ * depends upon the bitstream format.
+ */
+int swsr_consume_delim(void *context,
+ enum swsr_emprevent emprevent,
+ unsigned int size_delim_length,
+ unsigned long long *byte_count);
+
+/*
+ * Seek for the next delimiter or end of bitstream data if no delimiter is
+ * found.
+ */
+enum swsr_found swsr_seek_delim_or_eod(void *context);
+
+/*
+ * Check if shift-register is at a delimiter or end of data.
+ */
+enum swsr_found swsr_check_delim_or_eod(void *context);
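+
+/*
+ * Typical usage (illustrative sketch only, error handling omitted): walk
+ * the bitstream one delimited unit at a time.
+ *
+ *   swsr_start_bitstream(ctx, &config, bitstream_size, emprevent);
+ *   while (swsr_check_delim_or_eod(ctx) == SWSR_FOUND_DELIM) {
+ *           swsr_consume_delim(ctx, emprevent, 0, NULL);
+ *           ...read the unit with swsr_read_bits()/swsr_read_onebit()...
+ *           swsr_seek_delim_or_eod(ctx);
+ *   }
+ */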
+
+/*
+ * This function automatically fetches the first bitstream buffer (using
+ * callback with event type #SWSR_EVENT_INPUT_BUFFER_START) before returning.
+ */
+int swsr_start_bitstream(void *context,
+ const struct swsr_config *pconfig,
+ unsigned long long bitstream_size,
+ enum swsr_emprevent emprevent);
+
+/*
+ * This function is used to de-initialise the Shift Register.
+ */
+int swsr_deinitialise(void *context);
+
+/*
+ * This function is used to initialise the Shift Register.
+ *
+ * NOTE: If no exception handler is provided (pfnExceptionHandler == IMG_NULL)
+ * then the caller must check for exceptions using the function
+ * SWSR_CheckException().
+ *
+ * NOTE: If pui8RbduBuffer is IMG_NULL then the bit stream is not encapsulated
+ * so the Shift Register needn't perform any de-encapsulation. However,
+ * if this is not IMG_NULL then, from time to time, the Shift Register APIs
+ * will de-encapsulate portions of the bit stream into this intermediate buffer
+ * - the larger the buffer, the less frequently the de-encapsulation function
+ * needs to be called.
+ */
+int swsr_initialise(swsr_except_handler_fxn exception_handler_fxn,
+ void *exception_cbparam,
+ swsr_callback_fxn callback_fxn,
+ void *cb_param,
+ void **context);
+
+/*
+ * This function is used to return the size in bytes of the delimited unit
+ * that's currently being processed.
+ *
+ * NOTE: This size includes all the emulation prevention bytes present
+ * in the delimited unit.
+ */
+int swsr_get_current_delimited_unitsize(void *context, unsigned int *size);
+
+/*
+ * This function is used to copy the delimited unit that's currently being
+ * processed to the provided buffer.
+ *
+ * NOTE: This delimited unit includes all the emulation prevention bytes present
+ * in it.
+ */
+int swsr_get_current_delimited_unit(void *context, unsigned char *data, unsigned int *size);
+
+/*
+ * This function is used to return the bit offset the shift register is at
+ * in processing the current delimited unit.
+ *
+ * NOTE: This offset does not count emulation prevention bytes.
+ */
+int swsr_get_current_delimited_unit_bit_offset(void *context, unsigned int *bit_offset);
+
+#endif /* _SWSR_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/translation_api.c b/drivers/media/platform/vxe-vxd/decoder/translation_api.c
new file mode 100644
index 000000000000..af8924bb5173
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/translation_api.c
@@ -0,0 +1,1725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VDECDD translation APIs.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+/* As of now we are defining HAS_H264 */
+#define HAS_H264
+#define VDEC_USE_PVDEC
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "fw_interface.h"
+#ifdef HAS_H264
+#include "h264fw_data.h"
+#endif /* HAS_H264 */
+#include "hw_control.h"
+#include "img_errors.h"
+#include "img_msvdx_cmds.h"
+#include "img_msvdx_vec_regs.h"
+#ifdef VDEC_USE_PVDEC
+#include "pvdec_int.h"
+#include "img_pvdec_core_regs.h"
+#endif
+#include "img_video_bus4_mmu_regs.h"
+#include "lst.h"
+#include "reg_io2.h"
+#include "rman_api.h"
+#include "translation_api.h"
+#include "vdecdd_defs.h"
+#include "vdecdd_utils.h"
+#include "vdecfw_share.h"
+#include "vxd_int.h"
+#include "vxd_props.h"
+
+#ifdef HAS_HEVC
+#include "hevcfw_data.h"
+#include "pvdec_entropy_regs.h"
+#include "pvdec_vec_be_regs.h"
+#endif
+
+#ifdef HAS_JPEG
+#include "jpegfw_data.h"
+#endif /* HAS_JPEG */
+
+#define NO_VALUE 0
+
+/*
+ * Discontinuity in layout of VEC_VLC_TABLE* registers.
+ * Address of VEC_VLC_TABLE_ADDR16 does not immediately follow
+ * VEC_VLC_TABLE_ADDR15, see TRM.
+ */
+#define VEC_VLC_TABLE_ADDR_PT1_SIZE 16 /* in 32-bit words */
+#define VEC_VLC_TABLE_ADDR_DISCONT (VEC_VLC_TABLE_ADDR_PT1_SIZE * \
+ PVDECIO_VLC_IDX_ADDR_PARTS)
+
+/*
+ * Imply the standard from OperatingMode (this can now be done by
+ * VXD_GetCodecMode). As of now only H.264 is supported throughout
+ * this file.
+ */
+#define CODEC_MODE_JPEG 0x0
+#define CODEC_MODE_H264 0x1
+#define CODEC_MODE_REAL8 0x8
+#define CODEC_MODE_REAL9 0x9
+
+/*
+ * This enum defines values of ENTDEC_BE_MODE field of VEC_ENTDEC_BE_CONTROL
+ * register and ENTDEC_FE_MODE field of VEC_ENTDEC_FE_CONTROL register.
+ */
+enum decode_mode {
+ /* JPEG */
+ VDEC_ENTDEC_MODE_JPEG = 0x0,
+ /* H264 (MPEG4/AVC) */
+ VDEC_ENTDEC_MODE_H264 = 0x1,
+ VDEC_ENTDEC_MODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This has all that it needs to translate a Stream Unit for a picture into a
+ * transaction.
+ */
+static int translation_set_buffer(struct vdecdd_ddpict_buf *picbuf,
+ struct vdecfw_image_buffer *image_buffer)
+{
+ unsigned int i;
+
+ for (i = 0; i < VDEC_PLANE_MAX; i++) {
+ image_buffer->byte_offset[i] =
+ (unsigned int)GET_HOST_ADDR(&picbuf->pict_buf->ddbuf_info) +
+ picbuf->rend_info.plane_info[i].offset;
+ pr_debug("%s image_buffer->byte_offset[%d] = 0x%x\n",
+ __func__, i, image_buffer->byte_offset[i]);
+ }
+ return IMG_SUCCESS;
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function translation_hevc_header
+ */
+static int translation_hevc_header(struct vdecdd_picture *picture,
+ struct dec_decpict *dec_pict,
+ struct hevcfw_headerdata *header_data)
+{
+ translation_set_buffer(dec_pict->recon_pict, &header_data->primary);
+
+ if (dec_pict->alt_pict)
+ translation_set_buffer(dec_pict->alt_pict, &header_data->alternate);
+
+ VDEC_ASSERT(picture);
+ VDEC_ASSERT(picture->pict_res_int);
+ VDEC_ASSERT(picture->pict_res_int->mb_param_buf);
+ header_data->temporal_outaddr = (unsigned int)GET_HOST_ADDR
+ (&picture->pict_res_int->mb_param_buf->ddbuf_info);
+
+ return IMG_SUCCESS;
+}
+#endif
+
+#ifdef HAS_H264
+static int translation_h264header(struct vdecdd_picture *pspicture,
+ struct dec_decpict *dec_pict,
+ struct h264fw_header_data *psheaderdata,
+ struct vdec_str_configdata *psstrconfigdata)
+{
+ psheaderdata->two_pass_flag = dec_pict->pict_hdr_info->discontinuous_mbs;
+ psheaderdata->disable_mvc = psstrconfigdata->disable_mvc;
+
+ /*
+ * The MB parameter base address is programmed only when an MB
+ * parameter buffer has been allocated; if needed in the future,
+ * make the allocation for pict_res_int accordingly.
+ */
+ /* Obtain the MB parameter address from the stream unit. */
+ if (pspicture->pict_res_int->mb_param_buf) {
+ psheaderdata->mbparams_base_address =
+ (unsigned int)GET_HOST_ADDR(&pspicture->pict_res_int->mb_param_buf->ddbuf_info);
+ psheaderdata->mbparams_size_per_plane =
+ pspicture->pict_res_int->mb_param_buf->ddbuf_info.buf_size / 3;
+ } else {
+ psheaderdata->mbparams_base_address = 0;
+ psheaderdata->mbparams_size_per_plane = 0;
+ }
+ psheaderdata->slicegroupmap_base_address =
+ (unsigned int)GET_HOST_ADDR(&dec_pict->cur_pict_dec_res->h264_sgm_buf);
+
+ translation_set_buffer(dec_pict->recon_pict, &psheaderdata->primary);
+
+ if (dec_pict->alt_pict)
+ translation_set_buffer(dec_pict->alt_pict, &psheaderdata->alternate);
+
+ /* Signal whether we have PPS for the second field. */
+ if (pspicture->dec_pict_aux_info.second_pps_id == BSPP_INVALID)
+ psheaderdata->second_pps = 0;
+ else
+ psheaderdata->second_pps = 1;
+
+ return IMG_SUCCESS;
+}
+#endif /* HAS_H264 */
+
+#ifdef HAS_JPEG
+
+static int translation_jpegheader(const struct bspp_sequ_hdr_info *seq,
+ const struct dec_decpict *dec_pict,
+ const struct bspp_pict_hdr_info *pict_hdrinfo,
+ struct jpegfw_header_data *header_data)
+{
+ unsigned int i;
+
+ /* Output picture planes addresses */
+ for (i = 0; i < seq->com_sequ_hdr_info.pixel_info.num_planes; i++) {
+ header_data->plane_offsets[i] =
+ (unsigned int)GET_HOST_ADDR(&dec_pict->recon_pict->pict_buf->ddbuf_info) +
+ dec_pict->recon_pict->rend_info.plane_info[i].offset;
+ }
+
+ /* copy the expected number of SOS fields */
+ header_data->hdr_sos_count = pict_hdrinfo->sos_count;
+
+ translation_set_buffer(dec_pict->recon_pict, &header_data->primary);
+
+ return IMG_SUCCESS;
+}
+#endif /* HAS_JPEG */
+/*
+ * This function translates host video standard enum (VDEC_eVidStd) into
+ * firmware video standard enum (VDECFW_eCodecType).
+ */
+static int translation_get_codec(enum vdec_vid_std evidstd,
+ enum vdecfw_codectype *pecodec)
+{
+ enum vdecfw_codectype ecodec = VDEC_CODEC_NONE;
+ unsigned int result = IMG_ERROR_NOT_SUPPORTED;
+
+ /* Translate from video standard to firmware codec. */
+ switch (evidstd) {
+ #ifdef HAS_H264
+ case VDEC_STD_H264:
+ ecodec = VDECFW_CODEC_H264;
+ result = IMG_SUCCESS;
+ break;
+ #endif /* HAS_H264 */
+#ifdef HAS_HEVC
+ case VDEC_STD_HEVC:
+ ecodec = VDECFW_CODEC_HEVC;
+ result = IMG_SUCCESS;
+ break;
+#endif /* HAS_HEVC */
+#ifdef HAS_JPEG
+ case VDEC_STD_JPEG:
+ ecodec = VDECFW_CODEC_JPEG;
+ result = IMG_SUCCESS;
+ break;
+#endif
+ default:
+ result = IMG_ERROR_NOT_SUPPORTED;
+ break;
+ }
+ *pecodec = ecodec;
+ return result;
+}
+
+/*
+ * This function is used to obtain buffer for sequence header.
+ */
+static int translation_get_seqhdr(struct vdecdd_str_unit *psstrunit,
+ struct dec_decpict *psdecpict,
+ unsigned int *puipseqaddr)
+{
+ /*
+ * Send sequence info only if it's the first picture of a sequence
+ * or the start of a closed GOP.
+ */
+ if (psstrunit->pict_hdr_info->first_pic_of_sequence || psstrunit->closed_gop) {
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ /* Get access to map info context */
+ int result = rman_get_resource(psstrunit->seq_hdr_info->bufmap_id,
+ VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ *puipseqaddr = GET_HOST_ADDR_OFFSET(&ddbuf_map_info->ddbuf_info,
+ psstrunit->seq_hdr_info->buf_offset);
+ } else {
+ *puipseqaddr = 0;
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to obtain buffer for picture parameter set.
+ */
+static int translation_get_ppshdr(struct vdecdd_str_unit *psstrunit,
+ struct dec_decpict *psdecpict,
+ unsigned int *puipppsaddr)
+{
+ if (psstrunit->pict_hdr_info->pict_aux_data.id != BSPP_INVALID) {
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ int result;
+
+ VDEC_ASSERT(psstrunit->pict_hdr_info->pict_aux_data.pic_data);
+ /* Get access to map info context */
+ result = rman_get_resource(psstrunit->pict_hdr_info->pict_aux_data.bufmap_id,
+ VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+
+ if (result != IMG_SUCCESS)
+ return result;
+ *puipppsaddr =
+ GET_HOST_ADDR_OFFSET(&ddbuf_map_info->ddbuf_info,
+ psstrunit->pict_hdr_info->pict_aux_data.buf_offset);
+ } else {
+ *puipppsaddr = 0;
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to obtain buffer for second picture parameter set.
+ */
+static int translation_getsecond_ppshdr(struct vdecdd_str_unit *psstrunit,
+ unsigned int *puisecond_ppshdr)
+{
+ if (psstrunit->pict_hdr_info->second_pict_aux_data.id !=
+ BSPP_INVALID) {
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+ int result;
+ void *pic_data =
+ psstrunit->pict_hdr_info->second_pict_aux_data.pic_data;
+
+ VDEC_ASSERT(pic_data);
+ result = rman_get_resource(psstrunit->pict_hdr_info->second_pict_aux_data.bufmap_id,
+ VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+
+ if (result != IMG_SUCCESS)
+ return result;
+
+ *puisecond_ppshdr =
+ GET_HOST_ADDR_OFFSET
+ (&ddbuf_map_info->ddbuf_info,
+ psstrunit->pict_hdr_info->second_pict_aux_data.buf_offset);
+ } else {
+ *puisecond_ppshdr = 0;
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * Returns address from which FW should download its shared context.
+ */
+static unsigned int translation_getctx_loadaddr(struct dec_decpict *psdecpict)
+{
+ if (psdecpict->prev_pict_dec_res)
+ return GET_HOST_ADDR(&psdecpict->prev_pict_dec_res->fw_ctx_buf);
+
+ /*
+ * No previous context exists; using the current context leads to
+ * problems on replay, so just tell the FW to use a clean one.
+ * This is NULL as an integer to avoid pointer-size warnings due
+ * to type casting.
+ */
+ return 0;
+}
+
+static void translation_setup_std_header
+ (struct vdec_str_configdata *str_configdata,
+ struct dec_decpict *dec_pict,
+ struct vdecdd_str_unit *str_unit, unsigned int *psr_hdrsize,
+ struct vdecdd_picture *picture, unsigned int *picture_cmds,
+ enum vdecfw_parsermode *parser_mode)
+{
+ switch (str_configdata->vid_std) {
+#ifdef HAS_H264
+ case VDEC_STD_H264:
+ {
+ struct h264fw_header_data *header_data =
+ (struct h264fw_header_data *)
+ dec_pict->hdr_info->ddbuf_info->cpu_virt;
+ *parser_mode = str_unit->pict_hdr_info->parser_mode;
+
+ if (str_unit->pict_hdr_info->parser_mode !=
+ VDECFW_SCP_ONLY) {
+ pr_warn("VDECFW_SCP_ONLY mode supported in PVDEC FW\n");
+ }
+ /* Reset header data. */
+ memset(header_data, 0, sizeof(*(header_data)));
+
+ /* Prepare active parameter sets. */
+ translation_h264header(picture, dec_pict, header_data, str_configdata);
+
+ /* Setup header size in the transaction. */
+ *psr_hdrsize = sizeof(struct h264fw_header_data);
+ break;
+ }
+#endif /* HAS_H264 */
+
+#ifdef HAS_HEVC
+ case VDEC_STD_HEVC:
+ {
+ struct hevcfw_headerdata *header_data =
+ (struct hevcfw_headerdata *)dec_pict->hdr_info->ddbuf_info->cpu_virt;
+ *parser_mode = str_unit->pict_hdr_info->parser_mode;
+
+ /* Reset header data. */
+ memset(header_data, 0, sizeof(*header_data));
+
+ /* Prepare active parameter sets. */
+ translation_hevc_header(picture, dec_pict, header_data);
+
+ /* Setup header size in the transaction. */
+ *psr_hdrsize = sizeof(struct hevcfw_headerdata);
+ break;
+ }
+#endif
+#ifdef HAS_JPEG
+ case VDEC_STD_JPEG:
+ {
+ struct jpegfw_header_data *header_data =
+ (struct jpegfw_header_data *)dec_pict->hdr_info->ddbuf_info->cpu_virt;
+ const struct bspp_sequ_hdr_info *seq = str_unit->seq_hdr_info;
+ const struct bspp_pict_hdr_info *pict_hdr_info = str_unit->pict_hdr_info;
+
+ /* Reset header data. */
+ memset(header_data, 0, sizeof(*(header_data)));
+
+ /* Prepare active parameter sets. */
+ translation_jpegheader(seq, dec_pict, pict_hdr_info, header_data);
+
+ /* Setup header size in the transaction. */
+ *psr_hdrsize = sizeof(struct jpegfw_header_data);
+ break;
+ }
+#endif
+ default:
+ VDEC_ASSERT(NULL == "Unknown standard!");
+ *psr_hdrsize = 0;
+ break;
+ }
+}
+
+#define VDEC_INITIAL_DEVA_DMA_CMD_SIZE 3
+#define VDEC_SINLGE_DEVA_DMA_CMD_SIZE 2
+
+#ifdef VDEC_USE_PVDEC
+/*
+ * Creates DEVA bitstream segments command and saves it to the control
+ * allocation buffer.
+ */
+static int translation_pvdec_adddma_transfers
+ (struct lst_t *decpic_seglist, unsigned int **dma_cmdbuf,
+ int cmd_bufsize, struct dec_decpict *psdecpict, int eop)
+{
+ /*
+ * DEVA's bitstream DMA command is made out of chunks with following
+ * layout ('+' sign is used to mark actual words in command):
+ *
+ * + Bitstream HDR, type unsigned int, consists of:
+ * - command id (CMD_BITSTREAM_SEGMENTS),
+ * - number of segments in this chunk,
+ * - optional CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK
+ *
+ * + Bitstream total size, type unsigned int,
+ * represents size of all segments in all chunks
+ *
+ * Segments of the following type (can repeat up to
+ * CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1 times):
+ *
+ * + Bitstream segment size, type unsigned int
+ *
+ * + Bitstream segment address, type unsigned int
+ *
+ * Subsequent chunks are present when
+ * CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK flag is set in Bitstream HDR.
+ */
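+ /*
+ * Worked example (illustrative): a single chunk carrying two
+ * segments with EOP set is laid out as:
+ *
+ * word 0: CMD_BITSTREAM_SEGMENTS | CMD_BITSTREAM_EOP_MASK | 1
+ * word 1: total size of both segments in bytes
+ * word 2: size of segment 0
+ * word 3: address of segment 0
+ * word 4: size of segment 1
+ * word 5: address of segment 1
+ */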
+ struct dec_decpict_seg *dec_picseg = (struct dec_decpict_seg *)lst_first(decpic_seglist);
+ unsigned int *cmd = *dma_cmdbuf;
+ unsigned int *dma_hdr = cmd;
+ unsigned int segcount = 0;
+ unsigned int bitstream_size = 0;
+
+ /*
+ * Two words for DMA command header (setup later as we need to find out
+ * count of BS segments).
+ */
+ cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+ cmd_bufsize -= CMD_BITSTREAM_HDR_DW_SIZE;
+ if (cmd_bufsize < 0) {
+ pr_err("Buffer for DMA command too small.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!dec_picseg) {
+ /* No segments to be sent to FW: prepare a fake one */
+ cmd_bufsize -= VDEC_SINLGE_DEVA_DMA_CMD_SIZE;
+ if (cmd_bufsize < 0) {
+ pr_err("Buffer for DMA command too small.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ segcount++;
+
+ /* zero the segment size and segment address */
+ *(cmd++) = 0;
+ *(cmd++) = 0;
+ }
+
+ /* Loop through all bitstream segments */
+ while (dec_picseg) {
+ if (dec_picseg->bstr_seg && (dec_picseg->bstr_seg->bstr_seg_flag
+ & VDECDD_BSSEG_SKIP) == 0) {
+ unsigned int result;
+ struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+
+ segcount++;
+ /* Two words for each added bitstream segment */
+ cmd_bufsize -= VDEC_SINLGE_DEVA_DMA_CMD_SIZE;
+ if (cmd_bufsize < 0) {
+ pr_err("Buffer for DMA command too small.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ /* Insert SCP/SC if needed */
+ if (dec_picseg->bstr_seg->bstr_seg_flag &
+ VDECDD_BSSEG_INSERTSCP) {
+ unsigned int startcode_length =
+ psdecpict->start_code_bufinfo->buf_size;
+
+ if (dec_picseg->bstr_seg->bstr_seg_flag &
+ VDECDD_BSSEG_INSERT_STARTCODE) {
+ unsigned char *start_code =
+ psdecpict->start_code_bufinfo->cpu_virt;
+ start_code[startcode_length - 1] =
+ dec_picseg->bstr_seg->start_code_suffix;
+ } else {
+ startcode_length -= 1;
+ }
+
+ segcount++;
+ *(cmd++) = startcode_length;
+ bitstream_size += startcode_length;
+
+ *(cmd++) = psdecpict->start_code_bufinfo->dev_virt;
+
+ if (((segcount %
+ (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) == 0))
+ /*
+ * we have reached the max number of
+ * bitstream segments for the current
+ * command: make cmd point to the next
+ * BS command header
+ */
+ cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+ }
+ /* Get access to map info context */
+ result = rman_get_resource(dec_picseg->bstr_seg->bufmap_id,
+ VDECDD_BUFMAP_TYPE_ID,
+ (void **)&ddbuf_map_info, NULL);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ *(cmd++) = (dec_picseg->bstr_seg->data_size);
+ bitstream_size += dec_picseg->bstr_seg->data_size;
+
+ *(cmd++) = ddbuf_map_info->ddbuf_info.dev_virt +
+ dec_picseg->bstr_seg->data_byte_offset;
+
+ if (((segcount %
+ (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) == 0) &&
+ (lst_next(dec_picseg)))
+ /*
+ * we have reached the max number of bitstream
+ * segments for the current command: make cmd
+ * point to the next BS command header
+ */
+ cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+ }
+ dec_picseg = lst_next(dec_picseg);
+ }
+
+ if (segcount > CMD_BITSTREAM_SEGMENTS_MAX_NUM) {
+ pr_err("Too many bitstream segments to transfer.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ while (segcount > (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) {
+ *dma_hdr++ = CMD_BITSTREAM_SEGMENTS |
+ CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK |
+ CMD_BITSTREAM_SEGMENTS_MINUS1_MASK;
+ *dma_hdr++ = bitstream_size;
+ /*
+ * make dma_hdr point to the next chunk by skipping the bitstream
+ * segments
+ */
+ dma_hdr += (2 * (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1));
+ segcount -= (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1);
+ }
+ *dma_hdr = eop ? CMD_BITSTREAM_EOP_MASK : 0;
+ *dma_hdr++ |= CMD_BITSTREAM_SEGMENTS | (segcount - 1);
+ *dma_hdr = bitstream_size;
+
+ /*
+ * Let the caller know where we finished: a pointer to the location
+ * one word past the end of our command
+ */
+ *dma_cmdbuf = cmd;
+ return IMG_SUCCESS;
+}
+
+/*
+ * Creates DEVA control allocation buffer header.
+ */
+static void translation_pvdec_ctrl_setuphdr
+ (struct ctrl_alloc_header *ctrlalloc_hdr,
+ unsigned int *pic_cmds)
+{
+ ctrlalloc_hdr->cmd_additional_params = CMD_CTRL_ALLOC_HEADER;
+ ctrlalloc_hdr->ext_opmode = pic_cmds[VDECFW_CMD_EXT_OP_MODE];
+ ctrlalloc_hdr->chroma_strides =
+ pic_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE];
+ ctrlalloc_hdr->alt_output_addr[0] =
+ pic_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+ ctrlalloc_hdr->alt_output_addr[1] =
+ pic_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+ ctrlalloc_hdr->alt_output_flags =
+ pic_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+}
+
+/*
+ * Creates DEVA VLC DMA command and saves it to the control allocation buffer.
+ */
+static int translation_pvdecsetup_vlcdma
+ (struct vidio_ddbufinfo *vlctables_bufinfo,
+ unsigned int **dmacmd_buf, unsigned int cmdbuf_size)
+{
+ unsigned int cmd_dma;
+ unsigned int *cmd = *dmacmd_buf;
+
+ /* Check if VLC tables fit in one DMA transfer */
+ if (vlctables_bufinfo->buf_size > CMD_DMA_DMA_SIZE_MASK) {
+ pr_err("VLC tables won't fit into one DMA transfer!\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Check if we have enough space in control allocation buffer. */
+ if (cmdbuf_size < VDEC_SINLGE_DEVA_DMA_CMD_SIZE) {
+ pr_err("Buffer for DMA command too small.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Construct DMA command */
+ cmd_dma = CMD_DMA | CMD_DMA_TYPE_VLC_TABLE |
+ vlctables_bufinfo->buf_size;
+
+ /* Add command to control allocation */
+ *cmd++ = cmd_dma;
+ *cmd++ = vlctables_bufinfo->dev_virt;
+
+ /*
+ * Let the caller know where we finished: a pointer to the location
+ * one word past the end of our command
+ */
+ *dmacmd_buf = cmd;
+ return IMG_SUCCESS;
+}
+
+/*
+ * Creates DEVA commands for configuring VLC tables and saves them into
+ * control allocation buffer.
+ */
+static int translation_pvdecsetup_vlctables
+ (unsigned short vlc_index_data[][3], unsigned int num_tables,
+ unsigned int **ctrl_allocbuf, unsigned int ctrl_allocsize,
+ unsigned int msvdx_vecoffset)
+{
+ unsigned int i;
+ unsigned int word_count;
+ unsigned int reg_val;
+ unsigned int *ctrl_allochdr;
+
+ unsigned int *ctrl_alloc = *ctrl_allocbuf;
+
+ /* Calculate the number of words needed for VLC control allocations. */
+ /*
+ * 3 words for control allocation headers (we are writing 3 chunks:
+ * addresses, widths, opcodes)
+ */
+ unsigned int req_elems = 3 +
+ (ALIGN(num_tables, PVDECIO_VLC_IDX_WIDTH_PARTS) /
+ PVDECIO_VLC_IDX_WIDTH_PARTS) +
+ (ALIGN(num_tables, PVDECIO_VLC_IDX_ADDR_PARTS) /
+ PVDECIO_VLC_IDX_ADDR_PARTS) +
+ (ALIGN(num_tables, PVDECIO_VLC_IDX_OPCODE_PARTS) /
+ PVDECIO_VLC_IDX_OPCODE_PARTS);
+
+ /*
+ * Addresses chunk has to be split in two, if number of tables exceeds
+ * VEC_VLC_TABLE_ADDR_DISCONT (see layout of VEC_VLC_TABLE_ADDR*
+ * registers in TRM)
+ */
+ if (num_tables > VEC_VLC_TABLE_ADDR_DISCONT)
+ /* We need additional control allocation header */
+ req_elems += 1;
+
+ if (ctrl_allocsize < req_elems) {
+ pr_err("Buffer for VLC IDX commands too small.\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /*
+ * Write VLC IDX addresses. Chunks for VEC_VLC_TABLE_ADDR[0-15] and
+ * VEC_VLC_TABLE_ADDR[16-18] registers.
+ */
+ ctrl_allochdr = ctrl_alloc++;
+ *ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+ (MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_OFFSET + msvdx_vecoffset);
+ /* Reset the word count. */
+ word_count = 0;
+
+ /* Process VLC index table. */
+ i = 0;
+ reg_val = 0;
+ while (i < num_tables) {
+ VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_ADDR_ID] &
+ ~PVDECIO_VLC_IDX_ADDR_MASK) == 0);
+ /* Pack the addresses into a word. */
+ reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_ADDR_ID] &
+ PVDECIO_VLC_IDX_ADDR_MASK) <<
+ ((i % PVDECIO_VLC_IDX_ADDR_PARTS) *
+ PVDECIO_VLC_IDX_ADDR_SHIFT));
+
+ /* If we reached the end of VEC_VLC_TABLE_ADDR[0-15] area... */
+ if (i == VEC_VLC_TABLE_ADDR_DISCONT) {
+ /*
+ * Finalize command header for VEC_VLC_TABLE_ADDR[0-15]
+ * register chunk.
+ */
+ *ctrl_allochdr |= word_count << 16;
+ /*
+ * Reserve and preset command header for
+ * VEC_VLC_TABLE_ADDR[16-18] register chunk.
+ */
+ ctrl_allochdr = ctrl_alloc++;
+ *ctrl_allochdr = CMD_REGISTER_BLOCK |
+ CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+ (MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR16_OFFSET +
+ msvdx_vecoffset);
+ /* Reset the word count. */
+ word_count = 0;
+ }
+
+ /*
+ * If all the addresses are packed in this word or that's the
+ * last iteration
+ */
+ if (((i % PVDECIO_VLC_IDX_ADDR_PARTS) ==
+ (PVDECIO_VLC_IDX_ADDR_PARTS - 1)) ||
+ (i == (num_tables - 1))) {
+ /*
+ * Add VLC table address to this chunk and increase
+ * words count.
+ */
+ *ctrl_alloc++ = reg_val;
+ word_count++;
+ /* Reset address value. */
+ reg_val = 0;
+ }
+
+ i++;
+ }
+
+ /*
+ * Finalize the current command header for VEC_VLC_TABLE_ADDR register
+ * chunk.
+ */
+ *ctrl_allochdr |= word_count << 16;
+
+ /*
+ * Start new commands chunk for VEC_VLC_TABLE_INITIAL_WIDTH[0-3]
+ * registers.
+ */
+
+ /*
+ * Reserve and preset command header for
+ * VEC_VLC_TABLE_INITIAL_WIDTH[0-3] register chunk.
+ */
+ ctrl_allochdr = ctrl_alloc++;
+ *ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+ (MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_OFFSET +
+ msvdx_vecoffset);
+ /* Reset the word count. */
+ word_count = 0;
+
+ /* Process VLC index table. */
+ i = 0;
+ reg_val = 0;
+
+ while (i < num_tables) {
+ VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_WIDTH_ID] &
+ ~PVDECIO_VLC_IDX_WIDTH_MASK) == 0);
+ /* Pack the widths into a word. */
+ reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_WIDTH_ID] &
+ PVDECIO_VLC_IDX_WIDTH_MASK) <<
+ (i % PVDECIO_VLC_IDX_WIDTH_PARTS) *
+ PVDECIO_VLC_IDX_WIDTH_SHIFT);
+
+ /*
+ * If all the widths are packed in this word or that's the last
+ * iteration.
+ */
+ if (((i % PVDECIO_VLC_IDX_WIDTH_PARTS) ==
+ (PVDECIO_VLC_IDX_WIDTH_PARTS - 1)) ||
+ (i == (num_tables - 1))) {
+ /*
+ * Add VLC table width to this chunk and increase words
+ * count.
+ */
+ *ctrl_alloc++ = reg_val;
+ word_count++;
+ /* Reset width value. */
+ reg_val = 0;
+ }
+ i++;
+ }
+
+ /*
+ * Finalize command header for VEC_VLC_TABLE_INITIAL_WIDTH[0-3] register
+ * chunk.
+ */
+ *ctrl_allochdr |= word_count << 16;
+
+ /*
+ * Start new commands chunk for VEC_VLC_TABLE_INITIAL_OPCODE[0-2]
+ * registers.
+ * Reserve and preset command header for
+ * VEC_VLC_TABLE_INITIAL_OPCODE[0-2] register chunk
+ */
+ ctrl_allochdr = ctrl_alloc++;
+ *ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+ (MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_OFFSET +
+ msvdx_vecoffset);
+ /* Reset the word count. */
+ word_count = 0;
+
+ /* Process VLC index table. */
+ i = 0;
+ reg_val = 0;
+
+ while (i < num_tables) {
+ VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_OPCODE_ID] &
+ ~PVDECIO_VLC_IDX_OPCODE_MASK) == 0);
+ /* Pack the opcodes into a word. */
+ reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_OPCODE_ID] &
+ PVDECIO_VLC_IDX_OPCODE_MASK) <<
+ (i % PVDECIO_VLC_IDX_OPCODE_PARTS) *
+ PVDECIO_VLC_IDX_OPCODE_SHIFT);
+
+ /*
+ * If all the opcodes are packed in this word or that's the last
+ * iteration.
+ */
+ if (((i % PVDECIO_VLC_IDX_OPCODE_PARTS) ==
+ (PVDECIO_VLC_IDX_OPCODE_PARTS - 1)) ||
+ (i == (num_tables - 1))) {
+ /*
+ * Add VLC table opcodes to this chunk and increase
+ * words count.
+ */
+ *ctrl_alloc++ = reg_val;
+ word_count++;
+ /* Reset opcode value. */
+ reg_val = 0;
+ }
+ i++;
+ }
+
+ /*
+ * Finalize command header for VEC_VLC_TABLE_INITIAL_OPCODE[0-2]
+ * register chunk.
+ */
+ *ctrl_allochdr |= word_count << 16;
+
+ /* Update caller with current location of control allocation pointer */
+ *ctrl_allocbuf = ctrl_alloc;
+ return IMG_SUCCESS;
+}
+
+/*
+ * fills in a rendec command chunk in the command buffer.
+ */
+static void fill_rendec_chunk(int num, ...)
+{
+ va_list valist;
+ unsigned int i, j = 0;
+ unsigned int chunk_word_count = 0;
+ unsigned int used_word_count = 0;
+ int aux_array_size = 0;
+ unsigned int *pic_cmds;
+ unsigned int **ctrl_allocbuf;
+ unsigned int ctrl_allocsize;
+ unsigned int vdmc_cmd_offset;
+ unsigned int offset;
+ unsigned int *buf;
+ /* 5 is the number of fixed arguments passed to fill_rendec_chunk */
+ enum vdecfw_picture_cmds *aux_array = kmalloc((sizeof(unsigned int) *
+ (num - 5)), GFP_KERNEL);
+ if (!aux_array)
+ return;
+
+ /* initialize valist for num number of arguments */
+ va_start(valist, num);
+
+ pic_cmds = va_arg(valist, unsigned int *);
+ ctrl_allocbuf = va_arg(valist, unsigned int **);
+ ctrl_allocsize = va_arg(valist, unsigned int);
+ vdmc_cmd_offset = va_arg(valist, unsigned int);
+ offset = va_arg(valist, unsigned int);
+ buf = *ctrl_allocbuf;
+
+ aux_array_size = (sizeof(unsigned int) * (num - 5));
+ /*
+ * access the remaining arguments assigned to valist; the first
+ * five have already been read
+ */
+ for (i = 6, j = 0; i <= num; i++, j++)
+ aux_array[j] = (enum vdecfw_picture_cmds)va_arg(valist, int);
+
+ /* clean memory reserved for valist */
+ va_end(valist);
+ chunk_word_count = aux_array_size /
+ sizeof(enum vdecfw_picture_cmds);
+ if ((chunk_word_count + 1) > (ctrl_allocsize - used_word_count)) {
+ kfree(aux_array);
+ return;
+ }
+ if ((chunk_word_count & ~(CMD_RENDEC_WORD_COUNT_MASK >>
+ CMD_RENDEC_WORD_COUNT_SHIFT)) != 0) {
+ kfree(aux_array);
+ return;
+ }
+ used_word_count += chunk_word_count + 1;
+ *buf++ = CMD_RENDEC_BLOCK | (chunk_word_count << 16) |
+ (vdmc_cmd_offset + offset);
+
+ for (i = 0; i < chunk_word_count; i++)
+ *buf++ = pic_cmds[aux_array[i]];
+
+ *ctrl_allocbuf = buf;
+ /* free the memory */
+ kfree(aux_array);
+}
+
+/*
+ * Creates DEVA commands for configuring rendec and writes them into control
+ * allocation buffer.
+ */
+static void translation_pvdec_setup_commands(unsigned int *pic_cmds,
+ unsigned int **ctrl_allocbuf,
+ unsigned int ctrl_allocsize,
+ unsigned int vdmc_cmd_offset)
+{
+ unsigned int codec_mode;
+
+ codec_mode = REGIO_READ_FIELD(pic_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, CODEC_MODE);
+
+ if (codec_mode != CODEC_MODE_H264)
+ /* chunk with cache settings at 0x01C */
+ /*
+ * here the first argument (6) gives the total number of arguments
+ * being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+ vdmc_cmd_offset,
+ MSVDX_CMDS_MC_CACHE_CONFIGURATION_OFFSET,
+ VDECFW_CMD_MC_CACHE_CONFIGURATION);
+
+ /* chunk with extended row stride at 0x03C */
+ /*
+ * here the first argument (6) gives the total number of arguments
+ * being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+ vdmc_cmd_offset,
+ MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET,
+ VDECFW_CMD_EXTENDED_ROW_STRIDE);
+
+ /* chunk with alternative output control at 0x1B4 */
+ /*
+ * here the first argument (6) gives the total number of arguments
+ * being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+ vdmc_cmd_offset,
+ MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_OFFSET,
+ VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL);
+
+ /* scaling chunks */
+ if (pic_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE]) {
+ if (codec_mode != CODEC_MODE_REAL8 && codec_mode != CODEC_MODE_REAL9) {
+ /*
+ * chunk with scale display size, scale H/V control at
+ * 0x0050
+ */
+ /*
+ * here the first argument (8) gives the total number of
+ * arguments being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(8, pic_cmds, ctrl_allocbuf,
+ ctrl_allocsize, vdmc_cmd_offset,
+ MSVDX_CMDS_SCALED_DISPLAY_SIZE_OFFSET,
+ VDECFW_CMD_SCALED_DISPLAY_SIZE,
+ VDECFW_CMD_HORIZONTAL_SCALE_CONTROL,
+ VDECFW_CMD_VERTICAL_SCALE_CONTROL);
+
+ /* chunk with luma/chorma H/V coeffs at 0x0060 */
+ /*
+ * here the first argument (21) gives the total number of
+ * arguments being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(21, pic_cmds, ctrl_allocbuf,
+ ctrl_allocsize, vdmc_cmd_offset,
+ MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET,
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_0,
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_1,
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_2,
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_3,
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_0,
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_1,
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_2,
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_3,
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_0,
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_1,
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_2,
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_3,
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_0,
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_1,
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_2,
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_3);
+
+ /*
+ * chunk with scale output size, scale H/V chroma at
+ * 0x01B8
+ */
+ /*
+ * here the first argument (8) gives the total number of
+ * arguments being passed to fill_rendec_chunk.
+ */
+ fill_rendec_chunk(8, pic_cmds, ctrl_allocbuf,
+ ctrl_allocsize, vdmc_cmd_offset,
+ MSVDX_CMDS_SCALE_OUTPUT_SIZE_OFFSET,
+ VDECFW_CMD_SCALE_OUTPUT_SIZE,
+ VDECFW_CMD_SCALE_HORIZONTAL_CHROMA,
+ VDECFW_CMD_SCALE_VERTICAL_CHROMA);
+ }
+ }
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function translation_pvdec_setup_pvdec_commands
+ */
+static int translation_pvdec_setup_pvdec_commands(struct vdecdd_picture *picture,
+ struct dec_decpict *dec_pict,
+ struct vdecdd_str_unit *str_unit,
+ struct decoder_regsoffsets *regs_offsets,
+ unsigned int **ctrl_allocbuf,
+ unsigned int ctrl_alloc_size,
+ unsigned int *mem_to_reg_host_part,
+ unsigned int *pict_cmds)
+{
+ const unsigned int genc_buf_cnt = 4;
+ /* We have two chunks: for GENC buffers addresses and sizes*/
+ const unsigned int genc_conf_items = 2;
+ const unsigned int pipe = 0xf << 16; /* Instruct H/W to write to current pipe */
+ /* We need to configure address and size of each GENC buffer */
+ const unsigned int genc_words_cnt = genc_buf_cnt * genc_conf_items;
+ struct vdecdd_ddbuf_mapinfo **genc_buffers =
+ picture->pict_res_int->seq_resint->genc_buffers;
+ unsigned int memto_reg_used; /* in bytes */
+ unsigned int i;
+ unsigned int *ctrl_alloc = *ctrl_allocbuf;
+ unsigned int *mem_to_reg = (unsigned int *)dec_pict->pvdec_info->ddbuf_info->cpu_virt;
+ unsigned int reg = 0;
+
+ if (ctrl_alloc_size < genc_words_cnt + genc_conf_items) {
+ pr_err("Buffer for GENC config too small.");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Insert command header for GENC buffers sizes */
+ *ctrl_alloc++ = CMD_REGISTER_BLOCK | (genc_buf_cnt << 16) |
+ (PVDEC_ENTROPY_CR_GENC_BUFFER_SIZE_OFFSET + regs_offsets->entropy_offset);
+ for (i = 0; i < genc_buf_cnt; i++)
+ *ctrl_alloc++ = genc_buffers[i]->ddbuf_info.buf_size;
+
+ /* Insert command header for GENC buffers addresses */
+ *ctrl_alloc++ = CMD_REGISTER_BLOCK | (genc_buf_cnt << 16) |
+ (PVDEC_ENTROPY_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->entropy_offset);
+ for (i = 0; i < genc_buf_cnt; i++)
+ *ctrl_alloc++ = genc_buffers[i]->ddbuf_info.dev_virt;
+
+ /* Insert GENC fragment buffer address */
+ *ctrl_alloc++ = CMD_REGISTER_BLOCK | (1 << 16) |
+ (PVDEC_ENTROPY_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET + regs_offsets->entropy_offset);
+ *ctrl_alloc++ = picture->pict_res_int->genc_fragment_buf->ddbuf_info.dev_virt;
+
+ /* Return current location in control allocation buffer to caller */
+ *ctrl_allocbuf = ctrl_alloc;
+
+ reg = 0;
+ REGIO_WRITE_FIELD_LITE
+ (reg,
+ MSVDX_CMDS, PVDEC_DISPLAY_PICTURE_SIZE, PVDEC_DISPLAY_PICTURE_WIDTH_MIN1,
+ str_unit->pict_hdr_info->coded_frame_size.width - 1, unsigned int);
+ REGIO_WRITE_FIELD_LITE
+ (reg,
+ MSVDX_CMDS, PVDEC_DISPLAY_PICTURE_SIZE, PVDEC_DISPLAY_PICTURE_HEIGHT_MIN1,
+ str_unit->pict_hdr_info->coded_frame_size.height - 1, unsigned int);
+
+ /*
+ * Pvdec operating mode needs to be submitted before any other commands.
+ * This will be set in FW. Make sure it's the first command in Mem2Reg buffer.
+ */
+ VDEC_ASSERT((unsigned int *)dec_pict->pvdec_info->ddbuf_info->cpu_virt == mem_to_reg);
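+
+ /*
+ * Each Mem2Reg entry below is a pair of words: the target register
+ * offset (with the destination pipe field, 0xf << 16, folded in)
+ * followed by the value to be written.
+ */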
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_PVDEC_OPERATING_MODE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = 0x0; /* has to be updated in the F/W */
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_MC_CACHE_CONFIGURATION_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = 0x0; /* has to be updated in the F/W */
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = reg;
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_PVDEC_CODED_PICTURE_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = reg;
+
+ /* scaling configuration */
+ if (pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE]) {
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL];
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_VERTICAL_SCALE_CONTROL_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_SCALE_OUTPUT_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA];
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_SCALE_VERTICAL_CHROMA_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_0];
+ *mem_to_reg++ = pipe |
+ (4 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_1];
+ *mem_to_reg++ = pipe |
+ (8 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_2];
+ *mem_to_reg++ = pipe |
+ (12 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_3];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_0];
+ *mem_to_reg++ = pipe |
+ (4 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_1];
+ *mem_to_reg++ = pipe |
+ (8 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_2];
+ *mem_to_reg++ = pipe |
+ (12 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_3];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_0];
+ *mem_to_reg++ = pipe |
+ (4 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_1];
+ *mem_to_reg++ = pipe |
+ (8 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_2];
+ *mem_to_reg++ = pipe |
+ (12 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_3];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_0];
+ *mem_to_reg++ = pipe |
+ (4 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_1];
+ *mem_to_reg++ = pipe |
+ (8 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_2];
+ *mem_to_reg++ = pipe |
+ (12 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_3];
+ }
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_EXTENDED_ROW_STRIDE];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_CHROMA_ROW_STRIDE_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE];
+
+ /* Configure the GENC buffers in the VEC back-end via Mem2Reg */
+ for (i = 0; i < genc_buf_cnt; i++) {
+ *mem_to_reg++ = pipe | (PVDEC_VEC_BE_CR_GENC_BUFFER_SIZE_OFFSET +
+ regs_offsets->vec_be_regs_offset + i * sizeof(unsigned int));
+ *mem_to_reg++ = genc_buffers[i]->ddbuf_info.buf_size;
+ *mem_to_reg++ = pipe | (PVDEC_VEC_BE_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET +
+ regs_offsets->vec_be_regs_offset + i * sizeof(unsigned int));
+ *mem_to_reg++ = genc_buffers[i]->ddbuf_info.dev_virt;
+ }
+
+ *mem_to_reg++ = pipe |
+ (PVDEC_VEC_BE_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET +
+ regs_offsets->vec_be_regs_offset);
+ *mem_to_reg++ = picture->pict_res_int->genc_fragment_buf->ddbuf_info.dev_virt;
+
+ *mem_to_reg++ = pipe |
+ (PVDEC_VEC_BE_CR_ABOVE_PARAM_BASE_ADDRESS_OFFSET +
+ regs_offsets->vec_be_regs_offset);
+
+ *mem_to_reg++ = dec_pict->pvdec_info->ddbuf_info->dev_virt +
+ MEM_TO_REG_BUF_SIZE + SLICE_PARAMS_BUF_SIZE;
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+ /* alternative picture configuration */
+ if (dec_pict->alt_pict) {
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET +
+ regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+ }
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_AUX_LINE_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS];
+
+ *mem_to_reg++ = pipe |
+ (MSVDX_CMDS_INTRA_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->vdmc_cmd_offset);
+ *mem_to_reg++ = pict_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS];
+
+ /* Make sure we fit in the buffer */
+ memto_reg_used = (unsigned long)mem_to_reg -
+ (unsigned long)dec_pict->pvdec_info->ddbuf_info->cpu_virt;
+
+ VDEC_ASSERT(memto_reg_used < MEM_TO_REG_BUF_SIZE);
+
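+ /*
+ * Report the host-written part of the Mem2Reg buffer in 32-bit words;
+ * judging by the MEM2REG_SIZE_HOST_PART packing in the caller, the
+ * firmware appends its own register writes after this point.
+ */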
+ *mem_to_reg_host_part = memto_reg_used / sizeof(unsigned int);
+
+ return IMG_SUCCESS;
+}
+#endif
+
+/*
+ * Fills in the VDEC extension command (CMD_VDEC_EXT), which is written
+ * into the control allocation buffer ahead of the remaining commands.
+ */
+static int translation_pvdecsetup_vdecext
+ (struct vdec_ext_cmd *vdec_ext,
+ struct dec_decpict *dec_pict, unsigned int *pic_cmds,
+ struct vdecdd_str_unit *str_unit, enum vdec_vid_std vid_std,
+ enum vdecfw_parsermode parser_mode)
+{
+ int result;
+ unsigned int trans_id = dec_pict->transaction_id;
+
+ VDEC_ASSERT(dec_pict->recon_pict);
+
+ vdec_ext->cmd = CMD_VDEC_EXT;
+ vdec_ext->trans_id = trans_id;
+
+ result = translation_get_seqhdr(str_unit, dec_pict, &vdec_ext->seq_addr);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ result = translation_get_ppshdr(str_unit, dec_pict, &vdec_ext->pps_addr);
+ VDEC_ASSERT(result == IMG_SUCCESS);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ result = translation_getsecond_ppshdr(str_unit, &vdec_ext->pps_2addr);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ vdec_ext->hdr_addr = GET_HOST_ADDR(dec_pict->hdr_info->ddbuf_info);
+
+ vdec_ext->ctx_load_addr = translation_getctx_loadaddr(dec_pict);
+ vdec_ext->ctx_save_addr = GET_HOST_ADDR(&dec_pict->cur_pict_dec_res->fw_ctx_buf);
+ vdec_ext->buf_ctrl_addr = GET_HOST_ADDR(&dec_pict->pict_ref_res->fw_ctrlbuf);
+ if (dec_pict->prev_pict_dec_res) {
+ /*
+ * Copy the previous firmware context to the current one in case
+ * picture management fails in firmware.
+ */
+ memcpy(dec_pict->cur_pict_dec_res->fw_ctx_buf.cpu_virt,
+ dec_pict->prev_pict_dec_res->fw_ctx_buf.cpu_virt,
+ dec_pict->prev_pict_dec_res->fw_ctx_buf.buf_size);
+ }
+
+ vdec_ext->last_luma_recon =
+ pic_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+ vdec_ext->last_chroma_recon =
+ pic_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+ vdec_ext->luma_err_base =
+ pic_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS];
+ vdec_ext->chroma_err_base =
+ pic_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS];
+
+ vdec_ext->scaled_display_size =
+ pic_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE];
+ vdec_ext->horz_scale_control =
+ pic_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL];
+ vdec_ext->vert_scale_control =
+ pic_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL];
+ vdec_ext->scale_output_size = pic_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE];
+
+ vdec_ext->intra_buf_base_addr =
+ pic_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS];
+ vdec_ext->intra_buf_size_per_pipe =
+ pic_cmds[VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE];
+ vdec_ext->intra_buf_size_per_plane =
+ pic_cmds[VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE];
+ vdec_ext->aux_line_buffer_base_addr =
+ pic_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS];
+ vdec_ext->aux_line_buf_size_per_pipe =
+ pic_cmds[VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE];
+ vdec_ext->alt_output_pict_rotation =
+ pic_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+ vdec_ext->chroma2reconstructed_addr =
+ pic_cmds[VDECFW_CMD_CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+ vdec_ext->luma_alt_addr =
+ pic_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+ vdec_ext->chroma_alt_addr =
+ pic_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+ vdec_ext->chroma2alt_addr =
+ pic_cmds[VDECFW_CMD_CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+
+ if (vid_std == VDEC_STD_VC1) {
+ struct vidio_ddbufinfo *vlc_idx_tables_bufinfo =
+ dec_pict->vlc_idx_tables_bufinfo;
+ struct vidio_ddbufinfo *vlc_tables_bufinfo =
+ dec_pict->vlc_tables_bufinfo;
+
+ vdec_ext->vlc_idx_table_size = vlc_idx_tables_bufinfo->buf_size;
+ vdec_ext->vlc_idx_table_addr = GET_HOST_ADDR(vlc_idx_tables_bufinfo);
+ vdec_ext->vlc_tables_size = vlc_tables_bufinfo->buf_size;
+ vdec_ext->vlc_tables_addr = GET_HOST_ADDR(vlc_tables_bufinfo);
+ } else {
+ vdec_ext->vlc_idx_table_size = 0;
+ vdec_ext->vlc_idx_table_addr = 0;
+ vdec_ext->vlc_tables_size = 0;
+ vdec_ext->vlc_tables_addr = 0;
+ }
+
+ vdec_ext->display_picture_size = pic_cmds[VDECFW_CMD_DISPLAY_PICTURE];
+ vdec_ext->parser_mode = parser_mode;
+
+ /* miscellaneous flags */
+ vdec_ext->is_chromainterleaved =
+ REGIO_READ_FIELD(pic_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS, OPERATING_MODE,
+ CHROMA_INTERLEAVED);
+ vdec_ext->is_discontinuousmbs =
+ dec_pict->pict_hdr_info->discontinuous_mbs;
+
+#ifdef HAS_HEVC
+ if (dec_pict->pvdec_info) {
+ vdec_ext->mem_to_reg_addr = dec_pict->pvdec_info->ddbuf_info->dev_virt;
+ vdec_ext->slice_params_addr = dec_pict->pvdec_info->ddbuf_info->dev_virt +
+ MEM_TO_REG_BUF_SIZE;
+ vdec_ext->slice_params_size = SLICE_PARAMS_BUF_SIZE;
+ }
+ if (vid_std == VDEC_STD_HEVC) {
+ struct vdecdd_picture *picture = (struct vdecdd_picture *)str_unit->dd_pict_data;
+
+ VDEC_ASSERT(picture);
+ /* 10-bit packed output format indicator */
+ vdec_ext->is_packedformat = picture->op_config.pixel_info.mem_pkg ==
+ PIXEL_BIT10_MP ? 1 : 0;
+ }
+#endif
+ return IMG_SUCCESS;
+}
+
+/*
+ * NOTE: translation_configure_tiling is not supported as of now.
+ */
+int translation_ctrl_alloc_prepare(struct vdec_str_configdata *pstr_config_data,
+ struct vdecdd_str_unit *str_unit,
+ struct dec_decpict *dec_pict,
+ const struct vxd_coreprops *core_props,
+ struct decoder_regsoffsets *regs_offset)
+{
+ int result;
+ unsigned int *cmd_buf;
+ unsigned int hdr_size = 0;
+ unsigned int pict_cmds[VDECFW_CMD_MAX];
+ enum vdecfw_codectype codec;
+ struct vxd_buffers buffers;
+ struct vdec_ext_cmd *vdec_ext;
+ enum vdecfw_parsermode parser_mode = VDECFW_SCP_ONLY;
+ struct vidio_ddbufinfo *batch_msgbuf_info =
+ dec_pict->batch_msginfo->ddbuf_info;
+ struct lst_t *decpic_seg_list = &dec_pict->dec_pict_seg_list;
+ unsigned int memto_reg_host_part = 0;
+
+ unsigned long ctrl_alloc = (unsigned long)batch_msgbuf_info->cpu_virt;
+ unsigned long ctrl_alloc_end = ctrl_alloc + batch_msgbuf_info->buf_size;
+
+ struct vdecdd_picture *picture =
+ (struct vdecdd_picture *)str_unit->dd_pict_data;
+
+ memset(pict_cmds, 0, sizeof(pict_cmds));
+ memset(&buffers, 0, sizeof(buffers));
+
+ VDEC_ASSERT(batch_msgbuf_info->buf_size >= CTRL_ALLOC_MAX_SEGMENT_SIZE);
+ memset(batch_msgbuf_info->cpu_virt, 0, batch_msgbuf_info->buf_size);
+
+ /* Construct transaction based on new picture. */
+ VDEC_ASSERT(str_unit->str_unit_type == VDECDD_STRUNIT_PICTURE_START);
+
+ /* Picture data was already obtained from the stream unit above. */
+ dec_pict->recon_pict = &picture->disp_pict_buf;
+
+ result = translation_get_codec(pstr_config_data->vid_std, &codec);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ translation_setup_std_header(pstr_config_data, dec_pict, str_unit, &hdr_size, picture,
+ pict_cmds, &parser_mode);
+
+ buffers.recon_pict = dec_pict->recon_pict;
+ buffers.alt_pict = dec_pict->alt_pict;
+
+#ifdef HAS_HEVC
+ /* Set pipe offsets to device buffers */
+ if (pstr_config_data->vid_std == VDEC_STD_HEVC) {
+ /* FW in multipipe requires these buffers to be allocated per stream */
+ if (picture->pict_res_int && picture->pict_res_int->seq_resint &&
+ picture->pict_res_int->seq_resint->intra_buffer &&
+ picture->pict_res_int->seq_resint->aux_buffer) {
+ buffers.intra_bufinfo =
+ &picture->pict_res_int->seq_resint->intra_buffer->ddbuf_info;
+ buffers.auxline_bufinfo =
+ &picture->pict_res_int->seq_resint->aux_buffer->ddbuf_info;
+ }
+ } else {
+ buffers.intra_bufinfo = dec_pict->intra_bufinfo;
+ buffers.auxline_bufinfo = dec_pict->auxline_bufinfo;
+ }
+
+ if (buffers.intra_bufinfo)
+ buffers.intra_bufsize_per_pipe = buffers.intra_bufinfo->buf_size /
+ core_props->num_pixel_pipes;
+ if (buffers.auxline_bufinfo)
+ buffers.auxline_bufsize_per_pipe = buffers.auxline_bufinfo->buf_size /
+ core_props->num_pixel_pipes;
+#endif
+
+#ifdef ERROR_CONCEALMENT
+ if (picture->pict_res_int && picture->pict_res_int->seq_resint)
+ if (picture->pict_res_int->seq_resint->err_pict_buf)
+ buffers.err_pict_bufinfo =
+ &picture->pict_res_int->seq_resint->err_pict_buf->ddbuf_info;
+#endif
+
+ /*
+ * Prepare Reconstructed Picture Configuration
+ * Note: we are obtaining values of registers prepared based on header
+ * files generated from MSVDX *dev files.
+ * That's allowed, as the layout of the registers MSVDX_CMDS_OPERATING_MODE,
+ * MSVDX_CMDS_EXTENDED_ROW_STRIDE,
+ * MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION and
+ * MSVDX_CMDS_CHROMA_ROW_STRIDE is the same for both MSVDX and PVDEC.
+ */
+ vxd_set_reconpictcmds(str_unit, pstr_config_data, &picture->op_config, core_props,
+ &buffers, pict_cmds);
+
+ /* Alternative Picture Configuration */
+ if (dec_pict->alt_pict) {
+ dec_pict->twopass = picture->op_config.force_oold;
+ buffers.btwopass = dec_pict->twopass;
+ /*
+ * Alternative Picture Configuration
+ * Note: we are obtaining values of registers prepared based
+ * on header files generated from MSVDX *dev files.
+ * That's allowed, as the layout of the registers
+ * MSVDX_CMDS_OPERATING_MODE, MSVDX_CMDS_EXTENDED_ROW_STRIDE,
+ * MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION and
+ * MSVDX_CMDS_CHROMA_ROW_STRIDE is the same for both MSVDX and
+ * PVDEC.
+ */
+ /*
+ * Configure second buffer for out-of-loop processing
+ * (e.g. scaling etc.).
+ */
+ vxd_set_altpictcmds(str_unit, pstr_config_data, &picture->op_config, core_props,
+ &buffers, pict_cmds);
+ }
+
+ /*
+ * Setup initial simple bitstream configuration to be used by parser
+ * task
+ */
+ cmd_buf = (unsigned int *)ctrl_alloc;
+ result = translation_pvdec_adddma_transfers
+ (decpic_seg_list, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+ dec_pict, str_unit->eop);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ if ((unsigned long)(cmd_buf + (sizeof(struct ctrl_alloc_header) +
+ sizeof(struct vdec_ext_cmd)) / sizeof(unsigned int)) >=
+ ctrl_alloc_end)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
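+ /*
+ * Overall layout written into the batch message buffer (a sketch based
+ * on the steps below): bitstream DMA transfer commands, the control
+ * allocation header, the VDEC extension command, optional VLC
+ * DMA/configuration commands, standard-specific register commands and
+ * a final CMD_COMPLETION marker.
+ */
+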
+ /*
+ * Setup regular control allocation message. Start with control
+ * allocation header
+ */
+ translation_pvdec_ctrl_setuphdr((struct ctrl_alloc_header *)cmd_buf, pict_cmds);
+ cmd_buf += sizeof(struct ctrl_alloc_header) / sizeof(unsigned int);
+
+ /* Reserve space for VDEC extension command and fill it */
+ vdec_ext = (struct vdec_ext_cmd *)cmd_buf;
+ cmd_buf += sizeof(struct vdec_ext_cmd) / sizeof(unsigned int);
+
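+ /*
+ * Some fields of the extension command (hdr_size, mem_to_reg_size) are
+ * patched further below, once the remaining commands are known.
+ */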
+ result = translation_pvdecsetup_vdecext(vdec_ext, dec_pict, pict_cmds,
+ str_unit,
+ pstr_config_data->vid_std,
+ parser_mode);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ vdec_ext->hdr_size = hdr_size;
+
+ /* Add VLC tables to control allocation, skip when VC1 */
+ if (pstr_config_data->vid_std != VDEC_STD_VC1 &&
+ dec_pict->vlc_idx_tables_bufinfo &&
+ dec_pict->vlc_idx_tables_bufinfo->cpu_virt) {
+ unsigned short *vlc_idx_tables = (unsigned short *)
+ dec_pict->vlc_idx_tables_bufinfo->cpu_virt;
+ /*
+ * Get count of elements in VLC idx table. Each element is made
+ * of 3 IMG_UINT16, see e.g. mpeg2_idx.c
+ */
+ unsigned int vlc_idx_count =
+ dec_pict->vlc_idx_tables_bufinfo->buf_size /
+ (3 * sizeof(unsigned short));
+
+ /* Add command to DMA VLC */
+ result = translation_pvdecsetup_vlcdma
+ (dec_pict->vlc_tables_bufinfo, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int));
+
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Add command to configure VLC tables */
+ result = translation_pvdecsetup_vlctables
+ ((unsigned short (*)[3])vlc_idx_tables, vlc_idx_count, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+ regs_offset->vec_offset);
+
+ if (result != IMG_SUCCESS)
+ return result;
+ }
+
+ /* Setup commands for standards other than HEVC */
+ if (pstr_config_data->vid_std != VDEC_STD_HEVC) {
+ translation_pvdec_setup_commands
+ (pict_cmds, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+ regs_offset->vdmc_cmd_offset);
+ }
+
+ /* Default to no Mem2Reg buffer; overridden below for HEVC */
+ vdec_ext->mem_to_reg_size = 0;
+
+#ifdef HAS_HEVC
+ if (pstr_config_data->vid_std == VDEC_STD_HEVC) {
+ result = translation_pvdec_setup_pvdec_commands
+ (picture, dec_pict, str_unit,
+ regs_offset, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+ &memto_reg_host_part, pict_cmds);
+ if (result != IMG_SUCCESS) {
+ pr_err("Failed to setup VDMC & VDEB firmware commands.");
+ return result;
+ }
+
+ /* Set size of MemToReg buffer in VDEC extension command */
+ VDEC_ASSERT(MEM_TO_REG_BUF_SIZE <
+ (MEM2REG_SIZE_BUF_TOTAL_MASK >> MEM2REG_SIZE_BUF_TOTAL_SHIFT));
+ VDEC_ASSERT(memto_reg_host_part <
+ (MEM2REG_SIZE_HOST_PART_MASK >> MEM2REG_SIZE_HOST_PART_SHIFT));
+
+ vdec_ext->mem_to_reg_size = (MEM_TO_REG_BUF_SIZE << MEM2REG_SIZE_BUF_TOTAL_SHIFT) |
+ (memto_reg_host_part << MEM2REG_SIZE_HOST_PART_SHIFT);
+
+ dec_pict->genc_id = picture->pict_res_int->seq_resint->genc_buf_id;
+ dec_pict->genc_bufs = picture->pict_res_int->seq_resint->genc_buffers;
+ }
+#endif
+ /* Finally mark end of commands */
+ *(cmd_buf++) = CMD_COMPLETION;
+
+ /* Print message for debugging */
+ {
+ int i;
+
+ for (i = 0; i < ((unsigned long)cmd_buf - ctrl_alloc) / sizeof(unsigned int); i++)
+ pr_debug("ctrl_alloc_buf[%d] == %08x\n", i,
+ ((unsigned int *)ctrl_alloc)[i]);
+ }
+ /* Transfer control allocation command to device memory */
+ dec_pict->ctrl_alloc_bytes = ((unsigned long)cmd_buf - ctrl_alloc);
+ dec_pict->ctrl_alloc_offset = dec_pict->ctrl_alloc_bytes;
+ dec_pict->operating_op = pict_cmds[VDECFW_CMD_OPERATING_MODE];
+
+ /*
+ * NOTE: Nothing related to tiling will be used.
+ * result = translation_ConfigureTiling(psStrUnit, psDecPict,
+ * psCoreProps);
+ */
+
+ return result;
+}
+
+int translation_fragment_prepare(struct dec_decpict *dec_pict,
+ struct lst_t *decpic_seg_list, int eop,
+ struct dec_pict_fragment *pict_fragment)
+{
+ int result;
+ unsigned int *cmd_buf;
+ struct vidio_ddbufinfo *batchmsg_bufinfo;
+ unsigned long ctrl_alloc;
+ unsigned long ctrl_alloc_end;
+
+ if (!dec_pict || !dec_pict->batch_msginfo ||
+ !decpic_seg_list || !pict_fragment)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ batchmsg_bufinfo = dec_pict->batch_msginfo->ddbuf_info;
+
+ ctrl_alloc = (unsigned long)batchmsg_bufinfo->cpu_virt +
+ dec_pict->ctrl_alloc_offset;
+ ctrl_alloc_end = (unsigned long)batchmsg_bufinfo->cpu_virt +
+ batchmsg_bufinfo->buf_size;
+
+ /*
+ * Setup initial simple bitstream configuration to be used by parser
+ * task
+ */
+ cmd_buf = (unsigned int *)ctrl_alloc;
+ result = translation_pvdec_adddma_transfers
+ (decpic_seg_list, &cmd_buf,
+ (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+ dec_pict, eop);
+
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Finally mark end of commands */
+ *(cmd_buf++) = CMD_COMPLETION;
+
+ /* Transfer control allocation command to device memory */
+ pict_fragment->ctrl_alloc_offset = dec_pict->ctrl_alloc_offset;
+ pict_fragment->ctrl_alloc_bytes =
+ ((unsigned long)cmd_buf - ctrl_alloc);
+
+ dec_pict->ctrl_alloc_offset += pict_fragment->ctrl_alloc_bytes;
+
+ return result;
+}
+#endif /* VDEC_USE_PVDEC */
diff --git a/drivers/media/platform/vxe-vxd/decoder/translation_api.h b/drivers/media/platform/vxe-vxd/decoder/translation_api.h
new file mode 100644
index 000000000000..43c570760d57
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/translation_api.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VDECDD translation APIs.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef __TRANSLATION_API_H__
+#define __TRANSLATION_API_H__
+
+#include "decoder.h"
+#include "hw_control.h"
+#include "vdecdd_defs.h"
+#include "vdec_defs.h"
+#include "vxd_props.h"
+
+/*
+ * This function submits a stream unit for translation
+ * into a control allocation buffer used in PVDEC operation.
+ */
+int translation_ctrl_alloc_prepare
+ (struct vdec_str_configdata *psstr_config_data,
+ struct vdecdd_str_unit *psstrunit,
+ struct dec_decpict *psdecpict,
+ const struct vxd_coreprops *core_props,
+ struct decoder_regsoffsets *regs_offset);
+
+/*
+ * This function prepares a picture fragment: it writes the bitstream-segment
+ * DMA commands into the control allocation buffer and terminates them with
+ * CMD_COMPLETION.
+ */
+int translation_fragment_prepare(struct dec_decpict *psdecpict,
+ struct lst_t *decpic_seg_list, int eop,
+ struct dec_pict_fragment *pict_fragment);
+
+#endif /* __TRANSLATION_API_H__ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdec_defs.h b/drivers/media/platform/vxe-vxd/decoder/vdec_defs.h
new file mode 100644
index 000000000000..34ba605b478e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdec_defs.h
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder common header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __VDEC_DEFS_H__
+#define __VDEC_DEFS_H__
+
+#include "img_mem.h"
+#include "img_pixfmts.h"
+#ifdef HAS_JPEG
+#include "jpegfw_data.h"
+#endif
+#include "pixel_api.h"
+#include "vdecfw_shared.h"
+
+#define VDEC_MAX_PANSCAN_WINDOWS 4
+#define VDEC_MB_DIMENSION (16)
+
+#define MAX_PICS_IN_SYSTEM (8)
+#define SEQUENCE_SLOTS (8)
+#define PPS_SLOTS (8)
+/* Only for HEVC */
+#define VPS_SLOTS (16)
+#define MAX_VPSS (MAX_PICS_IN_SYSTEM + VPS_SLOTS)
+#define MAX_SEQUENCES (MAX_PICS_IN_SYSTEM + SEQUENCE_SLOTS)
+#define MAX_PPSS (MAX_PICS_IN_SYSTEM + PPS_SLOTS)
+
+#define VDEC_H264_MAXIMUMVALUEOFCPB_CNT 32
+#define VDEC_H264_MVC_MAX_VIEWS (H264FW_MAX_NUM_VIEWS)
+
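+/*
+ * Note: VDEC_ASSERT() only raises a kernel warning (WARN_ON) and evaluates
+ * to 0; execution continues after a failed assertion.
+ */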
+#define VDEC_ASSERT(expected) ({ WARN_ON(!(expected)); 0; })
+
+#define VDEC_ALIGN_SIZE(_val, _alignment, val_type, align_type) \
+ ({ \
+ val_type val = _val; \
+ align_type alignment = _alignment; \
+ (((val) + (alignment) - 1) & ~((alignment) - 1)); })
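+/*
+ * Example (the alignment must be a power of two for the mask to work):
+ * VDEC_ALIGN_SIZE(1080, 16, unsigned int, unsigned int) == 1088.
+ */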
+
+/*
+ * This type defines the video standard.
+ * @brief VDEC Video Standards
+ */
+enum vdec_vid_std {
+ VDEC_STD_UNDEFINED = 0,
+ VDEC_STD_MPEG2,
+ VDEC_STD_MPEG4,
+ VDEC_STD_H263,
+ VDEC_STD_H264,
+ VDEC_STD_VC1,
+ VDEC_STD_AVS,
+ VDEC_STD_REAL,
+ VDEC_STD_JPEG,
+ VDEC_STD_VP6,
+ VDEC_STD_VP8,
+ VDEC_STD_SORENSON,
+ VDEC_STD_HEVC,
+ VDEC_STD_MAX,
+ VDEC_STD_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the bitstream format. It should be specified at the
+ * start of decoding.
+ * @brief VDEC Bitstream Format
+ */
+enum vdec_bstr_format {
+ VDEC_BSTRFORMAT_UNDEFINED = 0,
+ VDEC_BSTRFORMAT_ELEMENTARY,
+ VDEC_BSTRFORMAT_DEMUX_BYTESTREAM,
+ VDEC_BSTRFORMAT_DEMUX_SIZEDELIMITED,
+ VDEC_BSTRFORMAT_MAX,
+ VDEC_BSTRFORMAT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the type of payload. It may change with every buffer.
+ * @brief VDEC Bitstream Element Type
+ */
+enum vdec_bstr_element_type {
+ VDEC_BSTRELEMENT_UNDEFINED = 0,
+ VDEC_BSTRELEMENT_UNSPECIFIED,
+ VDEC_BSTRELEMENT_CODEC_CONFIG,
+ VDEC_BSTRELEMENT_PICTURE_DATA,
+ VDEC_BSTRELEMENT_MAX,
+ VDEC_BSTRELEMENT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains the stream configuration details.
+ * @brief VDEC Stream Configuration Information
+ */
+struct vdec_str_configdata {
+ enum vdec_vid_std vid_std;
+ enum vdec_bstr_format bstr_format;
+ unsigned int user_str_id;
+ unsigned char update_yuv;
+ unsigned char bandwidth_efficient;
+ unsigned char disable_mvc;
+ unsigned char full_scan;
+ unsigned char immediate_decode;
+ unsigned char intra_frame_closed_gop;
+};
+
+/*
+ * This type defines the buffer type categories.
+ * @brief Buffer Types
+ */
+enum vdec_buf_type {
+ VDEC_BUFTYPE_BITSTREAM,
+ VDEC_BUFTYPE_PICTURE,
+ VDEC_BUFTYPE_ALL,
+ VDEC_BUFTYPE_MAX,
+ VDEC_BUFTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains information related to a picture plane.
+ * @brief Picture Plane Information
+ */
+struct vdec_plane_info {
+ unsigned int offset;
+ unsigned int stride;
+ unsigned int size;
+};
+
+/*
+ * This structure describes the VDEC picture dimensions.
+ * @brief VDEC Picture Size
+ */
+struct vdec_pict_size {
+ unsigned int width;
+ unsigned int height;
+};
+
+/*
+ * This enumeration defines the colour plane indices.
+ * @brief Colour Plane Indices
+ */
+enum vdec_color_planes {
+ VDEC_PLANE_VIDEO_Y = 0,
+ VDEC_PLANE_VIDEO_YUV = 0,
+ VDEC_PLANE_VIDEO_U = 1,
+ VDEC_PLANE_VIDEO_UV = 1,
+ VDEC_PLANE_VIDEO_V = 2,
+ VDEC_PLANE_VIDEO_A = 3,
+ VDEC_PLANE_LIGHT_R = 0,
+ VDEC_PLANE_LIGHT_G = 1,
+ VDEC_PLANE_LIGHT_B = 2,
+ VDEC_PLANE_INK_C = 0,
+ VDEC_PLANE_INK_M = 1,
+ VDEC_PLANE_INK_Y = 2,
+ VDEC_PLANE_INK_K = 3,
+ VDEC_PLANE_MAX = 4,
+ VDEC_PLANE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure describes the rendered region of a picture buffer (i.e. where
+ * the image data is written).
+ * @brief Picture Buffer Render Information
+ */
+struct vdec_pict_rendinfo {
+ unsigned int rendered_size;
+ struct vdec_plane_info plane_info[VDEC_PLANE_MAX];
+ unsigned int stride_alignment;
+ struct vdec_pict_size rend_pict_size;
+};
+
+/*
+ * This structure contains information required to configure the picture
+ * buffers.
+ * @brief Picture Buffer Configuration
+ */
+struct vdec_pict_bufconfig {
+ unsigned int coded_width;
+ unsigned int coded_height;
+ enum img_pixfmt pixel_fmt;
+ unsigned int stride[IMG_MAX_NUM_PLANES];
+ unsigned int stride_alignment;
+ unsigned char byte_interleave;
+ unsigned int buf_size;
+ unsigned char packed;
+ unsigned int chroma_offset[IMG_MAX_NUM_PLANES];
+ unsigned int plane_size[IMG_MAX_NUM_PLANES];
+};
+
+/*
+ * This structure describes the VDEC Display Rectangle.
+ * @brief VDEC Display Rectangle
+ */
+struct vdec_rect {
+ unsigned int top_offset;
+ unsigned int left_offset;
+ unsigned int width;
+ unsigned int height;
+};
+
+/*
+ * This structure contains the Color Space Description that may be present
+ * in SequenceDisplayExtn (MPEG2), VUI parameters (H264), Visual Object (MPEG4)
+ * for the application to use.
+ * @brief Stream Color Space Properties
+ */
+struct vdec_color_space_desc {
+ unsigned char is_present;
+ unsigned char color_primaries;
+ unsigned char transfer_characteristics;
+ unsigned char matrix_coefficients;
+};
+
+/*
+ * This structure contains common (standard agnostic) sequence header
+ * information, which is required for image buffer allocation and display.
+ * @brief Sequence Header Information (common)
+ */
+struct vdec_comsequ_hdrinfo {
+ unsigned int codec_profile;
+ unsigned int codec_level;
+ unsigned int bitrate;
+ long frame_rate;
+ unsigned int frame_rate_num;
+ unsigned int frame_rate_den;
+ unsigned int aspect_ratio_num;
+ unsigned int aspect_ratio_den;
+ unsigned char interlaced_frames;
+ struct pixel_pixinfo pixel_info;
+ struct vdec_pict_size max_frame_size;
+ unsigned int max_ref_frame_num;
+ struct vdec_pict_size frame_size;
+ unsigned char field_codec_mblocks;
+ unsigned int min_pict_buf_num;
+ unsigned char picture_reordering;
+ unsigned char post_processing;
+ struct vdec_rect orig_display_region;
+ struct vdec_rect raw_display_region;
+ unsigned int num_views;
+ unsigned int max_reorder_picts;
+ unsigned char separate_chroma_planes;
+ unsigned char not_dpb_flush;
+ struct vdec_color_space_desc color_space_info;
+};
+
+/*
+ * This structure contains the standard specific codec configuration
+ * @brief Codec configuration
+ */
+struct vdec_codec_config {
+ unsigned int default_height;
+ unsigned int default_width;
+};
+
+/*
+ * This structure describes the decoded picture attributes (relative to the
+ * encoded, where necessary, e.g. rotation angle).
+ * @brief Stream Output Configuration
+ */
+struct vdec_str_opconfig {
+ struct pixel_pixinfo pixel_info;
+ unsigned char force_oold;
+};
+
+/*
+ * This type defines the "play" mode.
+ * @brief Play Mode
+ */
+enum vdec_play_mode {
+ VDEC_PLAYMODE_PARSE_ONLY,
+ VDEC_PLAYMODE_NORMAL_DECODE,
+ VDEC_PLAYMODE_MAX,
+ VDEC_PLAYMODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the bitstream processing error info.
+ * @brief Bitstream Processing Error Info
+ */
+struct vdec_bstr_err_info {
+ unsigned int sequence_err;
+ unsigned int picture_err;
+ unsigned int other_err;
+};
+
+/*
+ * This structure describes the VDEC Pan Scan Window.
+ * @brief VDEC Pan Scan Window
+ */
+struct vdec_window {
+ unsigned int ui32topoffset;
+ unsigned int ui32leftoffset;
+ unsigned int ui32width;
+ unsigned int ui32height;
+};
+
+/*
+ * This structure contains the VDEC picture display properties.
+ * @brief VDEC Picture Display Properties
+ */
+struct vdec_pict_disp_info {
+ struct vdec_rect enc_disp_region;
+ struct vdec_rect disp_region;
+ struct vdec_rect raw_disp_region;
+ unsigned char top_fld_first;
+ unsigned char out_top_fld_first;
+ unsigned int max_frm_repeat;
+ unsigned int repeat_first_fld;
+ unsigned int num_pan_scan_windows;
+ struct vdec_window pan_scan_windows[VDEC_MAX_PANSCAN_WINDOWS];
+};
+
+/*
+ * This structure contains VXD hardware signatures.
+ * @brief VXD Hardware signatures
+ */
+struct vdec_pict_hwcrc {
+ unsigned char first_fld_rcvd;
+ unsigned int crc_vdmc_pix_recon;
+ unsigned int vdeb_sysmem_wrdata;
+};
+
+struct vdec_features {
+ unsigned char valid;
+ unsigned char mpeg2;
+ unsigned char mpeg4;
+ unsigned char h264;
+ unsigned char vc1;
+ unsigned char avs;
+ unsigned char real;
+ unsigned char jpeg;
+ unsigned char vp6;
+ unsigned char vp8;
+ unsigned char hevc;
+ unsigned char hd;
+ unsigned char rotation;
+ unsigned char scaling;
+ unsigned char scaling_oold;
+ unsigned char scaling_extnd_strides;
+};
+
+/*
+ * This type defines the auxiliary info for picture queued for decoding.
+ * @brief Auxiliary Decoding Picture Info
+ */
+struct vdec_dec_pict_auxinfo {
+ unsigned int seq_hdr_id;
+ unsigned int pps_id;
+ unsigned int second_pps_id;
+ unsigned char not_decoded;
+};
+
+/*
+ * This type defines the decoded picture state.
+ * @brief Decoded Picture State
+ */
+enum vdec_pict_state {
+ VDEC_PICT_STATE_NOT_DECODED,
+ VDEC_PICT_STATE_DECODED,
+ VDEC_PICT_STATE_TERMINATED,
+ VDEC_PICT_STATE_MAX,
+ VDEC_PICT_STATE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the container for various picture tags.
+ * @brief Picture Tag Container
+ */
+struct vdec_pict_tag_container {
+ enum img_buffer_type pict_type;
+ unsigned long long pict_tag_param;
+ unsigned long long sideband_info;
+ struct vdec_pict_hwcrc pict_hwcrc;
+};
+
+/*
+ * This structure describes raw bitstream data chunk.
+ * @brief Raw Bitstream Data Chunk
+ */
+struct vdec_raw_bstr_data {
+ unsigned int size;
+ unsigned int bit_offset;
+ unsigned char *data;
+ struct vdec_raw_bstr_data *next;
+};
+
+/*
+ * This type defines the supplementary picture data.
+ * @brief Supplementary Picture Data
+ */
+struct vdec_pict_supl_data {
+ struct vdec_raw_bstr_data *raw_vui_data;
+ struct vdec_raw_bstr_data *raw_sei_list_first_fld;
+ struct vdec_raw_bstr_data *raw_sei_list_second_fld;
+ union {
+ struct h264_pict_supl_data {
+ unsigned char nal_ref_idc;
+ unsigned short frame_num;
+ } data;
+ };
+};
+
+/*
+ * This structure contains decoded picture information for display.
+ * @brief Decoded Picture Information
+ */
+struct vdec_dec_pict_info {
+ enum vdec_pict_state pict_state;
+ enum img_buffer_type buf_type;
+ unsigned char interlaced_flds;
+ unsigned int err_flags;
+ unsigned int err_level;
+ struct vdec_pict_tag_container first_fld_tag_container;
+ struct vdec_pict_tag_container second_fld_tag_container;
+ struct vdec_str_opconfig op_config;
+ struct vdec_pict_rendinfo rend_info;
+ struct vdec_pict_disp_info disp_info;
+ unsigned int last_in_seq;
+ unsigned int decode_id;
+ unsigned int id_for_hwcrc_chk;
+ unsigned short view_id;
+ unsigned int timestamp;
+ struct vdec_pict_supl_data pict_supl_data;
+};
+
+struct vdec_pict_rend_config {
+ struct vdec_pict_size coded_pict_size;
+ unsigned char packed;
+ unsigned char byte_interleave;
+ unsigned int stride_alignment;
+};
+
+/*
+ * This structure contains unsupported feature flags.
+ * @brief Unsupported Feature Flags
+ */
+struct vdec_unsupp_flags {
+ unsigned int str_cfg;
+ unsigned int str_opcfg;
+ unsigned int op_bufcfg;
+ unsigned int seq_hdr;
+ unsigned int pict_hdr;
+};
+
+/*
+ * This type defines the error flags: errors in parsing, errors in decoding, etc.
+ * @brief VDEC parsing/decoding error Information
+ */
+enum vdec_error_type {
+ VDEC_ERROR_NONE = (0),
+ VDEC_ERROR_SR_ERROR = (1 << 0),
+ VDEC_ERROR_FEHW_TIMEOUT = (1 << 1),
+ VDEC_ERROR_FEHW_DECODE = (1 << 2),
+ VDEC_ERROR_BEHW_TIMEOUT = (1 << 3),
+ VDEC_ERROR_SERVICE_TIMER_EXPIRY = (1 << 4),
+ VDEC_ERROR_MISSING_REFERENCES = (1 << 5),
+ VDEC_ERROR_MMU_FAULT = (1 << 6),
+ VDEC_ERROR_DEVICE = (1 << 7),
+ VDEC_ERROR_CORRUPTED_REFERENCE = (1 << 8),
+ VDEC_ERROR_MMCO = (1 << 9),
+ VDEC_ERROR_MBS_DROPPED = (1 << 10),
+ VDEC_ERROR_MAX = (1 << 11),
+ VDEC_ERROR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains information relating to a buffer.
+ * @brief Buffer Information
+ */
+struct vdec_buf_info {
+ void *cpu_linear_addr;
+ unsigned int buf_id;
+ struct vdec_pict_bufconfig pictbuf_cfg;
+ int fd;
+ /* The following are fields used internally within VDEC... */
+ unsigned int buf_size;
+ enum sys_emem_attrib mem_attrib;
+ void *buf_alloc_handle;
+ void *buf_map_handle;
+ unsigned long dma_addr;
+};
+
+#ifdef HAS_JPEG
+/*
+ * This structure contains JPEG sequence header information.
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG sequence header Information
+ */
+struct vdec_jpeg_sequ_hdr_info {
+ /* total component in jpeg */
+ unsigned char num_component;
+ /* precision */
+ unsigned char precision;
+};
+
+/*
+ * This structure contains JPEG start of frame segment header
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG SOF header Information
+ */
+struct vdec_jpeg_sof_component_hdr {
+ /* component identifier. */
+ unsigned char identifier;
+ /* Horizontal scaling. */
+ unsigned char horz_factor;
+ /* Vertical scaling */
+ unsigned char vert_factor;
+ /* Quantisation tables */
+ unsigned char quant_table;
+};
+
+/*
+ * This structure contains JPEG start of scan segment header
+ * NOTE: Should only contain JPEG specific information.
+ * @brief JPEG SOS header Information
+ */
+struct vdec_jpeg_sos_component_hdr {
+ /* component identifier. */
+ unsigned char component_index;
+ /* Huffman DC tables. */
+ unsigned char dc_table;
+ /* Huffman AC table */
+ unsigned char ac_table;
+};
+
+struct vdec_jpeg_pict_hdr_info {
+ /* Start of frame component header */
+ struct vdec_jpeg_sof_component_hdr sof_comp[JPEG_VDEC_MAX_COMPONENTS];
+ /* Start of Scan component header */
+ struct vdec_jpeg_sos_component_hdr sos_comp[JPEG_VDEC_MAX_COMPONENTS];
+ /* Huffman tables */
+ struct vdec_jpeg_huffman_tableinfo huff_tables[JPEG_VDEC_TABLE_CLASS_NUM]
+ [JPEG_VDEC_MAX_SETS_HUFFMAN_TABLES];
+ /* Quantization tables */
+ struct vdec_jpeg_de_quant_tableinfo quant_tables[JPEG_VDEC_MAX_QUANT_TABLES];
+ /* Number of MCU in the restart interval */
+ unsigned short interval;
+ unsigned int test;
+};
+#endif
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.c b/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.c
new file mode 100644
index 000000000000..384ce840b4dc
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VDEC MMU Functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include "img_dec_common.h"
+#include "lst.h"
+#include "talmmu_api.h"
+#include "vdec_defs.h"
+#include "vdec_mmu_wrapper.h"
+#include "vxd_dec.h"
+
+#define GUARD_BAND 0x1000
+
+struct mmuheap {
+ unsigned char *name;
+ enum mmu_eheap_id heap_id;
+ enum talmmu_heap_type heap_type;
+ unsigned int start_offset;
+ unsigned int size;
+ unsigned char *mem_space;
+ unsigned char use_guard_band;
+ unsigned char image_buffers;
+};
+
+static const struct mmuheap mmu_heaps[MMU_HEAP_MAX] = {
+ { "Image untiled", MMU_HEAP_IMAGE_BUFFERS_UNTILED,
+ TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_UNTILED_START,
+ PVDEC_HEAP_UNTILED_SIZE, "MEMBE", 1, 1 },
+
+ { "Bitstream", MMU_HEAP_BITSTREAM_BUFFERS,
+ TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_BITSTREAM_START,
+ PVDEC_HEAP_BITSTREAM_SIZE, "MEMDMAC_02", 1, 0 },
+
+ { "Stream", MMU_HEAP_STREAM_BUFFERS,
+ TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_STREAM_START,
+ PVDEC_HEAP_STREAM_SIZE, "MEM", 1, 0 },
+};
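+
+/*
+ * When use_guard_band is set, mmu_devmem_context_create() applies a
+ * GUARD_BAND (0x1000 byte) heap option, presumably leaving an unmapped
+ * gap after each allocation so that overruns fault early.
+ */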
+
+/*
+ * @Heap ID
+ * @Heap type
+ * @Heap flags
+ * @Memory space name
+ * @Start address (virtual)
+ * @Size of heap, in bytes
+ */
+static struct talmmu_heap_info heap_info = {
+ MMU_HEAP_IMAGE_BUFFERS_UNTILED,
+ TALMMU_HEAP_PERCONTEXT,
+ TALMMU_HEAPFLAGS_NONE,
+ "MEMBE",
+ 0,
+ 0,
+};
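+
+/*
+ * heap_info is a reusable template: mmu_device_create() rewrites its
+ * fields from the mmu_heaps[] table before each talmmu_devmem_heap_add()
+ * call.
+ */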
+
+/*
+ * This structure contains the device context.
+ * @brief VDECDD MMU Device Context
+ * @devmem_template_hndl: Handle for MMU template.
+ * @devmem_ctx_hndl: Handle for MMU context.
+ * @str_list: List of streams.
+ */
+struct mmu_dev_context {
+ void *devmem_template_hndl;
+ void *devmem_ctx_hndl;
+ struct lst_t str_list;
+ unsigned int ctx_id;
+ unsigned int next_ctx_id;
+};
+
+/*
+ * This structure contains the stream context.
+ * @brief VDECDD MMU Stream Context
+ * @LST_LINK: List link (allows the structure to be part of a MeOS list).
+ * @devmem_ctx_hndl: Handle for MMU context.
+ * @dev_ctx: Pointer to device context.
+ * @ctx_id: MMU context Id.
+ * @km_str_id: Stream ID used in communication with the new KM interface.
+ */
+struct mmu_str_context {
+ void **link;
+ void *devmem_ctx_hndl;
+ struct mmu_dev_context *dev_ctx;
+ unsigned int ctx_id;
+ void *ptd_memspace_hndl;
+ unsigned int int_reg_num;
+ unsigned int km_str_id;
+ struct vxd_dec_ctx *vxd_dec_context;
+};
+
+static unsigned int set_attributes(enum sys_emem_attrib mem_attrib)
+{
+ unsigned int attrib = 0;
+
+ if (mem_attrib & SYS_MEMATTRIB_CACHED)
+ attrib |= MEM_ATTR_CACHED;
+
+ if (mem_attrib & SYS_MEMATTRIB_UNCACHED)
+ attrib |= MEM_ATTR_UNCACHED;
+
+ if (mem_attrib & SYS_MEMATTRIB_WRITECOMBINE)
+ attrib |= MEM_ATTR_WRITECOMBINE;
+
+ if (mem_attrib & SYS_MEMATTRIB_SECURE)
+ attrib |= MEM_ATTR_SECURE;
+
+ return attrib;
+}
+
+/*
+ * @Function mmu_devmem_context_create
+ */
+static int mmu_devmem_context_create(struct mmu_dev_context *dev_ctx, void **mmu_ctx_hndl)
+{
+ int result;
+ void *devmem_heap_hndl;
+ union talmmu_heap_options heap_opt1;
+ unsigned int i;
+ unsigned char use_guardband;
+ enum talmmu_heap_option_id heap_option_id;
+
+ dev_ctx->next_ctx_id++;
+
+ /* Create a context from the template */
+ result = talmmu_devmem_ctx_create(dev_ctx->devmem_template_hndl, dev_ctx->next_ctx_id,
+ mmu_ctx_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Apply options to heaps. */
+ heap_opt1.guardband_opt.guardband = GUARD_BAND;
+
+ for (i = 0; i < MMU_HEAP_MAX; i++) {
+ result = talmmu_get_heap_handle(mmu_heaps[i].heap_id, *mmu_ctx_hndl,
+ &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ use_guardband = mmu_heaps[i].use_guard_band;
+ heap_option_id = TALMMU_HEAP_OPT_ADD_GUARD_BAND;
+ if (use_guardband)
+ talmmu_devmem_heap_options(devmem_heap_hndl, heap_option_id, heap_opt1);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_device_create
+ */
+int mmu_device_create(enum mmu_etype mmu_type_arg,
+ unsigned int ptd_alignment,
+ void **mmudev_handle)
+{
+ int result = IMG_SUCCESS;
+ enum talmmu_mmu_type talmmu_type =
+ TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR;
+ unsigned int i;
+ struct mmu_dev_context *dev_ctx;
+ struct talmmu_devmem_info dev_mem_info;
+
+ /* Set the TAL MMU type. */
+ switch (mmu_type_arg) {
+ case MMU_TYPE_32BIT:
+ talmmu_type = TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR;
+ break;
+
+ case MMU_TYPE_36BIT:
+ talmmu_type = TALMMU_MMUTYPE_4K_PAGES_36BIT_ADDR;
+ break;
+
+ case MMU_TYPE_40BIT:
+ talmmu_type = TALMMU_MMUTYPE_4K_PAGES_40BIT_ADDR;
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Allocate a device context structure */
+ dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ /* Initialise stream list. */
+ lst_init(&dev_ctx->str_list);
+
+ /* Initialise TALMMU. */
+ result = talmmu_init();
+ if (result != IMG_SUCCESS)
+ goto error_tal_init;
+
+ dev_mem_info.device_id = 0;
+ dev_mem_info.mmu_type = talmmu_type;
+ dev_mem_info.dev_flags = TALMMU_DEVFLAGS_NONE;
+ dev_mem_info.pagedir_memspace_name = "MEM";
+ dev_mem_info.pagetable_memspace_name = NULL;
+ dev_mem_info.page_size = DEV_MMU_PAGE_SIZE;
+ dev_mem_info.ptd_alignment = ptd_alignment;
+
+ result = talmmu_devmem_template_create(&dev_mem_info, &dev_ctx->devmem_template_hndl);
+ if (result != IMG_SUCCESS)
+ goto error_tal_template;
+
+ /* Add heaps to template */
+ for (i = 0; i < MMU_HEAP_MAX; i++) {
+ heap_info.heap_id = mmu_heaps[i].heap_id;
+ heap_info.heap_type = mmu_heaps[i].heap_type;
+ heap_info.memspace_name = mmu_heaps[i].name;
+ heap_info.size = mmu_heaps[i].size;
+ heap_info.basedev_virtaddr = mmu_heaps[i].start_offset;
+
+ result = talmmu_devmem_heap_add(dev_ctx->devmem_template_hndl, &heap_info);
+ if (result != IMG_SUCCESS)
+ goto error_tal_heap;
+ }
+
+ /* Create the device context. */
+ result = mmu_devmem_context_create(dev_ctx, &dev_ctx->devmem_ctx_hndl);
+ if (result != IMG_SUCCESS)
+ goto error_mmu_context;
+
+ dev_ctx->ctx_id = dev_ctx->next_ctx_id;
+
+ /* Return the device context. */
+ *mmudev_handle = dev_ctx;
+
+ return IMG_SUCCESS;
+
+ /* Roll back in case of errors. */
+error_mmu_context:
+error_tal_heap:
+ talmmu_devmem_template_destroy(dev_ctx->devmem_template_hndl);
+error_tal_template:
+ talmmu_deinit();
+error_tal_init:
+ kfree(dev_ctx);
+ return result;
+}
+
+/*
+ * @Function mmu_device_destroy
+ */
+int mmu_device_destroy(void *mmudev_handle)
+{
+ struct mmu_dev_context *dev_ctx = mmudev_handle;
+ unsigned int result;
+ struct mmu_str_context *str_ctx;
+
+ /* Validate inputs. */
+ if (!mmudev_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Destroy all streams associated with the device. */
+ str_ctx = lst_first(&dev_ctx->str_list);
+ while (str_ctx) {
+ result = mmu_stream_destroy(str_ctx);
+ if (result != IMG_SUCCESS)
+ return result;
+ /* See if there are more streams. */
+ str_ctx = lst_first(&dev_ctx->str_list);
+ }
+
+ /* Destroy the device context */
+ result = talmmu_devmem_ctx_destroy(dev_ctx->devmem_ctx_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Destroy the template. */
+ result = talmmu_devmem_template_destroy(dev_ctx->devmem_template_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ talmmu_deinit();
+
+ kfree(dev_ctx);
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_stream_create
+ * @Description
+ * This function is used to create and initialise the MMU stream context.
+ * @Input mmudev_handle : The MMU device handle.
+ * @Input km_str_id : Stream Id used in communication with KM driver.
+ * @Output mmu_str_hndl : A pointer used to return the MMU stream
+ * handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_create(void *mmudev_handle,
+ unsigned int km_str_id,
+ void *vxd_dec_ctx_arg,
+ void **mmu_str_hndl)
+{
+ struct mmu_dev_context *dev_ctx = mmudev_handle;
+ struct mmu_str_context *str_ctx;
+ int res;
+
+ /* Validate inputs. */
+ if (!mmudev_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Allocate a stream context structure */
+ str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+ if (!str_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ str_ctx->km_str_id = km_str_id;
+ str_ctx->dev_ctx = dev_ctx;
+ str_ctx->int_reg_num = 32;
+ str_ctx->vxd_dec_context = (struct vxd_dec_ctx *)vxd_dec_ctx_arg;
+
+ /* Create a stream context. */
+ res = mmu_devmem_context_create(dev_ctx, &str_ctx->devmem_ctx_hndl);
+ if (res != IMG_SUCCESS) {
+ kfree(str_ctx);
+ return res;
+ }
+
+ str_ctx->ctx_id = dev_ctx->next_ctx_id;
+
+ /* Add stream to list. */
+ lst_add(&dev_ctx->str_list, str_ctx);
+
+ *mmu_str_hndl = str_ctx;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_stream_destroy
+ * @Description
+ * This function is used to destroy the MMU stream context.
+ * NOTE: Destroy automatically frees any memory allocated using
+ * mmu_stream_alloc().
+ * @Input mmu_str_hndl : The MMU stream handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_destroy(void *mmu_str_hndl)
+{
+ struct mmu_str_context *str_ctx = mmu_str_hndl;
+ int res;
+
+ /* Validate inputs. */
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Remove stream from list. */
+ lst_remove(&str_ctx->dev_ctx->str_list, str_ctx);
+
+ /* Destroy the device context */
+ res = talmmu_devmem_ctx_destroy(str_ctx->devmem_ctx_hndl);
+ if (res != IMG_SUCCESS)
+ return res;
+
+ kfree(str_ctx);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_alloc
+ */
+static int mmu_alloc(void *devmem_ctx_hndl,
+ struct vxd_dec_ctx *vxd_dec_ctx_arg,
+ enum mmu_eheap_id heap_id,
+ unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib,
+ unsigned int size,
+ unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ int result;
+ void *devmem_heap_hndl;
+ struct vxd_free_data free_data;
+ struct vxd_dec_ctx *ctx;
+ struct vxd_dev *vxd;
+ struct vxd_alloc_data alloc_data;
+ unsigned int flags;
+
+ if (!devmem_ctx_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
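+ /*
+ * (Equivalent to round_up(size, HOST_MMU_PAGE_SIZE), since the host
+ * page size is a power of two.)
+ */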
+
+ /* Allocate memory */
+ ctx = vxd_dec_ctx_arg;
+ vxd = ctx->dev;
+
+ alloc_data.heap_id = mem_heap_id;
+ alloc_data.size = ddbuf_info->buf_size;
+
+ alloc_data.attributes = set_attributes(mem_attrib);
+
+ result = img_mem_alloc(vxd->dev, ctx->mem_ctx, alloc_data.heap_id, alloc_data.size,
+ (enum mem_attr)alloc_data.attributes,
+ (int *)&ddbuf_info->buff_id);
+ if (result != IMG_SUCCESS)
+ goto error_alloc;
+
+ ddbuf_info->is_internal = 1;
+
+ if (mem_attrib & SYS_MEMATTRIB_SECURE) {
+ ddbuf_info->cpu_virt = NULL;
+ } else {
+ /* Map the buffer to CPU */
+ result = img_mem_map_km(ctx->mem_ctx, ddbuf_info->buff_id);
+ if (result) {
+ dev_err(vxd->dev, "%s: failed to map buf to cpu!(%d)\n", __func__, result);
+ goto error_get_heap_handle;
+ }
+ ddbuf_info->cpu_virt = img_mem_get_kptr(ctx->mem_ctx, ddbuf_info->buff_id);
+ }
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, devmem_ctx_hndl, &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ goto error_get_heap_handle;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(devmem_ctx_hndl, devmem_heap_hndl, size, alignment,
+ &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ goto error_mem_map_ext_mem;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ goto error_get_dev_virt_addr;
+
+ flags = VXD_MAP_FLAG_NONE;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+ flags |= VXD_MAP_FLAG_READ_ONLY;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+ flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+ result = vxd_map_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id,
+ ddbuf_info->dev_virt,
+ flags);
+
+ if (result != IMG_SUCCESS)
+ goto error_map_dev;
+
+ return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+ talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ ddbuf_info->hndl_memory = NULL;
+error_mem_map_ext_mem:
+error_get_heap_handle:
+ free_data.buf_id = ddbuf_info->buff_id;
+ img_mem_free(ctx->mem_ctx, free_data.buf_id);
+error_alloc:
+ return result;
+}
+
+/*
+ * @Function mmu_stream_alloc
+ */
+int mmu_stream_alloc(void *mmu_str_hndl,
+ enum mmu_eheap_id heap_id,
+ unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib,
+ unsigned int size,
+ unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct mmu_str_context *str_ctx =
+ (struct mmu_str_context *)mmu_str_hndl;
+ int result;
+
+ /* Validate inputs. */
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+ case MMU_HEAP_BITSTREAM_BUFFERS:
+ case MMU_HEAP_STREAM_BUFFERS:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /* Allocate device memory. */
+ result = mmu_alloc(str_ctx->devmem_ctx_hndl, str_ctx->vxd_dec_context, heap_id, mem_heap_id,
+ mem_attrib, size, alignment, ddbuf_info);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_stream_map_ext_sg
+ */
+int mmu_stream_map_ext_sg(void *mmu_str_hndl,
+ enum mmu_eheap_id heap_id,
+ void *sgt,
+ unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info,
+ unsigned int *buff_id)
+{
+ struct mmu_str_context *str_ctx =
+ (struct mmu_str_context *)mmu_str_hndl;
+ int result;
+ void *devmem_heap_hndl;
+ unsigned int flags;
+
+ struct vxd_dec_ctx *ctx = str_ctx->vxd_dec_context;
+ struct vxd_dev *vxd = ctx->dev;
+
+ /* Validate inputs. */
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+ case MMU_HEAP_BITSTREAM_BUFFERS:
+ case MMU_HEAP_STREAM_BUFFERS:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!str_ctx->devmem_ctx_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+ result = img_mem_import(vxd->dev, ctx->mem_ctx, ddbuf_info->buf_size,
+ (enum mem_attr)set_attributes(mem_attrib),
+ (int *)buff_id);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ ddbuf_info->buff_id = *buff_id;
+ ddbuf_info->is_internal = 0;
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /*
+ * Keep the caller-supplied, page-aligned CPU linear address; secure
+ * buffers have no CPU mapping.
+ */
+ if (mem_attrib & SYS_MEMATTRIB_SECURE)
+ ddbuf_info->cpu_virt = NULL;
+ else
+ ddbuf_info->cpu_virt = cpu_linear_addr;
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, str_ctx->devmem_ctx_hndl, &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(str_ctx->devmem_ctx_hndl, devmem_heap_hndl, size,
+ alignment,
+ &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ goto error_get_dev_virt_addr;
+
+ /* Map memory to the device */
+ flags = VXD_MAP_FLAG_NONE;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+ flags |= VXD_MAP_FLAG_READ_ONLY;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+ flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+ result = vxd_map_buffer_sg(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id, sgt,
+ ddbuf_info->dev_virt,
+ flags);
+
+ if (result != IMG_SUCCESS)
+ goto error_map_dev;
+
+ return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+ talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ ddbuf_info->hndl_memory = NULL;
+ return result;
+}
+
+/*
+ * @Function mmu_stream_map_ext
+ */
+int mmu_stream_map_ext(void *mmu_str_hndl,
+ enum mmu_eheap_id heap_id,
+ unsigned int buff_id,
+ unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct mmu_str_context *str_ctx =
+ (struct mmu_str_context *)mmu_str_hndl;
+ int result;
+ void *devmem_heap_hndl;
+ struct vxd_dec_ctx *ctx;
+ struct vxd_dev *vxd;
+ unsigned int flags;
+
+ /* Validate inputs. */
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+ case MMU_HEAP_BITSTREAM_BUFFERS:
+ case MMU_HEAP_STREAM_BUFFERS:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+ ddbuf_info->buff_id = buff_id;
+ ddbuf_info->is_internal = 0;
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Keep the caller-supplied, page-aligned CPU linear address. */
+ ddbuf_info->cpu_virt = cpu_linear_addr;
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, str_ctx->devmem_ctx_hndl, &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(str_ctx->devmem_ctx_hndl, devmem_heap_hndl, size,
+ alignment,
+ &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /*
+ * Map device memory (allocated from outside VDEC)
+ * into the stream PTD.
+ */
+ ctx = str_ctx->vxd_dec_context;
+ vxd = ctx->dev;
+
+ flags = VXD_MAP_FLAG_NONE;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+ flags |= VXD_MAP_FLAG_READ_ONLY;
+
+ if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+ flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+ result = vxd_map_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id,
+ ddbuf_info->dev_virt,
+ flags);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function mmu_free_mem
+ */
+int mmu_free_mem(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+ int tmp_result;
+ int result = IMG_SUCCESS;
+ struct vxd_dec_ctx *ctx;
+ struct vxd_dev *vxd;
+
+ struct mmu_str_context *str_ctx =
+ (struct mmu_str_context *)mmustr_hndl;
+
+ /* Validate inputs. */
+ if (!ddbuf_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Unmap the memory mapped to the device */
+ ctx = str_ctx->vxd_dec_context;
+ vxd = ctx->dev;
+
+ tmp_result = vxd_unmap_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id);
+ if (tmp_result != IMG_SUCCESS)
+ result = tmp_result;
+
+ /*
+ * Unmapping the memory mapped to the device - done
+ * Free the memory.
+ */
+ tmp_result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ if (tmp_result != IMG_SUCCESS)
+ result = tmp_result;
+
+ if (ddbuf_info->is_internal) {
+ struct vxd_free_data free_data = { ddbuf_info->buff_id };
+
+ img_mem_free(ctx->mem_ctx, free_data.buf_id);
+ }
+
+ return result;
+}
+
+/*
+ * @Function mmu_free_mem_sg
+ */
+int mmu_free_mem_sg(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+ int tmp_result;
+ int result = IMG_SUCCESS;
+ struct vxd_dec_ctx *ctx;
+ struct vxd_dev *vxd;
+ struct vxd_free_data free_data;
+
+ struct mmu_str_context *str_ctx =
+ (struct mmu_str_context *)mmustr_hndl;
+
+ /* Validate inputs. */
+ if (!ddbuf_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ if (!str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ free_data.buf_id = ddbuf_info->buff_id;
+ /* Unmap the memory mapped to the device */
+ ctx = str_ctx->vxd_dec_context;
+ vxd = ctx->dev;
+
+ tmp_result = vxd_unmap_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id);
+ if (tmp_result != IMG_SUCCESS)
+ result = tmp_result;
+
+ /*
+ * Unmapping the memory mapped to the device - done
+ * Free the memory.
+ */
+ tmp_result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ if (tmp_result != IMG_SUCCESS)
+ result = tmp_result;
+
+ /*
+ * for external mem manager buffers, just cleanup the idr list and
+ * buffer objects
+ */
+ img_mem_free_bufid(ctx->mem_ctx, free_data.buf_id);
+
+ return result;
+}
+
+/*
+ * @Function MMU_GetHeap
+ */
+int mmu_get_heap(unsigned int image_stride, enum mmu_eheap_id *heap_id)
+{
+ unsigned int i;
+ unsigned char found = FALSE;
+
+ for (i = 0; i < MMU_HEAP_MAX; i++) {
+ if (mmu_heaps[i].image_buffers) {
+ *heap_id = mmu_heaps[i].heap_id;
+ found = TRUE;
+ break;
+ }
+ }
+
+ VDEC_ASSERT(found);
+ if (!found)
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+ return IMG_SUCCESS;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.h b/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.h
new file mode 100644
index 000000000000..50bed98240a6
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdec_mmu_wrapper.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VDEC MMU Functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "img_mem.h"
+#include "lst.h"
+#include "mmu_defs.h"
+#include "vid_buf.h"
+
+#ifndef _VXD_MMU_H_
+#define _VXD_MMU_H_
+
+/* Page size of the device MMU */
+#define DEV_MMU_PAGE_SIZE (0x1000)
+/* Page alignment of the device MMU */
+#define DEV_MMU_PAGE_ALIGNMENT (0x1000)
+
+#define HOST_MMU_PAGE_SIZE PAGE_SIZE
+
+/*
+ * @Function mmu_stream_get_ptd_handle
+ * @Description
+ * This function is used to obtain the stream PTD (Page Table Directory) handle.
+ * @Input mmu_str_handle : MMU stream handle.
+ * @Output str_ptd : Pointer to stream PTD handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_get_ptd_handle(void *mmu_str_handle, void **str_ptd);
+
+/*
+ * @Function mmu_device_create
+ * @Description
+ * This function is used to create and initialise the MMU device context.
+ * @Input mmu_type : MMU type.
+ * @Input ptd_alignment : Alignment of Page Table directory.
+ * @Output mmudev_hndl : A pointer used to return the
+ * MMU device handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_device_create(enum mmu_etype mmu_type,
+ unsigned int ptd_alignment,
+ void **mmudev_hndl);
+
+/*
+ * @Function mmu_device_destroy
+ * @Description
+ * This function is used to destroy the MMU device context.
+ * NOTE: Destroying the device automatically destroys any streams and frees
+ * any memory allocated using mmu_stream_alloc().
+ * @Input mmudev_hndl : The MMU device handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_device_destroy(void *mmudev_hndl);
+
+/*
+ * @Function mmu_stream_create
+ * @Description
+ * This function is used to create and initialise the MMU stream context.
+ * @Input mmudev_hndl : The MMU device handle.
+ * @Input km_str_id : Stream Id used in communication with KM driver.
+ * @Input vxd_dec_ctx : Pointer to the VXD decoder context.
+ * @Output mmustr_hndl : A pointer used to return the MMU stream handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_create(void *mmudev_hndl, unsigned int km_str_id, void *vxd_dec_ctx,
+ void **mmustr_hndl);
+
+/**
+ * mmu_stream_destroy - This function is used to destroy the MMU stream context.
+ * @mmustr_hndl : The MMU stream handle.
+ * Return IMG_SUCCESS or an error code.
+ *
+ * NOTE: Destroy automatically frees any memory allocated using
+ * mmu_stream_alloc().
+ */
+int mmu_stream_destroy(void *mmustr_hndl);
+
+/*
+ * @Function mmu_stream_alloc
+ * @Description
+ * This function is used to allocate stream memory.
+ * @Input mmustr_hndl : The MMU stream handle.
+ * @Input heap_id : The MMU heap Id.
+ * @Input mem_heap_id : Memory heap id
+ * @Input mem_attrib : Memory attributes
+ * @Input size : The size, in bytes, to be allocated.
+ * @Input alignment : The required byte alignment
+ * (1, 2, 4, 8, 16 etc).
+ * @Output ddbuf_info : A pointer to a #vidio_ddbufinfo structure
+ * used to return the buffer info.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_alloc(void *mmustr_hndl,
+ enum mmu_eheap_id heap_id,
+ unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib,
+ unsigned int size,
+ unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function mmu_stream_map_ext
+ * @Description
+ * This function is used to malloc device memory (virtual memory), but mapping
+ * this to memory that has already been allocated (externally).
+ * NOTE: Memory can be freed using MMU_Free(). However, this does not
+ * free the memory provided by the caller via pvCpuLinearAddr.
+ * @Input mmustr_hndl : The MMU stream handle.
+ * @Input heap_id : The heap Id.
+ * @Input buff_id : The buffer Id.
+ * @Input size : The size, in bytes, to be allocated.
+ * @Input alignment : The required byte alignment (1, 2, 4, 8, 16 etc).
+ * @Input mem_attrib : Memory attributes
+ * @Input cpu_linear_addr : CPU linear address of the memory
+ * to be allocated for the device.
+ * @Output ddbuf_info : A pointer to a #vidio_ddbufinfo structure
+ * used to return the buffer info.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_stream_map_ext(void *mmustr_hndl,
+ enum mmu_eheap_id heap_id,
+ unsigned int buff_id,
+ unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info);
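+
+/*
+ * Illustrative call sequence (a sketch, not part of the driver API
+ * contract): map a 1 MB externally allocated, CPU-visible bitstream
+ * buffer into the stream context. "str_hndl", "buf_id" and "cpu_addr"
+ * are assumed to come from an earlier buffer-manager allocation.
+ *
+ *	struct vidio_ddbufinfo info = { 0 };
+ *	int ret;
+ *
+ *	ret = mmu_stream_map_ext(str_hndl, MMU_HEAP_BITSTREAM_BUFFERS,
+ *				 buf_id, 1024 * 1024,
+ *				 DEV_MMU_PAGE_ALIGNMENT,
+ *				 SYS_MEMATTRIB_CORE_READ_ONLY,
+ *				 cpu_addr, &info);
+ *	if (ret != IMG_SUCCESS)
+ *		return ret;
+ */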
+
+int mmu_stream_map_ext_sg(void *mmustr_hndl,
+ enum mmu_eheap_id heap_id,
+ void *sgt,
+ unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info,
+ unsigned int *buff_id);
+
+/*
+ * @Function mmu_free_mem
+ * @Description
+ * This function is used to free device memory.
+ * @Input mmustr_hndl : The MMU stream handle.
+ * @Input ddbuf_info : A pointer to a #vidio_ddbufinfo structure.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_free_mem(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function mmu_free_mem_sg
+ * @Description
+ * This function is used to free device memory.
+ * @Input mmustr_hndl : The MMU stream handle.
+ * @Input ddbuf_info : A pointer to a #vidio_ddbufinfo structure.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int mmu_free_mem_sg(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+int mmu_get_heap(unsigned int image_stride, enum mmu_eheap_id *heap_id);
+
+#endif /* _VXD_MMU_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecdd_defs.h b/drivers/media/platform/vxe-vxd/decoder/vdecdd_defs.h
new file mode 100644
index 000000000000..dc4c2695c390
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecdd_defs.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder device driver header definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef __VDECDD_DEFS_H__
+#define __VDECDD_DEFS_H__
+
+#include "bspp.h"
+#include "lst.h"
+#include "vdec_defs.h"
+#include "vdecfw_shared.h"
+#include "vid_buf.h"
+#include "vxd_mmu_defs.h"
+
+/* RMAN type for streams. */
+#define VDECDD_STREAM_TYPE_ID (0xB0B00001)
+
+/* RMAN type for buffer mappings. */
+#define VDECDD_BUFMAP_TYPE_ID (0xB0B00002)
+
+/*
+ * This type contains core feature flags.
+ * @brief Core Feature Flags
+ */
+enum vdecdd_core_feature_flags {
+ VDECDD_COREFEATURE_MPEG2 = (1 << 0),
+ VDECDD_COREFEATURE_MPEG4 = (1 << 1),
+ VDECDD_COREFEATURE_H264 = (1 << 2),
+ VDECDD_COREFEATURE_VC1 = (1 << 3),
+ VDECDD_COREFEATURE_AVS = (1 << 4),
+ VDECDD_COREFEATURE_REAL = (1 << 5),
+ VDECDD_COREFEATURE_JPEG = (1 << 6),
+ VDECDD_COREFEATURE_VP6 = (1 << 7),
+ VDECDD_COREFEATURE_VP8 = (1 << 8),
+ VDECDD_COREFEATURE_HEVC = (1 << 9),
+ VDECDD_COREFEATURE_HD_DECODE = (1 << 10),
+ VDECDD_COREFEATURE_ROTATION = (1 << 11),
+ VDECDD_COREFEATURE_SCALING = (1 << 12),
+ VDECDD_COREFEATURE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains configuration relating to a device.
+ * @brief Device Configuration
+ */
+struct vdecdd_dd_devconfig {
+ unsigned int num_slots_per_pipe;
+};
+
+/*
+ * This structure contains the Decoder decoding picture status.
+ * @brief Decoder Decoding Picture Status
+ */
+struct vdecdd_dec_pict_status {
+ unsigned int transaction_id;
+ enum vdecfw_progresscheckpoint fw_cp;
+ enum vdecfw_progresscheckpoint fehw_cp;
+ enum vdecfw_progresscheckpoint behw_cp;
+ unsigned int dmac_status;
+ unsigned int fe_mb_x;
+ unsigned int fe_mb_y;
+ unsigned int be_mb_x;
+ unsigned int be_mb_y;
+ unsigned char fw_control_msg[VDECFW_MSGID_CONTROL_TYPES];
+ unsigned char fw_decode_msg[VDECFW_MSGID_DECODE_TYPES];
+ unsigned char fw_completion_msg[VDECFW_MSGID_COMPLETION_TYPES];
+ unsigned char host_control_msg[VDECFW_MSGID_CONTROL_TYPES];
+ unsigned char host_decode_msg[VDECFW_MSGID_DECODE_TYPES];
+ unsigned char host_completion_msg[VDECFW_MSGID_COMPLETION_TYPES];
+};
+
+/*
+ * This structure contains the Decoder decoding picture status.
+ * @brief Core Status
+ */
+struct vdecdd_core_status {
+ unsigned int mtx_pc;
+ unsigned int mtx_pcx;
+ unsigned int mtx_enable;
+ unsigned int mtx_st_bits;
+ unsigned int mtx_fault0;
+ unsigned int mtx_a0_stack_ptr;
+ unsigned int mtx_a0_frame_ptr;
+ unsigned int dma_setup[3];
+ unsigned int dma_count[3];
+ unsigned int dma_peripheral_addr[3];
+};
+
+/*
+ * This structure contains the Decoder component stream status.
+ * @brief Decoder Component Stream Status
+ */
+struct vdecdd_decstr_status {
+ unsigned int num_pict_decoding;
+ struct vdecdd_dec_pict_status dec_pict_st[VDECFW_MAX_NUM_PICTURES];
+ unsigned int num_pict_decoded;
+ unsigned int decoded_picts[VDECFW_MAX_NUM_PICTURES];
+ unsigned int features;
+ struct vdecdd_core_status core_st;
+ unsigned int display_pics;
+ unsigned int release_pics;
+ unsigned int next_display_items[VDECFW_MAX_NUM_PICTURES];
+ unsigned int next_display_item_parent[VDECFW_MAX_NUM_PICTURES];
+ unsigned int next_release_items[VDECFW_MAX_NUM_PICTURES];
+ unsigned int next_release_item_parent[VDECFW_MAX_NUM_PICTURES];
+ unsigned int flds_as_frm_decodes;
+ unsigned int total_pict_decoded;
+ unsigned int total_pict_displayed;
+ unsigned int total_pict_finished;
+};
+
+/*
+ * This structure contains the device context.
+ * @brief VDECDD Device Context
+ */
+struct vdecdd_dddev_context {
+ void *dev_handle;
+ void *dec_context;
+ unsigned int internal_heap_id;
+ void *res_buck_handle;
+};
+
+/*
+ * This type defines the stream state.
+ * @brief VDEDD Stream State
+ */
+enum vdecdd_ddstr_state {
+ VDECDD_STRSTATE_STOPPED = 0x00,
+ VDECDD_STRSTATE_PLAYING,
+ VDECDD_STRSTATE_STOPPING,
+ VDECDD_STRSTATE_MAX,
+ VDECDD_STRSTATE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains the mapped output buffer configuration.
+ * @brief VDECDD Mapped Output Buffer Configuration
+ */
+struct vdecdd_mapbuf_info {
+ unsigned int buf_size;
+ unsigned int num_buf;
+ unsigned char byte_interleave;
+};
+
+struct vdecdd_ddstr_ctx;
+
+/*
+ * This structure contains the map info.
+ * @brief VDECDD Map Info
+ */
+struct vdecdd_ddbuf_mapinfo {
+ void **link; /* to be part of single linked list */
+ void *res_handle;
+ unsigned int buf_id;
+ unsigned int buf_map_id;
+ struct vdecdd_ddstr_ctx *ddstr_context;
+ void *buf_cb_param;
+ enum vdec_buf_type buf_type;
+ enum mmu_eheap_id mmuheap_id;
+ struct vidio_ddbufinfo ddbuf_info;
+};
+
+/*
+ * This structure contains the information about the picture buffer
+ * and its structure.
+ * @brief VDECDD Picture Buffer Info
+ */
+struct vdecdd_ddpict_buf {
+ struct vdecdd_ddbuf_mapinfo *pict_buf;
+ struct vdec_pict_rendinfo rend_info;
+ struct vdec_pict_bufconfig buf_config;
+};
+
+/*
+ * This structure contains the stream context.
+ * @brief VDECDD Stream Context
+ */
+struct vdecdd_ddstr_ctx {
+ void **link; /* to be part of single linked list */
+ unsigned int res_str_id;
+ unsigned int km_str_id;
+ void *res_buck_handle;
+ void *res_handle;
+ struct vdecdd_dddev_context *dd_dev_context;
+ struct vdec_str_configdata str_config_data;
+ unsigned char preempt;
+ enum vdecdd_ddstr_state dd_str_state;
+ void *mmu_str_handle;
+ void *dec_ctx;
+ enum vdec_play_mode play_mode;
+ struct vdec_comsequ_hdrinfo comseq_hdr_info;
+ struct vdecdd_ddpict_buf disp_pict_buf;
+ struct vdecdd_mapbuf_info map_buf_info;
+ struct vdec_str_opconfig opconfig;
+ unsigned char str_op_configured;
+ struct vdec_comsequ_hdrinfo prev_comseq_hdr_info;
+ struct bspp_pict_hdr_info prev_pict_hdr_info;
+};
+
+/*
+ * This type defines the stream unit type.
+ * @brief Stream Unit Type
+ */
+enum vdecdd_str_unit_type {
+ VDECDD_STRUNIT_ANONYMOUS = 0,
+ VDECDD_STRUNIT_SEQUENCE_START,
+ VDECDD_STRUNIT_CLOSED_GOP,
+ VDECDD_STRUNIT_SEQUENCE_END,
+ VDECDD_STRUNIT_PICTURE_PORTENT,
+ VDECDD_STRUNIT_PICTURE_START,
+ VDECDD_STRUNIT_PICTURE_FRAGMENT,
+ VDECDD_STRUNIT_PICTURE_END,
+ VDECDD_STRUNIT_FENCE,
+ VDECDD_STRUNIT_STOP,
+ VDECDD_STRUNIT_MAX,
+ VDECDD_STRUNIT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains a front end stream unit.
+ * @brief Front End Stream Unit
+ */
+struct vdecdd_str_unit {
+ void *lst_padding;
+ enum vdecdd_str_unit_type str_unit_type;
+ void *str_unit_handle;
+ unsigned int err_flags;
+ struct lst_t bstr_seg_list;
+ void *dd_data;
+ struct bspp_sequ_hdr_info *seq_hdr_info;
+ unsigned int seq_hdr_id;
+ unsigned char closed_gop;
+ unsigned char eop;
+ struct bspp_pict_hdr_info *pict_hdr_info;
+ void *dd_pict_data;
+ unsigned char last_pict_in_seq;
+ void *str_unit_tag;
+ unsigned char decode;
+ unsigned int features;
+ unsigned char eos;
+};
+
+/*
+ * This structure contains a set of picture resources required at all the
+ * interim stages of decoding it as it flows around the internals. It originates
+ * in the plant.
+ * @brief Picture Resources (stream)
+ */
+struct vdecdd_pict_resint {
+ void **link; /* to be part of single linked list */
+ struct vdecdd_ddbuf_mapinfo *mb_param_buf;
+ unsigned int ref_cnt;
+
+#ifdef HAS_HEVC
+ /* GENC fragment buffer */
+ struct vdecdd_ddbuf_mapinfo *genc_fragment_buf;
+#endif
+#if defined(HAS_HEVC) || defined(ERROR_CONCEALMENT)
+ /* Sequence resources (GENC buffers) */
+ struct vdecdd_seq_resint *seq_resint;
+#endif
+};
+
+/*
+ * This structure contains the supplementary information for decoded picture.
+ * @brief Event Callback Information
+ */
+struct vdecdd_pict_sup_data {
+ void *raw_vui_data;
+ void *raw_sei_list_first_fld;
+ void *raw_sei_list_second_fld;
+ unsigned char merged_flds;
+ union {
+ struct vdecdd_h264_pict_supl_data {
+ unsigned char nal_ref_idc;
+ unsigned short frame_num;
+ } h264_pict_supl_data;
+#ifdef HAS_HEVC
+ struct vdecdd_hevc_pict_supl_data {
+ unsigned int pic_order_cnt;
+ } hevc_pict_supl_data;
+#endif
+ };
+};
+
+/*
+ * This structure contains a set of resources representing a picture
+ * at all the stages of processing it.
+ * @brief Picture
+ */
+struct vdecdd_picture {
+ unsigned int pict_id;
+ unsigned char last_pict_in_seq;
+ struct vdecdd_pict_resint *pict_res_int;
+ struct vdecdd_ddpict_buf disp_pict_buf;
+ struct vdec_str_opconfig op_config;
+ struct vdec_dec_pict_info *dec_pict_info;
+ struct vdecdd_pict_sup_data dec_pict_sup_data;
+ struct vdec_dec_pict_auxinfo dec_pict_aux_info;
+};
+
+/*
+ * This structure contains the information required to check Decoder support
+ * Only pointers that are non-null will be used in validation.
+ * @brief VDECDD Support Check Information
+ */
+struct vdecdd_supp_check {
+ /* Inputs */
+ const struct vdec_comsequ_hdrinfo *comseq_hdrinfo;
+ const struct vdec_str_opconfig *op_cfg;
+ const struct vdecdd_ddpict_buf *disp_pictbuf;
+ const struct bspp_pict_hdr_info *pict_hdrinfo;
+ unsigned char non_cfg_req;
+
+ /* Outputs */
+ struct vdec_unsupp_flags unsupp_flags;
+ unsigned int features;
+};
+
+/*
+ * This type defines unsupported stream configuration features.
+ * @brief Unsupported Stream Configuration Flags
+ */
+enum vdec_unsupp_strcfg {
+ VDECDD_UNSUPPORTED_STRCONFIG_STD = (1 << 0),
+ VDECDD_UNSUPPORTED_STRCONFIG_BSTRFORMAT = (1 << 1),
+ VDECDD_UNSUPPORTED_STRCONFIG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines unsupported output configuration features.
+ * @brief Unsupported Output Configuration Flags
+ */
+enum vdec_unsupp_opcfg {
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_ROTATION = (1 << 0),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_ROTATION_WITH_FIELDS = (1 << 1),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_ROTATION_WITH_SCALING = (1 << 2),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_SCALING = (1 << 3),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_UP_DOWNSAMPLING = (1 << 4),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_SCALING_WITH_OOLD = (1 << 5),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_SCALING_MONOCHROME = (1 << 6),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_SCALING_SIZE = (1 << 7),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_X_WITH_JPEG = (1 << 8),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_SCALESIZE = (1 << 9),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_PIXFORMAT = (1 << 10),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_ROTATION_WITH_HIGH_COLOUR =
+ (1 << 11),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_DOWNSAMPLING_WITH_ROTATION =
+ (1 << 12),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_ROTATION_WITH_10BIT_PACKED =
+ (1 << 13),
+ VDECDD_UNSUPPORTED_OUTPUTCONFIG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines unsupported output configuration features.
+ * @brief Unsupported Output Configuration Flags
+ */
+enum vdec_unsupp_op_bufcfg {
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_EXTENDED_STRIDE = (1 << 0),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_64BYTE_STRIDE = (1 << 1),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_FIXED_STRIDE = (1 << 2),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_PICTURE_SIZE = (1 << 3),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_BUFFER_SIZE = (1 << 4),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_Y_SIZE = (1 << 5),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_UV_SIZE = (1 << 6),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_Y_STRIDE = (1 << 7),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_UV_STRIDE = (1 << 8),
+ VDECDD_UNSUPPORTED_OUTPUTBUFCONFIG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines unsupported sequence header features.
+ * @brief Unsupported Sequence Header Flags
+ */
+enum vdec_unsupp_seqhdr {
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXFORMAT_BIT_DEPTH = (1 << 0),
+ VDECDD_UNSUPPORTED_SEQUHDR_SCALING = (1 << 1),
+ VDECDD_UNSUPPORTED_SEQUHDR_PIXEL_FORMAT = (1 << 2),
+ VDECDD_UNSUPPORTED_SEQUHDR_NUM_OF_VIEWS = (1 << 3),
+ VDECDD_UNSUPPORTED_SEQUHDR_CODED_HEIGHT = (1 << 4),
+ VDECDD_UNSUPPORTED_SEQUHDR_SEP_COLOUR_PLANE = (1 << 5),
+ VDECDD_UNSUPPORTED_SEQUHDR_SIZE = (1 << 6),
+ VDECDD_UNSUPPORTED_SEQUHDR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines unsupported picture header features.
+ * @brief Unsupported Picture Header Flags
+ */
+enum vdec_unsupp_picthdr {
+ VDECDD_UNSUPPORTED_PICTHDR_UPSCALING = (1 << 0),
+ VDECDD_UNSUPPORTED_PICTHDR_OVERSIZED_SGM = (1 << 1),
+ VDECDD_UNSUPPORTED_PICTHDR_DISCONTINUOUS_MBS = (1 << 2),
+ VDECDD_UNSUPPORTED_PICTHDR_RESOLUTION = (1 << 3),
+ VDECDD_UNSUPPORTED_PICTHDR_SCALING_ORIGINALSIZE = (1 << 4),
+ VDECDD_UNSUPPORTED_PICTHDR_SCALING_SIZE = (1 << 5),
+ VDECDD_UNSUPPORTED_PICTHDR_HEVC_RANGE_EXT = (1 << 6),
+ VDECDD_UNSUPPORTED_PICTHDR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the Bitstream Segment type.
+ * @brief Bitstream segment type
+ */
+enum vdecdd_bstr_segtype {
+ VDECDD_BSSEG_LASTINBUFF = (1 << 0),
+ VDECDD_BSSEG_SKIP = (1 << 1),
+ VDECDD_BSSEG_INSERTSCP = (1 << 2),
+ VDECDD_BSSEG_INSERT_STARTCODE = (1 << 3) | VDECDD_BSSEG_INSERTSCP,
+ VDECDD_BSSEG_INSERT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct vdecdd_seq_resint {
+ void **link;
+
+#ifdef HAS_HEVC
+ unsigned short genc_buf_id;
+ struct vdecdd_ddbuf_mapinfo *genc_buffers[4]; /* GENC buffers */
+ struct vdecdd_ddbuf_mapinfo *intra_buffer; /* GENC buffers */
+ struct vdecdd_ddbuf_mapinfo *aux_buffer; /* GENC buffers */
+#endif
+ struct vdecdd_ddbuf_mapinfo *err_pict_buf; /* Pointer to "Error
+ * Recovery Frame Store" buffer.
+ */
+
+ /*
+ * Ref. counter (number of users) of sequence resources
+ * NOTE: Internal buffer reference counters are not used
+ * for buffers allocated as sequence resources.
+ */
+ unsigned int ref_count;
+};
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.c b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.c
new file mode 100644
index 000000000000..7fd7a80d46ae
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Decoder device driver utility functions implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "vdecdd_utils.h"
+
+/*
+ * @Function VDECDDUTILS_FreeStrUnit
+ */
+int vdecddutils_free_strunit(struct vdecdd_str_unit *str_unit)
+{
+ struct bspp_bitstr_seg *bstr_seg;
+
+ /* Loop over bit stream segments */
+ bstr_seg = (struct bspp_bitstr_seg *)lst_removehead(&str_unit->bstr_seg_list);
+ while (bstr_seg) {
+ /* Free segment. */
+ kfree(bstr_seg);
+
+ /* Get next segment. */
+ bstr_seg = (struct bspp_bitstr_seg *)lst_removehead(&str_unit->bstr_seg_list);
+ }
+
+ /* Free the sequence header */
+ if (str_unit->seq_hdr_info) {
+ str_unit->seq_hdr_info->ref_count--;
+ if (str_unit->seq_hdr_info->ref_count == 0) {
+ kfree(str_unit->seq_hdr_info);
+ str_unit->seq_hdr_info = NULL;
+ }
+ }
+
+ /* Free the picture header... */
+ if (str_unit->pict_hdr_info) {
+ kfree(str_unit->pict_hdr_info->pict_sgm_data.pic_data);
+ str_unit->pict_hdr_info->pict_sgm_data.pic_data = NULL;
+
+ kfree(str_unit->pict_hdr_info);
+ str_unit->pict_hdr_info = NULL;
+ }
+
+ /* Free stream unit. */
+ kfree(str_unit);
+ str_unit = NULL;
+
+ /* Return success */
+ return IMG_SUCCESS;
+}
+
+/*
+ * @Function: VDECDDUTILS_CreateStrUnit
+ * @Description: this function allocates a structure for a complete data unit
+ */
+int vdecddutils_create_strunit(struct vdecdd_str_unit **str_unit_handle,
+ struct lst_t *bs_list)
+{
+ struct vdecdd_str_unit *str_unit;
+ struct bspp_bitstr_seg *bstr_seg;
+
+ str_unit = kzalloc(sizeof(*str_unit), GFP_KERNEL);
+ VDEC_ASSERT(str_unit);
+ if (!str_unit)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ if (bs_list) {
+ /* copy BS list to this list */
+ lst_init(&str_unit->bstr_seg_list);
+		while ((bstr_seg = lst_removehead(bs_list)) != NULL)
+			lst_add(&str_unit->bstr_seg_list, bstr_seg);
+ }
+
+ *str_unit_handle = str_unit;
+
+ return IMG_SUCCESS;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.h b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.h
new file mode 100644
index 000000000000..233b7c80fe10
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Decoder device driver utility header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __VDECDD_UTILS_H__
+#define __VDECDD_UTILS_H__
+
+#include "img_errors.h"
+#include "vdecdd_defs.h"
+
+/* The picture buffer alignment (in bytes) for VXD. */
+#define VDEC_VXD_PICTBUF_ALIGNMENT (64)
+/* The buffer alignment (in bytes) for VXD. */
+#define VDEC_VXD_BUF_ALIGNMENT (4096)
+/* The extended stride alignment for VXD. */
+#define VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT (64)
+/* Macroblock dimension (width and height) in pixels. */
+#define VDEC_MB_DIMENSION (16)
+
+static inline unsigned int vdec_size_min(unsigned int a, unsigned int b)
+{
+ return a <= b ? a : b;
+}
+
+static inline unsigned char vdec_size_lt(struct vdec_pict_size sa, struct vdec_pict_size sb)
+{
+ return (sa.width < sb.width && sa.height <= sb.height) ||
+ (sa.width <= sb.width && sa.height < sb.height);
+}
+
+static inline unsigned char vdec_size_ge(struct vdec_pict_size sa, struct vdec_pict_size sb)
+{
+ return sa.width >= sb.width && sa.height >= sb.height;
+}
+
+static inline unsigned char vdec_size_ne(struct vdec_pict_size sa, struct vdec_pict_size sb)
+{
+ return sa.width != sb.width || sa.height != sb.height;
+}
+
+static inline unsigned char vdec_size_nz(struct vdec_pict_size sa)
+{
+ return sa.width != 0 && sa.height != 0;
+}
+
+int vdecddutils_free_strunit(struct vdecdd_str_unit *str_unit);
+
+int vdecddutils_create_strunit(struct vdecdd_str_unit **str_unit_handle,
+ struct lst_t *bs_list);
+
+int vdecddutils_ref_pict_get_maxnum(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ unsigned int *num_picts);
+
+int vdecddutils_get_minrequired_numpicts(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ const struct vdec_str_opconfig *op_cfg,
+ unsigned int *num_picts);
+
+int vdecddutils_pictbuf_getconfig(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ const struct vdec_str_opconfig *str_opcfg,
+ struct vdec_pict_bufconfig *pict_bufcfg);
+
+int vdecddutils_pictbuf_getinfo(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ const struct vdec_str_opconfig *str_opcfg,
+ struct vdec_pict_rendinfo *pict_rend_info);
+
+int vdecddutils_convert_buffer_config(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_bufconfig *pict_bufcfg,
+ struct vdec_pict_rendinfo *pict_rend_info);
+
+int vdecddutils_get_display_region(const struct vdec_pict_size *coded_size,
+ const struct vdec_rect *orig_disp_region,
+ struct vdec_rect *disp_region);
+
+void vdecddutils_buf_vxd_adjust_size(unsigned int *buf_size);
+
+int vdecddutils_ref_pic_hevc_get_maxnum(const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ unsigned int *max_ref_picnum);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils_buf.c b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils_buf.c
new file mode 100644
index 000000000000..3a6ad609f981
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecdd_utils_buf.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Decoder device driver buffer utility functions implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_profiles_levels.h"
+#include "pixel_api.h"
+#include "vdecdd_utils.h"
+
+/*
+ * Tests if chroma offset (immediately after size of luma) is exactly
+ * aligned to buffer alignment constraint.
+ */
+static inline unsigned char is_packedbuf_chroma_aligned(unsigned int offset,
+ unsigned int color_plane,
+ unsigned int align)
+{
+	return (color_plane != VDEC_PLANE_VIDEO_Y ? TRUE :
+ (offset == ALIGN(offset, align) ? TRUE : FALSE));
+}
+
+/*
+ * < h.264 MaxDpbMbs values per profile (see Table A-1 of Rec. ITU-T H.264
+ * (03/2010)).
+ * NOTE: Level 1b will be treated as 1.1 in case of Baseline,
+ * Constrained Baseline, Main, and Extended profiles as the value of the
+ * constraint_set3_flag is not available in #VDEC_sComSequHdrInfo structure.
+ */
+static unsigned int h264_max_dpb_mbs[H264_LEVEL_MAJOR_NUM][H264_LEVEL_MINOR_NUM] = {
+ /* level: n/a n/a n/a 1.0b */
+ { 396, 396, 396, 396 },
+ /* level: 1.0 1.1 1.2 1.3 */
+ { 396, 900, 2376, 2376 },
+ /* level: 2.0 2.1 2.2 n/a */
+ { 2376, 4752, 8100, 8100 },
+ /* level: 3.0 3.1 3.2 n/a */
+ { 8100, 18000, 20480, 20480},
+ /* level: 4.0 4.1 4.2 n/a */
+ { 32768, 32768, 34816, 34816},
+ /* level: 5.0 5.1 5.2 n/a */
+ { 110400, 184320, 184320, 184320}
+};
+
+typedef int (*fn_ref_pic_get_max_num)(const struct vdec_comsequ_hdrinfo
+ *comseq_hdrinfo, unsigned int *max_ref_pic_num);
+
+void vdecddutils_buf_vxd_adjust_size(unsigned int *buf_size)
+{
+ /* Align the buffer size to VXD page size. */
+ *buf_size = ALIGN(*buf_size, VDEC_VXD_BUF_ALIGNMENT);
+}
+
+static int vdecddutils_ref_pic_h264_get_maxnum
+ (const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ unsigned int *max_ref_pic_num)
+{
+ unsigned int pic_width_mb;
+ unsigned int pic_height_mb;
+ unsigned int lvl_major = 0;
+ unsigned int lvl_minor = 0;
+
+ /* Pre-validate level. */
+ if (comseq_hdrinfo->codec_level < H264_LEVEL_MIN ||
+ comseq_hdrinfo->codec_level > H264_LEVEL_MAX) {
+ pr_warn("Wrong H264 level value: %u",
+ comseq_hdrinfo->codec_level);
+ }
+
+ if (comseq_hdrinfo->max_reorder_picts) {
+ *max_ref_pic_num = comseq_hdrinfo->max_reorder_picts;
+ } else {
+ /* Calculate level major and minor. */
+ lvl_major = comseq_hdrinfo->codec_level / 10;
+ lvl_minor = comseq_hdrinfo->codec_level % 10;
+
+ /* Calculate picture sizes in MBs. */
+ pic_width_mb = (comseq_hdrinfo->max_frame_size.width +
+ (VDEC_MB_DIMENSION - 1)) / VDEC_MB_DIMENSION;
+ pic_height_mb = (comseq_hdrinfo->max_frame_size.height +
+ (VDEC_MB_DIMENSION - 1)) / VDEC_MB_DIMENSION;
+
+ /* Validate lvl_minor */
+ if (lvl_minor > 3) {
+ pr_warn("Wrong H264 lvl_minor level value: %u, overriding with 3",
+ lvl_minor);
+ lvl_minor = 3;
+ }
+ /* Validate lvl_major */
+ if (lvl_major > 5) {
+ pr_warn("Wrong H264 lvl_major level value: %u, overriding with 5",
+ lvl_major);
+ lvl_major = 5;
+ }
+
+ /*
+ * Calculate the maximum number of reference pictures
+ * required based on level.
+ */
+ *max_ref_pic_num = h264_max_dpb_mbs[lvl_major][lvl_minor] /
+ (pic_width_mb * pic_height_mb);
+ if (*max_ref_pic_num > 16)
+ *max_ref_pic_num = 16;
+ }
+
+ /* Return success. */
+ return IMG_SUCCESS;
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function vdecddutils_ref_pic_hevc_get_maxnum
+ */
+int vdecddutils_ref_pic_hevc_get_maxnum(const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ unsigned int *max_ref_picnum)
+{
+ static const unsigned int HEVC_LEVEL_IDC_MIN = 30;
+ static const unsigned int HEVC_LEVEL_IDC_MAX = 186;
+
+ static const unsigned int
+ max_luma_ps_list[HEVC_LEVEL_MAJOR_NUM][HEVC_LEVEL_MINOR_NUM] = {
+ /* level: 1.0 1.1 1.2 */
+ { 36864, 0, 0, },
+ /* level: 2.0 2.1 2.2 */
+ { 122880, 245760, 0, },
+ /* level: 3.0 3.1 3.2 */
+ { 552960, 983040, 0, },
+ /* level: 4.0 4.1 4.2 */
+ { 2228224, 2228224, 0, },
+ /* level: 5.0 5.1 5.2 */
+ { 8912896, 8912896, 8912896, },
+ /* level: 6.0 6.1 6.2 */
+ { 35651584, 35651584, 35651584, }
+ };
+
+ /* ITU-T H.265 04/2013 A.4.1 */
+
+ const unsigned int max_dpb_picbuf = 6;
+
+ /* this is rounded to whole Ctbs */
+ unsigned int pic_size_in_samples_Y = comseq_hdrinfo->frame_size.height *
+ comseq_hdrinfo->frame_size.width;
+
+ signed char level_maj, level_min;
+ unsigned int max_luma_ps;
+
+ /* some error resilience */
+ if (comseq_hdrinfo->codec_level > HEVC_LEVEL_IDC_MAX ||
+ comseq_hdrinfo->codec_level < HEVC_LEVEL_IDC_MIN) {
+ pr_warn("HEVC Codec level out of range: %u, falling back to %u",
+ comseq_hdrinfo->codec_level,
+ comseq_hdrinfo->min_pict_buf_num);
+
+ *max_ref_picnum = comseq_hdrinfo->min_pict_buf_num;
+ return IMG_SUCCESS;
+ }
+
+ level_maj = comseq_hdrinfo->codec_level / 30;
+ level_min = (comseq_hdrinfo->codec_level % 30) / 3;
+
+ if (level_maj > 0 && level_maj <= HEVC_LEVEL_MAJOR_NUM &&
+ level_min >= 0 && level_min < HEVC_LEVEL_MINOR_NUM) {
+ max_luma_ps = max_luma_ps_list[level_maj - 1][level_min];
+ } else {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (max_luma_ps == 0) {
+ pr_err("Wrong HEVC level value: %u.%u (general_level_idc: %u)",
+ level_maj, level_min, comseq_hdrinfo->codec_level);
+
+ return IMG_ERROR_VALUE_OUT_OF_RANGE;
+ }
+
+ if (max_luma_ps < pic_size_in_samples_Y)
+ pr_warn("HEVC PicSizeInSamplesY too large for level (%u > %u)",
+ pic_size_in_samples_Y, max_luma_ps);
+
+ if (pic_size_in_samples_Y <= (max_luma_ps >> 2))
+ *max_ref_picnum = vdec_size_min(4 * max_dpb_picbuf, 16);
+ else if (pic_size_in_samples_Y <= (max_luma_ps >> 1))
+ *max_ref_picnum = vdec_size_min(2 * max_dpb_picbuf, 16);
+ else if (pic_size_in_samples_Y <= ((3 * max_luma_ps) >> 2))
+ *max_ref_picnum = vdec_size_min((4 * max_dpb_picbuf) / 3, 16);
+ else
+ *max_ref_picnum = max_dpb_picbuf;
+
+ /* Return success. */
+ return IMG_SUCCESS;
+}
+#endif
+
+#ifdef HAS_JPEG
+static int vdecddutils_ref_pic_jpeg_get_maxnum(const struct vdec_comsequ_hdrinfo *comseq_hdrinfo,
+ unsigned int *max_ref_picnum)
+{
+ /* No reference frames for JPEG. */
+ *max_ref_picnum = 0;
+
+ /* Return success. */
+ return IMG_SUCCESS;
+}
+#endif
+
+/*
+ * The array of pointers to functions calculating the maximum number
+ * of reference pictures required for each supported video standard.
+ * NOTE: The table is indexed by #VDEC_eVidStd enum values.
+ */
+static fn_ref_pic_get_max_num ref_pic_get_maxnum[VDEC_STD_MAX - 1] = {
+ NULL,
+ NULL,
+ NULL,
+ vdecddutils_ref_pic_h264_get_maxnum,
+ NULL,
+ NULL,
+ NULL,
+#ifdef HAS_JPEG
+ vdecddutils_ref_pic_jpeg_get_maxnum,
+#else
+ NULL,
+#endif
+ NULL,
+ NULL,
+ NULL,
+#ifdef HAS_HEVC
+ vdecddutils_ref_pic_hevc_get_maxnum
+#else
+ NULL
+#endif
+};
+
+int
+vdecddutils_ref_pict_get_maxnum(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ unsigned int *num_picts)
+{
+ int ret = IMG_SUCCESS;
+
+ /* Validate input params. */
+ if (str_cfg_data->vid_std == VDEC_STD_UNDEFINED || str_cfg_data->vid_std >= VDEC_STD_MAX)
+ return IMG_ERROR_VALUE_OUT_OF_RANGE;
+
+	/*
+	 * Call the function related to the provided video standard; table
+	 * entries for unhandled standards are NULL, so guard against them.
+	 */
+	if (!ref_pic_get_maxnum[str_cfg_data->vid_std - 1])
+		return IMG_ERROR_NOT_SUPPORTED;
+
+	ret = ref_pic_get_maxnum[str_cfg_data->vid_std - 1](comseq_hdr_info,
+							    num_picts);
+ if (ret != IMG_SUCCESS)
+ pr_warn("[USERSID=0x%08X] Failed to get number of reference pictures",
+ str_cfg_data->user_str_id);
+
+ /*
+	 * For a non-conformant stream use
+	 * max(*num_picts, comseq_hdr_info->min_pict_buf_num)
+ */
+ if (*num_picts < comseq_hdr_info->min_pict_buf_num)
+ *num_picts = comseq_hdr_info->min_pict_buf_num;
+
+ /*
+ * Increase for MVC: mvcScaleFactor = 2 (H.10.2) and additional pictures
+ * for a StoreInterViewOnlyRef case (C.4.5.2)
+ */
+ if (comseq_hdr_info->num_views > 1) {
+ *num_picts *= 2;
+ *num_picts += comseq_hdr_info->num_views - 1;
+ }
+
+ return ret;
+}
+
+static void vdecddutils_update_rend_pictsize(struct vdec_pict_size pict_size,
+ struct vdec_pict_size *rend_pict_size)
+{
+ if (rend_pict_size->width == 0) {
+ rend_pict_size->width = pict_size.width;
+ } else {
+ /* Take the smallest resolution supported by all the planes */
+ rend_pict_size->width = (pict_size.width <
+ rend_pict_size->width) ?
+ pict_size.width :
+ rend_pict_size->width;
+ }
+ if (rend_pict_size->height == 0) {
+ rend_pict_size->height = pict_size.height;
+ } else {
+ /* Take the smallest resolution supported by all the planes. */
+ rend_pict_size->height = (pict_size.height <
+ rend_pict_size->height) ?
+ pict_size.height :
+ rend_pict_size->height;
+ }
+}
+
+int vdecddutils_convert_buffer_config(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_bufconfig *pict_bufcfg,
+ struct vdec_pict_rendinfo *pict_rend_info)
+{
+ const struct pixel_pixinfo *pix_info;
+ struct img_pixfmt_desc pixfmt;
+ unsigned int i;
+ unsigned int total_vert_samples = 0;
+ unsigned int vert_samples[IMG_MAX_NUM_PLANES];
+ unsigned int plane_size = 0;
+ unsigned int plane_offset = 0;
+ struct vdec_pict_size pict_size;
+
+ /* Validate inputs. */
+ VDEC_ASSERT(str_cfg_data);
+ VDEC_ASSERT(pict_bufcfg);
+ VDEC_ASSERT(pict_rend_info);
+
+ /* Reset picture buffer allocation data. */
+ memset(pict_rend_info, 0x0, sizeof(*pict_rend_info));
+
+ pr_debug("%s picture buffer pixel_fmt = %d\n", __func__, pict_bufcfg->pixel_fmt);
+ /* Get pixel format info for regular pixel formats... */
+ if (pict_bufcfg->pixel_fmt < IMG_PIXFMT_ARBPLANAR8) {
+ pix_info = pixel_get_pixinfo(pict_bufcfg->pixel_fmt);
+ pixel_yuv_get_desc((struct pixel_pixinfo *)pix_info, &pixfmt);
+ } else {
+ pixel_get_fmt_desc(pict_bufcfg->pixel_fmt, &pixfmt);
+ }
+
+ /*
+ * Construct the render region information from the picture
+ * buffer configuration.
+ */
+ for (i = 0; i < IMG_MAX_NUM_PLANES; i++) {
+ if (pixfmt.planes[i]) {
+ unsigned int plane_align = VDEC_VXD_PICTBUF_ALIGNMENT;
+
+ /*
+ * Determine the offset (in bytes) to this plane.
+ * This is zero for the first (luma) plane and at the
+ * end of the previous plane for all subsequent planes.
+ */
+ plane_offset = plane_offset + plane_size;
+
+ /*
+ * Calculate the minimum number of vertical samples
+ * for this plane.
+ */
+ vert_samples[i] =
+ ((pict_bufcfg->coded_height +
+ pixfmt.v_denom - 1) / pixfmt.v_denom) *
+ pixfmt.v_numer[i];
+
+ /*
+			 * Calculate the minimum plane size from the stride and
+ * decode picture height. Packed buffers have the luma
+ * and chroma exactly adjacent and consequently the
+ * chroma plane offset is equal to this plane size.
+ */
+ plane_size = pict_bufcfg->stride[i] * vert_samples[i];
+ plane_size = ALIGN(plane_size, plane_align);
+
+ if (!pict_bufcfg->packed && pict_bufcfg->chroma_offset[i]) {
+ unsigned int max_plane_size;
+
+ max_plane_size =
+ pict_bufcfg->chroma_offset[i] - plane_offset;
+
+ if (plane_size > max_plane_size) {
+ pr_err("Chroma offset [%d bytes] is not large enough to fit minimum plane data [%d bytes] at offset [%d]",
+ pict_bufcfg->chroma_offset[i],
+ plane_size, plane_offset);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ plane_size = max_plane_size;
+
+ vert_samples[i] = plane_size /
+ pict_bufcfg->stride[i];
+ } else {
+ if (pict_bufcfg->chroma_offset[i] && (plane_offset + plane_size) !=
+ pict_bufcfg->chroma_offset[i]) {
+ pr_err("Chroma offset specified [%d bytes] should match that required for plane size calculated from stride and height [%d bytes]",
+ pict_bufcfg->chroma_offset[i],
+ plane_offset + plane_size);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ }
+
+ pict_rend_info->plane_info[i].offset = plane_offset;
+ pict_rend_info->plane_info[i].stride =
+ pict_bufcfg->stride[i];
+ pict_rend_info->plane_info[i].size = plane_size;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("VDECDDUTILS_ConvertBufferConfig() plane %d stride %u size %u offset %u",
+ i, pict_rend_info->plane_info[i].stride,
+ pict_rend_info->plane_info[i].size,
+ pict_rend_info->plane_info[i].offset);
+#endif
+
+ pict_rend_info->rendered_size +=
+ pict_rend_info->plane_info[i].size;
+
+ total_vert_samples += vert_samples[i];
+
+ /* Calculate the render region maximum picture size. */
+ pict_size.width = (pict_rend_info->plane_info[i].stride *
+ pixfmt.bop_denom) / pixfmt.bop_numer[i];
+ pict_size.height = (vert_samples[i] * pixfmt.v_denom) / pixfmt.v_numer[i];
+ vdecddutils_update_rend_pictsize(pict_size,
+ &pict_rend_info->rend_pict_size);
+ }
+ }
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("VDECDDUTILS_ConvertBufferConfig() total required %u (inc. alignment for addressing/tiling) vs. buffer %u",
+ pict_rend_info->rendered_size, pict_bufcfg->buf_size);
+#endif
+
+ /* Ensure that the buffer size is large enough to hold the data */
+ if (pict_bufcfg->buf_size < pict_rend_info->rendered_size) {
+ pr_err("Buffer size [%d bytes] should be at least as large as rendered data (inc. any enforced gap between planes) [%d bytes]",
+ pict_bufcfg->buf_size,
+ pict_rend_info->rendered_size);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Whole buffer should be marked as rendered region */
+ pict_rend_info->rendered_size = pict_bufcfg->buf_size;
+ /* Use the actual stride alignment */
+ pict_rend_info->stride_alignment = pict_bufcfg->stride_alignment;
+
+ return IMG_SUCCESS;
+}
+
+static unsigned char vdecddutils_is_secondary_op_required(const struct vdec_comsequ_hdrinfo
+ *comseq_hdr_info,
+ const struct vdec_str_opconfig
+ *op_cfg)
+{
+ unsigned char result = TRUE;
+
+ if (!op_cfg->force_oold &&
+ !comseq_hdr_info->post_processing &&
+ comseq_hdr_info->pixel_info.chroma_fmt_idc ==
+ op_cfg->pixel_info.chroma_fmt_idc &&
+ comseq_hdr_info->pixel_info.bitdepth_y ==
+ op_cfg->pixel_info.bitdepth_y &&
+ comseq_hdr_info->pixel_info.bitdepth_c ==
+ op_cfg->pixel_info.bitdepth_c)
+ /*
+		 * The secondary output is not required (if we have it, we will
+		 * not use it for transformations such as scaling, rotating or
+		 * up/down-sampling).
+ */
+ result = FALSE;
+
+ return result;
+}
+
+int vdecddutils_get_minrequired_numpicts(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+ const struct vdec_str_opconfig *op_cfg,
+ unsigned int *num_picts)
+{
+ int ret;
+ unsigned int max_held_picnum;
+
+ /* If any operation requiring internal buffers is to be applied... */
+ if (vdecddutils_is_secondary_op_required(comseq_hdr_info, op_cfg)) {
+ /*
+ * Reference picture buffers will be allocated internally,
+ * but there may be a number of picture buffers to which
+ * out-of-display-order pictures will be decoded. These
+ * buffers need to be allocated externally, so there's a
+ * need to calculate the number of out-of-(display)-order
+ * pictures required for the provided video standard.
+ */
+ ret = vdecddutils_ref_pict_get_maxnum(str_cfg_data, comseq_hdr_info,
+ &max_held_picnum);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ } else {
+ /*
+ * All the reference picture buffers have to be allocated
+ * externally, so there's a need to calculate the number of
+ * reference picture buffers required for the provided video
+ * standard.
+ */
+ ret = vdecddutils_ref_pict_get_maxnum(str_cfg_data, comseq_hdr_info,
+ &max_held_picnum);
+ if (ret != IMG_SUCCESS)
+ return ret;
+ }
+
+ /*
+ * Calculate the number of picture buffers required as the maximum
+ * number of picture buffers to be held onto by the driver plus the
+ * current picture buffer.
+ */
+ *num_picts = max_held_picnum +
+ (comseq_hdr_info->interlaced_frames ? 2 : 1);
+
+ return IMG_SUCCESS;
+}
+
+static void vdecddutils_get_codedsize(const struct vdec_pict_rend_config *pict_rend_cfg,
+ struct vdec_pict_size *decoded_pict_size)
+{
+ decoded_pict_size->width = pict_rend_cfg->coded_pict_size.width;
+ decoded_pict_size->height = pict_rend_cfg->coded_pict_size.height;
+}
+
+static unsigned char vdecddutils_is_packed(const struct vdec_pict_rendinfo *pict_rend_info,
+ const struct vdec_pict_rend_config *pict_rend_cfg)
+{
+ unsigned char packed = TRUE;
+ unsigned int pict_buf_align;
+
+ /* Validate inputs. */
+ VDEC_ASSERT(pict_rend_info);
+ VDEC_ASSERT(pict_rend_cfg);
+
+ pict_buf_align = VDEC_VXD_PICTBUF_ALIGNMENT;
+
+ if (pict_rend_info->plane_info[VDEC_PLANE_VIDEO_Y].size !=
+ pict_rend_info->plane_info[VDEC_PLANE_VIDEO_UV].offset) {
+ /* Planes that are not adjacent cannot be packed */
+ packed = FALSE;
+ } else if (!is_packedbuf_chroma_aligned(pict_rend_info->plane_info
+ [VDEC_PLANE_VIDEO_UV].offset,
+ VDEC_PLANE_VIDEO_Y,
+ pict_buf_align)) {
+ /* Chroma plane must be aligned for packed buffers. */
+ VDEC_ASSERT(pict_rend_info->plane_info[VDEC_PLANE_VIDEO_Y].size ==
+ pict_rend_info->plane_info[VDEC_PLANE_VIDEO_UV].offset);
+ packed = FALSE;
+ }
+
+ return packed;
+}
+
+static int vdecddutils_get_stride
+ (const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ unsigned int vert_samples, unsigned int *h_stride,
+ enum vdec_color_planes color_planes)
+{
+ unsigned int hw_h_stride = *h_stride;
+
+ /*
+ * If extended strides are to be used or indexed strides failed,
+ * make extended stride alignment.
+ */
+ hw_h_stride = ALIGN(hw_h_stride,
+ pict_rend_cfg->stride_alignment > 0 ?
+ pict_rend_cfg->stride_alignment :
+ VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT);
+
+ /* A zero-value indicates unsupported stride */
+ if (hw_h_stride == 0)
+ /* No valid stride found */
+ return IMG_ERROR_NOT_SUPPORTED;
+
+ *h_stride = hw_h_stride;
+
+ return IMG_SUCCESS;
+}
+
+static int vdecddutils_get_render_info(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ const struct pixel_pixinfo *pix_info,
+ struct vdec_pict_rendinfo *pict_rend_info)
+{
+ unsigned int i;
+ struct img_pixfmt_desc pixfmt;
+ struct vdec_pict_size coded_pict_size;
+ unsigned char single_stride = FALSE;
+ unsigned int vert_sample[IMG_MAX_NUM_PLANES] = {0};
+ unsigned int total_vert_samples;
+ unsigned int largest_stride;
+ unsigned int result;
+
+ /* Reset the output structure. */
+ memset(pict_rend_info, 0, sizeof(*pict_rend_info));
+
+ /* Ensure that the coded sizes are in whole macroblocks. */
+ if ((pict_rend_cfg->coded_pict_size.width &
+ (VDEC_MB_DIMENSION - 1)) != 0 ||
+ (pict_rend_cfg->coded_pict_size.height &
+ (VDEC_MB_DIMENSION - 1)) != 0) {
+ pr_err("Invalid render configuration coded picture size [%d x %d]. It should be a whole number of MBs in each dimension",
+ pict_rend_cfg->coded_pict_size.width,
+ pict_rend_cfg->coded_pict_size.height);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Check if the stride alignment is multiple of default. */
+ if ((pict_rend_cfg->stride_alignment &
+ (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT - 1)) != 0) {
+ pr_err("Invalid stride alignment %d used. It should be multiple of %d.",
+ pict_rend_cfg->stride_alignment,
+ VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Get pixel format info for regular pixel formats... */
+ if (pix_info->pixfmt < IMG_PIXFMT_ARBPLANAR8)
+ pixel_yuv_get_desc((struct pixel_pixinfo *)pix_info, &pixfmt);
+ else
+ pixel_get_fmt_desc(pix_info->pixfmt, &pixfmt);
+
+ /* Get the coded size for the appropriate orientation */
+ vdecddutils_get_codedsize(pict_rend_cfg, &coded_pict_size);
+
+ /*
+ * Calculate the hardware (inc. constraints) strides and
+ * number of vertical samples for each plane.
+ */
+ total_vert_samples = 0;
+ largest_stride = 0;
+ for (i = 0; i < IMG_MAX_NUM_PLANES; i++) {
+ if (pixfmt.planes[i]) {
+ unsigned int h_stride;
+
+			/* Horizontal stride must be a whole multiple of BOPs. */
+ h_stride = ((coded_pict_size.width +
+ pixfmt.bop_denom - 1) /
+ pixfmt.bop_denom) * pixfmt.bop_numer[i];
+
+ /*
+ * Vertical only has to satisfy whole pixel of
+ * samples.
+ */
+ vert_sample[i] = ((coded_pict_size.height +
+ pixfmt.v_denom - 1) /
+ pixfmt.v_denom) * pixfmt.v_numer[i];
+
+ /*
+ * Obtain a horizontal stride supported by the hardware
+ * (inc. constraints).
+ */
+ result = vdecddutils_get_stride(str_cfg_data, pict_rend_cfg, vert_sample[i],
+ &h_stride, (enum vdec_color_planes)i);
+ if (result != IMG_SUCCESS) {
+ VDEC_ASSERT(0);
+ pr_err("No valid VXD stride found for picture with decoded dimensions [%d x %d] and min stride [%d]",
+ coded_pict_size.width, coded_pict_size.height, h_stride);
+ return result;
+ }
+
+ pict_rend_info->plane_info[i].stride = h_stride;
+ if (i == VDEC_PLANE_VIDEO_UV && (str_cfg_data->vid_std == VDEC_STD_H264 ||
+ str_cfg_data->vid_std == VDEC_STD_HEVC)) {
+ struct pixel_pixinfo *info =
+ pixel_get_pixinfo(pix_info->pixfmt);
+ VDEC_ASSERT(PIXEL_FORMAT_INVALID !=
+ info->chroma_fmt_idc);
+ }
+
+ total_vert_samples += vert_sample[i];
+ if (h_stride > largest_stride)
+ largest_stride = h_stride;
+ }
+ }
+ pict_rend_info->stride_alignment =
+ pict_rend_cfg->stride_alignment > 0 ?
+ pict_rend_cfg->stride_alignment :
+ VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT;
+
+ if (pict_rend_cfg->packed)
+ single_stride = TRUE;
+
+#ifdef HAS_JPEG
+ /* JPEG hardware uses a single (luma) stride for all planes. */
+ if (str_cfg_data->vid_std == VDEC_STD_JPEG) {
+		single_stride = TRUE;
+
+ /* Luma should be largest for this to be used for all planes. */
+ VDEC_ASSERT(largest_stride ==
+ pict_rend_info->plane_info[VDEC_PLANE_VIDEO_Y].stride);
+ }
+#endif
+
+ /* Calculate plane sizes. */
+ for (i = 0; i < IMG_MAX_NUM_PLANES; i++) {
+ if (pixfmt.planes[i]) {
+ struct vdec_pict_size pict_size;
+ unsigned int vert_samples = vert_sample[i];
+ unsigned int plane_align = VDEC_VXD_PICTBUF_ALIGNMENT;
+
+ if (single_stride)
+ pict_rend_info->plane_info[i].stride =
+ largest_stride;
+
+ pict_rend_info->plane_info[i].size =
+ pict_rend_info->plane_info[i].stride *
+ vert_samples;
+ pict_rend_info->plane_info[i].size =
+ ALIGN(pict_rend_info->plane_info[i].size, plane_align);
+ /*
+ * Ensure that the total buffer rendered size is
+ * rounded-up to the picture buffer alignment so that
+ * this plane (within this single buffer) can be
+ * correctly addressed by the hardware at this byte
+ * offset.
+ */
+ if (i == 1 && pict_rend_cfg->packed)
+ /*
+ * Packed buffers must have chroma plane
+ * already aligned since this was factored
+ * into the stride/size calculation.
+ */
+ VDEC_ASSERT(pict_rend_info->rendered_size ==
+ ALIGN(pict_rend_info->rendered_size, plane_align));
+
+ pict_rend_info->plane_info[i].offset = pict_rend_info->rendered_size;
+
+ /* Update the total buffer size (inc. this plane). */
+ pict_rend_info->rendered_size +=
+ pict_rend_info->plane_info[i].size;
+
+ /*
+ * Update the maximum render picture size supported
+ * by all planes of this buffer.
+ */
+ pict_size.width = (pict_rend_info->plane_info[i].stride *
+ pixfmt.bop_denom) / pixfmt.bop_numer[i];
+
+ pict_size.height = (vert_sample[i] * pixfmt.v_denom) / pixfmt.v_numer[i];
+
+ vdecddutils_update_rend_pictsize(pict_size,
+ &pict_rend_info->rend_pict_size);
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("vdecddutils_GetRenderInfo() plane %d stride %u size %u offset %u",
+ i, pict_rend_info->plane_info[i].stride,
+ pict_rend_info->plane_info[i].size,
+ pict_rend_info->plane_info[i].offset);
+#endif
+ }
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("vdecddutils_GetRenderInfo() total %u (inc. alignment for addressing/tiling)",
+ pict_rend_info->rendered_size);
+#endif
+
+ return IMG_SUCCESS;
+}
+
+int vdecddutils_pictbuf_getconfig(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ const struct vdec_str_opconfig *str_opcfg,
+ struct vdec_pict_bufconfig *pict_bufcfg)
+{
+ struct vdec_pict_rendinfo disp_pict_rendinfo;
+ struct vdec_pict_size coded_pict_size;
+ unsigned int ret, i;
+ unsigned int size0, size1;
+
+ /* Validate inputs. */
+ VDEC_ASSERT(str_cfg_data);
+ VDEC_ASSERT(pict_rend_cfg);
+ VDEC_ASSERT(str_opcfg);
+ VDEC_ASSERT(pict_bufcfg);
+
+ /* Clear the picture buffer config before populating */
+ memset(pict_bufcfg, 0, sizeof(struct vdec_pict_bufconfig));
+
+ /* Determine the rounded-up coded sizes (compatible with hardware) */
+ ret = vdecddutils_get_render_info(str_cfg_data,
+ pict_rend_cfg,
+ &str_opcfg->pixel_info,
+ &disp_pict_rendinfo);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Get the coded size for the appropriate orientation */
+ vdecddutils_get_codedsize(pict_rend_cfg, &coded_pict_size);
+
+ pict_bufcfg->coded_width = coded_pict_size.width;
+ pict_bufcfg->coded_height = coded_pict_size.height;
+
+ /*
+	 * Copy the per-plane strides calculated for the render
+	 * configuration into the buffer configuration.
+ */
+ for (i = 0; i < VDEC_PLANE_MAX; i++)
+ pict_bufcfg->stride[i] = disp_pict_rendinfo.plane_info[i].stride;
+
+ /*
+ * Pixel information is taken from that
+ * specified for display.
+ */
+ pict_bufcfg->pixel_fmt = str_opcfg->pixel_info.pixfmt;
+ pr_debug("picture buffer pixel_fmt = %d\n", pict_bufcfg->pixel_fmt);
+
+	/* Byte interleave setting is taken from the render configuration */
+ pict_bufcfg->byte_interleave = pict_rend_cfg->byte_interleave;
+ pr_debug("picture buffer byte_interleave = %d\n", pict_bufcfg->byte_interleave);
+ /* Stride alignment */
+ pict_bufcfg->stride_alignment = pict_rend_cfg->stride_alignment > 0 ?
+ pict_rend_cfg->stride_alignment : VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT;
+
+ pr_debug("picture buffer stride_alignment = %d\n", pict_bufcfg->stride_alignment);
+ /* Chroma offset taken as calculated for render configuration. */
+ pict_bufcfg->chroma_offset[0] = disp_pict_rendinfo.plane_info[VDEC_PLANE_VIDEO_UV].offset;
+ pict_bufcfg->chroma_offset[1] = disp_pict_rendinfo.plane_info[VDEC_PLANE_VIDEO_V].offset;
+
+ if (pict_rend_cfg->packed && str_opcfg->pixel_info.num_planes > 1) {
+ pict_bufcfg->packed = vdecddutils_is_packed(&disp_pict_rendinfo, pict_rend_cfg);
+ if (!pict_bufcfg->packed) {
+ /* Report if unable to meet request to pack. */
+ pr_err("Request for packed buffer could not be met");
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ size0 = ALIGN(pict_bufcfg->chroma_offset[0], VDEC_VXD_PICTBUF_ALIGNMENT);
+ size1 = ALIGN(pict_bufcfg->chroma_offset[1], VDEC_VXD_PICTBUF_ALIGNMENT);
+
+ if (pict_bufcfg->chroma_offset[0] != size0 ||
+ pict_bufcfg->chroma_offset[1] != size1) {
+ pr_err("Chroma plane could not be located on a %d byte boundary (investigate stride calculations)",
+ VDEC_VXD_PICTBUF_ALIGNMENT);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+ } else {
+ pict_bufcfg->packed = FALSE;
+ }
+
+ pict_bufcfg->buf_size = disp_pict_rendinfo.rendered_size;
+
+ /* Return success */
+ return IMG_SUCCESS;
+}
+
+int vdecddutils_get_display_region(const struct vdec_pict_size *coded_size,
+ const struct vdec_rect *orig_disp_region,
+ struct vdec_rect *disp_region)
+{
+ int ret = IMG_SUCCESS;
+
+ /* Validate inputs. */
+ VDEC_ASSERT(coded_size);
+ VDEC_ASSERT(orig_disp_region);
+ VDEC_ASSERT(disp_region);
+ if (!coded_size || !orig_disp_region || !disp_region)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /*
+ * In the simplest case the display region is the same as
+ * that defined in the bitstream.
+ */
+ *disp_region = *orig_disp_region;
+
+ if (orig_disp_region->height == 0 || orig_disp_region->width == 0 ||
+ coded_size->height == 0 || coded_size->width == 0) {
+ pr_err("Invalid params to calculate display region:");
+ pr_err("Display Size: [%d,%d]", orig_disp_region->width, orig_disp_region->height);
+ pr_err("Coded Size : [%d,%d]", coded_size->width, coded_size->height);
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ return ret;
+}
+
+int vdecddutils_pictbuf_getinfo(const struct vdec_str_configdata *str_cfg_data,
+ const struct vdec_pict_rend_config *pict_rend_cfg,
+ const struct vdec_str_opconfig *str_op_cfg,
+ struct vdec_pict_rendinfo *pict_rend_info)
+{
+ unsigned int ret;
+
+ /* Validate inputs. */
+ VDEC_ASSERT(str_cfg_data);
+ VDEC_ASSERT(pict_rend_cfg);
+ VDEC_ASSERT(str_op_cfg);
+ VDEC_ASSERT(pict_rend_info);
+
+ ret = vdecddutils_get_render_info(str_cfg_data, pict_rend_cfg,
+ &str_op_cfg->pixel_info,
+ pict_rend_info);
+ VDEC_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ return IMG_SUCCESS;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecfw_share.h b/drivers/media/platform/vxe-vxd/decoder/vdecfw_share.h
new file mode 100644
index 000000000000..7c6b9df00472
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecfw_share.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC SYSDEV and UI Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstream
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _VDECFW_SHARE_H_
+#define _VDECFW_SHARE_H_
+
+/*
+ * This macro sets alignment for a field structure.
+ * Parameters :
+ * a - alignment value
+ * t - field type
+ * n - field name
+ */
+#define IMG_ALIGN_FIELD(a, t, n) t n __aligned(a)
+
+/*
+ * Field alignments in shared data structures
+ */
+/* Default field alignment */
+#define VDECFW_SHARE_DEFAULT_ALIGNMENT 4
+/* Pointer field alignment */
+#define VDECFW_SHARE_PTR_ALIGNMENT 4
+
+#endif /* _VDECFW_SHARE_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vdecfw_shared.h b/drivers/media/platform/vxe-vxd/decoder/vdecfw_shared.h
new file mode 100644
index 000000000000..a582987d45bb
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vdecfw_shared.h
@@ -0,0 +1,893 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public data structures and enums for the firmware
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VDECFW_H_
+#define _VDECFW_H_
+
+#include "img_msvdx_core_regs.h"
+#include "vdecfw_share.h"
+
+/* This type defines the buffer type */
+enum img_buffer_type {
+ IMG_BUFFERTYPE_FRAME = 0,
+ IMG_BUFFERTYPE_FIELD_TOP,
+ IMG_BUFFERTYPE_FIELD_BOTTOM,
+ IMG_BUFFERTYPE_PAIR,
+ IMG_BUFFERTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Number of scaling coefficients */
+#define VDECFW_NUM_SCALE_COEFFS 4
+
+/*
+ * maximum number of pictures handled by the firmware
+ * for H.264 (largest requirement): 32 for 4 view MVC
+ */
+#define VDECFW_MAX_NUM_PICTURES 32
+#define VDECFW_MAX_NUM_VIEWS 4
+#define EMERALD_CORE 6
+
+/*
+ * maximum number of colocated pictures handled by
+ * firmware in FWBSP mode
+ */
+#define VDECFWBSP_MAX_NUM_COL_PICS 16
+
+/* Maximum number of colour planes. */
+#define VDECFW_PLANE_MAX 4
+
+#define VDECFW_NON_EXISTING_PICTURE_TID (0xffffffff)
+
+#define NO_VALUE 0
+
+/* Indicates whether a cyclic sequence number (x) has reached another (y). */
+#define HAS_X_REACHED_Y(x, y, range, type) \
+ ({ \
+ type __x = x; \
+ type __y = y; \
+ type __range = range; \
+ (((((__x) - (__y) + (__range)) % (__range)) <= \
+ (((__y) - (__x) + (__range)) % (__range))) ? TRUE : FALSE); })
+
+/* Indicates whether a cyclic sequence number (x) has passed another (y). */
+#define HAS_X_PASSED_Y(x, y, range, type) \
+ ({ \
+ type __x = x; \
+ type __y = y; \
+ type __range = range; \
+ (((((__x) - (__y) + (__range)) % (__range)) < \
+ (((__y) - (__x) + (__range)) % (__range))) ? TRUE : FALSE); })
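+
+/*
+ * Worked example (illustrative only): with a cyclic range of 256,
+ * HAS_X_REACHED_Y(5, 250, 256, unsigned int) is TRUE because the
+ * forward distance (5 - 250 + 256) % 256 = 11 does not exceed the
+ * backward distance (250 - 5 + 256) % 256 = 245; i.e. 5 is treated as
+ * a wrap-around past 250 rather than a value far behind it.
+ */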
+
+#define FWIF_BIT_MASK(num) ((1 << (num)) - 1)
+
+/*
+ * Number of bits in transaction ID used to represent picture number in stream.
+ */
+#define FWIF_NUMBITS_STREAM_PICTURE_ID 16
+/* Number of bits in transaction ID used to represent picture number in core. */
+#define FWIF_NUMBITS_CORE_PICTURE_ID 4
+/* Number of bits in transaction ID used to represent stream id. */
+#define FWIF_NUMBITS_STREAM_ID 8
+/* Number of bits in transaction ID used to represent core id. */
+#define FWIF_NUMBITS_CORE_ID 4
+
+/* Offset in transaction ID to picture number in stream. */
+#define FWIF_OFFSET_STREAM_PICTURE_ID 0
+/* Offset in transaction ID to picture number in core. */
+#define FWIF_OFFSET_CORE_PICTURE_ID \
+ (FWIF_OFFSET_STREAM_PICTURE_ID + FWIF_NUMBITS_STREAM_PICTURE_ID)
+/* Offset in transaction ID to stream id. */
+#define FWIF_OFFSET_STREAM_ID \
+ (FWIF_OFFSET_CORE_PICTURE_ID + FWIF_NUMBITS_CORE_PICTURE_ID)
+/* Offset in transaction ID to core id. */
+#define FWIF_OFFSET_CORE_ID \
+ (FWIF_OFFSET_STREAM_ID + FWIF_NUMBITS_STREAM_ID)
+
+/* Picture id (stream) from transaction id. */
+#define GET_STREAM_PICTURE_ID(transaction_id) \
+ ((transaction_id) & FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_PICTURE_ID))
+/* Picture id (core) from transaction id. */
+#define GET_CORE_PICTURE_ID(transaction_id) \
+ (((transaction_id) >> FWIF_OFFSET_CORE_PICTURE_ID) & \
+ FWIF_BIT_MASK(FWIF_NUMBITS_CORE_PICTURE_ID))
+/* Stream id from transaction id. */
+#define GET_STREAM_ID(transaction_id) \
+ (((transaction_id) >> FWIF_OFFSET_STREAM_ID) & \
+ FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_ID))
+/* Core id from transaction id. */
+#define GET_CORE_ID(transaction_id) \
+ (((transaction_id) >> FWIF_OFFSET_CORE_ID) & \
+ FWIF_BIT_MASK(FWIF_NUMBITS_CORE_ID))
+
+/* Picture id (stream) for transaction id. */
+#define SET_STREAM_PICTURE_ID(str_pic_id) \
+ (((str_pic_id) & FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_PICTURE_ID)) << \
+ FWIF_OFFSET_STREAM_PICTURE_ID)
+/* Picture id (core) for transaction id. */
+#define SET_CORE_PICTURE_ID(core_pic_id) \
+ (((core_pic_id) % (1 << FWIF_NUMBITS_CORE_PICTURE_ID)) << \
+ FWIF_OFFSET_CORE_PICTURE_ID)
+/* Stream id for transaction id. */
+#define SET_STREAM_ID(stream_id) \
+ (((stream_id) & FWIF_BIT_MASK(FWIF_NUMBITS_STREAM_ID)) << \
+ FWIF_OFFSET_STREAM_ID)
+/* Core id for transaction id. */
+#define SET_CORE_ID(core_id) \
+ (((core_id) & FWIF_BIT_MASK(FWIF_NUMBITS_CORE_ID)) << \
+ FWIF_OFFSET_CORE_ID)
+/* flag checking */
+#define FLAG_MASK(_flagname_) ((1 << _flagname_ ## _SHIFT))
+#define FLAG_IS_SET(_flagsword_, _flagname_) \
+ (((_flagsword_) & FLAG_MASK(_flagname_)) ? TRUE : FALSE)
+
+/* This type defines the parser component types */
+enum vdecfw_codectype {
+ VDECFW_CODEC_H264 = 0, /* H.264, AVC, MVC */
+ VDECFW_CODEC_MPEG4, /* MPEG4, H.263, DivX, Sorenson */
+ VDECFW_CODEC_VP8, /* VP8 */
+
+ VDECFW_CODEC_VC1, /* VC1 (includes WMV9) */
+ VDECFW_CODEC_MPEG2, /* MPEG2 */
+
+ VDECFW_CODEC_JPEG, /* JPEG */
+
+ VDECFW_CODEC_VP6, /* VP6 */
+ VDECFW_CODEC_AVS, /* AVS */
+ VDECFW_CODEC_RV, /* RV30, RV40 */
+
+ VDECFW_CODEC_HEVC, /* HEVC/H265 */
+
+ VDECFW_CODEC_VP9, /* VP9 */
+
+ VDECFW_CODEC_MAX, /* End Marker */
+
+ VDEC_CODEC_NONE = -1, /* No codec */
+ VDEC_CODEC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* This type defines the FW parser mode - SCP, size delimited, etc. */
+enum vdecfw_parsermode {
+ /* Every NAL is expected to have SCP */
+ VDECFW_SCP_ONLY = 0,
+ /* Every NAL is expected to be size delimited with field size 4 */
+ VDECFW_SIZE_DELIMITED_4_ONLY,
+ /* Every NAL is expected to be size delimited with field size 2 */
+ VDECFW_SIZE_DELIMITED_2_ONLY,
+ /* Every NAL is expected to be size delimited with field size 1 */
+ VDECFW_SIZE_DELIMITED_1_ONLY,
+ /* Size of NAL is provided in the picture header */
+ VDECFW_SIZE_SIDEBAND,
+ /* Unit is a skipped picture with no data to process */
+ VDECFW_SKIPPED_PICTURE,
+ VDECFW_SKIPPED_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This enum defines values of ENTDEC_BE_MODE field of VEC_ENTDEC_BE_CONTROL
+ * register and ENTDEC_FE_MODE field of VEC_ENTDEC_FE_CONTROL register.
+ */
+enum vdecfw_msvdxentdecmode {
+ /* JPEG */
+ VDECFW_ENTDEC_MODE_JPEG = 0x0,
+ /* H264 (MPEG4/AVC) */
+ VDECFW_ENTDEC_MODE_H264 = 0x1,
+ /* VC1 */
+ VDECFW_ENTDEC_MODE_VC1 = 0x2,
+ /* MPEG2 */
+ VDECFW_ENTDEC_MODE_MPEG2 = 0x3,
+ /* MPEG4 */
+ VDECFW_ENTDEC_MODE_MPEG4 = 0x4,
+ /* AVS */
+ VDECFW_ENTDEC_MODE_AVS = 0x5,
+ /* WMV9 */
+ VDECFW_ENTDEC_MODE_WMV9 = 0x6,
+ /* MPEG1 */
+ VDECFW_ENTDEC_MODE_MPEG1 = 0x7,
+ /* RealVideo8, with ENTDEC_[BE|FE]_EXTENDED_MODE bit set */
+ VDECFW_ENTDEC_MODE_EXT_REAL8 = 0x0,
+ /* RealVideo9, with ENTDEC_[BE|FE]_EXTENDED_MODE bit set */
+ VDECFW_ENTDEC_MODE_EXT_REAL9 = 0x1,
+ /* VP6, with ENTDEC_[BE|FE]_EXTENDED_MODE bit set */
+ VDECFW_ENTDEC_MODE_EXT_VP6 = 0x2,
+ /* VP8, with ENTDEC_[BE|FE]_EXTENDED_MODE bit set */
+ VDECFW_ENTDEC_MODE_EXT_VP8 = 0x3,
+ /* SVC, with ENTDEC_[BE|FE]_EXTENDED_MODE bit set */
+ VDECFW_ENTDEC_MODE_EXT_SVC = 0x4,
+ VDECFW_ENTDEC_MODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This describes the Firmware Parser checkpoints in VEC Local RAM.
+ * Each checkpoint is updated with the TransactionID of the picture as it passes
+ * that point in its decode. Together they describe the current position of
+ * pictures in the VXD/Firmware pipeline.
+ *
+ * Numbers indicate point in the "VDEC Firmware Component Timing" diagram.
+ */
+enum vdecfw_progresscheckpoint {
+ /* Decode message has been read */
+ VDECFW_CHECKPOINT_PICTURE_STARTED = 1,
+ /* Firmware has been loaded and bitstream DMA started */
+ VDECFW_CHECKPOINT_FIRMWARE_READY = 2,
+ /* Picture management operations have completed */
+ VDECFW_CHECKPOINT_PICMAN_COMPLETE = 3,
+ /* Firmware context for this picture has been saved */
+ VDECFW_CHECKPOINT_FIRMWARE_SAVED = 4,
+ /*
+ * 1st Picture/Slice header has been read,
+ * registers written and Entdec started
+ */
+ VDECFW_CHECKPOINT_ENTDEC_STARTED = 5,
+ /* 1st Slice has been completed by Entdec */
+ VDECFW_CHECKPOINT_FE_1SLICE_DONE = 6,
+ /* Parsing of picture has completed on FE */
+ VDECFW_CHECKPOINT_FE_PARSE_DONE = 7,
+ /* Picture end code has been read and picture closed */
+ VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE = 8,
+ /* Picture has started decoding on VXD Backend */
+ VDECFW_CHECKPOINT_BE_PICTURE_STARTED = 9,
+ /* 1st Slice has completed on VXD Backend */
+ VDECFW_CHECKPOINT_BE_1SLICE_DONE = 10,
+ /* Picture decode has completed and done message sent to the Host */
+ VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE = 11,
+#ifndef FW_STACK_USAGE_TRACKING
+ /* General purpose check point 1 */
+ VDECFW_CHECKPOINT_AUX1 = 12,
+ /* General purpose check point 2 */
+ VDECFW_CHECKPOINT_AUX2 = 13,
+ /* General purpose check point 3 */
+ VDECFW_CHECKPOINT_AUX3 = 14,
+ /* General purpose check point 4 */
+ VDECFW_CHECKPOINT_AUX4 = 15,
+#endif /* ndef FW_STACK_USAGE_TRACKING */
+ VDECFW_CHECKPOINT_MAX,
+ /*
+ * Indicate which checkpoints mark the start and end of each
+ * group (FW, FE and BE).
+ * The start and end values should be updated if new checkpoints are
+ * added before the current start or after the current end of any group.
+ */
+ VDECFW_CHECKPOINT_FW_START = VDECFW_CHECKPOINT_PICTURE_STARTED,
+ VDECFW_CHECKPOINT_FW_END = VDECFW_CHECKPOINT_FIRMWARE_SAVED,
+ VDECFW_CHECKPOINT_FE_START = VDECFW_CHECKPOINT_ENTDEC_STARTED,
+ VDECFW_CHECKPOINT_FE_END = VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE,
+ VDECFW_CHECKPOINT_BE_START = VDECFW_CHECKPOINT_BE_PICTURE_STARTED,
+ VDECFW_CHECKPOINT_BE_END = VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE,
+ VDECFW_CHECKPOINT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Number of auxiliary firmware checkpoints. */
+#define VDECFW_CHECKPOINT_AUX_COUNT 4
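+
+/*
+ * Illustrative helper (an assumption, not part of the original
+ * interface): the START/END markers above make it simple to classify a
+ * checkpoint into its pipeline group, e.g. for the front-end:
+ */
+static inline int vdecfw_checkpoint_is_fe(enum vdecfw_progresscheckpoint cp)
+{
+ return cp >= VDECFW_CHECKPOINT_FE_START &&
+ cp <= VDECFW_CHECKPOINT_FE_END;
+}
+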
+/* This describes the action currently being done by the Firmware. */
+enum vdecfw_firmwareaction {
+ VDECFW_FWACT_IDLE = 1, /* Firmware is currently doing nothing */
+ VDECFW_FWACT_BASE_LOADING_PSR, /* Loading parser context */
+ VDECFW_FWACT_BASE_SAVING_PSR, /* Saving parser context */
+ VDECFW_FWACT_BASE_LOADING_BEMOD, /* Loading Backend module */
+ VDECFW_FWACT_BASE_LOADING_FEMOD, /* Loading Frontend module */
+ VDECFW_FWACT_PARSER_SLICE, /* Parser active: parsing slice */
+ VDECFW_FWACT_PARSER_PM, /* Parser active: picture management */
+ VDECFE_FWACT_BEMOD_ACTIVE, /* Backend module active */
+ VDECFE_FWACT_FEMOD_ACTIVE, /* Frontend module active */
+ VDECFW_FWACT_MAX,
+ VDECFW_FWACT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This describes the FE_ERR flags word in the VDECFW_MSGID_PIC_DECODED message
+ */
+enum vdecfw_msgflagdecodedfeerror {
+ /* Front-end hardware watchdog timeout (FE_WDT_CM0) */
+ VDECFW_MSGFLAG_DECODED_FEERROR_HWWDT_SHIFT = 0,
+ /* Front-end entdec error (VEC_ERROR_DETECTED_ENTDEC) */
+ VDECFW_MSGFLAG_DECODED_FEERROR_ENTDECERROR_SHIFT,
+ /* Shift-register error (VEC_ERROR_DETECTED_SR) */
+ VDECFW_MSGFLAG_DECODED_FEERROR_SRERROR_SHIFT,
+ /* For cases when B frame comes after I without P. */
+ VDECFW_MSGFLAG_DECODED_MISSING_REFERENCES_SHIFT,
+ /* MMCO operation failed. */
+ VDECFW_MSGFLAG_DECODED_MMCO_ERROR_SHIFT,
+ /* Back-end WDT timeout */
+ VDECFW_MSGFLAG_DECODED_BEERROR_HWWDT_SHIFT,
+ /* Some macroblocks were dropped */
+ VDECFW_MSGFLAG_DECODED_MBS_DROPPED_ERROR_SHIFT,
+ VDECFW_MSGFLAG_DECODED_FEERROR_MAX,
+ VDECFW_MSGFLAG_DECODED_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This type defines the IDs of the messages used to communicate with the
+ * Firmware.
+ *
+ * The Firmware has 3 message buffers, each buffer uses a different set of IDs.
+ * The buffers are:
+ * Host -> FW - Control messages (High Priority: processed in interrupt context)
+ * Host -> FW - Decode commands and associated information
+ * (Normal Priority: processed in baseloop)
+ * FW -> Host - Completion messages
+ */
+enum vdecfw_message_id {
+ /* Control Messages */
+ /*
+ * Host -> FW Padding message
+ * Sent to optionally pad the message buffer
+ */
+ VDECFW_MSGID_BASE_PADDING = 0x01,
+ /*
+ * Host -> FW Initialisation message. Initialisation should be
+ * sent *immediately* after loading the base component,
+ * i.e. while the FW is idle
+ */
+ VDECFW_MSGID_FIRMWARE_INIT,
+ /*
+ * Host -> FW Configuration message
+ * Configuration should be set up after loading the base component
+ * and before decoding the next picture, i.e. while the FW is idle
+ */
+ VDECFW_MSGID_FIRMWARE_CONFIG,
+ /*
+ * Host -> FW Control message
+ * Firmware control command to take immediate effect,
+ * e.g. stop stream, return CRCs, return Performance Data
+ */
+ VDECFW_MSGID_FIRMWARE_CONTROL,
+ VDECFW_MSGID_CONTROL_MAX,
+ /* Decode Commands */
+ /*
+ * Host -> FW Padding message
+ * Sent to optionally pad the message buffer
+ */
+ VDECFW_MSGID_PSR_PADDING = 0x40,
+ /*
+ * Host -> FW Decode message
+ * Describes the picture to decode
+ */
+ VDECFW_MSGID_DECODE_PICTURE,
+ /*
+ * Host -> FW Bitstream buffer information
+ * Information describing a bitstream buffer to DMA to VXD
+ */
+ VDECFW_MSGID_BITSTREAM_BUFFER,
+ /*
+ * Host -> FW Fence message
+ * Generate an interrupt when this is read,
+ * FenceID should be written to a location in VLR
+ */
+ VDECFW_MSGID_FENCE,
+ /*
+ * Host -> FW Batch message
+ * Contains a pointer to a host memory buffer
+ * containing a batch of decode command FW messages
+ */
+ VDECFW_MSGID_BATCH,
+ VDECFW_MSGID_DECODE_MAX,
+ /* Completion Messages */
+ /*
+ * FW -> Host Padding message
+ * Sent to optionally pad the message buffer
+ */
+ VDECFW_MSGID_BE_PADDING = 0x80,
+ /*
+ * FW -> Host Decoded Picture message
+ * Notification of decoded picture including errors recorded
+ */
+ VDECFW_MSGID_PIC_DECODED,
+ /*
+ * FW -> Host CRC message
+ * Optionally sent with Decoded Picture message, contains VXD CRCs
+ */
+ VDECFW_MSGID_PIC_CRCS,
+ /*
+ * FW -> Host Performance message
+ * Optional timestamps at the decode checkpoints and other information
+ * about the image to assist in measuring performance
+ */
+ VDECFW_MSGID_PIC_PERFORMANCE,
+ /* FW -> Host POST calculation test message */
+ VDECFW_MSGID_PIC_POST_RESP,
+ VDECFW_MSGID_COMPLETION_MAX,
+ VDECFW_MSGID_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#define VDECFW_MSGID_CONTROL_TYPES \
+ (VDECFW_MSGID_CONTROL_MAX - VDECFW_MSGID_BASE_PADDING)
+#define VDECFW_MSGID_DECODE_TYPES \
+ (VDECFW_MSGID_DECODE_MAX - VDECFW_MSGID_PSR_PADDING)
+#define VDECFW_MSGID_COMPLETION_TYPES \
+ (VDECFW_MSGID_COMPLETION_MAX - VDECFW_MSGID_BE_PADDING)
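+
+/*
+ * Illustrative helper (an assumption, not in the original interface):
+ * because each message buffer uses a disjoint ID range, the owning
+ * buffer can be recovered from the ID alone, e.g.:
+ */
+static inline int vdecfw_msgid_is_completion(enum vdecfw_message_id id)
+{
+ return id >= VDECFW_MSGID_BE_PADDING &&
+ id < VDECFW_MSGID_COMPLETION_MAX;
+}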
+
+/* This describes the layout of PVDEC Firmware state indicators in Comms RAM. */
+
+/* Maximum number of PVDEC decoding pipes per core supported. */
+#define VDECFW_MAX_DP 3
+
+struct vdecfw_pvdecpipestate {
+ /* TransactionID at each checkpoint */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, check_point[VDECFW_CHECKPOINT_MAX]);
+ /* VDECFW_eFirmwareAction (UINT32 used to guarantee size) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, firmware_action);
+ /* Number of FE Slices processed for the last picture in FE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, fe_slices);
+ /* Number of BE Slices processed for the last picture in BE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, be_slices);
+ /*
+ * Number of FE Slices detected as errored for the last picture
+ * in FE
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, fe_errored_slices);
+ /*
+ * Number of BE Slices detected as errored for the last picture
+ * in BE
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, be_errored_slices);
+ /* Number of BE macroblocks dropped for the last picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, be_mbs_dropped);
+ /* Number of BE macroblocks recovered for the last picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, be_mbs_recovered);
+ /* Position (X,Y) of the last FE macroblock processed for the last picture in FE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, last_fe_mb_xy);
+ /* Position (X,Y) of the last BE macroblock processed for the last picture in BE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, last_be_mb_xy);
+ /* VDECFW_eCodecType - Codec currently loaded */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, curr_codec);
+ /* TRUE if this pipe is available for processing */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, pipe_present);
+};
+
+#ifdef FW_STACK_USAGE_TRACKING
+/* Stack usage info array size. */
+#define VDECFW_STACK_INFO_SIZE (VDECFW_MAX_DP * VDECFW_CHECKPOINT_AUX_COUNT)
+#endif /* FW_STACK_USAGE_TRACKING */
+struct vdecfw_pvdecfirmwarestate {
+ /*
+ * Indicates generic progress taken by firmware
+ * (must be the first item)
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int, fwstep);
+ /* Pipe state array. */
+ struct vdecfw_pvdecpipestate pipestate[VDECFW_MAX_DP];
+#ifdef FW_STACK_USAGE_TRACKING
+ /* Stack usage info array. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, unsigned int,
+ stackinfo[VDECFW_STACK_INFO_SIZE]);
+#endif /* FW_STACK_USAGE_TRACKING */
+};
+
+/*
+ * This describes the flags word in the aui8DisplayFlags
+ * in VDECFW_sBufferControl
+ */
+enum vdecfw_bufflagdisplay {
+ /* TID has been flushed with a "no display" indication */
+ VDECFW_BUFFLAG_DISPLAY_NODISPLAY_SHIFT = 0,
+ /* TID contains an unpaired field */
+ VDECFW_BUFFLAG_DISPLAY_SINGLE_FIELD_SHIFT = 1,
+ /* TID contains field coded picture(s) - single field or pair */
+ VDECFW_BUFFLAG_DISPLAY_FIELD_CODED_SHIFT = 2,
+ /* if TID contains a single field, this defines which field that is */
+ VDECFW_BUFFLAG_DISPLAY_BOTTOM_FIELD_SHIFT = 3,
+ /* if TID contains a frame with two interlaced fields */
+ VDECFW_BUFFLAG_DISPLAY_INTERLACED_FIELDS_SHIFT = 4,
+ /* End marker */
+ VDECFW_BUFFLAG_DISPLAY_MAX = 8,
+ VDECFW_BUFFLAG_DISPLAY_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This describes the flags in the ui8PictMgmtFlags field in
+ * VDECFW_sBufferControl
+ */
+enum vdecfw_picmgmflags {
+ /* Picture management for this picture successfully executed */
+ VDECFW_PICMGMTFLAG_PICTURE_EXECUTED_SHIFT = 0,
+ /*
+ * Picture management for the first field of this picture
+ * successfully executed
+ */
+ VDECFW_PICMGMTFLAG_1ST_FIELD_EXECUTED_SHIFT = 0,
+ /*
+ * Picture management for the second field of this picture
+ * successfully executed
+ */
+ VDECFW_PICMGMTFLAG_2ND_FIELD_EXECUTED_SHIFT = 1,
+ VDECFW_PICMGMTFLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Macro for checking if picture management was successfully executed for
+ * field coded picture
+ */
+#define VDECFW_PICMGMT_FIELD_CODED_PICTURE_EXECUTED(_flagsword_) \
+ ((FLAG_IS_SET((_flagsword_), \
+ VDECFW_PICMGMTFLAG_1ST_FIELD_EXECUTED) && \
+ FLAG_IS_SET((_flagsword_), \
+ VDECFW_PICMGMTFLAG_2ND_FIELD_EXECUTED)) ? \
+ TRUE : FALSE)
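+/*
+ * Usage sketch (illustrative only):
+ *     if (VDECFW_PICMGMT_FIELD_CODED_PICTURE_EXECUTED(
+ *             buf_control->picmgmt_flags))
+ * indicates that both fields of the picture were managed successfully.
+ */
+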
+/* This describes the REAL related data for the current picture. */
+struct vdecfw_real_data {
+ /* Picture width */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, width);
+ /* Picture height */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, height);
+ /* Scaled Picture Width */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, scaled_width);
+ /* Scaled Picture Height */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, scaled_height);
+ /* Timestamp parsed in the firmware */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, timestamp);
+};
+
+/* This describes the HEVC related data for the current picture. */
+struct vdecfw_hevcdata {
+ /* POC */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT, int, pic_order_count);
+};
+
+/*
+ * This describes the buffer control structure that is used by the firmware to
+ * signal to the Host to control the display and release of buffers.
+ */
+struct vdecfw_buffer_control {
+ /*
+ * List of TransactionIDs indicating buffers ready to display,
+ * in display order
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, display_list[VDECFW_MAX_NUM_PICTURES]);
+ /*
+ * List of TransactionIDs indicating buffers that are no longer
+ * required for reference
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ release_list[VDECFW_MAX_NUM_PICTURES +
+ VDECFW_MAX_NUM_VIEWS]);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short,
+ display_view_ids[VDECFW_MAX_NUM_PICTURES]);
+ /* List of flags for each TID in the DisplayList */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, display_flags[VDECFW_MAX_NUM_PICTURES]);
+ /* Number of TransactionIDs in aui32DisplayList */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, display_list_length);
+ /* Number of TransactionIDs in aui32ReleaseList */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, release_list_length);
+ union {
+ struct vdecfw_real_data real_data;
+ struct vdecfw_hevcdata hevc_data;
+ };
+ /*
+ * Refers to the picture decoded with the current transaction ID
+ * (not affected by merge with field of previous transaction ID)
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum img_buffer_type, dec_pict_type);
+ /* Set if the current field is a pair to the previous field */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, second_field_of_pair);
+ /*
+ * Set if for a pair we decoded first the top field or
+ * if we have only top field
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, top_field_first);
+ /* Top field is first to be displayed */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, out_top_field_first);
+ /* Picture management flags for this picture */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, picmgmt_flags);
+ /*
+ * List of TransactionIDs indicating buffers used as references
+ * when decoding current picture
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, ref_list[VDECFW_MAX_NUM_PICTURES]);
+};
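+
+/*
+ * Usage sketch (illustrative only): the host walks the display list in
+ * order, pairing each TransactionID with its display flags, e.g.:
+ *     for (i = 0; i < buf_ctrl->display_list_length; i++) {
+ *         unsigned int tid = buf_ctrl->display_list[i];
+ *
+ *         if (!(buf_ctrl->display_flags[i] &
+ *               FLAG_MASK(VDECFW_BUFFLAG_DISPLAY_NODISPLAY)))
+ *             display the buffer identified by tid;
+ *     }
+ * where buf_ctrl points to a struct vdecfw_buffer_control.
+ */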
+
+/*
+ * This describes an image buffer for one picture supplied to
+ * the firmware by the host
+ */
+struct vdecfw_image_buffer {
+ /* Virtual Address of each plane */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, byte_offset[VDECFW_PLANE_MAX]);
+};
+
+/* This type defines the picture commands that are prepared for the firmware. */
+enum vdecfw_picture_cmds {
+ /* Reconstructed buffer */
+ /* DISPLAY_PICTURE_SIZE */
+ VDECFW_CMD_DISPLAY_PICTURE,
+ /* CODED_PICTURE_SIZE */
+ VDECFW_CMD_CODED_PICTURE,
+ /* OPERATING_MODE */
+ VDECFW_CMD_OPERATING_MODE,
+ /* LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES */
+ VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS,
+ /* CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES */
+ VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS,
+ /* CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESSES */
+ VDECFW_CMD_CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESS,
+ /* VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS */
+ VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS,
+ /* VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS */
+ VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS,
+ /* CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS */
+ VDECFW_CMD_CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS,
+ /* LUMA_ERROR_PICTURE_BASE_ADDRESSES */
+ VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS,
+ /* CHROMA_ERROR_PICTURE_BASE_ADDRESSES */
+ VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS,
+ /* AUX_MSB_BUFFER_BASE_ADDRESSES (VC-1 only) */
+ VDECFW_CMD_AUX_MSB_BUFFER,
+ /* INTRA_BUFFER_BASE_ADDRESS (various) */
+ VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS,
+ /* AUX_LINE_BUFFER_BASE_ADDRESS */
+ VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS,
+ /* MBFLAGS_BUFFER_BASE_ADDRESSES (VP8 only) */
+ VDECFW_CMD_MBFLAGS_BUFFER_BASE_ADDRESS,
+ /* FIRST_PARTITION_BASE_ADDRESSES (VP8 only) */
+ VDECFW_CMD_FIRST_PARTITION_BUFFER_BASE_ADDRESS,
+ /* CURRENT_PICTURE_BUFFER_BASE_ADDRESSES (VP8 only) */
+ VDECFW_CMD_CURRENT_PICTURE_BUFFER_BASE_ADDRESS,
+ /* SEGMENTID_BUFFER_BASE_ADDRESSES (VP8 only) */
+ VDECFW_CMD_SEGMENTID_BASE_ADDRESS,
+ /* EXT_OP_MODE (H.264 only) */
+ VDECFW_CMD_EXT_OP_MODE,
+ /* MC_CACHE_CONFIGURATION */
+ VDECFW_CMD_MC_CACHE_CONFIGURATION,
+ /* Alternative output buffer (rotation etc.) */
+ /* ALTERNATIVE_OUTPUT_PICTURE_ROTATION */
+ VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ /* EXTENDED_ROW_STRIDE */
+ VDECFW_CMD_EXTENDED_ROW_STRIDE,
+ /* CHROMA_ROW_STRIDE (H.264 only) */
+ VDECFW_CMD_CHROMA_ROW_STRIDE,
+ /* ALTERNATIVE_OUTPUT_CONTROL */
+ VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL,
+ /* RPR specific commands */
+ /* RPR_AX_INITIAL */
+ VDECFW_CMD_RPR_AX_INITIAL,
+ /* RPR_AX_INCREMENT */
+ VDECFW_CMD_RPR_AX_INCREMENT,
+ /* RPR_AY_INITIAL */
+ VDECFW_CMD_RPR_AY_INITIAL,
+ /* RPR_AY_INCREMENT */
+ VDECFW_CMD_RPR_AY_INCREMENT,
+ /* RPR_PICTURE_SIZE */
+ VDECFW_CMD_RPR_PICTURE_SIZE,
+ /* Scaling specific params */
+ /* SCALED_DISPLAY_SIZE */
+ VDECFW_CMD_SCALED_DISPLAY_SIZE,
+ /* HORIZONTAL_SCALE_CONTROL */
+ VDECFW_CMD_HORIZONTAL_SCALE_CONTROL,
+ /* SCALE_HORIZONTAL_CHROMA (H.264 only) */
+ VDECFW_CMD_SCALE_HORIZONTAL_CHROMA,
+ /* VERTICAL_SCALE_CONTROL */
+ VDECFW_CMD_VERTICAL_SCALE_CONTROL,
+ /* SCALE_VERTICAL_CHROMA (H.264 only) */
+ VDECFW_CMD_SCALE_VERTICAL_CHROMA,
+ /* HORIZONTAL_LUMA_COEFFICIENTS_0 */
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_0,
+ /* HORIZONTAL_LUMA_COEFFICIENTS_1 */
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_1,
+ /* HORIZONTAL_LUMA_COEFFICIENTS_2 */
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_2,
+ /* HORIZONTAL_LUMA_COEFFICIENTS_3 */
+ VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_3,
+ /* VERTICAL_LUMA_COEFFICIENTS_0 */
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_0,
+ /* VERTICAL_LUMA_COEFFICIENTS_1 */
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_1,
+ /* VERTICAL_LUMA_COEFFICIENTS_2 */
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_2,
+ /* VERTICAL_LUMA_COEFFICIENTS_3 */
+ VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_3,
+ /* HORIZONTAL_CHROMA_COEFFICIENTS_0 */
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_0,
+ /* HORIZONTAL_CHROMA_COEFFICIENTS_1 */
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_1,
+ /* HORIZONTAL_CHROMA_COEFFICIENTS_2 */
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_2,
+ /* HORIZONTAL_CHROMA_COEFFICIENTS_3 */
+ VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_3,
+ /* VERTICAL_CHROMA_COEFFICIENTS_0 */
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_0,
+ /* VERTICAL_CHROMA_COEFFICIENTS_1 */
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_1,
+ /* VERTICAL_CHROMA_COEFFICIENTS_2 */
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_2,
+ /* VERTICAL_CHROMA_COEFFICIENTS_3 */
+ VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_3,
+ /* SCALE_OUTPUT_SIZE */
+ VDECFW_CMD_SCALE_OUTPUT_SIZE,
+ /* VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE */
+ VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE,
+ /* VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE */
+ VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE,
+ /* VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE */
+ VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE,
+ VDECFW_SLICE_X_MB_OFFSET,
+ VDECFW_SLICE_Y_MB_OFFSET,
+ VDECFW_SLICE_TYPE,
+ VDECFW_CMD_MAX,
+ VDECFW_CMD_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Size of relocation data attached to VDECFW_sTransaction message in words */
+#define VDECFW_RELOC_SIZE 125
+
+/* This structure defines the MMU Tile configuration. */
+struct vdecfw_mmu_tile_config {
+ /* MMU_CONTROL2 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned char, tilig_scheme);
+ /* MMU_TILE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ mmu_tiling[MSVDX_CORE_CR_MMU_TILE_NO_ENTRIES]);
+ /* MMU_TILE_EXT */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int,
+ mmu_tiling_ext[MSVDX_CORE_CR_MMU_TILE_EXT_NO_ENTRIES]);
+};
+
+/*
+ * This structure contains the transaction attributes to be given to the
+ * firmware.
+ */
+struct vdecfw_transaction {
+ /* Unique identifier for the picture (driver-wide). */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, transation_id);
+ /* Codec */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum vdecfw_codectype, codec);
+ /*
+ * Flag to indicate that the stream needs to be handled
+ * in secure memory (if available)
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ int, secure_stream);
+ /* Unique identifier for the current stream */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, stream_id);
+ /* Dictates to the FW parser how the NALs are delimited */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ enum vdecfw_parsermode, parser_mode);
+ /* Address from which to load the parser context data. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, ctx_load_addr);
+ /*
+ * Address to save the parser state data including the updated
+ * "parser context data".
+ */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, ctx_save_addr);
+ /* Size of the parser context data in bytes. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, ctx_size);
+ /* Address to save the "buffer control" data. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, ctrl_save_addr);
+ /* Size of the buffer control data in bytes. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, ctrl_size);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pict_cmds[VDECFW_CMD_MAX]);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_width_inmbs);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, pic_height_inmbs);
+
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, mbparams_base_addr);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, mbparams_size_per_plane);
+ /* Address of VLC table data. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, vlc_tables_addr);
+ /* Size of the VLC table data in bytes. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, vlc_tables_size);
+ /* Address of VLC index table data. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, vlc_index_table_addr);
+ /* Size of the VLC index table data in bytes. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, vlc_index_table_size);
+ /* Address of parser picture header. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, psr_hdr_addr);
+ /* Size of the parser picture header in bytes. */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, psr_hdr_size);
+ /* Address of Sequence Info in the Host (secure) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, sequence_info_source);
+ /* Address of PPS Info in the Host (secure) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, pps_info_source);
+ /* Address of Second PPS Info in the Host (secure) */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_PTR_ALIGNMENT,
+ unsigned int, second_pps_info_source);
+ /* MMU Tile config comes down with each transaction. */
+ struct vdecfw_mmu_tile_config mmu_tile_config;
+};
+
+/*
+ * This structure contains the info for extracting a subset of VLC tables
+ * indexed inside the index table.
+ * vlc_table_offset is the offset to the first table inside the index table.
+ * vlc_consecutive_tables indicates the number of consecutive entries (from
+ * vlc_table_offset to vlc_table_offset + vlc_consecutive_tables) which
+ * will be copied.
+ */
+struct vdecfw_vlc_table_info {
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, vlc_table_offset);
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned short, vlc_consecutive_tables);
+};
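+
+/*
+ * Example (illustrative only): vlc_table_offset = 8 with
+ * vlc_consecutive_tables = 3 selects entries 8, 9 and 10 of the index
+ * table for copying.
+ */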
+
+/* This structure defines the RENDEC buffer configuration. */
+struct vdecfw_rendec_config {
+ /* VEC_RENDEC_CONTROL0 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, regvec_rendec_control0);
+ /* VEC_RENDEC_CONTROL1 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, regvec_rendec_control1);
+ /* VEC_RENDEC_BASE_ADDR0 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, rendec_buffer_baseaddr0);
+ /* VEC_RENDEC_BASE_ADDR1 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, rendec_buffer_baseaddr1);
+ /* VEC_RENDEC_BUFFER_SIZE */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, regvec_rendec_buffer_size);
+ /* VEC_RENDEC_CONTEXT0 - VEC_RENDEC_CONTEXT5 */
+ IMG_ALIGN_FIELD(VDECFW_SHARE_DEFAULT_ALIGNMENT,
+ unsigned int, rendec_initial_ctx[6]);
+};
+
+#endif /* _VDECFW_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_core.c b/drivers/media/platform/vxe-vxd/decoder/vxd_core.c
new file mode 100644
index 000000000000..b283841e2f71
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_core.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC VXD Core component function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/time64.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_dec_common.h"
+#include "vxd_pvdec_priv.h"
+#include "img_errors.h"
+
+#define VXD_RENDEC_SIZE (5 * 1024 * 1024)
+
+#define VXD_MSG_CNT_SHIFT 8
+#define VXD_MSG_CNT_MASK 0xff00
+#define VXD_MAX_MSG_CNT ((1 << VXD_MSG_CNT_SHIFT) - 1)
+#define VXD_MSG_STR_MASK 0xff
+#define VXD_INVALID_ID (-1)
+
+#define MAP_FIRMWARE_TO_STREAM 1
+
+/* Has to be used with VXD->mutex acquired! */
+#define VXD_GEN_MSG_ID(VXD, STR_ID, MSG_ID, vxd_type, str_type) \
+ do { \
+ vxd_type __VXD = VXD; \
+ str_type __STR_ID = STR_ID; \
+ WARN_ON((__STR_ID) > VXD_MSG_STR_MASK); \
+ (__VXD)->msg_cnt = ((__VXD)->msg_cnt + 1) % (VXD_MAX_MSG_CNT); \
+ (MSG_ID) = ((__VXD)->msg_cnt << VXD_MSG_CNT_SHIFT) | \
+ ((__STR_ID) & VXD_MSG_STR_MASK); \
+ } while (0)
+
+/* Have to be used with VXD->mutex acquired! */
+#define VXD_RET_MSG_ID(VXD) ((VXD)->msg_cnt--)
+
+#define VXD_MSG_ID_GET_STR_ID(MSG_ID) \
+ ((MSG_ID) & VXD_MSG_STR_MASK)
+
+#define VXD_MSG_ID_GET_CNT(MSG_ID) \
+ (((MSG_ID) & VXD_MSG_CNT_MASK) >> VXD_MSG_CNT_SHIFT)
+
+static const unsigned char *drv_fw_name = "pvdec_full_bin.fw";
+
+/* Driver context */
+static struct {
+ /* Available memory heaps. List of <struct vxd_heap> */
+ struct list_head heaps;
+ /* heap id for all internal allocations (rendec, firmware) */
+ int internal_heap_id;
+
+ /* Memory Management context for driver */
+ struct mem_ctx *mem_ctx;
+
+ /* List of associated <struct vxd_dev> */
+ struct list_head devices;
+
+ /* Virtual addresses of shared buffers, common for all streams. */
+ struct {
+ unsigned int fw_addr; /* Firmware blob */
+ unsigned int rendec_addr; /* Rendec buffer */
+ } virt_space;
+
+ int initialised;
+} vxd_drv;
+
+/*
+ * struct vxd_heap - node for heaps list
+ * @id: heap id
+ * @list: Entry in <struct vxd_drv:heaps>
+ */
+struct vxd_heap {
+ int id;
+ struct list_head list;
+};
+
+static void img_mmu_callback(enum mmu_callback_type callback_type,
+ int buff_id, void *data)
+{
+ struct vxd_dev *vxd = data;
+
+ if (!vxd)
+ return;
+
+ if (callback_type == MMU_CALLBACK_MAP)
+ return;
+
+ if (vxd->hw_on)
+ vxd_pvdec_mmu_flush(vxd->dev, vxd->reg_base);
+}
+
+static int vxd_is_apm_required(struct vxd_dev *vxd)
+{
+ return vxd->hw_on;
+}
+
+/*
+ * Power on the HW.
+ * Call with vxd->mutex acquired.
+ */
+static int vxd_make_hw_on_locked(struct vxd_dev *vxd, unsigned int fw_ptd)
+{
+ unsigned int fw_size;
+ struct vxd_fw_hdr *fw_hdr;
+ struct vxd_ena_params ena_params;
+ int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+ if (vxd->hw_on)
+ return 0;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: enabling HW\n", __func__);
+#endif
+
+ fw_size = vxd->firmware.fw_size;
+ fw_hdr = vxd->firmware.hdr;
+ if (!fw_size || !fw_hdr) {
+ dev_err(vxd->dev, "%s: firmware missing!\n", __func__);
+ return -ENOENT;
+ }
+
+ memset(&ena_params, 0, sizeof(struct vxd_ena_params));
+
+ ena_params.fw_buf_size = fw_size - sizeof(struct vxd_fw_hdr);
+ ena_params.fw_buf_virt_addr = vxd_drv.virt_space.fw_addr;
+ ena_params.ptd = fw_ptd;
+ ena_params.boot_poll.msleep_cycles = 50;
+ ena_params.crc = 0;
+ ena_params.rendec_addr = vxd_drv.virt_space.rendec_addr;
+ ena_params.rendec_size = (VXD_NUM_PIX_PIPES(vxd->props) *
+ VXD_RENDEC_SIZE) / 4096u;
+
+ ena_params.secure = 0;
+ ena_params.wait_dbg_fifo = 0;
+ ena_params.mem_staller.data = NULL;
+ ena_params.mem_staller.size = 0;
+
+ ret = vxd_pvdec_ena(vxd->dev, vxd->reg_base, &ena_params,
+ fw_hdr, &vxd->freq_khz);
+ /*
+ * Ignore the return code and proceed as usual; it is returned anyway.
+ * The HW is turned on, so we can perform post-mortem analysis
+ * and collect the fw logs when available.
+ */
+
+ vxd->hw_on = 1;
+
+ return ret;
+}
+
+/*
+ * Power off the HW.
+ * Call with vxd->mutex acquired.
+ */
+static void vxd_make_hw_off_locked(struct vxd_dev *vxd, unsigned char suspending)
+{
+ int ret;
+
+ if (!vxd->hw_on)
+ return;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+
+ ret = vxd_pvdec_dis(vxd->dev, vxd->reg_base);
+ vxd->hw_on = 0;
+ if (ret)
+ dev_err(vxd->dev, "%s: failed to power off the VXD!\n", __func__);
+}
+
+/*
+ * Moves all valid items from the queue of items being currently processed to
+ * the pending queue.
+ * Call with vxd->mutex locked
+ */
+static void vxd_rewind_msgs_locked(struct vxd_dev *vxd)
+{
+ struct vxd_item *item, *tmp;
+
+ if (list_empty(&vxd->msgs))
+ return;
+
+ list_for_each_entry_safe(item, tmp, &vxd->msgs, list)
+ list_move(&item->list, &vxd->pend);
+}
+
+static void vxd_report_item_locked(struct vxd_dev *vxd,
+ struct vxd_item *item,
+ unsigned int flags)
+{
+ struct vxd_stream *stream;
+
+ __list_del_entry(&item->list);
+ stream = idr_find(vxd->streams, item->stream_id);
+ if (!stream) {
+ /*
+ * Failed to find associated stream. Probably it was
+ * already destroyed -- drop the item
+ */
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: drop item %p [0x%x]\n", __func__, item, item->msg_id);
+#endif
+ kfree(item);
+ } else {
+ item->msg.out_flags |= flags;
+ list_add_tail(&item->list, &stream->ctx->items_done);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: waking %p\n", __func__, stream->ctx);
+
+ dev_info(vxd->dev, "%s: signaling worker for %p\n", __func__, stream->ctx);
+#endif
+ schedule_work(stream->ctx->work);
+ }
+}
+
+/*
+ * Rewind all items to the pending queue and report those to listener.
+ * Postpone the reset.
+ * Call with vxd->mutex acquired.
+ */
+static void vxd_emrg_reset_locked(struct vxd_dev *vxd, unsigned int flags)
+{
+ cancel_delayed_work(vxd->dwork);
+
+ vxd->emergency = 1;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ if (disable_fw_irq_value != 0) {
+ /*
+ * Previously we have disabled the IRQ, now enable it. This
+ * condition occurs only when firmware non-responsiveness is
+ * detected on the vxd_worker thread. Once the issue is
+ * reproduced, the IRQ is enabled so that the code flow continues.
+ */
+ enable_irq(g_module_irq);
+ }
+#endif
+
+ /*
+ * If the firmware sends more than one reply per item, it's possible
+ * that corresponding item was already removed from vxd-msgs, but the
+ * HW was still processing it and MMU page fault could happen and
+ * trigger execution of this function. So make sure that vxd->msgs
+ * is not empty before rewinding items.
+ */
+ if (!list_empty(&vxd->msgs))
+ /* Move all valid items to the pending queue */
+ vxd_rewind_msgs_locked(vxd);
+
+ {
+ struct vxd_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, &vxd->pend, list) {
+ /*
+ * Exclusive items that were on the pending list
+ * must be reported as canceled
+ */
+ if ((item->msg.out_flags & VXD_FW_MSG_FLAG_EXCL) && !item->msg_id)
+ item->msg.out_flags |= VXD_FW_MSG_FLAG_CANCELED;
+
+ vxd_report_item_locked(vxd, item, flags);
+ }
+ }
+}
+
+static void vxd_handle_io_error_locked(struct vxd_dev *vxd)
+{
+ struct vxd_item *item, *tmp;
+ unsigned int pend_flags = !vxd->hw_on ? VXD_FW_MSG_FLAG_DEV_ERR :
+ VXD_FW_MSG_FLAG_CANCELED;
+
+ list_for_each_entry_safe(item, tmp, &vxd->msgs, list)
+ vxd_report_item_locked(vxd, item, VXD_FW_MSG_FLAG_DEV_ERR);
+
+ list_for_each_entry_safe(item, tmp, &vxd->pend, list)
+ vxd_report_item_locked(vxd, item, pend_flags);
+}
+
+static void vxd_sched_worker_locked(struct vxd_dev *vxd, unsigned int delay_ms)
+{
+ unsigned long long work_at = jiffies + msecs_to_jiffies(delay_ms);
+ int ret;
+
+ /*
+ * Try to queue the work.
+ * This may be also called from the worker context,
+ * so we need to re-arm anyway in case of error
+ */
+ ret = schedule_delayed_work(vxd->dwork, work_at - jiffies);
+ if (ret) {
+ /* Work is already in the queue */
+ /*
+ * Check if new requested time is "before"
+ * the last "time" we scheduled this work at,
+ * if not, do nothing, the worker will do
+ * recalculation for APM/DWR afterwards
+ */
+ if (time_before((unsigned long)work_at, (unsigned long)vxd->work_sched_at)) {
+ /*
+ * Canceling & rescheduling might be problematic,
+ * so just modify it, when needed
+ */
+ ret = mod_delayed_work(system_wq, vxd->dwork, work_at - jiffies);
+ if (!ret)
+ dev_err(vxd->dev, "%s: failed to modify work!\n", __func__);
+ /*
+ * Record the 'time' this work
+ * has been rescheduled at
+ */
+ vxd->work_sched_at = work_at;
+ }
+ } else {
+ /* Record the 'time' this work has been scheduled at */
+ vxd->work_sched_at = work_at;
+ }
+}
+
+static void vxd_monitor_locked(struct vxd_dev *vxd)
+{
+ /* HW is dead, not much sense in rescheduling */
+ if (vxd->hw_dead)
+ return;
+
+ /*
+ * We are not processing anything, but the pending list is not empty;
+ * probably the message FIFO is full, so retrigger the worker.
+ */
+ if (!list_empty(&vxd->pend) && list_empty(&vxd->msgs))
+ vxd_sched_worker_locked(vxd, 1);
+
+ if (list_empty(&vxd->pend) && list_empty(&vxd->msgs) && vxd_is_apm_required(vxd)) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: scheduling APM work (%d ms)!\n", __func__, vxd->hw_pm_delay);
+#endif
+ /*
+ * No items to process and no items being processed -
+ * disable the HW
+ */
+ vxd->pm_start = jiffies;
+ vxd_sched_worker_locked(vxd, vxd->hw_pm_delay);
+ return;
+ }
+
+ if (vxd->hw_dwr_period > 0 && !list_empty(&vxd->msgs)) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: scheduling DWR work (%d ms)!\n",
+ __func__, vxd->hw_dwr_period);
+#endif
+ vxd->dwr_start = jiffies;
+ vxd_sched_worker_locked(vxd, vxd->hw_dwr_period);
+ }
+}
+
+/*
+ * Take first item from pending list and submit it to the hardware.
+ * Has to be called with vxd->mutex locked.
+ */
+static int vxd_sched_single_locked(struct vxd_dev *vxd)
+{
+ struct vxd_item *item = NULL;
+ unsigned long msg_size;
+ int ret;
+
+ item = list_first_entry(&vxd->pend, struct vxd_item, list);
+
+ msg_size = item->msg.payload_size / sizeof(unsigned int);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: checking msg_size: %zu, item: %p\n", __func__, msg_size, item);
+#endif
+
+ /*
+ * In case of exclusive item check if hw/fw is
+ * currently processing anything.
+ * If so we need to wait until items are returned back.
+ */
+ if ((item->msg.out_flags & VXD_FW_MSG_FLAG_EXCL) && !list_empty(&vxd->msgs) &&
+ /*
+ * We can move forward if message
+ * is about to be dropped.
+ */
+ !(item->msg.out_flags & VXD_FW_MSG_FLAG_DROP))
+
+ ret = -EBUSY;
+ else
+ /*
+ * Check if there's enough space
+ * in comms RAM to submit the message.
+ */
+ ret = vxd_pvdec_msg_fit(vxd->dev, vxd->reg_base, msg_size);
+
+ if (ret == 0) {
+ unsigned short msg_id;
+
+ VXD_GEN_MSG_ID(vxd, item->stream_id, msg_id, struct vxd_dev*, unsigned int);
+
+ /* submit the message to the hardware */
+ ret = vxd_pvdec_send_msg(vxd->dev, vxd->reg_base,
+ (unsigned int *)item->msg.payload, msg_size,
+ msg_id, vxd);
+ if (ret) {
+ dev_err(vxd->dev, "%s: failed to send msg!\n", __func__);
+ VXD_RET_MSG_ID(vxd);
+ } else {
+ if (item->msg.out_flags & VXD_FW_MSG_FLAG_DROP) {
+ __list_del_entry(&item->list);
+ kfree(item);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: drop msg 0x%x! (user requested)\n",
+ __func__, msg_id);
+#endif
+ } else {
+ item->msg_id = msg_id;
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev,
+ "%s: moving item %p, id 0x%x to msgs\n",
+ __func__, item, item->msg_id);
+#endif
+ list_move(&item->list, &vxd->msgs);
+ }
+
+ vxd_monitor_locked(vxd);
+ }
+
+ } else if (ret == -EINVAL) {
+ dev_warn(vxd->dev, "%s: invalid msg!\n", __func__);
+ vxd_report_item_locked(vxd, item, VXD_FW_MSG_FLAG_INV);
+ /*
+ * HW is ok, the message was invalid, so don't return an
+ * error
+ */
+ ret = 0;
+ } else if (ret == -EBUSY) {
+ /*
+ * Not enough space. Message is already in the pending queue,
+ * so it will be submitted once we've got space. Delayed work
+ * might have been canceled (if we are currently processing
+ * threaded irq), so make sure that DWR will trigger if it's
+ * enabled.
+ */
+ vxd_monitor_locked(vxd);
+ } else {
+ dev_err(vxd->dev, "%s: failed to check space for msg!\n", __func__);
+ }
+
+ return ret;
+}
+
+/*
+ * Take items from pending list and submit them to the hardware, if space is
+ * available in the ring buffer.
+ * Call with vxd->mutex locked
+ */
+static void vxd_schedule_locked(struct vxd_dev *vxd)
+{
+ unsigned char emergency = vxd->emergency;
+ int ret;
+
+ /* if HW is dead, inform the UM and skip */
+ if (vxd->hw_dead) {
+ vxd_handle_io_error_locked(vxd);
+ return;
+ }
+
+ if (!vxd->hw_on && !list_empty(&vxd->msgs))
+ dev_err(vxd->dev, "%s: msgs not empty when the HW is off!\n", __func__);
+
+ if (list_empty(&vxd->pend)) {
+ vxd_monitor_locked(vxd);
+ return;
+ }
+
+ /*
+ * If the emergency routine was fired, the hw was left ON, so the UM
+ * could do the post-mortem analysis before submitting the next items.
+ * Now we can switch off the hardware.
+ */
+ if (emergency) {
+ vxd->emergency = 0;
+ vxd_make_hw_off_locked(vxd, FALSE);
+ usleep_range(1000, 2000);
+ }
+
+ /* Try to schedule */
+ ret = 0;
+ while (!list_empty(&vxd->pend) && ret == 0) {
+ struct vxd_item *item;
+ struct vxd_stream *stream;
+
+ item = list_first_entry(&vxd->pend, struct vxd_item, list);
+ stream = idr_find(vxd->streams, item->stream_id);
+
+ ret = vxd_make_hw_on_locked(vxd, stream->ptd);
+ if (ret) {
+ dev_err(vxd->dev, "%s: failed to start HW!\n", __func__);
+ vxd->hw_dead = 1;
+ vxd_handle_io_error_locked(vxd);
+ return;
+ }
+
+ ret = vxd_sched_single_locked(vxd);
+ }
+
+ if (ret != 0 && ret != -EBUSY) {
+ dev_err(vxd->dev, "%s: failed to schedule, emrg: %d!\n", __func__, emergency);
+ if (emergency) {
+ /*
+ * Failed to schedule in the emergency mode --
+ * there's no hope. Power off the HW, mark all
+ * items as failed and return them.
+ */
+ vxd_handle_io_error_locked(vxd);
+ return;
+ }
+ /* Let worker try to handle it */
+ vxd_sched_worker_locked(vxd, 0);
+ }
+}
+
+static void stream_worker(void *work)
+{
+ struct vxd_dec_ctx *ctx = NULL;
+ struct vxd_dev *vxd = NULL;
+ struct vxd_item *item;
+
+ work = get_work_buff(work, FALSE);
+ ctx = container_of(work, struct vxd_dec_ctx, work);
+ vxd = ctx->dev;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: got work for ctx %p\n", __func__, ctx);
+#endif
+
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+
+ while (!list_empty(&ctx->items_done)) {
+ item = list_first_entry(&ctx->items_done, struct vxd_item, list);
+
+ item->msg.out_flags &= VXD_FW_MSG_RD_FLAGS_MASK;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "%s: item: %p, payload_size: %d, flags: 0x%x\n",
+ __func__, item, item->msg.payload_size,
+ item->msg.out_flags);
+#endif
+
+ if (ctx->cb)
+ ctx->cb(ctx->res_str_id, item->msg.payload,
+ item->msg.payload_size, item->msg.out_flags);
+
+ __list_del_entry(&item->list);
+ kfree(item);
+ }
+ mutex_unlock(ctx->mutex);
+}
+
+int vxd_create_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx)
+{
+ int ret = 0;
+ unsigned int fw_load_retries = 2 * 1000;
+
+ while (!vxd->firmware.ready && fw_load_retries > 0) {
+ usleep_range(1000, 2000);
+ fw_load_retries--;
+ }
+ if (vxd->firmware.buf_id == 0) {
+ dev_err(vxd->dev, "%s: request fw not yet done!\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* Create memory management context for HW buffers */
+ ret = img_mem_create_ctx(&ctx->mem_ctx);
+ if (ret) {
+ dev_err(vxd->dev, "%s: failed to create mem context (err:%d)!\n", __func__, ret);
+ return ret;
+ }
+
+ ret = img_mmu_ctx_create(vxd->dev, vxd->mmu_config_addr_width,
+ ctx->mem_ctx, vxd_drv.internal_heap_id,
+ img_mmu_callback, vxd, &ctx->mmu_ctx);
+ if (ret) {
+ dev_err(vxd->dev, "%s:%d: failed to create mmu ctx\n", __func__, __LINE__);
+ ret = -EPERM;
+ goto out_destroy_ctx;
+ }
+
+ ret = img_mmu_map(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id,
+ vxd_drv.virt_space.fw_addr,
+ VXD_MMU_PTD_FLAG_READ_ONLY);
+ if (ret) {
+ dev_err(vxd->dev, "%s:%d: failed to map firmware buffer\n", __func__, __LINE__);
+ ret = -EPERM;
+ goto out_destroy_mmu_ctx;
+ }
+
+ ret = img_mmu_map(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id,
+ vxd_drv.virt_space.rendec_addr,
+ VXD_MMU_PTD_FLAG_NONE);
+ if (ret) {
+ dev_err(vxd->dev, "%s:%d: failed to map rendec buffer\n", __func__, __LINE__);
+ ret = -EPERM;
+ goto out_unmap_fw;
+ }
+
+ ret = img_mmu_get_ptd(ctx->mmu_ctx, &ctx->ptd);
+ if (ret) {
+ dev_err(vxd->dev, "%s:%d: failed to get PTD\n", __func__, __LINE__);
+ ret = -EPERM;
+ goto out_unmap_rendec;
+ }
+
+ /* load fw - turned Hw on */
+ ret = vxd_make_hw_on_locked(vxd, ctx->ptd);
+ if (ret) {
+ dev_err(vxd->dev, "%s:%d: failed to start HW\n", __func__, __LINE__);
+ ret = -EPERM;
+ vxd->hw_on = FALSE;
+ goto out_unmap_rendec;
+ }
+
+ init_work(&ctx->work, stream_worker, HWA_DECODER);
+ if (!ctx->work) {
+ ret = -ENOMEM;
+ goto out_unmap_rendec;
+ }
+
+ vxd->fw_refcnt++;
+
+ return ret;
+
+out_unmap_rendec:
+ img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id);
+out_unmap_fw:
+ img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id);
+
+out_destroy_mmu_ctx:
+ img_mmu_ctx_destroy(ctx->mmu_ctx);
+out_destroy_ctx:
+ img_mem_destroy_ctx(ctx->mem_ctx);
+ return ret;
+}
+
+void vxd_destroy_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx)
+{
+ vxd->fw_refcnt--;
+
+ flush_work(ctx->work);
+
+ img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id);
+
+ img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id);
+
+ img_mmu_ctx_destroy(ctx->mmu_ctx);
+
+ img_mem_destroy_ctx(ctx->mem_ctx);
+
+ if (vxd->fw_refcnt == 0) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "FW: put %s\n", drv_fw_name);
+#endif
+ /* Poke the monitor to finally switch off the hw, when needed */
+ vxd_monitor_locked(vxd);
+ }
+}
+
+/* Top half */
+irqreturn_t vxd_handle_irq(void *dev)
+{
+ struct vxd_dev *vxd = ((const struct device *)dev)->driver_data;
+ struct vxd_hw_state *hw_state;
+ int ret;
+
+ if (!vxd)
+ return IRQ_NONE;
+
+ hw_state = &vxd->state.hw_state;
+
+ ret = vxd_pvdec_clear_int(vxd->reg_base, &hw_state->irq_status);
+
+ if (!hw_state->irq_status || ret == IRQ_NONE)
+ dev_warn(dev, "Got spurious interrupt!\n");
+
+ return (irqreturn_t)ret;
+}
+
+static void vxd_drop_msg_locked(const struct vxd_dev *vxd)
+{
+ int ret;
+
+ ret = vxd_pvdec_recv_msg(vxd->dev, vxd->reg_base, NULL, 0, (struct vxd_dev *)vxd);
+ if (ret)
+ dev_warn(vxd->dev, "%s: failed to receive msg!\n", __func__);
+}
+
+#ifdef DEBUG_DECODER_DRIVER
+static void vxd_dbg_dump_msg(const void *dev, const unsigned char *func,
+ const unsigned int *payload,
+ unsigned long msg_size)
+{
+ unsigned int i;
+
+ for (i = 0; i < msg_size; i++)
+ dev_dbg(dev, "%s: msg %d: 0x%08x\n", func, i, payload[i]);
+}
+#endif
+
+static struct vxd_item *vxd_get_orphaned_item_locked(struct vxd_dev *vxd,
+ unsigned short msg_id,
+ unsigned long msg_size)
+{
+ struct vxd_stream *stream;
+ struct vxd_item *item;
+ unsigned short str_id = VXD_MSG_ID_GET_STR_ID(msg_id);
+
+ /* Try to find associated stream */
+ stream = idr_find(vxd->streams, str_id);
+ if (!stream) {
+ /* Failed to find associated stream. */
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: failed to find str_id: %u\n", __func__, str_id);
+#endif
+ return NULL;
+ }
+
+ item = kzalloc(sizeof(*item) + (msg_size * sizeof(unsigned int)), GFP_KERNEL);
+ if (!item)
+ return NULL;
+
+ item->msg.out_flags = 0;
+ item->stream_id = str_id;
+ item->msg.payload_size = msg_size * sizeof(unsigned int);
+ if (vxd_pvdec_recv_msg(vxd->dev, vxd->reg_base, item->msg.payload, msg_size, vxd)) {
+ dev_err(vxd->dev, "%s: failed to receive msg from VXD!\n", __func__);
+ item->msg.out_flags |= VXD_FW_MSG_FLAG_DEV_ERR;
+ }
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: item: %p str_id: %u\n", __func__, item, str_id);
+#endif
+ /*
+ * Need to put this item on the vxd->msgs list.
+ * It will be removed afterwards.
+ */
+ list_add_tail(&item->list, &vxd->msgs);
+
+#ifdef DEBUG_DECODER_DRIVER
+ vxd_dbg_dump_msg(vxd->dev, __func__, item->msg.payload, msg_size);
+#endif
+
+ return item;
+}
+
+/*
+ * Fetch and process a single message from the MTX->host ring buffer.
+ * <no_more> parameter is used to indicate if there are more messages pending.
+ * <fatal> parameter indicates if there is some serious situation detected.
+ * Has to be called with vxd->mutex locked.
+ */
+static void vxd_handle_single_msg_locked(struct vxd_dev *vxd,
+ unsigned char *no_more,
+ unsigned char *fatal)
+{
+ int ret;
+ unsigned short msg_id, str_id;
+ unsigned long msg_size; /* size in dwords */
+ struct vxd_item *item = NULL, *tmp, *it;
+ struct vxd_stream *stream;
+ void *dev = vxd->dev;
+ unsigned char not_last_msg;
+
+ /* get the message size and id */
+ ret = vxd_pvdec_pend_msg_info(dev, vxd->reg_base, &msg_size, &msg_id,
+ &not_last_msg);
+ if (ret) {
+ dev_err(dev, "%s: failed to get pending msg size!\n", __func__);
+ *no_more = TRUE; /* worker will handle HW failure */
+ return;
+ }
+
+ if (msg_size == 0) {
+ *no_more = TRUE;
+ return;
+ }
+ *no_more = FALSE;
+
+ str_id = VXD_MSG_ID_GET_STR_ID(msg_id);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: [msg] size: %zu, cnt: %u, str_id: %u, id: 0x%x\n",
+ __func__, msg_size, VXD_MSG_ID_GET_CNT(msg_id),
+ str_id, msg_id);
+ dev_dbg(dev, "%s: [msg] not last: %u\n", __func__, not_last_msg);
+#endif
+
+ cancel_delayed_work(vxd->dwork);
+
+ /* Find associated item */
+ list_for_each_entry_safe_reverse(it, tmp, &vxd->msgs, list) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: checking item %p [0x%x] [des: %d]\n",
+ __func__, it, it->msg_id, it->destroy);
+#endif
+ if (it->msg_id == msg_id) {
+ item = it;
+ break;
+ }
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: found item %p [destroy: %d]\n",
+ __func__, item, item ? item->destroy : VXD_INVALID_ID);
+#endif
+
+ /* Find associated stream */
+ stream = idr_find(vxd->streams, str_id);
+ /*
+ * Check for firmware condition in case
+ * when unexpected item is received.
+ */
+ if (!item && !stream && vxd_pvdec_check_fw_status(dev, vxd->reg_base)) {
+ struct vxd_item *orphan;
+ /*
+ * Let's forward the fatal info to the listeners first, relying
+ * on the head of the msg queue.
+ */
+ /* TODO: forward fatal info to all attached processes */
+ item = list_entry(vxd->msgs.prev, struct vxd_item, list);
+ orphan = vxd_get_orphaned_item_locked(vxd, item->msg_id, msg_size);
+ if (!orphan) {
+ dev_warn(dev, "%s: drop msg 0x%x! (no orphan)\n", __func__, item->msg_id);
+ vxd_drop_msg_locked(vxd);
+ }
+
+ *fatal = TRUE;
+ return;
+ }
+
+ if ((item && item->destroy) || !stream) {
+ /*
+ * Item was marked for destruction or we failed to find
+ * associated stream. Probably it was already destroyed --
+ * just ignore the message.
+ */
+ if (item) {
+ __list_del_entry(&item->list);
+ kfree(item);
+ item = NULL;
+ }
+ dev_warn(dev, "%s: drop msg 0x%x! (no owner)\n", __func__, msg_id);
+ vxd_drop_msg_locked(vxd);
+ return;
+ }
+
+ /* Remove item from vxd->msgs list */
+ if (item && item->msg_id == msg_id && !not_last_msg)
+ __list_del_entry(&item->list);
+
+ /*
+ * If there's no such item on a <being processed> list, or the one
+ * found is too small to fit the output, or it's not supposed to be
+ * released, allocate a new one.
+ */
+ if (!item || (msg_size * sizeof(unsigned int) > item->msg.payload_size) || not_last_msg) {
+ struct vxd_item *new_item;
+
+ new_item = kzalloc(sizeof(*new_item) +
+ (msg_size * sizeof(unsigned int)), GFP_KERNEL);
+ if (item) {
+ if (!new_item) {
+ /*
+ * Failed to allocate new item. Mark item as
+ * errored and continue best effort, provide
+ * only part of the message to the userspace
+ */
+ dev_err(dev, "%s: failed to alloc new item!\n", __func__);
+ msg_size = item->msg.payload_size / sizeof(unsigned int);
+ item->msg.out_flags |= VXD_FW_MSG_FLAG_DRV_ERR;
+ } else {
+ *new_item = *item;
+ /*
+ * Do not free the old item if subsequent
+ * messages are expected (it also wasn't
+ * removed from the vxd->msgs list, so we are
+ * not losing a pointer here).
+ */
+ if (!not_last_msg)
+ kfree(item);
+ item = new_item;
+ }
+ } else {
+ if (!new_item) {
+ /*
+ * We have no place to put the message, we have
+ * to drop it
+ */
+ dev_err(dev, "%s: drop msg 0x%08x! (no mem)\n", __func__, msg_id);
+ vxd_drop_msg_locked(vxd);
+ return;
+ }
+ /*
+ * There was no corresponding item on the
+ * <being processed> list and we've allocated
+ * a new one. Initialize it
+ */
+ new_item->msg.out_flags = 0;
+ new_item->stream_id = str_id;
+ item = new_item;
+ }
+ }
+ ret = vxd_pvdec_recv_msg(dev, vxd->reg_base, item->msg.payload, msg_size, vxd);
+ if (ret) {
+ dev_err(dev, "%s: failed to receive msg from VXD!\n", __func__);
+ item->msg.out_flags |= VXD_FW_MSG_FLAG_DEV_ERR;
+ }
+ item->msg.payload_size = msg_size * sizeof(unsigned int);
+
+#ifdef DEBUG_DECODER_DRIVER
+ vxd_dbg_dump_msg(dev, __func__, item->msg.payload, msg_size);
+
+ dev_dbg(dev, "%s: adding to done list, item: %p, msg_size: %zu\n",
+ __func__, item, msg_size);
+#endif
+ list_add_tail(&item->list, &stream->ctx->items_done);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(dev, "%s: signaling worker for %p\n", __func__, stream->ctx);
+#endif
+ schedule_work(stream->ctx->work);
+}
+
+/* Bottom half */
+irqreturn_t vxd_handle_thread_irq(void *dev)
+{
+ unsigned char no_more = FALSE;
+ unsigned char fatal = FALSE;
+ struct vxd_dev *vxd = ((const struct device *)dev)->driver_data;
+	struct vxd_hw_state *hw_state;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	if (!vxd)
+		return IRQ_NONE;
+
+	/* Only dereference vxd after the NULL check above */
+	hw_state = &vxd->state.hw_state;
+	mutex_lock(vxd->mutex);
+
+ /* Spurious interrupt? */
+ if (unlikely(!vxd->hw_on || vxd->hw_dead)) {
+ ret = IRQ_NONE;
+ goto out_unlock;
+ }
+
+ /* Check for critical exception - only MMU faults for now */
+ if (vxd_pvdec_check_irq(dev, vxd->reg_base, hw_state->irq_status) < 0) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "device MMU fault: resetting!!!\n");
+#endif
+ vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_MMU_FAULT);
+ goto out_unlock;
+ }
+
+ /*
+ * Single interrupt can correspond to multiple messages, handle them
+ * all.
+ */
+ while (!no_more)
+ vxd_handle_single_msg_locked(vxd, &no_more, &fatal);
+
+ if (fatal) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "fw fatal condition: resetting!!!\n");
+#endif
+ /* Try to recover ... */
+ vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_FATAL);
+ } else {
+ /* Try to submit items to the HW */
+ vxd_schedule_locked(vxd);
+ }
+
+out_unlock:
+ hw_state->irq_status = 0;
+ mutex_unlock(vxd->mutex);
+
+ return ret;
+}
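+
+/*
+ * Illustrative sketch (an assumption for documentation only -- the actual
+ * registration lives in the platform probe code, outside this file): the
+ * hard IRQ handler and this threaded bottom half are expected to be
+ * registered together, with the probe code adapting the void * signatures
+ * to irq_handler_t, roughly:
+ *
+ *	ret = devm_request_threaded_irq(dev, vxd->module_irq,
+ *					(irq_handler_t)vxd_handle_irq,
+ *					(irq_handler_t)vxd_handle_thread_irq,
+ *					IRQF_ONESHOT, "vxd-dec", dev);
+ */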
+
+static void vxd_worker(void *work)
+{
+ struct vxd_dev *vxd = NULL;
+ struct vxd_hw_state state = { 0 };
+ struct vxd_item *item_tail;
+
+ work = get_delayed_work_buff(work, FALSE);
+ vxd = container_of(work, struct vxd_dev, dwork);
+ mutex_lock(vxd->mutex);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: jif: %lu, pm: %llu dwr: %llu\n", __func__,
+ jiffies, vxd->pm_start, vxd->dwr_start);
+#endif
+
+ /*
+ * Disable the hardware if it has been idle for vxd->hw_pm_delay
+ * milliseconds. Or simply leave the function without doing anything
+ * if the HW is not supposed to be turned off.
+ */
+ if (list_empty(&vxd->pend) && list_empty(&vxd->msgs)) {
+ if (vxd_is_apm_required(vxd)) {
+ unsigned long long dst = vxd->pm_start +
+ msecs_to_jiffies(vxd->hw_pm_delay);
+
+ if (time_is_before_eq_jiffies((unsigned long)dst)) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: pm, power off\n", __func__);
+#endif
+ vxd_make_hw_off_locked(vxd, FALSE);
+ } else {
+ unsigned long long targ = dst - jiffies;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: pm, reschedule: %llu\n", __func__, targ);
+#endif
+ vxd_sched_worker_locked(vxd, jiffies_to_msecs(targ));
+ }
+ }
+ goto out_unlock;
+ }
+
+ /*
+	 * We are not processing anything, but the pending list is not empty
+	 * (if it were, we would have entered the <if statement> above). This
+	 * can happen under specific conditions, when an input message occupies
+	 * almost the whole host->MTX ring buffer and is followed by a large
+	 * padding message.
+ */
+ if (list_empty(&vxd->msgs)) {
+ vxd_schedule_locked(vxd);
+ goto out_unlock;
+ }
+
+ /* Skip emergency reset if it's disabled. */
+ if (vxd->hw_dwr_period <= 0) {
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: skip watchdog\n", __func__);
+#endif
+ goto out_unlock;
+ } else {
+ /* Recalculate DWR when needed */
+ unsigned long long dst = vxd->dwr_start +
+ msecs_to_jiffies(vxd->hw_dwr_period);
+
+ if (time_is_after_jiffies((unsigned long)dst)) {
+ unsigned long long targ = dst - jiffies;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: dwr, reschedule: %llu\n", __func__, targ);
+#endif
+ vxd_sched_worker_locked(vxd, jiffies_to_msecs(targ));
+ goto out_unlock;
+ }
+ }
+
+ /* Get ID of the oldest item being processed by the HW */
+ item_tail = list_entry(vxd->msgs.prev, struct vxd_item, list);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: tail_item: %p, id: 0x%x\n", __func__, item_tail,
+ item_tail->msg_id);
+#endif
+
+ /* Get HW and firmware state */
+ vxd_pvdec_get_state(vxd->dev, vxd->reg_base, VXD_NUM_PIX_PIPES(vxd->props), &state);
+
+ if (vxd->state.msg_id_tail == item_tail->msg_id &&
+ !memcmp(&state, &vxd->state.hw_state,
+ sizeof(struct vxd_hw_state))) {
+ vxd->state.msg_id_tail = 0;
+ memset(&vxd->state.hw_state, 0, sizeof(vxd->state.hw_state));
+ dev_err(vxd->dev, "device DWR(%ums) expired: resetting!!!\n",
+ vxd->hw_dwr_period);
+ vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_DWR);
+ } else {
+ /* Record current state */
+ vxd->state.msg_id_tail = item_tail->msg_id;
+ vxd->state.hw_state = state;
+
+ /* Submit items to the HW, if space is available. */
+ vxd_schedule_locked(vxd);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: scheduling DWR work (%d ms)!\n",
+ __func__, vxd->hw_dwr_period);
+#endif
+ vxd_sched_worker_locked(vxd, vxd->hw_dwr_period);
+ }
+
+out_unlock:
+ mutex_unlock(vxd->mutex);
+}
+
+/*
+ * Lazy initialization of the main driver context (when the first core is
+ * probed) -- we need the heap configuration from sysdev to allocate firmware
+ * buffers.
+ */
+int vxd_init(void *dev, struct vxd_dev *vxd,
+ const struct heap_config heap_configs[], int heaps)
+{
+ int ret, i;
+
+ INIT_LIST_HEAD(&vxd_drv.heaps);
+ vxd_drv.internal_heap_id = VXD_INVALID_ID;
+
+ vxd_drv.mem_ctx = NULL;
+
+ INIT_LIST_HEAD(&vxd_drv.devices);
+
+ vxd_drv.virt_space.fw_addr = 0x42000;
+ vxd_drv.virt_space.rendec_addr = 0xe0000000;
+
+ vxd_drv.initialised = 0;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: vxd drv init, params:\n", __func__);
+#endif
+
+ /* Initialise memory management component */
+ for (i = 0; i < heaps; i++) {
+ struct vxd_heap *heap;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: adding heap of type %d\n",
+ __func__, heap_configs[i].type);
+#endif
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap) {
+ ret = -ENOMEM;
+ goto heap_add_failed;
+ }
+
+ ret = img_mem_add_heap(&heap_configs[i], &heap->id);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to init heap (type %d)!\n",
+ __func__, heap_configs[i].type);
+ kfree(heap);
+ goto heap_add_failed;
+ }
+ list_add(&heap->list, &vxd_drv.heaps);
+
+ /* Implicitly, first heap is used for internal allocations */
+ if (vxd_drv.internal_heap_id < 0) {
+ vxd_drv.internal_heap_id = heap->id;
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: using heap %d for internal alloc\n",
+ __func__, vxd_drv.internal_heap_id);
+#endif
+ }
+ }
+
+ /* Do not proceed if internal heap not defined */
+ if (vxd_drv.internal_heap_id < 0) {
+ dev_err(dev, "%s: failed to locate heap for internal alloc\n", __func__);
+ ret = -EINVAL;
+		/* Fall through to unwind the heaps registered so far */
+ goto heap_add_failed;
+ }
+
+ /* Create memory management context for HW buffers */
+ ret = img_mem_create_ctx(&vxd_drv.mem_ctx);
+ if (ret) {
+ dev_err(dev, "%s: failed to create mem context (err:%d)!\n", __func__, ret);
+ goto create_mem_context_failed;
+ }
+
+ vxd->mem_ctx = vxd_drv.mem_ctx;
+
+ /* Allocate rendec buffer */
+ ret = img_mem_alloc(dev, vxd_drv.mem_ctx, vxd_drv.internal_heap_id,
+ VXD_RENDEC_SIZE * VXD_NUM_PIX_PIPES(vxd->props),
+ (enum mem_attr)0, &vxd->rendec_buf_id);
+ if (ret) {
+ dev_err(dev, "%s: alloc rendec buffer failed (err:%d)!\n", __func__, ret);
+ goto create_mem_context_failed;
+ }
+
+ init_delayed_work(&vxd->dwork, vxd_worker, HWA_DECODER);
+ if (!vxd->dwork) {
+		ret = -ENOMEM;
+ goto create_mem_context_failed;
+ }
+
+ vxd_drv.initialised = 1;
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: vxd drv init done\n", __func__);
+#endif
+ return 0;
+
+create_mem_context_failed:
+heap_add_failed:
+ while (!list_empty(&vxd_drv.heaps)) {
+ struct vxd_heap *heap;
+
+ heap = list_first_entry(&vxd_drv.heaps, struct vxd_heap, list);
+ __list_del_entry(&heap->list);
+ img_mem_del_heap(heap->id);
+ kfree(heap);
+ }
+ vxd_drv.internal_heap_id = VXD_INVALID_ID;
+ return ret;
+}
+
+/*
+ * Get internal_heap_id
+ * TODO: the only error indication is a negative value, so simply passing
+ * the stored value to the caller still conveys the error.
+ * The caller must check for errors.
+ */
+int vxd_g_internal_heap_id(void)
+{
+ return vxd_drv.internal_heap_id;
+}
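+
+/*
+ * Hypothetical caller-side sketch (not part of the driver) showing the
+ * error check the comment above asks for:
+ *
+ *	int heap_id = vxd_g_internal_heap_id();
+ *
+ *	if (heap_id < 0)
+ *		return -EINVAL;	(no internal heap registered yet)
+ *	ret = img_mem_alloc(dev, mem_ctx, heap_id, size,
+ *			    (enum mem_attr)0, &buf_id);
+ */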
+
+void vxd_deinit(struct vxd_dev *vxd)
+{
+ cancel_delayed_work_sync(vxd->dwork);
+ vxd_make_hw_off_locked(vxd, FALSE);
+
+ /* Destroy memory management context */
+ if (vxd_drv.mem_ctx) {
+ /* Deallocate rendec buffer */
+ img_mem_free(vxd_drv.mem_ctx, vxd->rendec_buf_id);
+
+ img_mem_destroy_ctx(vxd_drv.mem_ctx);
+ vxd_drv.mem_ctx = NULL;
+ }
+
+ /* Deinitialize memory management component */
+ while (!list_empty(&vxd_drv.heaps)) {
+ struct vxd_heap *heap;
+
+ heap = list_first_entry(&vxd_drv.heaps, struct vxd_heap, list);
+ __list_del_entry(&heap->list);
+ img_mem_del_heap(heap->id);
+ kfree(heap);
+ }
+
+ vxd_drv.internal_heap_id = VXD_INVALID_ID;
+ vxd_drv.mem_ctx = NULL;
+ vxd_drv.virt_space.fw_addr = 0x0;
+ vxd_drv.virt_space.rendec_addr = 0x0;
+ vxd_drv.initialised = 0;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ /* free the kernel object created to debug */
+ kobject_put(vxd_dec_kobject);
+#endif
+}
+
+static void vxd_fw_loaded(const struct firmware *fw, void *context)
+{
+ struct vxd_dev *vxd = context;
+ unsigned long bin_size;
+ int buf_id;
+ struct vxd_fw_hdr *hdr;
+ void *buf_kptr;
+ int ret;
+ unsigned long size = 0;
+ const unsigned char *data = NULL;
+
+ if (!fw) {
+ dev_err(vxd->dev, "Firmware binary is not present\n");
+ vxd->no_fw = 1;
+ return;
+ }
+
+ size = fw->size;
+ data = fw->data;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "FW: acquired %s size %zu\n", drv_fw_name, size);
+#endif
+
+ /* Sanity verification of the firmware */
+ if (size < sizeof(struct vxd_fw_hdr)) {
+ dev_err(vxd->dev, "%s: firmware file too small!\n", __func__);
+ goto out;
+ }
+
+ bin_size = size - sizeof(struct vxd_fw_hdr);
+ ret = img_mem_alloc(vxd->dev, vxd_drv.mem_ctx, vxd_drv.internal_heap_id,
+ bin_size, (enum mem_attr)0, &buf_id);
+ if (ret) {
+ dev_err(vxd->dev, "%s: failed to alloc fw buffer (err:%d)!\n", __func__, ret);
+ goto out;
+ }
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (!hdr)
+ goto out_release_buf;
+
+ /* Store firmware header in vxd context */
+ memcpy(hdr, data, sizeof(struct vxd_fw_hdr));
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "FW: info cs: %u, bs: %u, id: 0x%08x, ts: %u\n",
+ hdr->core_size, hdr->blob_size,
+ hdr->firmware_id, hdr->timestamp);
+#endif
+
+ /* Check if header is consistent */
+ if (hdr->core_size > bin_size || hdr->blob_size > bin_size) {
+ dev_err(vxd->dev, "%s: got invalid firmware!\n", __func__);
+ goto out_release_hdr;
+ }
+
+ /* Map the firmware buffer to CPU */
+ ret = img_mem_map_km(vxd_drv.mem_ctx, buf_id);
+ if (ret) {
+ dev_err(vxd->dev, "%s: failed to map FW buf to cpu! (%d)\n", __func__, ret);
+ goto out_release_hdr;
+ }
+
+ /* Copy firmware to device buffer */
+ buf_kptr = img_mem_get_kptr(vxd_drv.mem_ctx, buf_id);
+ memcpy(buf_kptr, data + sizeof(struct vxd_fw_hdr), size - sizeof(struct vxd_fw_hdr));
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: FW: copied to buffer %d kptr 0x%p\n", __func__, buf_id, buf_kptr);
+#endif
+
+ img_mem_sync_cpu_to_device(vxd_drv.mem_ctx, buf_id);
+
+ vxd->firmware.fw_size = size;
+ vxd->firmware.buf_id = buf_id;
+ vxd->firmware.hdr = hdr;
+ vxd->firmware.ready = TRUE;
+
+ release_firmware(fw);
+ complete_all(vxd->firmware_loading_complete);
+	pr_debug("Firmware loaded successfully\n");
+ return;
+
+out_release_hdr:
+ kfree(hdr);
+out_release_buf:
+ img_mem_free(vxd_drv.mem_ctx, buf_id);
+out:
+ release_firmware(fw);
+ complete_all(vxd->firmware_loading_complete);
+ kfree(vxd->firmware_loading_complete);
+ vxd->firmware_loading_complete = NULL;
+}
+
+/*
+ * Fetches the firmware from the file system and allocates a buffer for it.
+ */
+int vxd_prepare_fw(struct vxd_dev *vxd)
+{
+ int ret;
+
+ /* Fetch firmware from the file system */
+ struct completion **firmware_loading_complete =
+ (struct completion **)&vxd->firmware_loading_complete;
+
+	/* Allocate the completion itself, not just a pointer to it */
+	*firmware_loading_complete = kmalloc(sizeof(**firmware_loading_complete), GFP_KERNEL);
+ if (!(*firmware_loading_complete)) {
+ pr_err("Memory allocation failed for init_completion\n");
+ return -ENOMEM;
+ }
+ init_completion(*firmware_loading_complete);
+
+ if (!vxd->firmware_loading_complete)
+ return -ENOMEM;
+
+ vxd->firmware.ready = FALSE;
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ drv_fw_name, vxd->dev, GFP_KERNEL, vxd,
+ vxd_fw_loaded);
+ if (ret < 0) {
+ dev_err(vxd->dev, "request_firmware_nowait err: %d\n", ret);
+ complete_all(vxd->firmware_loading_complete);
+ kfree(vxd->firmware_loading_complete);
+ vxd->firmware_loading_complete = NULL;
+ }
+
+ return ret;
+}
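+
+/*
+ * Usage sketch (an assumption -- the real call sites live in the probe and
+ * teardown paths outside this file). vxd_prepare_fw() only kicks off the
+ * asynchronous load; vxd_fw_loaded() runs later and signals
+ * firmware_loading_complete, which vxd_clean_fw_resources() waits on:
+ *
+ *	ret = vxd_prepare_fw(vxd);
+ *	if (ret < 0)
+ *		goto err;
+ *	...
+ *	vxd_clean_fw_resources(vxd);
+ */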
+
+/*
+ * Cleans firmware resources
+ */
+void vxd_clean_fw_resources(struct vxd_dev *vxd)
+{
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+
+ wait_for_completion(vxd->firmware_loading_complete);
+ kfree(vxd->firmware_loading_complete);
+ vxd->firmware_loading_complete = NULL;
+
+ if (vxd->firmware.fw_size) {
+ img_mem_free(vxd_drv.mem_ctx, vxd->firmware.buf_id);
+ kfree(vxd->firmware.hdr);
+ vxd->firmware.hdr = NULL;
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(vxd->dev, "FW: released %s\n", drv_fw_name);
+#endif
+ vxd->firmware.buf_id = VXD_INVALID_ID;
+ }
+}
+
+/*
+ * Submit a message to the VXD.
+ * <ctx> is used to verify that requested stream id (item->stream_id) is valid
+ * for this ctx
+ */
+int vxd_send_msg(struct vxd_dec_ctx *ctx, struct vxd_fw_msg *msg)
+{
+ struct vxd_dev *vxd = ctx->dev;
+ unsigned long msg_size;
+ struct vxd_item *item;
+ struct vxd_stream *stream;
+ int ret;
+
+ if (msg->payload_size < VXD_MIN_INPUT_SIZE)
+ return -EINVAL;
+
+ if (msg->payload_size % sizeof(unsigned int)) {
+ dev_err(vxd->dev, "msg size not aligned! (%u)\n",
+ msg->payload_size);
+ return -EINVAL;
+ }
+
+ msg_size = VXD_MSG_SIZE(*msg);
+
+ if (msg_size > VXD_MAX_INPUT_SIZE)
+ return -EINVAL;
+
+ /* Verify that the gap was left for stream PTD */
+ if (msg->payload[VXD_PTD_MSG_OFFSET] != 0) {
+ dev_err(vxd->dev, "%s: PTD gap missing!\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+ if (ret)
+ return ret;
+
+ stream = idr_find(vxd->streams, ctx->stream.id);
+ if (!stream) {
+ dev_warn(vxd->dev, "%s: invalid stream id requested! (%u)\n",
+ __func__, ctx->stream.id);
+
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ item = kmalloc(sizeof(*item) + msg->payload_size, GFP_KERNEL);
+ if (!item) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(&item->msg, msg, msg_size);
+
+ msg->out_flags &= VXD_FW_MSG_WR_FLAGS_MASK;
+ item->stream_id = ctx->stream.id;
+ item->msg_id = 0;
+ item->msg.out_flags = msg->out_flags;
+ item->destroy = 0;
+
+ /*
+ * Inject the stream PTD into the message. It was already verified that
+ * there is enough space.
+ */
+ item->msg.payload[VXD_PTD_MSG_OFFSET] = stream->ptd;
+
+ list_add_tail(&item->list, &vxd->pend);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev,
+ "%s: added item %p to pend, ptd: 0x%x, str: %u flags: 0x%x\n",
+ __func__, item, stream->ptd, stream->id, item->msg.out_flags);
+#endif
+
+ vxd_schedule_locked(vxd);
+
+out_unlock:
+ mutex_unlock(ctx->mutex);
+
+ return ret;
+}
+
+int vxd_suspend_dev(void *dev)
+{
+ struct vxd_dev *vxd = platform_get_drvdata(to_platform_device(dev));
+
+ mutex_lock(vxd->mutex);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: taking a nap!\n", __func__);
+#endif
+
+ /* Cancel the worker first */
+ cancel_delayed_work(vxd->dwork);
+
+ /* Forcing hardware disable */
+ vxd_make_hw_off_locked(vxd, TRUE);
+
+ /* Move all valid items to the pending queue */
+ vxd_rewind_msgs_locked(vxd);
+
+ mutex_unlock(vxd->mutex);
+
+ return 0;
+}
+
+int vxd_resume_dev(void *dev)
+{
+ struct vxd_dev *vxd = platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+
+ mutex_lock(vxd->mutex);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: waking up!\n", __func__);
+#endif
+
+ mutex_unlock(vxd->mutex);
+
+ return ret;
+}
+
+int vxd_map_buffer_sg(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+ unsigned int str_id,
+ unsigned int buff_id,
+ void *sgt, unsigned int virt_addr,
+ unsigned int map_flags)
+{
+ struct vxd_stream *stream;
+ unsigned int flags = VXD_MMU_PTD_FLAG_NONE;
+ int ret;
+
+ ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+ if (ret)
+ return ret;
+
+ stream = idr_find(vxd->streams, str_id);
+ if (!stream) {
+ dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((map_flags & (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY))
+ == (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY)) {
+ dev_err(vxd->dev, "%s: Bogus mapping flags 0x%x!\n", __func__,
+ map_flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Convert permission flags to internal definitions */
+ if (map_flags & VXD_MAP_FLAG_READ_ONLY)
+ flags |= VXD_MMU_PTD_FLAG_READ_ONLY;
+
+ if (map_flags & VXD_MAP_FLAG_WRITE_ONLY)
+ flags |= VXD_MMU_PTD_FLAG_WRITE_ONLY;
+
+ ret = img_mmu_map_sg(stream->mmu_ctx, ctx->mem_ctx, buff_id, sgt, virt_addr, flags);
+ if (ret) {
+ dev_err(vxd->dev, "%s: map failed!\n", __func__);
+ goto out_unlock;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev,
+ "%s: mapped buf %u to 0x%08x, str_id: %u flags: 0x%x\n",
+ __func__, buff_id, virt_addr, str_id, flags);
+#endif
+
+out_unlock:
+ mutex_unlock(ctx->mutex);
+ return ret;
+}
+
+int vxd_map_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx, unsigned int str_id,
+ unsigned int buff_id,
+ unsigned int virt_addr,
+ unsigned int map_flags)
+{
+ struct vxd_stream *stream;
+ unsigned int flags = VXD_MMU_PTD_FLAG_NONE;
+ int ret;
+
+ ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+ if (ret)
+ return ret;
+
+ stream = idr_find(vxd->streams, str_id);
+ if (!stream) {
+ dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((map_flags & (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY))
+ == (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY)) {
+ dev_err(vxd->dev, "%s: Bogus mapping flags 0x%x!\n", __func__, map_flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Convert permission flags to internal definitions */
+ if (map_flags & VXD_MAP_FLAG_READ_ONLY)
+ flags |= VXD_MMU_PTD_FLAG_READ_ONLY;
+
+ if (map_flags & VXD_MAP_FLAG_WRITE_ONLY)
+ flags |= VXD_MMU_PTD_FLAG_WRITE_ONLY;
+
+ ret = img_mmu_map(stream->mmu_ctx, ctx->mem_ctx, buff_id, virt_addr, flags);
+ if (ret) {
+ dev_err(vxd->dev, "%s: map failed!\n", __func__);
+ goto out_unlock;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev,
+ "%s: mapped buf %u to 0x%08x, str_id: %u flags: 0x%x\n",
+ __func__, buff_id, virt_addr, str_id, flags);
+#endif
+
+out_unlock:
+ mutex_unlock(ctx->mutex);
+ return ret;
+}
+
+int vxd_unmap_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+ unsigned int str_id, unsigned int buff_id)
+{
+ struct vxd_stream *stream;
+ int ret;
+
+ ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+ if (ret)
+ return ret;
+
+ stream = idr_find(vxd->streams, str_id);
+ if (!stream) {
+ dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = img_mmu_unmap(stream->mmu_ctx, ctx->mem_ctx, buff_id);
+ if (ret) {
+		dev_err(vxd->dev, "%s: unmap failed!\n", __func__);
+ goto out_unlock;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(vxd->dev, "%s: unmapped buf %u str_id: %u\n", __func__, buff_id, str_id);
+#endif
+
+out_unlock:
+	mutex_unlock(ctx->mutex);
+ return ret;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_dec.c b/drivers/media/platform/vxe-vxd/decoder/vxd_dec.c
new file mode 100644
index 000000000000..cf3cf9b7b6f0
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_dec.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC SYSDEV and UI Interface function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "core.h"
+#include "h264fw_data.h"
+#include "hevcfw_data.h"
+#include "img_dec_common.h"
+#include "vxd_pvdec_priv.h"
+
+unsigned int get_nbuffers(enum vdec_vid_std std, int w, int h,
+ unsigned int max_num_ref_frames)
+{
+ unsigned int nbuffers;
+
+ switch (std) {
+ case VDEC_STD_H264:
+ /*
+		 * Request the number of buffers from the header bspp
+		 * information using the formula N + display lag.
+		 * The parser passes (2*N).
+ */
+ if (max_num_ref_frames == 0) {
+ nbuffers = DISPLAY_LAG + min(MAX_CAPBUFS_H264,
+ (184320 / ((w / 16) * (h / 16))));
+ } else {
+ nbuffers = max_num_ref_frames + DISPLAY_LAG;
+ }
+ break;
+ case VDEC_STD_HEVC:
+ if (max_num_ref_frames == 0) {
+ if ((w * h) <= (HEVC_MAX_LUMA_PS >> 2))
+ nbuffers = 16;
+ else if ((w * h) <= (HEVC_MAX_LUMA_PS >> 1))
+ nbuffers = 12;
+ else if ((w * h) <= ((3 * HEVC_MAX_LUMA_PS) >> 2))
+ nbuffers = 8;
+ else
+ nbuffers = 6;
+ nbuffers += DISPLAY_LAG;
+ } else {
+ nbuffers = max_num_ref_frames + DISPLAY_LAG;
+ }
+ break;
+#ifdef HAS_JPEG
+ case VDEC_STD_JPEG:
+ /*
+ * Request number of output buffers based on h264 spec
+ * + display delay
+ */
+ nbuffers = DISPLAY_LAG + min(MAX_CAPBUFS_H264,
+ (184320 / ((w / 16) * (h / 16))));
+ break;
+#endif
+ default:
+ nbuffers = 0;
+ }
+
+ return nbuffers;
+}
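+
+/*
+ * Worked example (illustration only): for a 1920x1088 H264 stream with
+ * max_num_ref_frames == 0, (w / 16) * (h / 16) = 120 * 68 = 8160, so
+ * 184320 / 8160 = 22 (integer division), min(16, 22) = 16 and the result
+ * is 16 + DISPLAY_LAG = 19 buffers. With max_num_ref_frames == 4 the
+ * result is simply 4 + DISPLAY_LAG = 7.
+ */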
+
+int vxd_dec_alloc_bspp_resource(struct vxd_dec_ctx *ctx, enum vdec_vid_std vid_std)
+{
+ struct vxd_dev *vxd_dev = ctx->dev;
+ struct device *dev = vxd_dev->v4l2_dev.dev;
+ struct vdec_buf_info buf_info;
+ struct bspp_ddbuf_array_info *fw_sequ = ctx->fw_sequ;
+ struct bspp_ddbuf_array_info *fw_pps = ctx->fw_pps;
+ int attributes = 0, heap_id = 0, size = 0;
+ int i, ret = 0;
+
+ attributes = SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+ SYS_MEMATTRIB_INTERNAL | SYS_MEMATTRIB_CPU_WRITE;
+ heap_id = vxd_g_internal_heap_id();
+
+ size = vid_std == VDEC_STD_HEVC ?
+ sizeof(struct hevcfw_sequence_ps) : sizeof(struct h264fw_sequence_ps);
+
+#ifdef HAS_JPEG
+ if (vid_std == VDEC_STD_JPEG)
+ size = sizeof(struct vdec_jpeg_sequ_hdr_info);
+#endif
+
+ for (i = 0; i < MAX_SEQUENCES; i++) {
+ ret = img_mem_alloc(vxd_dev->dev, ctx->mem_ctx, heap_id,
+ size, (enum mem_attr)attributes,
+ (int *)&fw_sequ[i].ddbuf_info.buf_id);
+ if (ret) {
+ dev_err(dev, "Couldn't allocate sequ buffer %d\n", i);
+ return -ENOMEM;
+ }
+ ret = img_mem_map_km(ctx->mem_ctx, fw_sequ[i].ddbuf_info.buf_id);
+ if (ret) {
+ dev_err(dev, "Couldn't map sequ buffer %d\n", i);
+ return -ENOMEM;
+ }
+ fw_sequ[i].ddbuf_info.cpu_virt_addr = img_mem_get_kptr
+ (ctx->mem_ctx,
+ fw_sequ[i].ddbuf_info.buf_id);
+ fw_sequ[i].buf_offset = 0;
+ fw_sequ[i].buf_element_size = size;
+ fw_sequ[i].ddbuf_info.buf_size = size;
+ fw_sequ[i].ddbuf_info.mem_attrib = (enum sys_emem_attrib)attributes;
+ memset(fw_sequ[i].ddbuf_info.cpu_virt_addr, 0, size);
+
+ buf_info.cpu_linear_addr =
+ fw_sequ[i].ddbuf_info.cpu_virt_addr;
+ buf_info.buf_size = size;
+ buf_info.fd = -1;
+ buf_info.buf_id = fw_sequ[i].ddbuf_info.buf_id;
+ buf_info.mem_attrib =
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+ SYS_MEMATTRIB_INPUT | SYS_MEMATTRIB_CPU_WRITE);
+
+ ret = core_stream_map_buf(ctx->res_str_id, VDEC_BUFTYPE_BITSTREAM, &buf_info,
+ &fw_sequ[i].ddbuf_info.bufmap_id);
+ if (ret) {
+ dev_err(dev, "sps core_stream_map_buf failed\n");
+ return ret;
+ }
+ }
+
+#ifdef HAS_JPEG
+ if (vid_std == VDEC_STD_JPEG)
+ return 0;
+#endif
+
+ size = vid_std == VDEC_STD_HEVC ?
+ sizeof(struct hevcfw_picture_ps) : sizeof(struct h264fw_picture_ps);
+
+ for (i = 0; i < MAX_PPSS; i++) {
+ ret = img_mem_alloc(vxd_dev->dev, ctx->mem_ctx, heap_id, size,
+ (enum mem_attr)attributes,
+ (int *)&fw_pps[i].ddbuf_info.buf_id);
+ if (ret) {
+			dev_err(dev, "Couldn't allocate pps buffer %d\n", i);
+ return -ENOMEM;
+ }
+ ret = img_mem_map_km(ctx->mem_ctx, fw_pps[i].ddbuf_info.buf_id);
+ if (ret) {
+			dev_err(dev, "Couldn't map pps buffer %d\n", i);
+ return -ENOMEM;
+ }
+ fw_pps[i].ddbuf_info.cpu_virt_addr = img_mem_get_kptr(ctx->mem_ctx,
+ fw_pps[i].ddbuf_info.buf_id);
+ fw_pps[i].buf_offset = 0;
+ fw_pps[i].buf_element_size = size;
+ fw_pps[i].ddbuf_info.buf_size = size;
+ fw_pps[i].ddbuf_info.mem_attrib = (enum sys_emem_attrib)attributes;
+ memset(fw_pps[i].ddbuf_info.cpu_virt_addr, 0, size);
+
+ buf_info.cpu_linear_addr =
+ fw_pps[i].ddbuf_info.cpu_virt_addr;
+ buf_info.buf_size = size;
+ buf_info.fd = -1;
+ buf_info.buf_id = fw_pps[i].ddbuf_info.buf_id;
+ buf_info.mem_attrib =
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+ SYS_MEMATTRIB_INPUT | SYS_MEMATTRIB_CPU_WRITE);
+
+ ret = core_stream_map_buf(ctx->res_str_id, VDEC_BUFTYPE_BITSTREAM, &buf_info,
+ &fw_pps[i].ddbuf_info.bufmap_id);
+ if (ret) {
+ dev_err(dev, "pps core_stream_map_buf failed\n");
+ return ret;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_dec.h b/drivers/media/platform/vxe-vxd/decoder/vxd_dec.h
new file mode 100644
index 000000000000..896bce6fc925
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_dec.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC SYSDEV and UI Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _VXD_DEC_H
+#define _VXD_DEC_H
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/types.h>
+
+#include "bspp.h"
+#include "img_dec_common.h"
+#include "img_mem_man.h"
+#include "img_pixfmts.h"
+#include "pixel_api.h"
+#include "vdecdd_defs.h"
+#include "vdec_defs.h"
+#include "work_queue.h"
+
+#define VXD_MIN_STREAM_ID 1
+#define VXD_MAX_STREAMS_PER_DEV 254
+#define VXD_MAX_STREAM_ID (VXD_MIN_STREAM_ID + VXD_MAX_STREAMS_PER_DEV)
+
+#define CODEC_NONE -1
+#define CODEC_H264_DEC 0
+#define CODEC_MPEG4_DEC 1
+#define CODEC_VP8_DEC 2
+#define CODEC_VC1_DEC 3
+#define CODEC_MPEG2_DEC 4
+#define CODEC_JPEG_DEC 5
+#define CODEC_VP9_DEC 6
+#define CODEC_HEVC_DEC 7
+
+#define MAX_SEGMENTS 6
+#define HW_ALIGN 64
+
+#define MAX_BUF_TRACE 30
+
+#define MAX_CAPBUFS_H264 16
+#define DISPLAY_LAG 3
+#define HEVC_MAX_LUMA_PS 35651584 /* 8192 x 4352 luma samples (HEVC level 6 MaxLumaPs) */
+
+#define MAX_PLANES 3
+
+enum {
+ Q_DATA_SRC = 0,
+ Q_DATA_DST = 1,
+ Q_DATA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum {
+ IMG_DEC_FMT_TYPE_CAPTURE = 0x01,
+ IMG_DEC_FMT_TYPE_OUTPUT = 0x10,
+ IMG_DEC_FMT_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum vxd_map_flags {
+ VXD_MAP_FLAG_NONE = 0x0,
+ VXD_MAP_FLAG_READ_ONLY = 0x1,
+ VXD_MAP_FLAG_WRITE_ONLY = 0x2,
+ VXD_MAP_FLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct vxd_fw_msg - This structure holds information about a message
+ * exchanged (read/write) between the kernel and the firmware.
+ *
+ * @out_flags: indicating the type of message
+ * @payload_size: size of payload in bytes
+ * @payload: data which is sent to the firmware
+ */
+struct vxd_fw_msg {
+ unsigned int out_flags;
+ unsigned int payload_size;
+	unsigned int payload[0]; /* flexible payload area */
+};
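+
+/*
+ * Allocation sketch (illustration only; the three-dword payload is a
+ * made-up example): since @payload is a flexible payload area, a message
+ * is allocated together with its payload in one block:
+ *
+ *	struct vxd_fw_msg *msg;
+ *
+ *	msg = kmalloc(sizeof(*msg) + 3 * sizeof(unsigned int), GFP_KERNEL);
+ *	if (msg)
+ *		msg->payload_size = 3 * sizeof(unsigned int);
+ */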
+
+/* HW state */
+struct vxd_hw_state {
+ unsigned int fw_counter;
+ unsigned int fe_status[VXD_MAX_PIPES];
+ unsigned int be_status[VXD_MAX_PIPES];
+	unsigned int dmac_status[VXD_MAX_PIPES][2]; /* Covers DMA channels 2/3 */
+ unsigned int irq_status;
+};
+
+/*
+ * struct vxd_state - contains VXD HW state
+ *
+ * @hw_state: HW state
+ * @msg_id_tail: msg id of the oldest item being processed
+ */
+struct vxd_state {
+ struct vxd_hw_state hw_state;
+ unsigned short msg_id_tail;
+};
+
+/*
+ * struct vxd_dec_fmt - contains info for each of the supported video format
+ *
+ * @fourcc: V4L2 pixel format FourCC identifier
+ * @num_planes: number of planes required for luma and chroma
+ * @type: CAPTURE or OUTPUT
+ * @std: VDEC video standard
+ * @pixfmt: IMG pixel format
+ * @interleave: Chroma interleave order
+ * @idc: Chroma format
+ * @size_num: Numerator used to calculate image size
+ * @size_den: Denominator used to calculate image size
+ * @bytes_pp: Bytes per pixel for this format
+ */
+struct vxd_dec_fmt {
+ unsigned int fourcc;
+ unsigned int num_planes;
+ unsigned char type;
+ enum vdec_vid_std std;
+ enum img_pixfmt pixfmt;
+ enum pixel_chroma_interleaved interleave;
+ enum pixel_fmt_idc idc;
+ int size_num;
+ int size_den;
+ int bytes_pp;
+};
+
+/*
+ * struct vxd_item - contains information about the item sent to fw
+ *
+ * @list: list head linking the item into items_done, msgs, or pend.
+ * @stream_id: stream id
+ * @msg_id: message id
+ * @destroy: item belongs to the stream which is destroyed
+ * @msg: contains msg between kernel and fw
+ */
+struct vxd_item {
+ struct list_head list;
+ unsigned int stream_id;
+ unsigned int msg_id;
+ struct {
+ unsigned destroy : 1;
+ };
+ struct vxd_fw_msg msg;
+};
+
+enum vxd_cb_type {
+ VXD_CB_STRUNIT_PROCESSED,
+ VXD_CB_SPS_RELEASE,
+ VXD_CB_PPS_RELEASE,
+ VXD_CB_PICT_DECODED,
+ VXD_CB_PICT_DISPLAY,
+ VXD_CB_PICT_RELEASE,
+ VXD_CB_PICT_END,
+ VXD_CB_STR_END,
+ VXD_CB_ERROR_FATAL,
+ VXD_CB_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * vxd_cb - Return a resource to vxd
+ *
+ * @ctx: the vxd stream context
+ * @type: the type of message
+ * @buf_map_id: the buf_map_id of the resource being returned
+ */
+typedef void (*vxd_cb)(void *ctx, enum vxd_cb_type type, unsigned int buf_map_id);
+
+/*
+ * struct vxd_return - contains information about items returning from core
+ *
+ * @type: Type of item being returned
+ * @buf_map_id: mmu mapped id of buffer being returned
+ */
+struct vxd_return {
+ void *work;
+ struct vxd_dec_ctx *ctx;
+ enum vxd_cb_type type;
+ unsigned int buf_map_id;
+};
+
+/*
+ * struct vxd_dec_q_data - contains queue data information
+ *
+ * @fmt: format info
+ * @width: frame width
+ * @height: frame height
+ * @bytesperline: bytes per line in memory
+ * @size_image: image size in memory
+ */
+struct vxd_dec_q_data {
+ struct vxd_dec_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline[MAX_PLANES];
+ unsigned int size_image[MAX_PLANES];
+};
+
+/*
+ * struct time_prof - contains time taken by decoding information
+ *
+ * @id: id info
+ * @start_time: start time
+ * @end_time: end time
+ */
+struct time_prof {
+ unsigned int id;
+ long long start_time;
+ long long end_time;
+};
+
+/*
+ * struct vxd_dev - The struct containing decoder driver internal parameters.
+ *
+ * @v4l2_dev: main struct of V4L2 device drivers
+ * @dev: platform device driver
+ * @vfd_dec: video device structure to create and manage the V4L2 device node.
+ * @plat_dev: linux platform device
+ * @m2m_dev: mem2mem device
+ * @mutex: mutex to protect certain ongoing operations.
+ * @module_irq: a threaded request IRQ for the device
+ * @reg_base: base address of the IMG VXD hw registers
+ * @props: contains HW properties
+ * @mmu_config_addr_width: indicates the number of extended address bits
+ * (above 32) that the external memory interface
+ * uses, based on EXTENDED_ADDR_RANGE field of
+ * MMU_CONFIG0
+ * @rendec_buf_id: buffer id for rendec buffer allocation
+ * @firmware: firmware information based on vxd_dev_fw structure
+ * @firmware_loading_complete: loading completion
+ * @no_fw: set when the firmware binary is not present in /lib
+ * @fw_refcnt: firmware reference counter
+ * @hw_on: indication if hw is on or off
+ * @hw_dead: indication if hw is dead
+ * @lock: basic primitive for locking through spinlock
+ * @state: internal state handling of vxd state
+ * @msgs: linked list of msgs with vxd_item
+ * @pend: linked list of pending msgs to be sent to fw
+ * @msg_cnt: counter of messages submitted to VXD. Wraps every VXD_MSG_ID_MASK
+ * @freq_khz: Core clock frequency measured during boot of firmware
+ * @streams: IDR of active streams, indexed by unique stream id
+ * @mem_ctx: memory management context for HW buffers
+ * @dwork: use for Power Management and Watchdog
+ * @work_sched_at: the time the last work was scheduled at
+ * @emergency: indicates if emergency condition occurred
+ * @dbgfs_ctx: pointer to debug FS context.
+ * @hw_pm_delay: delay before performing PM
+ * @hw_dwr_period: period for the DWR (device watchdog reset) check
+ * @pm_start: time, in jiffies, when core become idle
+ * @dwr_start: time, in jiffies, when dwr has been started
+ */
+struct vxd_dev {
+ struct v4l2_device v4l2_dev;
+ void *dev;
+ struct video_device *vfd_dec;
+ struct platform_device *plat_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct mutex *mutex; /* Per device mutex */
+ struct mutex *mutex_queue; /* Mutex for ioctl synchronization on queue */
+ int module_irq;
+ void __iomem *reg_base;
+ struct vxd_core_props props;
+ unsigned int mmu_config_addr_width;
+ int rendec_buf_id;
+ struct vxd_dev_fw firmware;
+ void *firmware_loading_complete;
+ unsigned char no_fw;
+ unsigned char fw_refcnt;
+ unsigned int hw_on;
+ unsigned int hw_dead;
+ void *lock; /* basic device level spinlock */
+ struct vxd_state state;
+ struct list_head msgs;
+ struct list_head pend;
+ int msg_cnt;
+ unsigned int freq_khz;
+ struct idr *streams;
+ struct mem_ctx *mem_ctx;
+ void *dwork;
+ unsigned long long work_sched_at;
+ unsigned int emergency;
+ void *dbgfs_ctx;
+ unsigned int hw_pm_delay;
+ unsigned int hw_dwr_period;
+ unsigned long long pm_start;
+ unsigned long long dwr_start;
+ struct time_prof time_fw[MAX_BUF_TRACE];
+ struct time_prof time_drv[MAX_BUF_TRACE];
+
+ /* The variables defined below are used in RTOS only. */
+ /* This variable holds queue handler */
+ void *vxd_worker_queue_handle;
+ void *vxd_worker_queue_sem_handle;
+};
+
+/*
+ * struct vxd_stream - holds stream-related info
+ *
+ * @ctx: associated vxd_dec_ctx
+ * @mmu_ctx: MMU context for this stream
+ * @ptd: ptd for the stream
+ * @id: unique stream id
+ */
+struct vxd_stream {
+ struct vxd_dec_ctx *ctx;
+ struct mmu_ctx *mmu_ctx;
+ unsigned int ptd;
+ unsigned int id;
+};
+
+
+struct vxd_mapping {
+ struct list_head list;
+ unsigned int buf_map_id;
+ unsigned char reuse;
+ unsigned long dma_addr;
+};
+
+/*
+ * struct vxd_buffer - holds per buffer info.
+ * @buffer: the vb2_v4l2_buffer
+ * @list: list head for gathering in linked list
+ * @mapped: is this buffer mapped yet
+ * @reuse: is the buffer ready for reuse
+ * @buf_map_id: the mapped buffer id
+ * @buf_info: the buffer info for submitting to map
+ * @bstr_info: the buffer info for submitting to bspp
+ * @seq_unit: the str_unit for submitting sps
+ * @pic_unit: the str_unit for submitting pps and segments
+ * @end_unit: the str_unit for submitting picture_end
+ * @mapping: the vxd_mapping associated with this buffer
+ * @preparsed_data: the pre-parsed bitstream data from bspp
+ */
+struct vxd_buffer {
+ struct v4l2_m2m_buffer buffer;
+ struct list_head list;
+ unsigned char mapped;
+ unsigned char reuse;
+ unsigned int buf_map_id;
+ struct vxd_mapping *mapping;
+ struct vdec_buf_info buf_info;
+ struct bspp_ddbuf_info bstr_info;
+ struct vdecdd_str_unit seq_unit;
+ struct vdecdd_str_unit pic_unit;
+ struct vdecdd_str_unit end_unit;
+ struct bspp_preparsed_data preparsed_data;
+};
+
+typedef void (*decode_cb)(int res_str_id, unsigned int *msg, unsigned int msg_size,
+ unsigned int msg_flags);
+
+/*
+ * struct vxd_dec_ctx - holds per stream data. Each playback has its own
+ * vxd_dec_ctx
+ *
+ * @fh: V4L2 file handler
+ * @dev: pointer to the device main information.
+ * @ctrl_hdl_dec: v4l2 custom control command for video decoder
+ * @mem_ctx: mem context for this stream
+ * @mmu_ctx: MMU context for this stream
+ * @ptd: page table information
+ * @items_done: linked list of items that are ready
+ * @width: frame width
+ * @height: frame height
+ * @width_orig: original frame width (before padding)
+ * @height_orig: original frame height (before padding)
+ * @q_data: Queue data information of src[0] and dst[1]
+ * @stream: stream-related info
+ * @work: work queue for message handling
+ * @return_queue: list of resources returned from core
+ * @out_buffers: list of all output buffers
+ * @cap_buffers: list of all capture buffers except those in reuse_queue
+ * @cap_mappings: list of all capture buffers mapped to HW
+ * @reuse_queue: list of capture buffers waiting for core to signal reuse
+ * @res_str_id: Core stream id
+ * @stream_created: Core stream is created
+ * @stream_configured: Core stream is configured
+ * @opconfig_pending: Core opconfig is pending stream_create
+ * @src_streaming: V4L2 src stream is streaming
+ * @dst_streaming: V4L2 dst stream is streaming
+ * @core_streaming: core is streaming
+ * @aborting: signal job abort on next irq
+ * @str_opcfg: core output config
+ * @pict_bufcfg: core picture buffer config
+ * @bspp_context: BSPP Stream context handle
+ * @seg_list: list of bspp_bitstr_seg for submitting to BSPP
+ * @fw_sequ: BSPP sps resource
+ * @fw_pps: BSPP pps resource
+ * @cb: registered callback for incoming messages
+ * @mutex: mutex to protect context specific state machine
+ */
+struct vxd_dec_ctx {
+ struct v4l2_fh fh;
+ struct vxd_dev *dev;
+ struct mem_ctx *mem_ctx;
+ struct mmu_ctx *mmu_ctx;
+ unsigned int ptd;
+ struct list_head items_done;
+ unsigned int width;
+ unsigned int height;
+ unsigned int width_orig;
+ unsigned int height_orig;
+ struct vxd_dec_q_data q_data[2];
+ struct vxd_stream stream;
+ void *work;
+ struct list_head return_queue;
+ struct list_head out_buffers;
+ struct list_head cap_buffers;
+ struct list_head cap_mappings;
+ struct list_head reuse_queue;
+ unsigned int res_str_id;
+ unsigned char stream_created;
+ unsigned char stream_configured;
+ unsigned char opconfig_pending;
+ unsigned char src_streaming;
+ unsigned char dst_streaming;
+ unsigned char core_streaming;
+ unsigned char aborting;
+ unsigned char eos;
+ unsigned char stop_initiated;
+ unsigned char flag_last;
+ unsigned char num_decoding;
+ unsigned int max_num_ref_frames;
+ struct vdec_str_opconfig str_opcfg;
+ struct vdec_pict_bufconfig pict_bufcfg;
+ void *bspp_context;
+ struct bspp_bitstr_seg bstr_segments[MAX_SEGMENTS];
+ struct lst_t seg_list;
+ struct bspp_ddbuf_array_info fw_sequ[MAX_SEQUENCES];
+ struct bspp_ddbuf_array_info fw_pps[MAX_PPSS];
+ decode_cb cb;
+ struct mutex *mutex; /* Per stream mutex */
+
+	/* The variables below are used only in RTOS */
+	void *mm_return_resource; /* Placeholder for CB to application */
+ void *stream_worker_queue_handle;
+ void *stream_worker_queue_sem_handle;
+	/* lock is used to synchronize the stream worker and the process function */
+ void *lock;
+	/* sem_eos: semaphore used to wait until all frames are decoded */
+ void *sem_eos;
+};
+
+irqreturn_t vxd_handle_irq(void *dev);
+irqreturn_t vxd_handle_thread_irq(void *dev);
+int vxd_init(void *dev, struct vxd_dev *vxd, const struct heap_config heap_configs[], int heaps);
+int vxd_g_internal_heap_id(void);
+void vxd_deinit(struct vxd_dev *vxd);
+int vxd_prepare_fw(struct vxd_dev *vxd);
+void vxd_clean_fw_resources(struct vxd_dev *vxd);
+int vxd_send_msg(struct vxd_dec_ctx *ctx, struct vxd_fw_msg *msg);
+int vxd_suspend_dev(void *dev);
+int vxd_resume_dev(void *dev);
+
+int vxd_create_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx);
+void vxd_destroy_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx);
+
+int vxd_map_buffer_sg(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+ unsigned int str_id, unsigned int buff_id,
+ void *sgt, unsigned int virt_addr,
+ unsigned int map_flags);
+int vxd_map_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx, unsigned int str_id,
+ unsigned int buff_id, unsigned int virt_addr, unsigned int map_flags);
+int vxd_unmap_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+ unsigned int str_id, unsigned int buff_id);
+
+unsigned int get_nbuffers(enum vdec_vid_std std, int w, int h, unsigned int max_num_ref_frames);
+
+int vxd_dec_alloc_bspp_resource(struct vxd_dec_ctx *ctx, enum vdec_vid_std vid_std);
+
+#ifdef ERROR_RECOVERY_SIMULATION
+/* sysfs read write functions */
+ssize_t vxd_sysfs_show(struct kobject *vxd_dec_kobject,
+ struct kobj_attribute *attr, char *buf);
+
+ssize_t vxd_sysfs_store(struct kobject *vxd_dec_kobject,
+ struct kobj_attribute *attr, const char *buf, unsigned long count);
+#endif
+#endif /* _VXD_DEC_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_ext.h b/drivers/media/platform/vxe-vxd/decoder/vxd_ext.h
new file mode 100644
index 000000000000..fa92c9001c73
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_ext.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Low-level device interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ */
+
+#ifndef _VXD_EXT_H
+#define _VXD_EXT_H
+
+#define VLR_COMPLETION_COMMS_AREA_SIZE 476
+
+/* Size (in words) of the buffer used to pass messages between LISR and HISR */
+#define VXD_SIZE_MSG_BUFFER (1 * 1024)
+
+/* This structure describes macroblock coordinates. */
+struct vxd_mb_coords {
+ unsigned int x;
+ unsigned int y;
+};
+
+/* This structure contains firmware and decoding pipe state information. */
+struct vxd_pipestate {
+ unsigned char is_pipe_present;
+ unsigned char cur_codec;
+ unsigned int acheck_point[VDECFW_CHECKPOINT_MAX];
+ unsigned int firmware_action;
+ unsigned int fe_slices;
+ unsigned int be_slices;
+ unsigned int fe_errored_slices;
+ unsigned int be_errored_slices;
+ unsigned int be_mbs_dropped;
+ unsigned int be_mbs_recovered;
+ struct vxd_mb_coords fe_mb;
+ struct vxd_mb_coords be_mb;
+};
+
+/* This structure contains firmware and decoder core state information. */
+struct vxd_firmware_state {
+ unsigned int fw_step;
+ struct vxd_pipestate pipe_state[VDECFW_MAX_DP];
+};
+
+/* This structure contains the video decoder device state. */
+struct vxd_states {
+ struct vxd_firmware_state fw_state;
+};
+
+struct vxd_pict_attrs {
+ unsigned int dwrfired;
+ unsigned int mmufault;
+ unsigned int deverror;
+};
+
+/* This type defines the message attributes. */
+enum vxd_msg_attr {
+ VXD_MSG_ATTR_NONE = 0,
+ VXD_MSG_ATTR_DECODED = 1,
+ VXD_MSG_ATTR_FATAL = 2,
+ VXD_MSG_ATTR_CANCELED = 3,
+ VXD_MSG_ATTR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum vxd_msg_flag {
+ VXD_MSG_FLAG_DROP = 0,
+ VXD_MSG_FLAG_EXCL = 1,
+ VXD_MSG_FLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif /* VXD_EXT_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_int.c b/drivers/media/platform/vxe-vxd/decoder/vxd_int.c
new file mode 100644
index 000000000000..c75aef6deed1
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_int.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "fw_interface.h"
+#include "h264fw_data.h"
+#include "img_errors.h"
+#include "img_dec_common.h"
+#include "img_pvdec_core_regs.h"
+#include "img_pvdec_pixel_regs.h"
+#include "img_pvdec_test_regs.h"
+#include "img_vdec_fw_msg.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "img_msvdx_core_regs.h"
+#include "img_msvdx_cmds.h"
+#include "reg_io2.h"
+#include "scaler_setup.h"
+#include "vdecdd_defs.h"
+#include "vdecdd_utils.h"
+#include "vdecfw_shared.h"
+#include "vdec_defs.h"
+#include "vxd_ext.h"
+#include "vxd_int.h"
+#include "vxd_props.h"
+
+#define MSVDX_CACHE_REF_OFFSET_V100 (72L)
+#define MSVDX_CACHE_ROW_OFFSET_V100 (4L)
+
+#define MSVDX_CACHE_REF_OFFSET_V550 (144L)
+#define MSVDX_CACHE_ROW_OFFSET_V550 (8L)
+
+#define GET_BITS(v, lb, n) (((v) >> (lb)) & ((1 << (n)) - 1))
+#define IS_PVDEC_PIPELINE(std) ((std) == VDEC_STD_HEVC ? 1 : 0)
+
+static int amsvdx_codecmode[VDEC_STD_MAX] = {
+ /* Invalid */
+ -1,
+ /* MPEG2 */
+ 3,
+ /* MPEG4 */
+ 4,
+ /* H263 */
+ 4,
+ /* H264 */
+ 1,
+ /* VC1 */
+ 2,
+ /* AVS */
+ 5,
+ /* RealVideo (8) */
+ 8,
+ /* JPEG */
+ 0,
+ /* On2 VP6 */
+ 10,
+ /* On2 VP8 */
+ 11,
+ /* Invalid */
+#ifdef HAS_VP9
+ /* On2 VP9 */
+ 13,
+#endif
+ /* Sorenson */
+ 4,
+ /* HEVC */
+ 12,
+};
+
+struct msvdx_scaler_coeff_cmds {
+ unsigned int acmd_horizluma_coeff[VDECFW_NUM_SCALE_COEFFS];
+ unsigned int acmd_vertluma_coeff[VDECFW_NUM_SCALE_COEFFS];
+ unsigned int acmd_horizchroma_coeff[VDECFW_NUM_SCALE_COEFFS];
+ unsigned int acmd_vertchroma_coeff[VDECFW_NUM_SCALE_COEFFS];
+};
+
+static struct vxd_vidstd_props astd_props[] = {
+ { VDEC_STD_MPEG2, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_MPEG4, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_H263, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_H264, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0x10000, 8,
+ 8, PIXEL_FORMAT_420 },
+ { VDEC_STD_VC1, CORE_REVISION(7, 0, 0), 80, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_AVS, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_REAL, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_JPEG, CORE_REVISION(7, 0, 0), 64, 16, 32768, 32768, 0, 8, 8,
+ PIXEL_FORMAT_444 },
+ { VDEC_STD_VP6, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_VP8, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+ { VDEC_STD_SORENSON, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8,
+ 8, PIXEL_FORMAT_420 },
+ { VDEC_STD_HEVC, CORE_REVISION(7, 0, 0), 64, 16, 8192, 8192, 0, 8, 8,
+ PIXEL_FORMAT_420 },
+};
+
+enum vdec_msvdx_async_mode {
+ VDEC_MSVDX_ASYNC_NORMAL,
+ VDEC_MSVDX_ASYNC_VDMC,
+ VDEC_MSVDX_ASYNC_VDEB,
+ VDEC_MSVDX_ASYNC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* MSVDX row strides for video buffers. */
+static const unsigned int amsvdx_64byte_row_stride[] = {
+ 384, 768, 1280, 1920, 512, 1024, 2048, 4096
+};
+
+/* MSVDX row strides for jpeg buffers. */
+static const unsigned int amsvdx_jpeg_row_stride[] = {
+ 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768
+};
+
+/* VXD Core major revision. */
+static unsigned int maj_rev;
+/* VXD Core minor revision. */
+static unsigned int min_rev;
+/* VXD Core maintenance revision. */
+static unsigned int maint_rev;
+
+static int get_stride_code(enum vdec_vid_std vidstd, unsigned int row_stride)
+{
+ unsigned int i;
+
+ if (vidstd == VDEC_STD_JPEG) {
+ for (i = 0; i < (sizeof(amsvdx_jpeg_row_stride) /
+ sizeof(amsvdx_jpeg_row_stride[0])); i++) {
+ if (amsvdx_jpeg_row_stride[i] == row_stride)
+ return i;
+ }
+ } else {
+ for (i = 0; i < (sizeof(amsvdx_64byte_row_stride) /
+ sizeof(amsvdx_64byte_row_stride[0])); i++) {
+ if (amsvdx_64byte_row_stride[i] == row_stride)
+ return i;
+ }
+ }
+
+ return -1;
+}
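+
+/*
+ * Example (illustration only): for VDEC_STD_H264 with a 1920-byte row
+ * stride the lookup above returns 3, since amsvdx_64byte_row_stride[3]
+ * is 1920; an unsupported stride such as 1000 returns -1.
+ */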
+
+/* Obtains the hardware defined video profile. */
+static unsigned int vxd_getprofile(enum vdec_vid_std vidstd, unsigned int std_profile)
+{
+ unsigned int profile = 0;
+
+ switch (vidstd) {
+ case VDEC_STD_H264:
+ switch (std_profile) {
+ case H264_PROFILE_BASELINE:
+ profile = 0;
+ break;
+
+ /*
+ * Extended may be attempted as Baseline or
+ * Main depending on the constraint_set_flags
+ */
+ case H264_PROFILE_EXTENDED:
+ case H264_PROFILE_MAIN:
+ profile = 1;
+ break;
+
+ case H264_PROFILE_HIGH:
+ case H264_PROFILE_HIGH444:
+ case H264_PROFILE_HIGH422:
+ case H264_PROFILE_HIGH10:
+ case H264_PROFILE_CAVLC444:
+ case H264_PROFILE_MVC_HIGH:
+ case H264_PROFILE_MVC_STEREO:
+ profile = 2;
+ break;
+ default:
+ profile = 2;
+ break;
+ }
+ break;
+
+ default:
+ profile = 0;
+ break;
+ }
+
+ return profile;
+}
+
+static int vxd_getcoreproperties(struct vxd_coreprops *coreprops,
+ unsigned int corerev,
+ unsigned int pvdec_coreid, unsigned int mmu_config0,
+ unsigned int mmu_config1, unsigned int *pixel_pipecfg,
+ unsigned int *pixel_misccfg, unsigned int max_framecfg)
+{
+ unsigned int group_id;
+ unsigned int core_id;
+ unsigned int core_config;
+ unsigned int extended_address_range;
+ unsigned char group_size = 0;
+ unsigned char pipe_minus1 = 0;
+ unsigned int max_h264_hw_chromaformat = 0;
+ unsigned int max_hevc_hw_chromaformat = 0;
+ unsigned int max_bitdepth_luma = 0;
+ unsigned int i;
+
+ struct pvdec_core_rev core_rev;
+
+ if (!coreprops || !pixel_pipecfg || !pixel_misccfg)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* PVDEC Core Revision Information */
+ core_rev.maj_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+ CR_PVDEC_MAJOR_REV);
+ core_rev.min_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+ CR_PVDEC_MINOR_REV);
+ core_rev.maint_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+ CR_PVDEC_MAINT_REV);
+
+ /* core id */
+ group_id = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE, CR_PVDEC_CORE_ID, CR_GROUP_ID);
+ core_id = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE, CR_PVDEC_CORE_ID, CR_CORE_ID);
+
+ /* Ensure that the core is IMG Video Decoder (PVDEC). */
+ if (group_id != 3 || core_id != 3)
+ return IMG_ERROR_DEVICE_NOT_FOUND;
+
+ core_config = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE,
+ CR_PVDEC_CORE_ID, CR_PVDEC_CORE_CONFIG);
+
+ memset(coreprops, 0, sizeof(*(coreprops)));
+
+ /* Construct core version name. */
+ snprintf(coreprops->aversion, VER_STR_LEN, "%d.%d.%d",
+ core_rev.maj_rev, core_rev.min_rev, core_rev.maint_rev);
+
+ coreprops->mmu_support_stride_per_context =
+ REGIO_READ_FIELD(mmu_config1, IMG_VIDEO_BUS4_MMU,
+ MMU_CONFIG1,
+ SUPPORT_STRIDE_PER_CONTEXT) == 1 ? 1 : 0;
+
+ coreprops->mmu_support_secure = REGIO_READ_FIELD(mmu_config1, IMG_VIDEO_BUS4_MMU,
+ MMU_CONFIG1, SUPPORT_SECURE) == 1 ? 1 : 0;
+
+ extended_address_range = REGIO_READ_FIELD(mmu_config0, IMG_VIDEO_BUS4_MMU,
+ MMU_CONFIG0, EXTENDED_ADDR_RANGE);
+
+ switch (extended_address_range) {
+ case 0:
+ coreprops->mmu_type = MMU_TYPE_32BIT;
+ break;
+ case 4:
+ coreprops->mmu_type = MMU_TYPE_36BIT;
+ break;
+ case 8:
+ coreprops->mmu_type = MMU_TYPE_40BIT;
+ break;
+ default:
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ group_size += REGIO_READ_FIELD(mmu_config0, IMG_VIDEO_BUS4_MMU,
+ MMU_CONFIG0, GROUP_OVERRIDE_SIZE);
+
+ coreprops->num_entropy_pipes = core_config & 0xF;
+ coreprops->num_pixel_pipes = core_config >> 4 & 0xF;
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("PVDEC revision %08x detected, id %08x.\n", corerev, core_id);
+ pr_info("Found %d entropy pipe(s), %d pixel pipe(s), %d group size",
+ coreprops->num_entropy_pipes, coreprops->num_pixel_pipes,
+ group_size);
+#endif
+
+ /* Set global rev info variables used by macros */
+ maj_rev = core_rev.maj_rev;
+ min_rev = core_rev.min_rev;
+ maint_rev = core_rev.maint_rev;
+
+ /* Default settings */
+ for (i = 0; i < ARRAY_SIZE(astd_props); i++) {
+ struct vxd_vidstd_props *pvidstd_props =
+ &coreprops->vidstd_props[astd_props[i].vidstd];
+ /*
+ * Update video standard properties if the core is beyond
+ * specified version and the properties are for newer cores
+ * than the previous.
+ */
+ if (FROM_REV(MAJOR_REVISION((int)astd_props[i].core_rev),
+ MINOR_REVISION((int)astd_props[i].core_rev),
+ MAINT_REVISION((int)astd_props[i].core_rev), int) &&
+ astd_props[i].core_rev >= pvidstd_props->core_rev) {
+ *pvidstd_props = astd_props[i];
+
+ if (pvidstd_props->vidstd != VDEC_STD_JPEG &&
+ (FROM_REV(8, 0, 0, int)) && (pvidstd_props->vidstd ==
+ VDEC_STD_HEVC ? 1 : 0)) {
+ /*
+ * override default values with values
+ * specified in HW (register does not
+ * exist in previous cores)
+ */
+ pvidstd_props->max_width =
+ 2 << REGIO_READ_FIELD(max_framecfg,
+ PVDEC_PIXEL,
+ CR_MAX_FRAME_CONFIG,
+ CR_PVDEC_HOR_MSB);
+
+ pvidstd_props->max_height =
+ 2 << REGIO_READ_FIELD(max_framecfg,
+ PVDEC_PIXEL,
+ CR_MAX_FRAME_CONFIG,
+ CR_PVDEC_VER_MSB);
+ } else if (pvidstd_props->vidstd != VDEC_STD_JPEG &&
+ (FROM_REV(8, 0, 0, int))) {
+ pvidstd_props->max_width =
+ 2 << REGIO_READ_FIELD(max_framecfg,
+ PVDEC_PIXEL,
+ CR_MAX_FRAME_CONFIG,
+ CR_MSVDX_HOR_MSB);
+
+ pvidstd_props->max_height =
+ 2 << REGIO_READ_FIELD(max_framecfg,
+ PVDEC_PIXEL,
+ CR_MAX_FRAME_CONFIG,
+ CR_MSVDX_VER_MSB);
+ }
+ }
+ }
+
+ /* Populate the core properties. */
+ if (GET_BITS(core_config, 11, 1))
+ coreprops->hd_support = 1;
+
+ for (pipe_minus1 = 0; pipe_minus1 < coreprops->num_pixel_pipes;
+ pipe_minus1++) {
+ unsigned int current_bitdepth =
+ GET_BITS(pixel_misccfg[pipe_minus1], 4, 3) + 8;
+ unsigned int current_h264_hw_chromaformat =
+ GET_BITS(pixel_misccfg[pipe_minus1], 0, 2);
+ unsigned int current_hevc_hw_chromaformat =
+ GET_BITS(pixel_misccfg[pipe_minus1], 2, 2);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("cur_bitdepth: %d cur_h264_hw_chromaformat: %d",
+ current_bitdepth, current_h264_hw_chromaformat);
+ pr_info("cur_hevc_hw_chromaformat: %d pipe_minus1: %d\n",
+ current_hevc_hw_chromaformat, pipe_minus1);
+#endif
+
+ if (GET_BITS(pixel_misccfg[pipe_minus1], 8, 1))
+ coreprops->rotation_support[pipe_minus1] = 1;
+
+ if (GET_BITS(pixel_misccfg[pipe_minus1], 9, 1))
+ coreprops->scaling_support[pipe_minus1] = 1;
+
+ coreprops->num_streams[pipe_minus1] =
+ GET_BITS(pixel_misccfg[pipe_minus1], 12, 2) + 1;
+
+ /* Video standards. */
+ coreprops->mpeg2[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 0, 1) ? 1 : 0;
+ coreprops->mpeg4[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 1, 1) ? 1 : 0;
+ coreprops->h264[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 2, 1) ? 1 : 0;
+ coreprops->vc1[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 3, 1) ? 1 : 0;
+ coreprops->jpeg[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 5, 1) ? 1 : 0;
+ coreprops->avs[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 7, 1) ? 1 : 0;
+ coreprops->real[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 8, 1) ? 1 : 0;
+ coreprops->vp6[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 9, 1) ? 1 : 0;
+ coreprops->vp8[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 10, 1) ? 1 : 0;
+ coreprops->hevc[pipe_minus1] =
+ GET_BITS(pixel_pipecfg[pipe_minus1], 22, 1) ? 1 : 0;
+
+ max_bitdepth_luma = (max_bitdepth_luma > current_bitdepth ?
+ max_bitdepth_luma : current_bitdepth);
+ max_h264_hw_chromaformat = (max_h264_hw_chromaformat >
+ current_h264_hw_chromaformat ? max_h264_hw_chromaformat
+ : current_h264_hw_chromaformat);
+ max_hevc_hw_chromaformat = (max_hevc_hw_chromaformat >
+ current_hevc_hw_chromaformat ? max_hevc_hw_chromaformat
+ : current_hevc_hw_chromaformat);
+ }
+
+ /* Override default bit-depth with value signalled explicitly by core. */
+ coreprops->vidstd_props[0].max_luma_bitdepth = max_bitdepth_luma;
+ coreprops->vidstd_props[0].max_chroma_bitdepth =
+ coreprops->vidstd_props[0].max_luma_bitdepth;
+
+ for (i = 1; i < VDEC_STD_MAX; i++) {
+ coreprops->vidstd_props[i].max_luma_bitdepth =
+ coreprops->vidstd_props[0].max_luma_bitdepth;
+ coreprops->vidstd_props[i].max_chroma_bitdepth =
+ coreprops->vidstd_props[0].max_chroma_bitdepth;
+ }
+
+ switch (max_h264_hw_chromaformat) {
+ case 1:
+ coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+ PIXEL_FORMAT_420;
+ break;
+
+ case 2:
+ coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+ PIXEL_FORMAT_422;
+ break;
+
+ case 3:
+ coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+ PIXEL_FORMAT_444;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (max_hevc_hw_chromaformat) {
+ case 1:
+ coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+ PIXEL_FORMAT_420;
+ break;
+
+ case 2:
+ coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+ PIXEL_FORMAT_422;
+ break;
+
+ case 3:
+ coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+ PIXEL_FORMAT_444;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char vxd_is_supported_byatleast_onepipe(const unsigned char *features,
+ unsigned int num_pipes)
+{
+ unsigned int i;
+
+ VDEC_ASSERT(features);
+ VDEC_ASSERT(num_pipes <= VDEC_MAX_PIXEL_PIPES);
+
+ for (i = 0; i < num_pipes; i++) {
+ if (features[i])
+ return 1;
+ }
+
+ return 0;
+}
+
+void vxd_set_reconpictcmds(const struct vdecdd_str_unit *str_unit,
+ const struct vdec_str_configdata *str_configdata,
+ const struct vdec_str_opconfig *output_config,
+ const struct vxd_coreprops *coreprops,
+ const struct vxd_buffers *buffers,
+ unsigned int *pict_cmds)
+{
+ struct pixel_pixinfo *pixel_info;
+ unsigned int row_stride_code;
+ unsigned char benable_auxline_buf = 1;
+
+ unsigned int coded_height;
+ unsigned int coded_width;
+ unsigned int disp_height;
+ unsigned int disp_width;
+ unsigned int profile;
+ unsigned char plane;
+ unsigned int y_stride;
+ unsigned int uv_stride;
+ unsigned int v_stride;
+ unsigned int cache_ref_offset;
+ unsigned int cache_row_offset;
+
+ if (str_configdata->vid_std == VDEC_STD_JPEG) {
+ disp_height = 0;
+ disp_width = 0;
+ coded_height = 0;
+ coded_width = 0;
+ } else {
+ coded_height = ALIGN(str_unit->pict_hdr_info->coded_frame_size.height,
+ (str_unit->pict_hdr_info->field) ?
+ 2 * VDEC_MB_DIMENSION : VDEC_MB_DIMENSION);
+ /* Hardware field is coded size - 1 */
+ coded_height -= 1;
+
+ coded_width = ALIGN(str_unit->pict_hdr_info->coded_frame_size.width,
+ VDEC_MB_DIMENSION);
+ /* Hardware field is coded size - 1 */
+ coded_width -= 1;
+
+ disp_height = str_unit->pict_hdr_info->disp_info.enc_disp_region.height
+ + str_unit->pict_hdr_info->disp_info.enc_disp_region.top_offset - 1;
+ disp_width = str_unit->pict_hdr_info->disp_info.enc_disp_region.width +
+ str_unit->pict_hdr_info->disp_info.enc_disp_region.left_offset - 1;
+ }
+ /*
+ * Display picture size (DISPLAY_PICTURE)
+ * The value written is not the actual video size to be displayed
+ * but a number that has to differ from the coded pixel size by
+ * less than one macroblock (coded_size - display_size <= 0x0F).
+ * Because H264 can have a different display size, we need to
+ * check and write the coded_size again in the display_size
+ * register if this condition is not fulfilled.
+ */
+ if (str_configdata->vid_std != VDEC_STD_VC1 && ((coded_height - disp_height) > 0x0F)) {
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+ MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+ DISPLAY_PICTURE_HEIGHT,
+ coded_height, unsigned int);
+ } else {
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+ MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+ DISPLAY_PICTURE_HEIGHT,
+ disp_height, unsigned int);
+ }
+
+ if (((coded_width - disp_width) > 0x0F)) {
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+ MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+ DISPLAY_PICTURE_WIDTH,
+ coded_width, unsigned int);
+ } else {
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+ MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+ DISPLAY_PICTURE_WIDTH,
+ disp_width, unsigned int);
+ }
+
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_CODED_PICTURE],
+ MSVDX_CMDS, CODED_PICTURE_SIZE,
+ CODED_PICTURE_HEIGHT,
+ coded_height, unsigned int);
+ REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_CODED_PICTURE],
+ MSVDX_CMDS, CODED_PICTURE_SIZE,
+ CODED_PICTURE_WIDTH,
+ coded_width, unsigned int);
+
+ /*
+ * For standards where dpb_diff != 1 and chroma format != 420
+ * cache_ref_offset has to be calculated in the F/W.
+ */
+ if (str_configdata->vid_std != VDEC_STD_HEVC && str_configdata->vid_std != VDEC_STD_H264) {
+ unsigned int log2_size, cache_size, luma_size;
+ unsigned char is_hevc_supported, is_hevc444_supported = 0;
+
+ is_hevc_supported =
+ vxd_is_supported_byatleast_onepipe(coreprops->hevc,
+ coreprops->num_pixel_pipes);
+
+ if (is_hevc_supported) {
+ is_hevc444_supported =
+ coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format ==
+ PIXEL_FORMAT_444 ? 1 : 0;
+ }
+
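+ /*
+ * Cache geometry, as derived from the code below: the MC cache
+ * holds 3 << log2_size bytes, of which two thirds are luma; the
+ * reference offset sits at 15/32 of the luma area, rounded up to
+ * an 8-byte boundary.
+ */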
+ log2_size = 9 + (is_hevc_supported ? 1 : 0) + (is_hevc444_supported ? 1 : 0);
+ cache_size = 3 << log2_size;
+ luma_size = (cache_size * 2) / 3;
+ cache_ref_offset = (luma_size * 15) / 32;
+ cache_ref_offset = (cache_ref_offset + 7) & (~7);
+ cache_row_offset = 0x0C;
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+ MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+ CONFIG_REF_CHROMA_ADJUST, 1,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+ MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+ CONFIG_REF_OFFSET, cache_ref_offset,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+ MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+ CONFIG_ROW_OFFSET, cache_row_offset,
+ unsigned int, unsigned int);
+ }
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, CODEC_MODE,
+ amsvdx_codecmode[str_configdata->vid_std],
+ unsigned int, unsigned int);
+
+ profile = str_unit->seq_hdr_info->com_sequ_hdr_info.codec_profile;
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, CODEC_PROFILE,
+ vxd_getprofile(str_configdata->vid_std, profile),
+ unsigned int, unsigned int);
+
+ plane = str_unit->seq_hdr_info->com_sequ_hdr_info.separate_chroma_planes;
+ pixel_info = &str_unit->seq_hdr_info->com_sequ_hdr_info.pixel_info;
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, CHROMA_FORMAT, plane ?
+ 0 : pixel_info->chroma_fmt, unsigned int, int);
+
+ if (str_configdata->vid_std != VDEC_STD_JPEG) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+ MSVDX_CMDS, EXT_OP_MODE, CHROMA_FORMAT_IDC, plane ?
+ 0 : pixel_get_hw_chroma_format_idc
+ (pixel_info->chroma_fmt_idc),
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+ MSVDX_CMDS, EXT_OP_MODE, MEMORY_PACKING,
+ output_config->pixel_info.mem_pkg ==
+ PIXEL_BIT10_MP ? 1 : 0, unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+ MSVDX_CMDS, EXT_OP_MODE, BIT_DEPTH_LUMA_MINUS8,
+ pixel_info->bitdepth_y - 8,
+ unsigned int, unsigned int);
+
+ if (pixel_info->chroma_fmt_idc == PIXEL_FORMAT_MONO) {
+ /*
+ * For monochrome streams use the same bit depth for
+ * chroma and luma.
+ */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+ MSVDX_CMDS, EXT_OP_MODE,
+ BIT_DEPTH_CHROMA_MINUS8,
+ pixel_info->bitdepth_y - 8,
+ unsigned int, unsigned int);
+ } else {
+ /*
+ * For normal streams use the appropriate bit depth for chroma.
+ */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE], MSVDX_CMDS,
+ EXT_OP_MODE, BIT_DEPTH_CHROMA_MINUS8,
+ pixel_info->bitdepth_c - 8,
+ unsigned int, unsigned int);
+ }
+ } else {
+ pict_cmds[VDECFW_CMD_EXT_OP_MODE] = 0;
+ }
+
+ if (str_configdata->vid_std != VDEC_STD_JPEG) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS,
+ OPERATING_MODE, CHROMA_INTERLEAVED,
+ PIXEL_GET_HW_CHROMA_INTERLEAVED
+ (output_config->pixel_info.chroma_interleave),
+ unsigned int, int);
+ }
+
+ if (str_configdata->vid_std == VDEC_STD_JPEG) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, ASYNC_MODE,
+ VDEC_MSVDX_ASYNC_VDMC,
+ unsigned int, unsigned int);
+ }
+
+ if (str_configdata->vid_std == VDEC_STD_H264) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS,
+ OPERATING_MODE, ASYNC_MODE,
+ str_unit->pict_hdr_info->discontinuous_mbs ?
+ VDEC_MSVDX_ASYNC_VDMC : VDEC_MSVDX_ASYNC_NORMAL,
+ unsigned int, int);
+ }
+
+ y_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_Y].stride;
+ uv_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_UV].stride;
+ v_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_V].stride;
+
+ if (((y_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+ ((uv_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+ ((v_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0)) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE,
+ USE_EXT_ROW_STRIDE, 1, unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXTENDED_ROW_STRIDE],
+ MSVDX_CMDS, EXTENDED_ROW_STRIDE,
+ EXT_ROW_STRIDE, y_stride >> 6, unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE],
+ MSVDX_CMDS, CHROMA_ROW_STRIDE,
+ CHROMA_ROW_STRIDE, uv_stride >> 6, unsigned int, unsigned int);
+ } else {
+ row_stride_code = get_stride_code(str_configdata->vid_std, y_stride);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE, ROW_STRIDE,
+ row_stride_code & 0x7, unsigned int, unsigned int);
+
+ if (str_configdata->vid_std == VDEC_STD_JPEG) {
+ /*
+ * Use the unused chroma interleaved flag
+ * to hold MSB of row stride code
+ */
+ IMG_ASSERT(row_stride_code < 16);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+ MSVDX_CMDS, OPERATING_MODE,
+ CHROMA_INTERLEAVED,
+ row_stride_code >> 3, unsigned int, unsigned int);
+ } else {
+ IMG_ASSERT(row_stride_code < 8);
+ }
+ }
+ pict_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+ buffers->recon_pict->rend_info.plane_info[0].offset;
+
+ pict_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+ buffers->recon_pict->rend_info.plane_info[1].offset;
+
+ pict_cmds[VDECFW_CMD_CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+ buffers->recon_pict->rend_info.plane_info[2].offset;
+
+ pict_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS] = 0;
+ pict_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS] = 0;
+
+#ifdef ERROR_CONCEALMENT
+ /* update error concealment frame info if available */
+ if (buffers->err_pict_bufinfo) {
+ pict_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(buffers->err_pict_bufinfo) +
+ buffers->recon_pict->rend_info.plane_info[0].offset;
+
+ pict_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(buffers->err_pict_bufinfo) +
+ buffers->recon_pict->rend_info.plane_info[1].offset;
+ }
+#endif
+
+ pict_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(buffers->intra_bufinfo);
+ pict_cmds[VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE] =
+ buffers->intra_bufsize_per_pipe / 3;
+ pict_cmds[VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE] =
+ buffers->intra_bufsize_per_pipe;
+ pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(buffers->auxline_bufinfo);
+ pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE] =
+ buffers->auxline_bufsize_per_pipe;
+
+ /*
+ * For PVDEC these registers need to be set even when the
+ * alternative output is not used.
+ */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_CHROMA_MINUS8,
+ output_config->pixel_info.bitdepth_c - 8, unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_LUMA_MINUS8,
+ output_config->pixel_info.bitdepth_y - 8, unsigned int, unsigned int);
+
+ /*
+ * The aux line buffer causes corruption in RV40 and VC1 streams
+ * with scaling/rotation enabled on Coral, so disable it for
+ * those standards.
+ */
+ benable_auxline_buf = benable_auxline_buf &&
+ (str_configdata->vid_std != VDEC_STD_REAL) &&
+ (str_configdata->vid_std != VDEC_STD_VC1);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ USE_AUX_LINE_BUF, benable_auxline_buf ? 1 : 0, unsigned int, int);
+}
+
+void vxd_set_altpictcmds(const struct vdecdd_str_unit *str_unit,
+ const struct vdec_str_configdata *str_configdata,
+ const struct vdec_str_opconfig *output_config,
+ const struct vxd_coreprops *coreprops,
+ const struct vxd_buffers *buffers,
+ unsigned int *pict_cmds)
+{
+ unsigned int row_stride_code;
+ unsigned int y_stride;
+ unsigned int uv_stride;
+ unsigned int v_stride;
+
+ y_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_Y].stride;
+ uv_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_UV].stride;
+ v_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_V].stride;
+
+ if (((y_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+ ((uv_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+ ((v_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0)) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ USE_EXT_ROT_ROW_STRIDE, 1, unsigned int, int);
+
+ /* 64-byte (min) aligned luma stride value. */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS,
+ ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ EXT_ROT_ROW_STRIDE, y_stride >> 6,
+ unsigned int, unsigned int);
+
+ /* 64-byte (min) aligned chroma stride value. */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE],
+ MSVDX_CMDS, CHROMA_ROW_STRIDE,
+ ALT_CHROMA_ROW_STRIDE, uv_stride >> 6,
+ unsigned int, unsigned int);
+ } else {
+ /*
+ * Obtain the code for buffer stride
+ * (must be less than 8, i.e. not JPEG strides)
+ */
+ row_stride_code =
+ get_stride_code(str_configdata->vid_std, y_stride);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS,
+ ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ ROTATION_ROW_STRIDE, row_stride_code & 0x7,
+ unsigned int, unsigned int);
+ }
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ SCALE_INPUT_SIZE_SEL,
+ ((output_config->pixel_info.chroma_fmt_idc !=
+ str_unit->seq_hdr_info->com_sequ_hdr_info.pixel_info.chroma_fmt_idc)) ?
+ 1 : 0, unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+ PACKED_422_OUTPUT,
+ (output_config->pixel_info.chroma_fmt_idc ==
+ PIXEL_FORMAT_422 &&
+ output_config->pixel_info.num_planes == 1) ? 1 : 0,
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_OUTPUT_FORMAT,
+ str_unit->seq_hdr_info->com_sequ_hdr_info.separate_chroma_planes ?
+ 0 : pixel_get_hw_chroma_format_idc
+ (output_config->pixel_info.chroma_fmt_idc),
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_CHROMA_MINUS8,
+ output_config->pixel_info.bitdepth_c - 8,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_LUMA_MINUS8,
+ output_config->pixel_info.bitdepth_y - 8,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_MEMORY_PACKING,
+ (output_config->pixel_info.mem_pkg ==
+ PIXEL_BIT10_MP) ? 1 : 0, unsigned int, int);
+
+ pict_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+ buffers->alt_pict->rend_info.plane_info[0].offset;
+
+ pict_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+ buffers->alt_pict->rend_info.plane_info[1].offset;
+
+ pict_cmds[VDECFW_CMD_CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+ (unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+ buffers->alt_pict->rend_info.plane_info[2].offset;
+}
+
+int vxd_getscalercmds(const struct scaler_config *scaler_config,
+ const struct scaler_pitch *pitch,
+ const struct scaler_filter *filter,
+ const struct pixel_pixinfo *out_loop_pixel_info,
+ struct scaler_params *params,
+ unsigned int *pict_cmds)
+{
+ const struct vxd_coreprops *coreprops = scaler_config->coreprops;
+ /*
+ * Indirectly detect the decoder core type (if HEVC is supported,
+ * it has to be a PVDEC core) and decide whether to force luma
+ * re-sampling.
+ */
+ unsigned char bforce_luma_resampling = coreprops->hevc[0];
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_OUTPUT_FORMAT,
+ scaler_config->bseparate_chroma_planes ? 0 :
+ pixel_get_hw_chroma_format_idc(out_loop_pixel_info->chroma_fmt_idc),
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ SCALE_CHROMA_RESAMP_ONLY, bforce_luma_resampling ? 0 :
+ (pitch->horiz_luma == FIXED(1, HIGHP)) &&
+ (pitch->vert_luma == FIXED(1, HIGHP)), unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_MEMORY_PACKING,
+ pixel_get_hw_memory_packing(out_loop_pixel_info->mem_pkg),
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_LUMA_MINUS8,
+ out_loop_pixel_info->bitdepth_y - 8,
+ unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ ALT_BIT_DEPTH_CHROMA_MINUS8,
+ out_loop_pixel_info->bitdepth_c - 8,
+ unsigned int, unsigned int);
+
+ /* Scale luma bifilter is always 0 for now */
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ SCALE_LUMA_BIFILTER_HORIZ,
+ 0, unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ SCALE_LUMA_BIFILTER_VERT,
+ 0, unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ SCALE_CHROMA_BIFILTER_HORIZ,
+ filter->bhoriz_bilinear ? 1 : 0,
+ unsigned int, int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+ MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+ SCALE_CHROMA_BIFILTER_VERT,
+ filter->bvert_bilinear ? 1 : 0, unsigned int, int);
+
+ /* For cores 7.x.x and later the pitch precision is 3.13 fixed point. */
+ params->fixed_point_shift = 13;
+
+ /* Calculate the fixed-point versions for use by the hardware. */
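+ /*
+ * Note (from the code below): the vertical pitches are rounded to
+ * nearest by adding half of the precision that is shifted away,
+ * while the horizontal pitches are truncated; each initial
+ * position is half of the corresponding pitch.
+ */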
+ params->vert_pitch = (int)((pitch->vert_luma +
+ (1 << (HIGHP - params->fixed_point_shift - 1))) >>
+ (HIGHP - params->fixed_point_shift));
+ params->vert_startpos = params->vert_pitch >> 1;
+ params->vert_pitch_chroma = (int)((pitch->vert_chroma +
+ (1 << (HIGHP - params->fixed_point_shift - 1))) >>
+ (HIGHP - params->fixed_point_shift));
+ params->vert_startpos_chroma = params->vert_pitch_chroma >> 1;
+ params->horz_pitch = (int)(pitch->horiz_luma >>
+ (HIGHP - params->fixed_point_shift));
+ params->horz_startpos = params->horz_pitch >> 1;
+ params->horz_pitch_chroma = (int)(pitch->horiz_chroma >>
+ (HIGHP - params->fixed_point_shift));
+ params->horz_startpos_chroma = params->horz_pitch_chroma >> 1;
+
+#ifdef HAS_HEVC
+ if (scaler_config->vidstd == VDEC_STD_HEVC) {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE,
+ PVDEC_SCALE_DISPLAY_WIDTH,
+ scaler_config->recon_width - 1,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE,
+ PVDEC_SCALE_DISPLAY_HEIGHT,
+ scaler_config->recon_height - 1,
+ unsigned int, unsigned int);
+ } else {
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+ SCALE_DISPLAY_WIDTH,
+ scaler_config->recon_width - 1,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+ SCALE_DISPLAY_HEIGHT,
+ scaler_config->recon_height - 1,
+ unsigned int, unsigned int);
+ }
+#else
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+ SCALE_DISPLAY_WIDTH,
+ scaler_config->recon_width - 1,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+ MSVDX_CMDS, SCALED_DISPLAY_SIZE, SCALE_DISPLAY_HEIGHT,
+ scaler_config->recon_height - 1,
+ unsigned int, unsigned int);
+#endif
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE],
+ MSVDX_CMDS, SCALE_OUTPUT_SIZE,
+ SCALE_OUTPUT_WIDTH_MIN1,
+ scaler_config->scale_width - 1,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE],
+ MSVDX_CMDS, SCALE_OUTPUT_SIZE,
+ SCALE_OUTPUT_HEIGHT_MIN1,
+ scaler_config->scale_height - 1,
+ unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL],
+ MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL,
+ HORIZONTAL_SCALE_PITCH, params->horz_pitch,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL],
+ MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL,
+ HORIZONTAL_INITIAL_POS, params->horz_startpos,
+ unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA],
+ MSVDX_CMDS, SCALE_HORIZONTAL_CHROMA,
+ CHROMA_HORIZONTAL_PITCH, params->horz_pitch_chroma,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA],
+ MSVDX_CMDS, SCALE_HORIZONTAL_CHROMA,
+ CHROMA_HORIZONTAL_INITIAL,
+ params->horz_startpos_chroma,
+ unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL],
+ MSVDX_CMDS, VERTICAL_SCALE_CONTROL,
+ VERTICAL_SCALE_PITCH, params->vert_pitch,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL],
+ MSVDX_CMDS, VERTICAL_SCALE_CONTROL,
+ VERTICAL_INITIAL_POS, params->vert_startpos,
+ unsigned int, unsigned int);
+
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA],
+ MSVDX_CMDS, SCALE_VERTICAL_CHROMA,
+ CHROMA_VERTICAL_PITCH, params->vert_pitch_chroma,
+ unsigned int, unsigned int);
+ REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA],
+ MSVDX_CMDS, SCALE_VERTICAL_CHROMA,
+ CHROMA_VERTICAL_INITIAL,
+ params->vert_startpos_chroma,
+ unsigned int, unsigned int);
+ return 0;
+}
+
+unsigned int vxd_get_codedpicsize(unsigned short width_min1, unsigned short height_min1)
+{
+ unsigned int reg = 0;
+
+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_CMDS, CODED_PICTURE_SIZE,
+ CODED_PICTURE_WIDTH, width_min1,
+ unsigned short);
+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_CMDS, CODED_PICTURE_SIZE,
+ CODED_PICTURE_HEIGHT, height_min1,
+ unsigned short);
+
+ return reg;
+}
+
+unsigned char vxd_get_codedmode(enum vdec_vid_std vidstd)
+{
+ return (unsigned char)amsvdx_codecmode[vidstd];
+}
+
+void vxd_get_coreproperties(void *hndl_coreproperties,
+ struct vxd_coreprops *vxd_coreprops)
+{
+ struct vxd_core_props *props =
+ (struct vxd_core_props *)hndl_coreproperties;
+
+ vxd_getcoreproperties(vxd_coreprops, props->core_rev,
+ props->pvdec_core_id,
+ props->mmu_config0,
+ props->mmu_config1,
+ props->pixel_pipe_cfg,
+ props->pixel_misc_cfg,
+ props->pixel_max_frame_cfg);
+}
+
+int vxd_get_pictattrs(unsigned int flags, struct vxd_pict_attrs *pict_attrs)
+{
+ if (flags & (VXD_FW_MSG_FLAG_DWR | VXD_FW_MSG_FLAG_FATAL))
+ pict_attrs->dwrfired = 1;
+ if (flags & VXD_FW_MSG_FLAG_MMU_FAULT)
+ pict_attrs->mmufault = 1;
+ if (flags & VXD_FW_MSG_FLAG_DEV_ERR)
+ pict_attrs->deverror = 1;
+
+ return 0;
+}
+
+int vxd_get_msgerrattr(unsigned int flags, enum vxd_msg_attr *msg_attr)
+{
+ if (flags & ~VXD_FW_MSG_FLAG_CANCELED)
+ *msg_attr = VXD_MSG_ATTR_FATAL;
+ else if (flags & VXD_FW_MSG_FLAG_CANCELED)
+ *msg_attr = VXD_MSG_ATTR_CANCELED;
+ else
+ *msg_attr = VXD_MSG_ATTR_NONE;
+
+ return 0;
+}
+
+int vxd_set_msgflag(enum vxd_msg_flag input_flag, unsigned int *flags)
+{
+ switch (input_flag) {
+ case VXD_MSG_FLAG_DROP:
+ *flags |= VXD_FW_MSG_FLAG_DROP;
+ break;
+ case VXD_MSG_FLAG_EXCL:
+ *flags |= VXD_FW_MSG_FLAG_EXCL;
+ break;
+ default:
+ return IMG_ERROR_FATAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_int.h b/drivers/media/platform/vxe-vxd/decoder/vxd_int.h
new file mode 100644
index 000000000000..a294e0d6044f
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_int.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef _VXD_INT_H
+#define _VXD_INT_H
+
+#include "fw_interface.h"
+#include "scaler_setup.h"
+#include "vdecdd_defs.h"
+#include "vdecfw_shared.h"
+#include "vdec_defs.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+
+/*
+ * Size of buffer used for batching messages
+ */
+#define BATCH_MSG_BUFFER_SIZE (8 * 4096)
+
+#define INTRA_BUF_SIZE (1024 * 32)
+#define AUX_LINE_BUFFER_SIZE (512 * 1024)
+
+#define MAX_PICTURE_WIDTH (4096)
+#define MAX_PICTURE_HEIGHT (4096)
+
+/*
+ * this macro returns the host address of a device buffer.
+ */
+#define GET_HOST_ADDR(buf) ((buf)->dev_virt)
+
+#define GET_HOST_ADDR_OFFSET(buf, offset) (((buf)->dev_virt) + (offset))
+
+/*
+ * The extended stride alignment for VXD.
+ */
+#define VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT (64)
+
+struct vxd_buffers {
+ struct vdecdd_ddpict_buf *recon_pict;
+ struct vdecdd_ddpict_buf *alt_pict;
+ struct vidio_ddbufinfo *intra_bufinfo;
+ struct vidio_ddbufinfo *auxline_bufinfo;
+ struct vidio_ddbufinfo *err_pict_bufinfo;
+ unsigned int intra_bufsize_per_pipe;
+ unsigned int auxline_bufsize_per_pipe;
+ struct vidio_ddbufinfo *msb_bufinfo;
+ unsigned char btwopass;
+};
+
+struct pvdec_core_rev {
+ unsigned int maj_rev;
+ unsigned int min_rev;
+ unsigned int maint_rev;
+ unsigned int int_rev;
+};
+
+/*
+ * this has all that it needs to translate a Stream Unit for a picture
+ * into the alternative-output part of a transaction.
+ */
+void vxd_set_altpictcmds(const struct vdecdd_str_unit *str_unit,
+ const struct vdec_str_configdata *str_configdata,
+ const struct vdec_str_opconfig *output_config,
+ const struct vxd_coreprops *coreprops,
+ const struct vxd_buffers *buffers,
+ unsigned int *pict_cmds);
+
+/*
+ * this has all that it needs to translate a Stream Unit for
+ * a picture into the reconstructed-picture part of a transaction.
+ */
+void vxd_set_reconpictcmds(const struct vdecdd_str_unit *str_unit,
+ const struct vdec_str_configdata *str_configdata,
+ const struct vdec_str_opconfig *output_config,
+ const struct vxd_coreprops *coreprops,
+ const struct vxd_buffers *buffers,
+ unsigned int *pict_cmds);
+
+int vxd_getscalercmds(const struct scaler_config *scaler_config,
+ const struct scaler_pitch *pitch,
+ const struct scaler_filter *filter,
+ const struct pixel_pixinfo *out_loop_pixel_info,
+ struct scaler_params *params,
+ unsigned int *pict_cmds);
+
+/*
+ * this creates the value of the MSVDX_CMDS_CODED_PICTURE_SIZE register.
+ */
+unsigned int vxd_get_codedpicsize(unsigned short width_min1, unsigned short height_min1);
+
+/*
+ * returns the HW codec mode for the given video standard.
+ */
+unsigned char vxd_get_codedmode(enum vdec_vid_std vidstd);
+
+/*
+ * translates core properties into a struct vxd_coreprops.
+ */
+void vxd_get_coreproperties(void *hndl_coreproperties,
+ struct vxd_coreprops *vxd_coreprops);
+
+/*
+ * translates picture attributes into a struct vxd_pict_attrs.
+ */
+int vxd_get_pictattrs(unsigned int flags, struct vxd_pict_attrs *pict_attrs);
+
+/*
+ * translates message error flags into an enum vxd_msg_attr.
+ */
+int vxd_get_msgerrattr(unsigned int flags, enum vxd_msg_attr *msg_attr);
+
+/*
+ * sets a message flag.
+ */
+int vxd_set_msgflag(enum vxd_msg_flag input_flag, unsigned int *flags);
+
+#endif /* _VXD_INT_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_mmu_defs.h b/drivers/media/platform/vxe-vxd/decoder/vxd_mmu_defs.h
new file mode 100644
index 000000000000..77d493cb39f2
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_mmu_defs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * V-DEC MMU Definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ */
+
+#ifndef _VXD_MMU_DEF_H_
+#define _VXD_MMU_DEF_H_
+
+/*
+ * This type defines the MMU heaps.
+ * @MMU_HEAP_IMAGE_BUFFERS_UNTILED: heap for untiled video buffers
+ * @MMU_HEAP_BITSTREAM_BUFFERS: heap for bitstream buffers
+ * @MMU_HEAP_STREAM_BUFFERS: heap for stream buffers
+ * @MMU_HEAP_MAX: number of heaps
+ */
+enum mmu_eheap_id {
+ MMU_HEAP_IMAGE_BUFFERS_UNTILED = 0x00,
+ MMU_HEAP_BITSTREAM_BUFFERS,
+ MMU_HEAP_STREAM_BUFFERS,
+ MMU_HEAP_MAX,
+ MMU_HEAP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif /* _VXD_MMU_DEF_H_ */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_props.h b/drivers/media/platform/vxe-vxd/decoder/vxd_props.h
new file mode 100644
index 000000000000..bdab182859a7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_props.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Low-level VXD interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VXD_PROPS_H
+#define _VXD_PROPS_H
+
+#include "vdec_defs.h"
+#include "imgmmu.h"
+
+#define VDEC_MAX_PIXEL_PIPES 2
+
+#define VXD_MAX_CORES 1
+#define VER_STR_LEN 64
+
+#define CORE_REVISION(maj, min, maint) \
+ ((((maj) & 0xff) << 16) | (((min) & 0xff) << 8) | (((maint) & 0xff)))
+#define MAJOR_REVISION(rev) (((rev) >> 16) & 0xff)
+#define MINOR_REVISION(rev) (((rev) >> 8) & 0xff)
+#define MAINT_REVISION(rev) ((rev) & 0xff)
+
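+/*
+ * Note: FROM_REV is not self-contained - it evaluates maj_rev, min_rev
+ * and maint_rev from the scope of the caller.
+ */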
+#define FROM_REV(maj, min, maint, type) \
+ ({ \
+ type __maj = maj; \
+ type __min = min; \
+ (((maj_rev) > (__maj)) || \
+ (((maj_rev) == (__maj)) && ((min_rev) > (__min))) || \
+ (((maj_rev) == (__maj)) && ((min_rev) == (__min)) && \
+ ((int)(maint_rev) >= (maint)))); })
+
+struct vxd_vidstd_props {
+ enum vdec_vid_std vidstd;
+ unsigned int core_rev;
+ unsigned int min_width;
+ unsigned int min_height;
+ unsigned int max_width;
+ unsigned int max_height;
+ unsigned int max_macroblocks;
+ unsigned int max_luma_bitdepth;
+ unsigned int max_chroma_bitdepth;
+ enum pixel_fmt_idc max_chroma_format;
+};
+
+struct vxd_coreprops {
+ unsigned char aversion[VER_STR_LEN];
+ unsigned char mpeg2[VDEC_MAX_PIXEL_PIPES];
+ unsigned char mpeg4[VDEC_MAX_PIXEL_PIPES];
+ unsigned char h264[VDEC_MAX_PIXEL_PIPES];
+ unsigned char vc1[VDEC_MAX_PIXEL_PIPES];
+ unsigned char avs[VDEC_MAX_PIXEL_PIPES];
+ unsigned char real[VDEC_MAX_PIXEL_PIPES];
+ unsigned char jpeg[VDEC_MAX_PIXEL_PIPES];
+ unsigned char vp6[VDEC_MAX_PIXEL_PIPES];
+ unsigned char vp8[VDEC_MAX_PIXEL_PIPES];
+ unsigned char hevc[VDEC_MAX_PIXEL_PIPES];
+ unsigned char rotation_support[VDEC_MAX_PIXEL_PIPES];
+ unsigned char scaling_support[VDEC_MAX_PIXEL_PIPES];
+ unsigned char hd_support;
+ unsigned int num_streams[VDEC_MAX_PIXEL_PIPES];
+ unsigned int num_entropy_pipes;
+ unsigned int num_pixel_pipes;
+ struct vxd_vidstd_props vidstd_props[VDEC_STD_MAX];
+ enum mmu_etype mmu_type;
+ unsigned char mmu_support_stride_per_context;
+ unsigned char mmu_support_secure;
+ /* Range extensions supported by hw -> used only by hevc */
+ unsigned char hevc_range_ext[VDEC_MAX_PIXEL_PIPES];
+};
+
+#endif /* _VXD_PROPS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec.c b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec.c
new file mode 100644
index 000000000000..c2b59c3dd164
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec.c
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC PVDEC function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/time64.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_dec_common.h"
+#include "img_pvdec_test_regs.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "vxd_pvdec_priv.h"
+#include "vxd_pvdec_regs.h"
+
+#ifdef PVDEC_SINGLETHREADED_IO
+static DEFINE_SPINLOCK(pvdec_irq_lock);
+static ulong pvdec_irq_flags;
+#endif
+
+static const ulong vxd_plat_poll_udelay = 100;
+
+/*
+ * Returns the remainder of *n / base and updates *n with the quotient
+ * (an open-coded variant of the kernel's do_div()).
+ */
+static inline unsigned int do_divide(unsigned long long *n, unsigned int base)
+{
+ unsigned int remainder = *n % base;
+
+ *n = *n / base;
+ return remainder;
+}
+
+/*
+ * Reads PROC_DEBUG register and provides number of MTX RAM banks
+ * and their size
+ */
+static int pvdec_get_mtx_ram_info(void __iomem *reg_base, int *bank_cnt,
+ unsigned long *bank_size,
+ unsigned long *last_bank_size)
+{
+ unsigned int ram_bank_count, reg;
+
+ reg = VXD_RD_REG(reg_base, PVDEC_CORE, PROC_DEBUG);
+ ram_bank_count = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PROC_DEBUG, MTX_RAM_BANKS);
+ if (!ram_bank_count)
+ return -EIO;
+
+ if (bank_cnt)
+ *bank_cnt = ram_bank_count;
+
+ if (bank_size) {
+ unsigned int ram_bank_size = VXD_RD_REG_FIELD(reg, PVDEC_CORE,
+ PROC_DEBUG, MTX_RAM_BANK_SIZE);
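+ /* The field encodes log2(bank size) - 2, i.e. size = 4 << field. */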
+ *bank_size = 1 << (ram_bank_size + 2);
+ }
+
+ if (last_bank_size) {
+ unsigned int last_bank = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PROC_DEBUG,
+ MTX_LAST_RAM_BANK_SIZE);
+ unsigned char new_representation = VXD_RD_REG_FIELD(reg,
+ PVDEC_CORE, PROC_DEBUG, MTX_RAM_NEW_REPRESENTATION);
+ if (new_representation) {
+ *last_bank_size = 1024 * last_bank;
+ } else {
+ *last_bank_size = 1 << (last_bank + 2);
+ if (bank_cnt && last_bank == 13 && *bank_cnt == 4) {
+ /*
+ * VXD hardware ambiguity:
+ * old cores confuse 120k and 128k,
+ * so assume the worst case.
+ */
+ *last_bank_size -= 0x2000;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Provides size of MTX RAM in bytes */
+static int pvdec_get_mtx_ram_size(void __iomem *reg_base, unsigned int *ram_size)
+{
+ int bank_cnt, ret;
+ unsigned long bank_size, last_bank_size;
+
+ ret = pvdec_get_mtx_ram_info(reg_base, &bank_cnt, &bank_size, &last_bank_size);
+ if (ret)
+ return ret;
+
+ *ram_size = (bank_cnt - 1) * bank_size + last_bank_size;
+
+ return 0;
+}
+
+/* Poll for single register-based transfer to/from MTX to complete */
+static int pvdec_wait_mtx_reg_access(void __iomem *reg_base, unsigned int *mtx_fault)
+{
+ unsigned int pvdec_timeout = PVDEC_TIMEOUT_COUNTER, reg;
+
+ do {
+ /* Check MTX is OK */
+ reg = VXD_RD_REG(reg_base, MTX_CORE, MTX_FAULT0);
+ if (reg != 0) {
+ *mtx_fault = reg;
+ return -EIO;
+ }
+
+ pvdec_timeout--;
+ reg = VXD_RD_REG(reg_base, MTX_CORE, MTX_REG_READ_WRITE_REQUEST);
+ } while ((VXD_RD_REG_FIELD(reg, MTX_CORE,
+ MTX_REG_READ_WRITE_REQUEST,
+ MTX_DREADY) == 0) &&
+ (pvdec_timeout != 0));
+
+ if (pvdec_timeout == 0)
+ return -EIO;
+
+ return 0;
+}
+
+static void pvdec_mtx_status_dump(void __iomem *reg_base, unsigned int *status)
+{
+ unsigned int reg;
+
+ pr_debug("%s: *** dumping status ***\n", __func__);
+
+#define READ_MTX_REG(_NAME_) \
+ do { \
+ unsigned int val; \
+ VXD_WR_REG(reg_base, MTX_CORE, \
+ MTX_REG_READ_WRITE_REQUEST, reg); \
+ if (pvdec_wait_mtx_reg_access(reg_base, &reg)) { \
+ pr_debug("%s: " \
+ "MTX REG RD fault: 0x%08x\n", __func__, reg); \
+ break; \
+ } \
+ val = VXD_RD_REG(reg_base, MTX_CORE, MTX_REG_READ_WRITE_DATA); \
+ if (status) \
+ *status++ = val; \
+ pr_debug("%s: " _NAME_ ": 0x%08x\n", __func__, val); \
+ } while (0)
+
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC or PCX */
+ MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 5);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 0);
+ READ_MTX_REG("MTX PC");
+
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC or PCX */
+ MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 5);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PCX */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 1);
+ READ_MTX_REG("MTX PCX");
+
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* A0StP */
+ MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 3);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE,
+ MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 0);
+ READ_MTX_REG("MTX A0STP");
+
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+ MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* A0FrP */
+ MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 3);
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 1);
+ READ_MTX_REG("MTX A0FRP");
+#undef READ_MTX_REG
+
+ pr_debug("%s: *** status dump done ***\n", __func__);
+}
+
+static void pvdec_prep_fw_upload(const void *dev,
+ void __iomem *reg_base,
+ struct vxd_ena_params *ena_params,
+ unsigned char dma_channel)
+{
+ unsigned int fw_vxd_virt_addr = ena_params->fw_buf_virt_addr;
+ unsigned int vxd_ptd_addr = ena_params->ptd;
+ unsigned int reg = 0;
+ int i;
+ unsigned int flags = PVDEC_FWFLAG_FORCE_FS_FLOW |
+ PVDEC_FWFLAG_DISABLE_GENC_FLUSHING |
+ PVDEC_FWFLAG_DISABLE_AUTONOMOUS_RESET |
+ PVDEC_FWFLAG_DISABLE_IDLE_GPIO |
+ PVDEC_FWFLAG_ENABLE_ERROR_CONCEALMENT;
+
+ if (ena_params->secure)
+ flags |= PVDEC_FWFLAG_BIG_TO_HOST_BUFFER;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: fw_virt: 0x%x, ptd: 0x%x, dma ch: %u, flags: 0x%x\n",
+ __func__, fw_vxd_virt_addr, vxd_ptd_addr, dma_channel, flags);
+#endif
+
+ /* Reset MTX */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SOFT_RESET, MTX_RESET, 1);
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SOFT_RESET, reg);
+ /*
+ * NOTE: The MTX reset bit is WRITE ONLY, so we cannot
+ * check the reset procedure has finished, thus BEWARE to put
+ * any MTX_CORE* access just after this line
+ */
+
+ /* Clear COMMS RAM header */
+ for (i = 0; i < PVDEC_FW_COMMS_HDR_SIZE; i++)
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + i * sizeof(unsigned int), 0);
+
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET, flags);
+ /* Do not wait for debug FIFO flag - set it only when requested */
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_SIGNATURE_OFFSET,
+ !ena_params->wait_dbg_fifo);
+
+ /*
+ * Clear the bypass bits and enable extended addressing in MMU.
+ * Firmware depends on this configuration, so we have to set it,
+ * even if firmware is being uploaded via registers.
+ */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, UPPER_ADDR_FIXED, 0);
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, MMU_ENA_EXT_ADDR, 1);
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, MMU_BYPASS, 0);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, reg);
+
+ /*
+ * Buffer device virtual address.
+ * This is an address of a firmware blob, firmware reads this base
+ * address from DMAC_SETUP register and uses to load the modules, so it
+ * has to be set even when uploading the FW via registers.
+ */
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_SETUP, fw_vxd_virt_addr, dma_channel);
+
+ /*
+ * Set base address of PTD. Same as before, has to be configured even
+ * when uploading the firmware via regs, FW uses it to execute DMA
+ * before switching to stream MMU context.
+ */
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_DIR_BASE_ADDR, vxd_ptd_addr);
+
+ /* Configure MMU bank index - Use bank 0 */
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_BANK_INDEX, 0);
+
+ /* Set the MTX timer divider register */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_EN, 1);
+ /*
+ * Setting max freq - divide by 1 for better measurement accuracy
+ * during fw upload stage
+ */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_DIV, 0);
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, reg);
+}
+
+static int pvdec_check_fw_sig(void __iomem *reg_base)
+{
+ unsigned int fw_sig = VXD_RD_REG_ABS(reg_base, VLR_OFFSET +
+ PVDEC_FW_SIGNATURE_OFFSET);
+
+ if (fw_sig != PVDEC_FW_READY_SIG)
+ return -EIO;
+
+ return 0;
+}
+
+static void pvdec_kick_mtx(void __iomem *reg_base)
+{
+ unsigned int reg = 0;
+
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_KICKI, MTX_KICKI, 1);
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_KICKI, reg);
+}
+
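+/* Copy a buffer of dwords into the VLR (comms RAM) at a dword offset. */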
+static int pvdec_write_vlr(void __iomem *reg_base, const unsigned int *buf,
+ unsigned long size_dwrds, int off_dwrds)
+{
+ unsigned int i;
+
+ if (((off_dwrds + size_dwrds) * sizeof(unsigned int)) > VLR_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < size_dwrds; i++) {
+ int off = (off_dwrds + i) * sizeof(unsigned int);
+
+ VXD_WR_REG_ABS(reg_base, (VLR_OFFSET + off), *buf);
+ buf++;
+ }
+
+ return 0;
+}
+
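+/*
+ * Poll for the firmware boot signature: roughly 2.5 ms of fine-grained
+ * polling first, then up to poll_params->msleep_cycles coarse 100 ms steps.
+ */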
+static int pvdec_poll_fw_boot(void __iomem *reg_base, struct vxd_boot_poll_params *poll_params)
+{
+ unsigned int i;
+
+ for (i = 0; i < 25; i++) {
+ if (!pvdec_check_fw_sig(reg_base))
+ return 0;
+ usleep_range(100, 110);
+ }
+ for (i = 0; i < poll_params->msleep_cycles; i++) {
+ if (!pvdec_check_fw_sig(reg_base))
+ return 0;
+ msleep(100);
+ }
+ return -EIO;
+}
+
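+/* Copy dwords out of the VLR (comms RAM) from a dword offset. */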
+static int pvdec_read_vlr(void __iomem *reg_base, unsigned int *buf,
+ unsigned long size_dwrds, int off_dwrds)
+{
+ unsigned int i;
+
+ if (((off_dwrds + size_dwrds) * sizeof(unsigned int)) > VLR_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < size_dwrds; i++) {
+ int off = (off_dwrds + i) * sizeof(unsigned int);
+ *buf++ = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET + off));
+ }
+
+ return 0;
+}
+
+/* Get configuration of a ring buffer used to send messages to the MTX */
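+/*
+ * The config word packs both the ring size and its offset within the VLR
+ * (PVDEC_FW_COM_BUF_SIZE/_OFF); the read and write indices live in
+ * separate VLR words and are expressed in dwords.
+ */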
+static int pvdec_get_to_mtx_cfg(void __iomem *reg_base, unsigned long *size, int *off,
+ unsigned int *wr_idx, unsigned int *rd_idx)
+{
+ unsigned int to_mtx_cfg;
+ int to_mtx_off, ret;
+
+ ret = pvdec_check_fw_sig(reg_base);
+ if (ret)
+ return ret;
+
+ to_mtx_cfg = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_BUF_CONF_OFFSET);
+
+ *size = PVDEC_FW_COM_BUF_SIZE(to_mtx_cfg);
+ to_mtx_off = PVDEC_FW_COM_BUF_OFF(to_mtx_cfg);
+
+ if (to_mtx_off % 4)
+ return -EIO;
+
+ to_mtx_off /= sizeof(unsigned int);
+ *off = to_mtx_off;
+
+ *wr_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET);
+ *rd_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_RD_IDX_OFFSET);
+
+ if ((*rd_idx >= *size) || (*wr_idx >= *size))
+ return -EIO;
+
+ return 0;
+}
+
+/* Submit a padding message to the host->MTX ring buffer */
+static int pvdec_send_pad_msg(void __iomem *reg_base)
+{
+ int ret, pad_size, to_mtx_off; /* offset in dwords */
+ unsigned int wr_idx, rd_idx; /* indices in dwords */
+ unsigned long pad_msg_size = 1, to_mtx_size; /* size in dwords */
+ const unsigned long max_msg_size = VXD_MAX_PAYLOAD_SIZE / sizeof(unsigned int);
+ unsigned int pad_msg;
+
+ ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+ if (ret)
+ return ret;
+
+ pad_size = to_mtx_size - wr_idx; /* size in dwords */
+
+ if (pad_size <= 0) {
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, 0);
+ return 0;
+ }
+
+ while (pad_size > 0) {
+ int cur_pad_size = pad_size > max_msg_size ?
+ max_msg_size : pad_size;
+
+ pad_msg = 0;
+ pad_msg = VXD_WR_REG_FIELD(pad_msg, PVDEC_FW, DEVA_GENMSG, MSG_SIZE, cur_pad_size);
+ pad_msg = VXD_WR_REG_FIELD(pad_msg, PVDEC_FW, DEVA_GENMSG,
+ MSG_TYPE, PVDEC_FW_MSG_TYPE_PADDING);
+
+ ret = pvdec_write_vlr(reg_base, &pad_msg, pad_msg_size, to_mtx_off + wr_idx);
+ if (ret)
+ return ret;
+
+ wr_idx += cur_pad_size;
+
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+ pad_size -= cur_pad_size;
+
+ pvdec_kick_mtx(reg_base);
+ }
+
+ wr_idx = 0;
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+ return 0;
+}
+
+/*
+ * Check if there is enough space in comms RAM to submit a <msg_size>
+ * dwords long message. Submit a padding message if necessary and requested.
+ *
+ * Returns 0 if there is space for a message.
+ * Returns -EINVAL when msg is too big or empty.
+ * Returns -EIO when there was a problem accessing the HW.
+ * Returns -EBUSY when there is not enough space.
+ */
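+/*
+ * Example (hypothetical numbers): with to_mtx_size = 1024 dwords,
+ * wr_idx = 1000, rd_idx = 100, msg_size = 64 and padding enabled, the
+ * message does not fit at the tail (1000 + 64 >= 1024), so a 24-dword
+ * padding message is sent, wr_idx wraps to 0, and the message then fits
+ * at the start of the buffer since 64 < rd_idx.
+ */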
+static int pvdec_check_comms_space(void __iomem *reg_base, unsigned long msg_size,
+ unsigned char send_padding)
+{
+ int ret, to_mtx_off; /* offset in dwords */
+ unsigned int wr_idx, rd_idx; /* indices in dwords */
+ unsigned long to_mtx_size; /* size in dwords */
+
+ ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+ if (ret)
+ return ret;
+
+ /* Enormous or empty message, won't fit */
+ if (msg_size >= to_mtx_size || !msg_size)
+ return -EINVAL;
+
+ /* Buffer does not wrap */
+ if (wr_idx >= rd_idx) {
+ /* Is there enough space to put the message? */
+ if (wr_idx + msg_size < to_mtx_size)
+ return 0;
+
+ if (!send_padding)
+ return -EBUSY;
+
+ /* Check if it's ok to send a padding message */
+ if (rd_idx == 0)
+ return -EBUSY;
+
+ /* Send a padding message */
+ ret = pvdec_send_pad_msg(reg_base);
+ if (ret)
+ return ret;
+
+ /*
+ * And check if there's enough space at the beginning
+ * of a buffer
+ */
+ if (msg_size >= rd_idx)
+ return -EBUSY; /* Not enough space at the beginning */
+
+ } else { /* Buffer wraps */
+ if (wr_idx + msg_size >= rd_idx)
+ return -EBUSY; /* Not enough space! */
+ }
+
+ return 0;
+}
+
+/* Get configuration of a ring buffer used to receive messages from the MTX */
+static int pvdec_get_to_host_cfg(void __iomem *reg_base, unsigned long *size, int *off,
+ unsigned int *wr_idx, unsigned int *rd_idx)
+{
+ unsigned int to_host_cfg;
+ int to_host_off, ret;
+
+ ret = pvdec_check_fw_sig(reg_base);
+ if (ret)
+ return ret;
+
+ to_host_cfg = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_BUF_CONF_OFFSET);
+
+ *size = PVDEC_FW_COM_BUF_SIZE(to_host_cfg);
+ to_host_off = PVDEC_FW_COM_BUF_OFF(to_host_cfg);
+
+ if (to_host_off % 4)
+ return -EIO;
+
+ to_host_off /= sizeof(unsigned int);
+ *off = to_host_off;
+
+ *wr_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_WR_IDX_OFFSET);
+ *rd_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_RD_IDX_OFFSET);
+
+ if ((*rd_idx >= *size) || (*wr_idx >= *size))
+ return -EIO;
+
+ return 0;
+}
+
+static void pvdec_select_pipe(void __iomem *reg_base, unsigned char pipe)
+{
+ unsigned int reg = 0;
+
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT, PIPE_SEL, pipe);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT, reg);
+}
+
+static void pvdec_pre_boot_setup(const void *dev,
+ void __iomem *reg_base,
+ struct vxd_ena_params *ena_params)
+{
+ /* Memory staller pre boot settings */
+ if (ena_params->mem_staller.data) {
+ unsigned char size = ena_params->mem_staller.size;
+
+ if (size == PVDEC_CORE_MEMSTALLER_ELEMENTS) {
+ unsigned int *data = ena_params->mem_staller.data;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: Setting up memory staller", __func__);
+#endif
+ /*
+ * Data structure represents PVDEC_TEST memory staller
+ * registers according to TRM 5.25 section
+ */
+ VXD_WR_REG(reg_base, PVDEC_TEST, MEM_READ_LATENCY, data[0]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, MEM_WRITE_RESPONSE_LATENCY, data[1]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, MEM_CTRL, data[2]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_CMD_CONFIG, data[3]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_WDATA_CONFIG, data[4]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_WRESP_CONFIG, data[5]);
+ VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_RDATA_CONFIG, data[6]);
+ } else {
+ dev_warn(dev, "%s: Wrong layout of mem staller config (%u)!",
+ __func__, size);
+ }
+ }
+}
+
+static void pvdec_post_boot_setup(const void *dev,
+ void __iomem *reg_base,
+ unsigned int freq_khz)
+{
+ int reg;
+
+ /*
+ * Configure VXD MMU to use video tiles (256x16) and unique
+ * strides per context as default. There is currently no
+ * override mechanism.
+ */
+ reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0);
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0,
+ MMU_TILING_SCHEME, 0);
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0,
+ USE_TILE_STRIDE_PER_CTX, 1);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0, reg);
+
+ /*
+ * Setup VXD MMU with the tile heap device virtual address
+ * ranges.
+ */
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+ PVDEC_HEAP_TILE512_START, 0);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+ PVDEC_HEAP_TILE512_START + PVDEC_HEAP_TILE512_SIZE - 1, 0);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+ PVDEC_HEAP_TILE1024_START, 1);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+ PVDEC_HEAP_TILE1024_START + PVDEC_HEAP_TILE1024_SIZE - 1, 1);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+ PVDEC_HEAP_TILE2048_START, 2);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+ PVDEC_HEAP_TILE2048_START + PVDEC_HEAP_TILE2048_SIZE - 1, 2);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+ PVDEC_HEAP_TILE4096_START, 3);
+ VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+ PVDEC_HEAP_TILE4096_START + PVDEC_HEAP_TILE4096_SIZE - 1, 3);
+
+ /* Disable timer */
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, 0);
+
+ reg = 0;
+ if (freq_khz)
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_DIV,
+ PVDEC_CALC_TIMER_DIV(freq_khz / 1000));
+ else
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV,
+ TIMER_DIV, PVDEC_CLK_MHZ_DEFAULT - 1);
+
+ /* Enable the MTX timer with final settings */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_EN, 1);
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, reg);
+}
+
+static void pvdec_clock_measure(void __iomem *reg_base,
+ struct timespec64 *start_time,
+ unsigned int *start_ticks)
+{
+ local_irq_disable();
+ ktime_get_real_ts64(start_time);
+ *start_ticks = VXD_RD_REG(reg_base, MTX_CORE, MTX_SYSC_TXTIMER);
+ local_irq_enable();
+}
+
+static int pvdec_clock_calculate(const void *dev,
+ void __iomem *reg_base,
+ struct timespec64 *start_time,
+ unsigned int start_ticks,
+ unsigned int *freq_khz)
+{
+ struct timespec64 end_time, dif_time;
+ long long span_nsec = 0;
+ unsigned int stop_ticks, tot_ticks;
+
+ local_irq_disable();
+ ktime_get_real_ts64(&end_time);
+
+ stop_ticks = VXD_RD_REG(reg_base, MTX_CORE, MTX_SYSC_TXTIMER);
+ local_irq_enable();
+
+ dif_time = timespec64_sub(end_time, *start_time);
+
+ span_nsec = timespec64_to_ns(&dif_time);
+
+ /* Sanity check for mtx timer */
+ if (!stop_ticks || stop_ticks < start_ticks) {
+ dev_err(dev, "%s: invalid ticks (0x%x -> 0x%x)\n",
+ __func__, start_ticks, stop_ticks);
+ return -EIO;
+ }
+ tot_ticks = stop_ticks - start_ticks;
+
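+ /* freq[kHz] = ticks / span[ns] * 1e9 / 1000 = ticks * 1e6 / span[ns] */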
+ if (span_nsec) {
+ unsigned long long res = (unsigned long long)tot_ticks * 1000000UL;
+
+ do_divide(&res, span_nsec);
+ *freq_khz = (unsigned int)res;
+ if (*freq_khz < 1000)
+ *freq_khz = 1000; /* 1MHz */
+ } else {
+ dev_err(dev, "%s: generic failure!\n", __func__);
+ *freq_khz = 0;
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int pvdec_wait_dma_done(const void *dev,
+ void __iomem *reg_base,
+ unsigned long size,
+ unsigned char dma_channel)
+{
+ unsigned int reg, timeout = PVDEC_TIMEOUT_COUNTER, prev_count, count = size;
+
+ do {
+ usleep_range(300, 310);
+ prev_count = count;
+ reg = VXD_RD_RPT_REG(reg_base, DMAC, DMAC_COUNT, dma_channel);
+ count = VXD_RD_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT);
+ /* Check for dma progress */
+ if (count == prev_count) {
+ /* There could be a bus lag, protect against that */
+ timeout--;
+ if (timeout == 0) {
+ dev_err(dev, "%s FW DMA failed! (0x%x)\n", __func__, count);
+ return -EIO;
+ }
+ } else {
+ /* Reset timeout counter */
+ timeout = PVDEC_TIMEOUT_COUNTER;
+ }
+ } while (count > 0);
+
+ return 0;
+}
+
+static int pvdec_start_fw_dma(const void *dev,
+ void __iomem *reg_base,
+ unsigned char dma_channel,
+ unsigned long fw_buf_size,
+ unsigned int *freq_khz)
+{
+ unsigned int reg = 0;
+ int ret = 0;
+
+ fw_buf_size = fw_buf_size / sizeof(unsigned int);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: dma FW upload, fw_buf_size: %lu (dwords)\n", __func__, fw_buf_size);
+#endif
+
+ pvdec_select_pipe(reg_base, 1);
+
+ reg = VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA);
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, PIXEL_DMAC_MAN_CLK_ENA, 1);
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, PIXEL_REG_MAN_CLK_ENA, 1);
+ VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, reg);
+
+ /*
+ * Setup MTX to receive DMA
+ * DMA transfers to/from the MTX have to be 32-bit aligned and
+ * in multiples of 32 bits
+ */
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_CDMAA, 0); /* MTX: 0x80900000 */
+
+ reg = 0;
+ /* Burst size in multiples of 64 bits (allowed values are 2 or 4) */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, BURSTSIZE, 0);
+ /* 0 - write to MTX memory */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, RNW, 0);
+ /* Begin transfer */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, ENABLE, 1);
+ /* Transfer size */
+ reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, LENGTH,
+ ((fw_buf_size + 7) & (~7)) + 8);
+ VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_CDMAC, reg);
+
+ /* Boot MTX once transfer is done */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PROC_DMAC_CONTROL,
+ BOOT_ON_DMA_CH0, 1);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PROC_DMAC_CONTROL, reg);
+
+ /* Toggle channel 0 usage between MTX and other PVDEC peripherals */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_CONTROL_0,
+ DMAC_CH_SEL_FOR_MTX, 0);
+ VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_CONTROL_0, reg);
+
+ /* Reset DMA channel first */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, SRST, 1);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_EN, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, EN, 0);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, SRST, 0);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+ /*
+ * Setup a Simple DMA for Ch0
+ * Specify the holdover period to use for the channel
+ */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PER_HOLD, PER_HOLD, 7);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PER_HOLD, reg, dma_channel);
+
+ /* Clear the DMAC Stats */
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_IRQ_STAT, 0, dma_channel);
+
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH_ADDR, ADDR,
+ MTX_CORE_MTX_SYSC_CDMAT_OFFSET);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PERIPH_ADDR, reg, dma_channel);
+
+ /* Clear peripheral register address */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, ACC_DEL, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, INCR, DMAC_INCR_OFF);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, BURST, DMAC_BURST_1);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, EXT_BURST, DMAC_EXT_BURST_0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, EXT_SA, 0);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PERIPH, reg, dma_channel);
+
+ /*
+ * Now start the transfer by setting the list enable bit in
+ * the count register
+ */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, TRANSFER_IEN, 1);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, PW, DMAC_PWIDTH_32_BIT);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, DIR, DMAC_MEM_TO_VXD);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, PI, DMAC_INCR_ON);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_FIN_CTL, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_EN, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, ENABLE_2D_MODE, 0);
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT, fw_buf_size);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+ reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, EN, 1);
+ VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+ /* NOTE: The MTX timer starts once DMA boot is triggered */
+ {
+ struct timespec64 host_time;
+ unsigned int mtx_time;
+
+ pvdec_clock_measure(reg_base, &host_time, &mtx_time);
+
+ ret = pvdec_wait_dma_done(dev, reg_base, fw_buf_size, dma_channel);
+ if (!ret) {
+ if (pvdec_clock_calculate(dev, reg_base, &host_time, mtx_time,
+ freq_khz) < 0)
+ dev_dbg(dev, "%s: measure info not available!\n", __func__);
+ }
+ }
+
+ return ret;
+}
+
+static int pvdec_set_clocks(void __iomem *reg_base, unsigned int req_clocks)
+{
+ unsigned int clocks = 0, reg;
+ unsigned int pvdec_timeout;
+
+ /* Turn on core clocks only */
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ PVDEC_REG_MAN_CLK_ENA, 1);
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA, CORE_MAN_CLK_ENA, 1);
+
+ /* Wait until core clocks set */
+ pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+ do {
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA, clocks);
+ udelay(vxd_plat_poll_udelay);
+ reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA);
+ pvdec_timeout--;
+ } while (reg != clocks && pvdec_timeout != 0);
+
+ if (pvdec_timeout == 0)
+ return -EIO;
+
+ /* Write requested clocks */
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA, req_clocks);
+
+ return 0;
+}
+
+static int pvdec_enable_clocks(void __iomem *reg_base)
+{
+ unsigned int clocks = 0;
+
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ PVDEC_REG_MAN_CLK_ENA, 1);
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ CORE_MAN_CLK_ENA, 1);
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ MEM_MAN_CLK_ENA, 1);
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ PROC_MAN_CLK_ENA, 1);
+ clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+ PIXEL_PROC_MAN_CLK_ENA, 1);
+
+ return pvdec_set_clocks(reg_base, clocks);
+}
+
+static int pvdec_disable_clocks(void __iomem *reg_base)
+{
+ return pvdec_set_clocks(reg_base, 0);
+}
+
+static void pvdec_ena_mtx_int(void __iomem *reg_base)
+{
+ unsigned int reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA);
+
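+ /*
+ * Note: the host interrupt enable register is assumed to share the
+ * bit layout of PVDEC_INT_STAT, hence the PVDEC_INT_STAT field
+ * macros are used to set the enable bits below.
+ */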
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_STAT, HOST_PROC_IRQ, 1);
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_STAT, HOST_MMU_FAULT_IRQ, 1);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, reg);
+}
+
+static void pvdec_check_mmu_requests(void __iomem *reg_base,
+ unsigned int mmu_checks,
+ unsigned int max_attempts)
+{
+ unsigned int reg, i, checks = 0;
+
+ for (i = 0; i < max_attempts; i++) {
+ reg = VXD_RD_REG(reg_base,
+ IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ);
+ reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ, TAG_OUTSTANDING);
+ if (reg) {
+ udelay(vxd_plat_poll_udelay);
+ continue;
+ }
+
+ /* Read READ_WORDS_OUTSTANDING */
+ reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_MEM_EXT_OUTSTANDING);
+ reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_MEM_EXT_OUTSTANDING,
+ READ_WORDS);
+ if (!reg) {
+ checks++;
+ if (checks == mmu_checks)
+ break;
+ } else { /* Reset the counter and continue */
+ checks = 0;
+ }
+ }
+
+ if (checks != mmu_checks)
+ pr_warn("Checking for MMU outstanding requests failed!\n");
+}
+
+static int pvdec_reset(void __iomem *reg_base, unsigned char skip_pipe_clocks)
+{
+ unsigned int reg = 0;
+ unsigned char pipe, num_ent_pipes, num_pix_pipes;
+ unsigned int core_id, pvdec_timeout;
+
+ core_id = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_ID);
+
+ num_ent_pipes = VXD_RD_REG_FIELD(core_id, PVDEC_CORE, PVDEC_CORE_ID, ENT_PIPES);
+ num_pix_pipes = VXD_RD_REG_FIELD(core_id, PVDEC_CORE, PVDEC_CORE_ID, PIX_PIPES);
+
+ if (num_pix_pipes == 0 || num_pix_pipes > VXD_MAX_PIPES)
+ return -EINVAL;
+
+ /* Clear interrupt enabled flag */
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, 0);
+
+ /* Clear any pending interrupt flags */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_CLEAR, IRQ_CLEAR, 0xFFFF);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, reg);
+
+ /* Turn all clocks on - don't touch reserved bits! */
+ pvdec_set_clocks(reg_base, 0xFFFF0113);
+
+ if (!skip_pipe_clocks) {
+ for (pipe = 1; pipe <= num_pix_pipes; pipe++) {
+ pvdec_select_pipe(reg_base, pipe);
+ /* Turn all available clocks on - skip reserved bits! */
+ VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, 0xFFBF0FFF);
+ }
+
+ for (pipe = 1; pipe <= num_ent_pipes; pipe++) {
+ pvdec_select_pipe(reg_base, pipe);
+ /* Turn all available clocks on - skip reserved bits! */
+ VXD_WR_REG(reg_base, PVDEC_ENTROPY, ENTROPY_MAN_CLK_ENA, 0x5);
+ }
+ }
+
+ /* 1st MMU outstanding requests check */
+ pvdec_check_mmu_requests(reg_base, 1000, 2000);
+
+ /* Make sure MMU is not under reset MMU_SOFT_RESET -> 0 */
+ pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+ do {
+ reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+ reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET);
+ udelay(vxd_plat_poll_udelay);
+ pvdec_timeout--;
+ } while (reg != 0 && pvdec_timeout != 0);
+
+ if (pvdec_timeout == 0) {
+ pr_err("Waiting for MMU soft reset(1) timed out!\n");
+ pvdec_mtx_status_dump(reg_base, NULL);
+ }
+
+ /* Write 1 to MMU_PAUSE_SET */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_SET, 1);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+ /* 2nd MMU outstanding requests check */
+ pvdec_check_mmu_requests(reg_base, 100, 1000);
+
+ /* Issue software reset for all but MMU/core */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_PIXEL_PROC_SOFT_RST, 0xFF);
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_ENTROPY_SOFT_RST, 0xFF);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, reg);
+
+ VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, 0);
+
+ /* Write 1 to MMU_PAUSE_CLEAR in MMU_CONTROL1 reg */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_CLEAR, 1);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+ /* Confirm MMU_PAUSE_SET is cleared */
+ pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+ do {
+ reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+ reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_SET);
+ udelay(vxd_plat_poll_udelay);
+ pvdec_timeout--;
+ } while (reg != 0 && pvdec_timeout != 0);
+
+ if (pvdec_timeout == 0) {
+ pr_err("Waiting for MMU pause clear timed out!\n");
+ pvdec_mtx_status_dump(reg_base, NULL);
+ return -EIO;
+ }
+
+ /* Issue software reset for MMU */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET, 1);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+ /* Wait until MMU_SOFT_RESET -> 0 */
+ pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+ do {
+ reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+ reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET);
+ udelay(vxd_plat_poll_udelay);
+ pvdec_timeout--;
+ } while (reg != 0 && pvdec_timeout != 0);
+
+ if (pvdec_timeout == 0) {
+ pr_err("Waiting for MMU soft reset(2) timed out!\n");
+ pvdec_mtx_status_dump(reg_base, NULL);
+ }
+
+ /* Issue software reset for entire PVDEC */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_SOFT_RST, 0x1);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, reg);
+
+ /* Waiting for reset bit to be cleared */
+ pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+ do {
+ reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST);
+ reg = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_SOFT_RST);
+ udelay(vxd_plat_poll_udelay);
+ pvdec_timeout--;
+ } while (reg != 0 && pvdec_timeout != 0);
+
+ if (pvdec_timeout == 0) {
+ pr_err("Waiting for PVDEC soft reset timed out!\n");
+ pvdec_mtx_status_dump(reg_base, NULL);
+ return -EIO;
+ }
+
+ /* Clear interrupt enabled flag */
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, 0);
+
+ /* Clear any pending interrupt flags */
+ reg = 0;
+ reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_CLEAR, IRQ_CLEAR, 0xFFFF);
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, reg);
+ return 0;
+}
+
+static int pvdec_get_properties(void __iomem *reg_base,
+ struct vxd_core_props *props)
+{
+ unsigned int major, minor, maint, group_id, core_id;
+ unsigned char num_pix_pipes, pipe;
+
+ if (!props)
+ return -EINVAL;
+
+ /* PVDEC Core Revision Information */
+ props->core_rev = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_REV);
+ major = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MAJOR_REV);
+ minor = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MINOR_REV);
+ maint = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MAINT_REV);
+
+ /* Core ID */
+ props->pvdec_core_id = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_ID);
+ group_id = VXD_RD_REG_FIELD(props->pvdec_core_id, PVDEC_CORE, PVDEC_CORE_ID, GROUP_ID);
+ core_id = VXD_RD_REG_FIELD(props->pvdec_core_id, PVDEC_CORE, PVDEC_CORE_ID, CORE_ID);
+
+ /* Ensure that the core is an IMG Video Decoder (PVDEC). */
+ if (group_id != 3 || core_id != 3) {
+ pr_err("Wrong core revision %d.%d.%d !!!\n", major, minor, maint);
+ return -EIO;
+ }
+
+ props->mmu_config0 = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONFIG0);
+ props->mmu_config1 = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONFIG1);
+
+ num_pix_pipes = VXD_NUM_PIX_PIPES(*props);
+
+ if (unlikely(num_pix_pipes > VXD_MAX_PIPES)) {
+ pr_warn("Too many pipes detected!\n");
+ num_pix_pipes = VXD_MAX_PIPES;
+ }
+
+ for (pipe = 1; pipe <= num_pix_pipes; ++pipe) {
+ pvdec_select_pipe(reg_base, pipe);
+ if (pipe < VXD_MAX_PIPES) {
+ props->pixel_pipe_cfg[pipe - 1] =
+ VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_PIPE_CONFIG);
+ props->pixel_misc_cfg[pipe - 1] =
+ VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_MISC_CONFIG);
+ /*
+ * Detect pipe access problems.
+ * Pipe config shall always indicate
+ * a non-zero value (at least one standard supported)!
+ */
+ if (!props->pixel_pipe_cfg[pipe - 1])
+ pr_warn("Pipe config info is wrong!\n");
+ }
+ }
+
+ pvdec_select_pipe(reg_base, 1);
+ props->pixel_max_frame_cfg = VXD_RD_REG(reg_base, PVDEC_PIXEL, MAX_FRAME_CONFIG);
+
+ {
+ unsigned int fifo_ctrl = VXD_RD_REG(reg_base, PVDEC_CORE, PROC_DBG_FIFO_CTRL0);
+
+ props->dbg_fifo_size = VXD_RD_REG_FIELD(fifo_ctrl,
+ PVDEC_CORE,
+ PROC_DBG_FIFO_CTRL0,
+ PROC_DBG_FIFO_SIZE);
+ }
+
+ return 0;
+}
+
+int vxd_pvdec_init(const void *dev, void __iomem *reg_base)
+{
+ int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: trying to reset VXD, reg base: %p\n", __func__, reg_base);
+#endif
+
+ ret = pvdec_enable_clocks(reg_base);
+ if (ret) {
+ dev_err(dev, "%s: failed to enable clocks!\n", __func__);
+ return ret;
+ }
+
+ ret = pvdec_reset(reg_base, FALSE);
+ if (ret) {
+ dev_err(dev, "%s: VXD reset failed!\n", __func__);
+ return ret;
+ }
+
+ pvdec_ena_mtx_int(reg_base);
+
+ return 0;
+}
+
+/* Send a message <msg_size> dwords long */
+int vxd_pvdec_send_msg(const void *dev,
+ void __iomem *reg_base,
+ unsigned int *msg,
+ unsigned long msg_size,
+ unsigned short msg_id,
+ struct vxd_dev *ctx)
+{
+ int ret, to_mtx_off; /* offset in dwords */
+ unsigned int wr_idx, rd_idx; /* indices in dwords */
+ unsigned long to_mtx_size; /* size in dwords */
+ unsigned int msg_wrd;
+ struct timespec64 time;
+ static int cnt;
+
+ ktime_get_real_ts64(&time);
+
+ ctx->time_fw[cnt].start_time = timespec64_to_ns((const struct timespec64 *)&time);
+ ctx->time_fw[cnt].id = msg_id;
+ cnt++;
+
+ if (cnt >= ARRAY_SIZE(ctx->time_fw))
+ cnt = 0;
+
+ ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to obtain mtx ring buffer config!\n", __func__);
+ return ret;
+ }
+
+ /* populate the size and id fields in the message header */
+ msg_wrd = VXD_RD_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG);
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_SIZE, msg_size);
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_ID, msg_id);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG, msg_wrd);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: [msg out] size: %zu, id: 0x%x, type: 0x%x\n", __func__, msg_size, msg_id,
+ VXD_RD_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_TYPE));
+ dev_dbg(dev, "%s: to_mtx: (%zu @ %d), wr_idx: %d, rd_idx: %d\n",
+ __func__, to_mtx_size, to_mtx_off, wr_idx, rd_idx);
+#endif
+
+ ret = pvdec_check_comms_space(reg_base, msg_size, FALSE);
+ if (ret) {
+ dev_err(dev, "%s: invalid message or not enough space (%d)!\n", __func__, ret);
+ return ret;
+ }
+
+ ret = pvdec_write_vlr(reg_base, msg, msg_size, to_mtx_off + wr_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to write msg to vlr!\n", __func__);
+ return ret;
+ }
+
+ wr_idx += msg_size;
+ if (wr_idx == to_mtx_size)
+ wr_idx = 0;
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET +
+ PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+ pvdec_kick_mtx(reg_base);
+
+ return 0;
+}
+
+/* Fetch the size (in dwords) of the message pending from the MTX */
+int vxd_pvdec_pend_msg_info(const void *dev, void __iomem *reg_base,
+ unsigned long *size,
+ unsigned short *msg_id,
+ unsigned char *not_last_msg)
+{
+ int ret, to_host_off; /* offset in dwords */
+ unsigned int wr_idx, rd_idx; /* indices in dwords */
+ unsigned long to_host_size; /* size in dwords */
+ unsigned int val = 0;
+
+ ret = pvdec_get_to_host_cfg(reg_base, &to_host_size, &to_host_off, &wr_idx, &rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to obtain host ring buffer config!\n", __func__);
+ return ret;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: to host: (%zu @ %d), wr: %u, rd: %u\n", __func__,
+ to_host_size, to_host_off, wr_idx, rd_idx);
+#endif
+
+ if (wr_idx == rd_idx) {
+ *size = 0;
+ *msg_id = 0;
+ return 0;
+ }
+
+ ret = pvdec_read_vlr(reg_base, &val, 1, to_host_off + rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to read first word!\n", __func__);
+ return ret;
+ }
+
+ *size = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_SIZE);
+ *msg_id = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_ID);
+ *not_last_msg = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, NOT_LAST_MSG);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: [msg in] rd_idx: %d, size: %zu, id: 0x%04x, type: 0x%x\n",
+ __func__, rd_idx, *size, *msg_id,
+ VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_TYPE));
+#endif
+
+ return 0;
+}
+
+/*
+ * Receive message from the MTX and place it in a <buf_size> dwords long
+ * buffer. If the provided buffer is too small to hold the message, only part
+ * of it will be placed in the buffer, but the ring buffer read index will
+ * still be advanced so that the message is no longer available.
+ */
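+/*
+ * Illustrative wrap example (not part of the protocol definition): with
+ * to_host_size = 0x40 dwords, rd_idx = 0x3C and msg_size = 8, the first
+ * chunk of 4 dwords is read from the tail of the ring, the remaining
+ * 4 dwords from offset 0, and the read index is updated to
+ * (0x3C + 8) % 0x40 = 4.
+ */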
+int vxd_pvdec_recv_msg(const void *dev, void __iomem *reg_base,
+ unsigned int *buf,
+ unsigned long buf_size,
+ struct vxd_dev *vxd)
+{
+ int ret, to_host_off; /* offset in dwords */
+ unsigned int wr_idx, rd_idx; /* indices in dwords */
+ unsigned long to_host_size, msg_size, to_read; /* sizes in dwords */
+ unsigned int val = 0;
+ struct timespec64 time;
+ unsigned short msg_id;
+ int loop;
+
+ ret = pvdec_get_to_host_cfg(reg_base, &to_host_size,
+ &to_host_off, &wr_idx, &rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to obtain host ring buffer config!\n", __func__);
+ return ret;
+ }
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: to host: (%zu @ %d), wr: %u, rd: %u\n", __func__,
+ to_host_size, to_host_off, wr_idx, rd_idx);
+#endif
+
+ /* Obtain the message size */
+ ret = pvdec_read_vlr(reg_base, &val, 1, to_host_off + rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to read first word!\n", __func__);
+ return ret;
+ }
+ msg_size = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_SIZE);
+
+ to_read = (msg_size > buf_size) ? buf_size : msg_size;
+
+ /* Does the message wrap? */
+ if (to_read + rd_idx > to_host_size) {
+ unsigned long chunk_size = to_host_size - rd_idx;
+
+ ret = pvdec_read_vlr(reg_base, buf, chunk_size, to_host_off + rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to read chunk before wrap!\n", __func__);
+ return ret;
+ }
+ to_read -= chunk_size;
+ buf += chunk_size;
+ rd_idx = 0;
+ msg_size -= chunk_size;
+ }
+
+ /*
+ * If the message wrapped, read the second chunk.
+ * If it didn't, read the first and only chunk.
+ */
+ ret = pvdec_read_vlr(reg_base, buf, to_read, to_host_off + rd_idx);
+ if (ret) {
+ dev_err(dev, "%s: failed to read message from vlr!\n", __func__);
+ return ret;
+ }
+
+ /* Update read index in the ring buffer */
+ rd_idx = (rd_idx + msg_size) % to_host_size;
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET +
+ PVDEC_FW_TO_HOST_RD_IDX_OFFSET, rd_idx);
+
+ msg_id = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_ID);
+
+ ktime_get_real_ts64(&time);
+ for (loop = 0; loop < ARRAY_SIZE(vxd->time_fw); loop++) {
+ if (vxd->time_fw[loop].id == msg_id) {
+ vxd->time_fw[loop].end_time =
+ timespec64_to_ns((const struct timespec64 *)&time);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(dev, "fw decode time is %llu us for msg_id x%0x\n",
+ div_s64(vxd->time_fw[loop].end_time -
+ vxd->time_fw[loop].start_time, 1000), msg_id);
+#endif
+ break;
+ }
+ }
+
+ if (loop == ARRAY_SIZE(vxd->time_fw))
+ dev_err(dev, "fw decode time for msg_id x%0x is not measured\n", msg_id);
+
+ return 0;
+}
+
+int vxd_pvdec_check_fw_status(const void *dev, void __iomem *reg_base)
+{
+ int ret;
+ unsigned int val = 0;
+
+ /* Obtain current fw status */
+ ret = pvdec_read_vlr(reg_base, &val, 1, PVDEC_FW_STATUS_OFFSET);
+ if (ret) {
+ dev_err(dev, "%s: failed to read fw status!\n", __func__);
+ return ret;
+ }
+
+ /* Check for fatal condition */
+ if (val == PVDEC_FW_STATUS_PANIC || val == PVDEC_FW_STATUS_ASSERT ||
+ val == PVDEC_FW_STATUS_SO)
+ return -1;
+
+ return 0;
+}
+
+static int pvdec_send_init_msg(const void *dev,
+ void __iomem *reg_base,
+ struct vxd_ena_params *ena_params)
+{
+ unsigned short msg_id = 0;
+ unsigned int msg[PVDEC_FW_DEVA_INIT_MSG_WRDS] = { 0 }, msg_wrd = 0;
+ struct vxd_dev *vxd;
+ int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: rendec: %d@0x%x, crc: 0x%x\n", __func__,
+ ena_params->rendec_size, ena_params->rendec_addr, ena_params->crc);
+#endif
+
+ vxd = kzalloc(sizeof(*vxd), GFP_KERNEL);
+ if (!vxd)
+ return -ENOMEM;
+
+ /* message type */
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_TYPE,
+ PVDEC_FW_MSG_TYPE_INIT);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG, msg_wrd);
+
+ /* rendec address */
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, RENDEC_ADDR0, ena_params->rendec_addr);
+
+ /* rendec size */
+ msg_wrd = 0;
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, RENDEC_SIZE0,
+ ena_params->rendec_size);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, RENDEC_SIZE0, msg_wrd);
+
+ /* HEVC configuration */
+ msg_wrd = 0;
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT,
+ HEVC_CFG_MAX_H_FOR_PIPE_WAIT, 0xFFFF);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, HEVC_CFG, msg_wrd);
+
+ /* signature select */
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, SIG_SELECT, ena_params->crc);
+
+ /* partial frame notification timer divider */
+ msg_wrd = 0;
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, PFNT_DIV, PVDEC_PFNT_DIV);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, PFNT_DIV, msg_wrd);
+
+ /* firmware watchdog timeout value */
+ msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, FWWDT_MS, ena_params->fwwdt_ms);
+ VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, FWWDT_MS, msg_wrd);
+
+ ret = vxd_pvdec_send_msg(dev, reg_base, msg, ARRAY_SIZE(msg), msg_id, vxd);
+ kfree(vxd);
+
+ return ret;
+}
+
+int vxd_pvdec_ena(const void *dev, void __iomem *reg_base,
+ struct vxd_ena_params *ena_params,
+ struct vxd_fw_hdr *fw_hdr,
+ unsigned int *freq_khz)
+{
+ int ret;
+ unsigned int mtx_ram_size = 0;
+ unsigned char dma_channel = 0;
+
+ ret = vxd_pvdec_init(dev, reg_base);
+ if (ret) {
+ dev_err(dev, "%s: PVDEC init failed!\n", __func__);
+ return ret;
+ }
+
+ ret = pvdec_get_mtx_ram_size(reg_base, &mtx_ram_size);
+ if (ret) {
+ dev_err(dev, "%s: failed to get MTX RAM size!\n", __func__);
+ return ret;
+ }
+
+ if (mtx_ram_size < fw_hdr->core_size) {
+ dev_err(dev, "%s: FW larger than MTX RAM size (%u < %d)!\n",
+ __func__, mtx_ram_size, fw_hdr->core_size);
+ return -EINVAL;
+ }
+
+ /* Apply pre boot settings - if any */
+ pvdec_pre_boot_setup(dev, reg_base, ena_params);
+
+ pvdec_prep_fw_upload(dev, reg_base, ena_params, dma_channel);
+
+ ret = pvdec_start_fw_dma(dev, reg_base, dma_channel, fw_hdr->core_size, freq_khz);
+
+ if (ret) {
+ dev_err(dev, "%s: failed to load FW! (%d)", __func__, ret);
+ pvdec_mtx_status_dump(reg_base, NULL);
+ return ret;
+ }
+
+ /* Apply final settings - if any */
+ pvdec_post_boot_setup(dev, reg_base, *freq_khz);
+
+ ret = pvdec_poll_fw_boot(reg_base, &ena_params->boot_poll);
+ if (ret) {
+ dev_err(dev, "%s: FW failed to boot! (%d)!\n", __func__, ret);
+ return ret;
+ }
+
+ ret = pvdec_send_init_msg(dev, reg_base, ena_params);
+ if (ret) {
+ dev_err(dev, "%s: failed to send init message! (%d)!\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int vxd_pvdec_dis(const void *dev, void __iomem *reg_base)
+{
+ int ret = pvdec_enable_clocks(reg_base);
+
+ if (ret) {
+ dev_err(dev, "%s: failed to enable clocks! (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ ret = pvdec_reset(reg_base, TRUE);
+ if (ret) {
+ dev_err(dev, "%s: VXD reset failed! (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ ret = pvdec_disable_clocks(reg_base);
+ if (ret) {
+ dev_err(dev, "%s: VXD disable clocks failed! (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Invalidate VXD's MMU cache.
+ */
+int vxd_pvdec_mmu_flush(const void *dev, void __iomem *reg_base)
+{
+ unsigned int reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+
+ if (reg == PVDEC_INVALID_HW_STATE) {
+ dev_err(dev, "%s: invalid HW state!\n", __func__);
+ return -EIO;
+ }
+
+ reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_INVALDC, 0xF);
+ VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: device MMU cache invalidated!\n", __func__);
+#endif
+
+ return 0;
+}
+
+irqreturn_t vxd_pvdec_clear_int(void __iomem *reg_base, unsigned int *irq_status)
+{
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int enabled;
+ unsigned int status = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_INT_STAT);
+
+ enabled = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA);
+
+ status &= enabled;
+ /* Store the last irq status */
+ *irq_status |= status;
+
+ if (status & (PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK |
+ PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_MASK))
+ ret = IRQ_WAKE_THREAD;
+
+ /* Disable MMU interrupts - clearing is not enough */
+ if (status & PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK) {
+ enabled &= ~PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK;
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, enabled);
+ }
+
+ VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, status);
+
+ return ret;
+}
+
+/*
+ * Check if there's enough space in comms RAM to submit a message <msg_size>
+ * dwords long. This function also submits a padding message if necessary for
+ * this particular message.
+ *
+ * return 0 if there is enough space,
+ * return -EBUSY if there is not enough space,
+ * return another fault code in case of an error.
+ */
+int vxd_pvdec_msg_fit(const void *dev, void __iomem *reg_base, unsigned long msg_size)
+{
+ int ret = pvdec_check_comms_space(reg_base, msg_size, TRUE);
+
+ /*
+ * In specific environments, when the to_mtx buffer is small and the
+ * messages userspace is submitting are large (e.g. the FWBSP flow), it's
+ * possible that the firmware will consume the padding message sent by
+ * vxd_pvdec_msg_fit() immediately. Retry the check.
+ */
+ if (ret == -EBUSY) {
+ unsigned int flags = VXD_RD_REG_ABS(reg_base,
+ VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET) |
+ PVDEC_FWFLAG_FAKE_COMPLETION;
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "comms space full, asking fw to send empty msg when space is available");
+#endif
+
+ VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET, flags);
+ ret = pvdec_check_comms_space(reg_base, msg_size, FALSE);
+ }
+
+ return ret;
+}
+
+void vxd_pvdec_get_state(const void *dev, void __iomem *reg_base,
+ unsigned int num_pipes,
+ struct vxd_hw_state *state)
+{
+ unsigned char pipe;
+#ifdef DEBUG_DECODER_DRIVER
+ unsigned int state_cfg = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET +
+ PVDEC_FW_STATE_BUF_CFG_OFFSET));
+
+ unsigned short state_size = PVDEC_FW_COM_BUF_SIZE(state_cfg);
+ unsigned short state_off = PVDEC_FW_COM_BUF_OFF(state_cfg);
+
+ /*
+ * The generic fw progress counter
+ * is the first element in the fw state
+ */
+ dev_dbg(dev, "%s: state off: 0x%x, size: 0x%x\n", __func__, state_off, state_size);
+ state->fw_counter = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET + state_off));
+ dev_dbg(dev, "%s: fw_counter: 0x%x\n", __func__, state->fw_counter);
+#endif
+
+ /* We just combine the macroblocks being processed by the HW */
+ for (pipe = 0; pipe < num_pipes; pipe++) {
+ unsigned int p_off = VXD_GET_PIPE_OFF(num_pipes, pipe + 1);
+ unsigned int reg_val;
+
+ /* Front-end */
+ unsigned int reg_off = VXD_GET_REG_OFF(PVDEC_ENTROPY, ENTROPY_LAST_MB);
+
+ state->fe_status[pipe] = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+ reg_off = VXD_GET_REG_OFF(MSVDX_VEC, VEC_ENTDEC_INFORMATION);
+ state->fe_status[pipe] |= VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+ /* Back-end */
+ reg_off = VXD_GET_REG_OFF(PVDEC_VEC_BE, VEC_BE_STATUS);
+ state->be_status[pipe] = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+ reg_off = VXD_GET_REG_OFF(MSVDX_VDMC, VDMC_MACROBLOCK_NUMBER);
+ state->be_status[pipe] |= VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+ /*
+ * Take DMAC channels 2/3 into consideration to cover
+ * parser progress on SR1/2
+ */
+ reg_off = VXD_GET_RPT_REG_OFF(DMAC, DMAC_COUNT, 2);
+ reg_val = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+ state->dmac_status[pipe][0] = VXD_RD_REG_FIELD(reg_val, DMAC, DMAC_COUNT, CNT);
+ reg_off = VXD_GET_RPT_REG_OFF(DMAC, DMAC_COUNT, 3);
+ reg_val = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+ state->dmac_status[pipe][1] = VXD_RD_REG_FIELD(reg_val, DMAC, DMAC_COUNT, CNT);
+ }
+}
+
+/*
+ * Check for the source of the last interrupt.
+ *
+ * return 0 if nothing serious happened,
+ * return -EFAULT if there was a critical interrupt detected.
+ */
+int vxd_pvdec_check_irq(const void *dev, void __iomem *reg_base, unsigned int irq_status)
+{
+ if (irq_status & PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK) {
+ unsigned int status0 =
+ VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_STATUS0);
+ unsigned int status1 =
+ VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_STATUS1);
+
+ unsigned int addr = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+ MMU_STATUS0, MMU_FAULT_ADDR) << 12;
+ unsigned char reason = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+ MMU_STATUS0, MMU_PF_N_RW);
+ unsigned char requestor = VXD_RD_REG_FIELD(status1, IMG_VIDEO_BUS4_MMU,
+ MMU_STATUS1, MMU_FAULT_REQ_ID);
+ unsigned char type = VXD_RD_REG_FIELD(status1, IMG_VIDEO_BUS4_MMU,
+ MMU_STATUS1, MMU_FAULT_RNW);
+ unsigned char secure = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+ MMU_STATUS0, MMU_SECURE_FAULT);
+
+#ifdef DEBUG_DECODER_DRIVER
+ dev_dbg(dev, "%s: MMU Page Fault s0:%08x s1:%08x", __func__, status0, status1);
+#endif
+
+ dev_err(dev, "%s: MMU %s fault from %s while %s @ 0x%08X", __func__,
+ (reason) ? "Page" : "Protection",
+ (requestor & (0x1)) ? "dmac" :
+ (requestor & (0x2)) ? "vec" :
+ (requestor & (0x4)) ? "vdmc" :
+ (requestor & (0x8)) ? "vdeb" : "unknown source",
+ (type) ? "reading" : "writing", addr);
+
+ if (secure)
+ dev_err(dev, "%s: MMU security policy violation detected!", __func__);
+
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * This function enables the clocks, fetches the core properties, stores them
+ * in the <props> structure and DISABLES the clocks. Do not call it while the
+ * hardware is busy!
+ */
+int vxd_pvdec_get_props(const void *dev, void __iomem *reg_base, struct vxd_core_props *props)
+{
+#ifdef DEBUG_DECODER_DRIVER
+ unsigned char num_pix_pipes, pipe;
+#endif
+ int ret = pvdec_enable_clocks(reg_base);
+
+ if (ret) {
+ dev_err(dev, "%s: failed to enable clocks!\n", __func__);
+ return ret;
+ }
+
+ ret = pvdec_get_mtx_ram_size(reg_base, &props->mtx_ram_size);
+ if (ret) {
+ dev_err(dev, "%s: failed to get MTX ram size!\n", __func__);
+ return ret;
+ }
+
+ ret = pvdec_get_properties(reg_base, props);
+ if (ret) {
+ dev_err(dev, "%s: failed to get VXD props!\n", __func__);
+ return ret;
+ }
+
+ if (pvdec_disable_clocks(reg_base))
+ dev_err(dev, "%s: failed to disable clocks!\n", __func__);
+
+#ifdef DEBUG_DECODER_DRIVER
+ num_pix_pipes = VXD_NUM_PIX_PIPES(*props);
+
+ /* Warning already raised in pvdec_get_properties() */
+ if (unlikely(num_pix_pipes > VXD_MAX_PIPES))
+ num_pix_pipes = VXD_MAX_PIPES;
+ dev_dbg(dev, "%s: core_rev: 0x%08x\n", __func__, props->core_rev);
+ dev_dbg(dev, "%s: pvdec_core_id: 0x%08x\n", __func__, props->pvdec_core_id);
+ dev_dbg(dev, "%s: mmu_config0: 0x%08x\n", __func__, props->mmu_config0);
+ dev_dbg(dev, "%s: mmu_config1: 0x%08x\n", __func__, props->mmu_config1);
+ dev_dbg(dev, "%s: mtx_ram_size: %u\n", __func__, props->mtx_ram_size);
+ dev_dbg(dev, "%s: pix max frame: 0x%08x\n", __func__, props->pixel_max_frame_cfg);
+
+ for (pipe = 1; pipe <= num_pix_pipes; ++pipe)
+ dev_dbg(dev, "%s: pipe %u, 0x%08x, misc 0x%08x\n",
+ __func__, pipe, props->pixel_pipe_cfg[pipe - 1],
+ props->pixel_misc_cfg[pipe - 1]);
+ dev_dbg(dev, "%s: dbg fifo size: %u\n", __func__, props->dbg_fifo_size);
+#endif
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_priv.h b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_priv.h
new file mode 100644
index 000000000000..6cc9aef45904
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_priv.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD PVDEC Private header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _VXD_PVDEC_PRIV_H
+#define _VXD_PVDEC_PRIV_H
+#include <linux/interrupt.h>
+
+#include "img_dec_common.h"
+#include "vxd_pvdec_regs.h"
+#include "vxd_dec.h"
+
+#ifdef ERROR_RECOVERY_SIMULATION
+/* kernel object used to debug. Declared in v4l2_int.c */
+extern struct kobject *vxd_dec_kobject;
+extern int disable_fw_irq_value;
+extern int g_module_irq;
+#endif
+
+struct vxd_boot_poll_params {
+ unsigned int msleep_cycles;
+};
+
+struct vxd_ena_params {
+ struct vxd_boot_poll_params boot_poll;
+
+ unsigned long fw_buf_size;
+ /* VXD's MMU virtual address of a firmware buffer. */
+ unsigned int fw_buf_virt_addr;
+ unsigned int ptd; /* Shifted physical address of PTD */
+
+ /* Required for firmware upload via registers. */
+ struct {
+ const unsigned char *buf; /* Firmware blob buffer */
+
+ } regs_data;
+
+ struct {
+ unsigned secure : 1; /* Secure flow indicator. */
+ unsigned wait_dbg_fifo : 1; /*
+ * Indicates that fw shall use
+ * blocking mode when putting logs
+ * into debug fifo
+ */
+ };
+
+ /* Structure containing memory staller configuration */
+ struct {
+ unsigned int *data; /* Configuration data array */
+ unsigned char size; /* Configuration size in dwords */
+
+ } mem_staller;
+
+ unsigned int fwwdt_ms; /* Firmware software watchdog timeout value */
+
+ unsigned int crc; /* HW signatures to be enabled by firmware */
+ unsigned int rendec_addr; /* VXD's virtual address of a rendec buffer */
+ unsigned short rendec_size; /* Size of a rendec buffer in 4K pages */
+};
+
+int vxd_pvdec_init(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_ena(const void *dev, void __iomem *reg_base,
+ struct vxd_ena_params *ena_params, struct vxd_fw_hdr *hdr,
+ unsigned int *freq_khz);
+
+int vxd_pvdec_dis(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_mmu_flush(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_send_msg(const void *dev, void __iomem *reg_base,
+ unsigned int *msg, unsigned long msg_size, unsigned short msg_id,
+ struct vxd_dev *ctx);
+
+int vxd_pvdec_pend_msg_info(const void *dev, void __iomem *reg_base,
+ unsigned long *size, unsigned short *msg_id,
+ unsigned char *not_last_msg);
+
+int vxd_pvdec_recv_msg(const void *dev, void __iomem *reg_base,
+ unsigned int *buf, unsigned long buf_size, struct vxd_dev *ctx);
+
+int vxd_pvdec_check_fw_status(const void *dev, void __iomem *reg_base);
+
+unsigned long vxd_pvdec_peek_mtx_fifo(const void *dev,
+ void __iomem *reg_base);
+
+unsigned long vxd_pvdec_read_mtx_fifo(const void *dev, void __iomem *reg_base,
+ unsigned int *buf, unsigned long size);
+
+irqreturn_t vxd_pvdec_clear_int(void __iomem *reg_base, unsigned int *irq_status);
+
+int vxd_pvdec_check_irq(const void *dev, void __iomem *reg_base,
+ unsigned int irq_status);
+
+int vxd_pvdec_msg_fit(const void *dev, void __iomem *reg_base,
+ unsigned long msg_size);
+
+void vxd_pvdec_get_state(const void *dev, void __iomem *reg_base,
+ unsigned int num_pipes, struct vxd_hw_state *state);
+
+int vxd_pvdec_get_props(const void *dev, void __iomem *reg_base,
+ struct vxd_core_props *props);
+
+unsigned long vxd_pvdec_get_dbg_fifo_size(void __iomem *reg_base);
+
+int vxd_pvdec_dump_mtx_ram(const void *dev, void __iomem *reg_base,
+ unsigned int addr, unsigned int count, unsigned int *buf);
+
+int vxd_pvdec_dump_mtx_status(const void *dev, void __iomem *reg_base,
+ unsigned int *array, unsigned int array_size);
+
+#endif /* _VXD_PVDEC_PRIV_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_regs.h b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_regs.h
new file mode 100644
index 000000000000..2d8cf9ef8df7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_pvdec_regs.h
@@ -0,0 +1,779 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD PVDEC registers header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef VXD_PVDEC_REGS_H
+#define VXD_PVDEC_REGS_H
+
+/* ************************* VXD-specific values *************************** */
+/* 0x10 for code, 0x18 for data. */
+#define PVDEC_MTX_CORE_MEM 0x18
+/* Iteration timeout counter for MTX I/O. */
+#define PVDEC_TIMEOUT_COUNTER 1000
+/* Partial frame notification timer divider. */
+#define PVDEC_PFNT_DIV 0
+/* Value returned by register reads when HW enters invalid state (FPGA) */
+#define PVDEC_INVALID_HW_STATE 0x000dead1
+
+/* Default core clock for pvdec */
+#define PVDEC_CLK_MHZ_DEFAULT 200
+
+/* Offsets of registers groups within VXD. */
+#define PVDEC_PROC_OFFSET 0x0000
+/* 0x34c: Skip DMA registers when running against CSIM (virtual platform) */
+#define PVDEC_PROC_SIZE 0x34C /* 0x3FF */
+
+#define PVDEC_CORE_OFFSET 0x0400
+#define PVDEC_CORE_SIZE 0x3FF
+
+#define MTX_CORE_OFFSET PVDEC_PROC_OFFSET
+#define MTX_CORE_SIZE PVDEC_PROC_SIZE
+
+#define VIDEO_BUS4_MMU_OFFSET 0x1000
+#define VIDEO_BUS4_MMU_SIZE 0x1FF
+
+#define IMG_VIDEO_BUS4_MMU_OFFSET VIDEO_BUS4_MMU_OFFSET
+#define IMG_VIDEO_BUS4_MMU_SIZE VIDEO_BUS4_MMU_SIZE
+
+#define VLR_OFFSET 0x2000
+#define VLR_SIZE 0x1000
+
+/* PVDEC_ENTROPY defined in uapi/vxd_pvdec.h */
+
+#define PVDEC_PIXEL_OFFSET 0x4000
+#define PVDEC_PIXEL_SIZE 0x1FF
+
+/* PVDEC_VEC_BE defined in uapi/vxd_pvdec.h */
+
+/* MSVDX_VEC defined in uapi/vxd_pvdec.h */
+
+#define MSVDX_VDMC_OFFSET 0x6800
+#define MSVDX_VDMC_SIZE 0x7F
+
+#define DMAC_OFFSET 0x6A00
+#define DMAC_SIZE 0x1FF
+
+#define PVDEC_TEST_OFFSET 0xFF00
+#define PVDEC_TEST_SIZE 0xFF
+
+/* *********************** firmware specific values ************************* */
+
+/* layout of COMMS RAM */
+
+#define PVDEC_FW_COMMS_HDR_SIZE 0x38
+
+#define PVDEC_FW_STATUS_OFFSET 0x00
+#define PVDEC_FW_TASK_STATUS_OFFSET 0x04
+#define PVDEC_FW_ID_OFFSET 0x08
+#define PVDEC_FW_MTXPC_OFFSET 0x0c
+#define PVDEC_FW_MSG_COUNTER_OFFSET 0x10
+#define PVDEC_FW_SIGNATURE_OFFSET 0x14
+#define PVDEC_FW_TO_HOST_BUF_CONF_OFFSET 0x18
+#define PVDEC_FW_TO_HOST_RD_IDX_OFFSET 0x1c
+#define PVDEC_FW_TO_HOST_WR_IDX_OFFSET 0x20
+#define PVDEC_FW_TO_MTX_BUF_CONF_OFFSET 0x24
+#define PVDEC_FW_TO_MTX_RD_IDX_OFFSET 0x28
+#define PVDEC_FW_FLAGS_OFFSET 0x2c
+#define PVDEC_FW_TO_MTX_WR_IDX_OFFSET 0x30
+#define PVDEC_FW_STATE_BUF_CFG_OFFSET 0x34
+
+/* firmware status */
+
+#define PVDEC_FW_STATUS_PANIC 0x2
+#define PVDEC_FW_STATUS_ASSERT 0x3
+#define PVDEC_FW_STATUS_SO 0x8
+
+/* firmware flags */
+
+#define PVDEC_FWFLAG_BIG_TO_HOST_BUFFER 0x00000002
+#define PVDEC_FWFLAG_FORCE_FS_FLOW 0x00000004
+#define PVDEC_FWFLAG_DISABLE_WATCHDOGS 0x00000008
+#define PVDEC_FWFLAG_DISABLE_AUTONOMOUS_RESET 0x00000040
+#define PVDEC_FWFLAG_DISABLE_IDLE_GPIO 0x00002000
+#define PVDEC_FWFLAG_ENABLE_ERROR_CONCEALMENT 0x00100000
+#define PVDEC_FWFLAG_DISABLE_GENC_FLUSHING 0x00800000
+#define PVDEC_FWFLAG_FAKE_COMPLETION 0x20000000
+#define PVDEC_FWFLAG_DISABLE_COREWDT_TIMERS 0x01000000
+
+/* firmware message header */
+
+#define PVDEC_FW_DEVA_GENMSG_OFFSET 0
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_ID_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_GENMSG_MSG_ID_SHIFT 16
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_TYPE_MASK 0xFF00
+#define PVDEC_FW_DEVA_GENMSG_MSG_TYPE_SHIFT 8
+
+#define PVDEC_FW_DEVA_GENMSG_NOT_LAST_MSG_MASK 0x80
+#define PVDEC_FW_DEVA_GENMSG_NOT_LAST_MSG_SHIFT 7
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_SIZE_MASK 0x7F
+#define PVDEC_FW_DEVA_GENMSG_MSG_SIZE_SHIFT 0
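+
+/*
+ * Decoding example (illustrative): a header word of 0x00128007 carries
+ * MSG_ID 0x0012, MSG_TYPE 0x80 (PVDEC_FW_MSG_TYPE_INIT), NOT_LAST_MSG 0
+ * and MSG_SIZE 7 dwords.
+ */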
+
+/* firmware init message */
+
+#define PVDEC_FW_DEVA_INIT_MSG_WRDS 9
+
+#define PVDEC_FW_DEVA_INIT_RENDEC_ADDR0_OFFSET 0xC
+
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_OFFSET 0x10
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_SHIFT 0
+
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_OFFSET 0x14
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MAX_H_FOR_PIPE_WAIT_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MAX_H_FOR_PIPE_WAIT_SHIFT 16
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MIN_H_FOR_DUAL_PIPE_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MIN_H_FOR_DUAL_PIPE_SHIFT 0
+
+#define PVDEC_FW_DEVA_INIT_SIG_SELECT_OFFSET 0x18
+
+#define PVDEC_FW_DEVA_INIT_DBG_DELAYS_OFFSET 0x1C
+
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_OFFSET 0x20
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_SHIFT 16
+
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_OFFSET 0x20
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_SHIFT 0
+
+/* firmware message types */
+#define PVDEC_FW_MSG_TYPE_PADDING 0
+#define PVDEC_FW_MSG_TYPE_INIT 0x80
+
+/* miscellaneous */
+
+#define PVDEC_FW_READY_SIG 0xa5a5a5a5
+
+#define PVDEC_FW_COM_BUF_SIZE(cfg) ((cfg) & 0x0000ffff)
+#define PVDEC_FW_COM_BUF_OFF(cfg) (((cfg) & 0xffff0000) >> 16)
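+
+/*
+ * Example (illustrative): PVDEC_FW_COM_BUF_SIZE(0x00400100) == 0x0100
+ * and PVDEC_FW_COM_BUF_OFF(0x00400100) == 0x0040.
+ */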
+
+/*
+ * Timer divider calculation macro.
+ * NOTE: The timer divider is only an 8-bit field,
+ * so we set it up for a 2 MHz timer base to cover a wider
+ * range of core frequencies on real platforms (freq > 255 MHz).
+ */
+#define PVDEC_CALC_TIMER_DIV(val) (((val) - 1) / 2)
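+
+/*
+ * Worked example (illustrative, assuming the MTX timer divides the core
+ * clock by TIMER_DIV + 1): for the default 200 MHz core clock,
+ * PVDEC_CALC_TIMER_DIV(200) = (200 - 1) / 2 = 99, giving a timer base of
+ * 200 MHz / (99 + 1) = 2 MHz.
+ */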
+
+#define MTX_CORE_STATUS_ELEMENTS 4
+
+#define PVDEC_CORE_MEMSTALLER_ELEMENTS 7
+
+/* ********************** PVDEC_CORE registers group ************************ */
+
+/* register PVDEC_SOFT_RESET */
+#define PVDEC_CORE_PVDEC_SOFT_RST_OFFSET 0x0000
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_PIXEL_PROC_SOFT_RST_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_PIXEL_PROC_SOFT_RST_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_ENTROPY_SOFT_RST_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_ENTROPY_SOFT_RST_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_MMU_SOFT_RST_MASK 0x00000002
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_MMU_SOFT_RST_SHIFT 1
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_SOFT_RST_MASK 0x00000001
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_SOFT_RST_SHIFT 0
+
+/* register PVDEC_HOST_INTERRUPT_STATUS */
+#define PVDEC_CORE_PVDEC_INT_STAT_OFFSET 0x0010
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_SYS_WDT_MASK 0x10000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_SYS_WDT_SHIFT 28
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_PROC_IRQ_MASK 0x08000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_PROC_IRQ_SHIFT 27
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_PROC_IRQ_MASK 0x04000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_PROC_IRQ_SHIFT 26
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_HOST_IRQ_MASK 0x02000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_HOST_IRQ_SHIFT 25
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_HOST_IRQ_MASK 0x01000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_HOST_IRQ_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_GPIO_IRQ_MASK 0x00200000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_GPIO_IRQ_SHIFT 21
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_MASK 0x00100000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_SHIFT 20
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK 0x00010000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PIXEL_PROCESSING_IRQ_MASK 0x0000FF00
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PIXEL_PROCESSING_IRQ_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_ENTROPY_PIPE_IRQ_MASK 0x000000FF
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_ENTROPY_PIPE_IRQ_SHIFT 0
+
+/* register PVDEC_INTERRUPT_CLEAR */
+#define PVDEC_CORE_PVDEC_INT_CLEAR_OFFSET 0x0014
+
+#define PVDEC_CORE_PVDEC_INT_CLEAR_IRQ_CLEAR_MASK 0xFFFF0000
+#define PVDEC_CORE_PVDEC_INT_CLEAR_IRQ_CLEAR_SHIFT 16
+
+/* register PVDEC_HOST_INTERRUPT_ENABLE */
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_OFFSET 0x0018
+
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_HOST_IRQ_ENABLE_MASK 0xFFFF0000
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_HOST_IRQ_ENABLE_SHIFT 16
+
+/* Register PVDEC_MAN_CLK_ENABLE */
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_OFFSET 0x0040
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PIXEL_PROC_MAN_CLK_ENA_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PIXEL_PROC_MAN_CLK_ENA_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_ENTROPY_PIPE_MAN_CLK_ENA_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_ENTROPY_PIPE_MAN_CLK_ENA_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_MEM_MAN_CLK_ENA_MASK 0x00000100
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_MEM_MAN_CLK_ENA_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PVDEC_REG_MAN_CLK_ENA_MASK 0x00000010
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PVDEC_REG_MAN_CLK_ENA_SHIFT 4
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PROC_MAN_CLK_ENA_MASK 0x00000002
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PROC_MAN_CLK_ENA_SHIFT 1
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_CORE_MAN_CLK_ENA_MASK 0x00000001
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_CORE_MAN_CLK_ENA_SHIFT 0
+
+/* register PVDEC_HOST_PIPE_SELECT */
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_OFFSET 0x0060
+
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_PIPE_SEL_MASK 0x0000000F
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_PIPE_SEL_SHIFT 0
+
+/* register PROC_DEBUG */
+#define PVDEC_CORE_PROC_DEBUG_OFFSET 0x0100
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_LAST_RAM_BANK_SIZE_MASK 0xFF000000
+#define PVDEC_CORE_PROC_DEBUG_MTX_LAST_RAM_BANK_SIZE_SHIFT 24
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANK_SIZE_MASK 0x000F0000
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANK_SIZE_SHIFT 16
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANKS_MASK 0x00000F00
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANKS_SHIFT 8
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_NEW_REPRESENTATION_MASK 0x00000080
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_NEW_REPRESENTATION_SHIFT 7
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_OUT_MASK 0x00000018
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_OUT_SHIFT 3
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_IS_SLAVE_MASK 0x00000004
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_IS_SLAVE_SHIFT 2
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_IN_MASK 0x00000003
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_IN_SHIFT 0
+
+/* register PROC_DMAC_CONTROL */
+#define PVDEC_CORE_PROC_DMAC_CONTROL_OFFSET 0x0104
+
+#define PVDEC_CORE_PROC_DMAC_CONTROL_BOOT_ON_DMA_CH0_MASK 0x80000000
+#define PVDEC_CORE_PROC_DMAC_CONTROL_BOOT_ON_DMA_CH0_SHIFT 31
+
+/* register PROC_DEBUG_FIFO */
+#define PVDEC_CORE_PROC_DBG_FIFO_OFFSET 0x0108
+
+#define PVDEC_CORE_PROC_DBG_FIFO_PROC_DBG_FIFO_MASK 0xFFFFFFFF
+#define PVDEC_CORE_PROC_DBG_FIFO_PROC_DBG_FIFO_SHIFT 0
+
+/* register PROC_DEBUG_FIFO_CTRL_0 */
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_OFFSET 0x010C
+
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_COUNT_MASK 0xFFFF0000
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_COUNT_SHIFT 16
+
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_SIZE_MASK 0x0000FFFF
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_SIZE_SHIFT 0
+
+/* register PVDEC_CORE_ID */
+#define PVDEC_CORE_PVDEC_CORE_ID_OFFSET 0x0230
+
+#define PVDEC_CORE_PVDEC_CORE_ID_GROUP_ID_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_CORE_ID_GROUP_ID_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_CORE_ID_CORE_ID_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_CORE_ID_CORE_ID_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_CORE_ID_PVDEC_CORE_CONFIG_MASK 0x0000FFFF
+#define PVDEC_CORE_PVDEC_CORE_ID_PVDEC_CORE_CONFIG_SHIFT 0
+
+#define PVDEC_CORE_PVDEC_CORE_ID_ENT_PIPES_MASK 0x0000000F
+#define PVDEC_CORE_PVDEC_CORE_ID_ENT_PIPES_SHIFT 0
+
+#define PVDEC_CORE_PVDEC_CORE_ID_PIX_PIPES_MASK 0x000000F0
+#define PVDEC_CORE_PVDEC_CORE_ID_PIX_PIPES_SHIFT 4
+
+/* register PVDEC_CORE_REV */
+#define PVDEC_CORE_PVDEC_CORE_REV_OFFSET 0x0240
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_DESIGNER_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_DESIGNER_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAJOR_REV_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAJOR_REV_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MINOR_REV_MASK 0x0000FF00
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MINOR_REV_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAINT_REV_MASK 0x000000FF
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAINT_REV_SHIFT 0
+
+/* *********************** MTX_CORE registers group ************************* */
+
+/* register MTX_ENABLE */
+#define MTX_CORE_MTX_ENABLE_OFFSET 0x0000
+
+/* register MTX_SYSC_TXTIMER. Note: it's not defined in PVDEC TRM. */
+#define MTX_CORE_MTX_SYSC_TXTIMER_OFFSET 0x0010
+
+/* register MTX_KICKI */
+#define MTX_CORE_MTX_KICKI_OFFSET 0x0088
+
+#define MTX_CORE_MTX_KICKI_MTX_KICKI_MASK 0x0000FFFF
+#define MTX_CORE_MTX_KICKI_MTX_KICKI_SHIFT 0
+
+/* register MTX_FAULT0 */
+#define MTX_CORE_MTX_FAULT0_OFFSET 0x0090
+
+/* register MTX_REGISTER_READ_WRITE_DATA */
+#define MTX_CORE_MTX_REG_READ_WRITE_DATA_OFFSET 0x00F8
+
+/* register MTX_REGISTER_READ_WRITE_REQUEST */
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_OFFSET 0x00FC
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_DREADY_SHIFT 31
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RNW_SHIFT 16
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RSPECIFIER_MASK 0x00000070
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RSPECIFIER_SHIFT 4
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_USPECIFIER_MASK 0x0000000F
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_USPECIFIER_SHIFT 0
+
+/* register MTX_RAM_ACCESS_DATA_EXCHANGE */
+#define MTX_CORE_MTX_RAM_ACCESS_DATA_EXCHANGE_OFFSET 0x0100
+
+/* register MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CORE_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET 0x0104
+
+/* register MTX_RAM_ACCESS_CONTROL */
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_OFFSET 0x0108
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK 0x0FF00000
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT 20
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK 0x000FFFFC
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT 2
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK 0x00000002
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT 1
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK 0x00000001
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT 0
+
+/* register MTX_RAM_ACCESS_STATUS */
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_OFFSET 0x010C
+
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK 0x00000001
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_SHIFT 0
+
+/* register MTX_SOFT_RESET */
+#define MTX_CORE_MTX_SOFT_RESET_OFFSET 0x0200
+
+#define MTX_CORE_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
+#define MTX_CORE_MTX_SOFT_RESET_MTX_RESET_SHIFT 0
+
+/* register MTX_SYSC_TIMERDIV */
+#define MTX_CORE_MTX_SYSC_TIMERDIV_OFFSET 0x0208
+
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_EN_MASK 0x00010000
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_EN_SHIFT 16
+
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_DIV_MASK 0x000000FF
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_DIV_SHIFT 0
+
+/* register MTX_SYSC_CDMAA */
+#define MTX_CORE_MTX_SYSC_CDMAA_OFFSET 0x0344
+
+#define MTX_CORE_MTX_SYSC_CDMAA_CDMAA_ADDRESS_MASK 0x03FFFFFC
+#define MTX_CORE_MTX_SYSC_CDMAA_CDMAA_ADDRESS_SHIFT 2
+
+/* register MTX_SYSC_CDMAC */
+#define MTX_CORE_MTX_SYSC_CDMAC_OFFSET 0x0340
+
+#define MTX_CORE_MTX_SYSC_CDMAC_BURSTSIZE_MASK 0x07000000
+#define MTX_CORE_MTX_SYSC_CDMAC_BURSTSIZE_SHIFT 24
+
+#define MTX_CORE_MTX_SYSC_CDMAC_RNW_MASK 0x00020000
+#define MTX_CORE_MTX_SYSC_CDMAC_RNW_SHIFT 17
+
+#define MTX_CORE_MTX_SYSC_CDMAC_ENABLE_MASK 0x00010000
+#define MTX_CORE_MTX_SYSC_CDMAC_ENABLE_SHIFT 16
+
+#define MTX_CORE_MTX_SYSC_CDMAC_LENGTH_MASK 0x0000FFFF
+#define MTX_CORE_MTX_SYSC_CDMAC_LENGTH_SHIFT 0
+
+/* register MTX_SYSC_CDMAT */
+#define MTX_CORE_MTX_SYSC_CDMAT_OFFSET 0x0350
+
+/* ****************** IMG_VIDEO_BUS4_MMU registers group ******************** */
+
+/* register MMU_CONTROL0 */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_USE_TILE_STRIDE_PER_CTX_MASK 0x00010000
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_USE_TILE_STRIDE_PER_CTX_SHIFT 16
+
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_ENA_EXT_ADDR_MASK 0x00000010
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_ENA_EXT_ADDR_SHIFT 4
+
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_UPPER_ADDR_FIXED_MASK 0x00FF0000
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_UPPER_ADDR_FIXED_SHIFT 16
+
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_EXT_OUTSTANDING_READ_WORDS_MASK 0x0000FFFF
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_EXT_OUTSTANDING_READ_WORDS_SHIFT 0
+
+/* *************************** MMU-related values ************************** */
+
+/* MMU page size */
+
+enum {
+ VXD_MMU_SOFT_PAGE_SIZE_PAGE_64K = 0x4,
+ VXD_MMU_SOFT_PAGE_SIZE_PAGE_16K = 0x2,
+ VXD_MMU_SOFT_PAGE_SIZE_PAGE_4K = 0x0,
+ VXD_MMU_SOFT_PAGE_SIZE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* MMU PTD entry flags */
+enum {
+ VXD_MMU_PTD_FLAG_NONE = 0x0,
+ VXD_MMU_PTD_FLAG_VALID = 0x1,
+ VXD_MMU_PTD_FLAG_WRITE_ONLY = 0x2,
+ VXD_MMU_PTD_FLAG_READ_ONLY = 0x4,
+ VXD_MMU_PTD_FLAG_CACHE_COHERENCY = 0x8,
+ VXD_MMU_PTD_FLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* ********************* PVDEC_PIXEL registers group *********************** */
+
+/* register PVDEC_PIXEL_PIXEL_CONTROL_0 */
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_OFFSET 0x0004
+
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_DMAC_CH_SEL_FOR_MTX_MASK 0x0000000E
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_DMAC_CH_SEL_FOR_MTX_SHIFT 1
+
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_PROC_DMAC_CH0_SEL_MASK 0x00000001
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_PROC_DMAC_CH0_SEL_SHIFT 0
+
+/* register PVDEC_PIXEL_MAN_CLK_ENABLE */
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_OFFSET 0x0020
+
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_REG_MAN_CLK_ENA_MASK 0x00020000
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_REG_MAN_CLK_ENA_SHIFT 17
+
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_DMAC_MAN_CLK_ENA_MASK 0x00010000
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_DMAC_MAN_CLK_ENA_SHIFT 16
+
+/* register PIXEL_PIPE_CONFIG */
+#define PVDEC_PIXEL_PIXEL_PIPE_CONFIG_OFFSET 0x00C0
+
+/* register PIXEL_MISC_CONFIG */
+#define PVDEC_PIXEL_PIXEL_MISC_CONFIG_OFFSET 0x00C4
+
+/* register MAX_FRAME_CONFIG */
+#define PVDEC_PIXEL_MAX_FRAME_CONFIG_OFFSET 0x00C8
+
+/* ********************* PVDEC_ENTROPY registers group ********************* */
+
+/* Register PVDEC_ENTROPY_MAN_CLK_ENABLE */
+#define PVDEC_ENTROPY_ENTROPY_MAN_CLK_ENA_OFFSET 0x0020
+
+/* Register PVDEC_ENTROPY_LAST_LAST_MB */
+#define PVDEC_ENTROPY_ENTROPY_LAST_MB_OFFSET 0x00BC
+
+/* ********************* PVDEC_VEC_BE registers group ********************** */
+
+/* Register PVDEC_VEC_BE_VEC_BE_STATUS */
+#define PVDEC_VEC_BE_VEC_BE_STATUS_OFFSET 0x0018
+
+/* ********************* MSVDX_VEC registers group ************************* */
+
+/* Register MSVDX_VEC_VEC_ENTDEC_INFORMATION */
+#define MSVDX_VEC_VEC_ENTDEC_INFORMATION_OFFSET 0x00AC
+
+/* ********************* MSVDX_VDMC registers group ************************ */
+
+/* Register MSVDX_VDMC_VDMC_MACROBLOCK_NUMBER */
+#define MSVDX_VDMC_VDMC_MACROBLOCK_NUMBER_OFFSET 0x0048
+
+/* ************************** DMAC registers group ************************* */
+
+/* register DMAC_SETUP */
+#define DMAC_DMAC_SETUP_OFFSET 0x0000
+#define DMAC_DMAC_SETUP_STRIDE 32
+#define DMAC_DMAC_SETUP_NO_ENTRIES 6
+
+/* register DMAC_COUNT */
+#define DMAC_DMAC_COUNT_OFFSET 0x0004
+#define DMAC_DMAC_COUNT_STRIDE 32
+#define DMAC_DMAC_COUNT_NO_ENTRIES 6
+
+#define DMAC_DMAC_COUNT_LIST_IEN_MASK 0x80000000
+#define DMAC_DMAC_COUNT_LIST_IEN_SHIFT 31
+
+#define DMAC_DMAC_COUNT_BSWAP_MASK 0x40000000
+#define DMAC_DMAC_COUNT_BSWAP_SHIFT 30
+
+#define DMAC_DMAC_COUNT_TRANSFER_IEN_MASK 0x20000000
+#define DMAC_DMAC_COUNT_TRANSFER_IEN_SHIFT 29
+
+#define DMAC_DMAC_COUNT_PW_MASK 0x18000000
+#define DMAC_DMAC_COUNT_PW_SHIFT 27
+
+#define DMAC_DMAC_COUNT_DIR_MASK 0x04000000
+#define DMAC_DMAC_COUNT_DIR_SHIFT 26
+
+#define DMAC_DMAC_COUNT_PI_MASK 0x03000000
+#define DMAC_DMAC_COUNT_PI_SHIFT 24
+
+#define DMAC_DMAC_COUNT_LIST_FIN_CTL_MASK 0x00400000
+#define DMAC_DMAC_COUNT_LIST_FIN_CTL_SHIFT 22
+
+#define DMAC_DMAC_COUNT_DREQ_MASK 0x00100000
+#define DMAC_DMAC_COUNT_DREQ_SHIFT 20
+
+#define DMAC_DMAC_COUNT_SRST_MASK 0x00080000
+#define DMAC_DMAC_COUNT_SRST_SHIFT 19
+
+#define DMAC_DMAC_COUNT_LIST_EN_MASK 0x00040000
+#define DMAC_DMAC_COUNT_LIST_EN_SHIFT 18
+
+#define DMAC_DMAC_COUNT_ENABLE_2D_MODE_MASK 0x00020000
+#define DMAC_DMAC_COUNT_ENABLE_2D_MODE_SHIFT 17
+
+#define DMAC_DMAC_COUNT_EN_MASK 0x00010000
+#define DMAC_DMAC_COUNT_EN_SHIFT 16
+
+#define DMAC_DMAC_COUNT_CNT_MASK 0x0000FFFF
+#define DMAC_DMAC_COUNT_CNT_SHIFT 0
+
+/* register DMAC_PERIPH */
+#define DMAC_DMAC_PERIPH_OFFSET 0x0008
+#define DMAC_DMAC_PERIPH_STRIDE 32
+#define DMAC_DMAC_PERIPH_NO_ENTRIES 6
+
+#define DMAC_DMAC_PERIPH_ACC_DEL_MASK 0xE0000000
+#define DMAC_DMAC_PERIPH_ACC_DEL_SHIFT 29
+
+#define DMAC_DMAC_PERIPH_INCR_MASK 0x08000000
+#define DMAC_DMAC_PERIPH_INCR_SHIFT 27
+
+#define DMAC_DMAC_PERIPH_BURST_MASK 0x07000000
+#define DMAC_DMAC_PERIPH_BURST_SHIFT 24
+
+#define DMAC_DMAC_PERIPH_EXT_BURST_MASK 0x000F0000
+#define DMAC_DMAC_PERIPH_EXT_BURST_SHIFT 16
+
+#define DMAC_DMAC_PERIPH_EXT_SA_MASK 0x0000000F
+#define DMAC_DMAC_PERIPH_EXT_SA_SHIFT 0
+
+/* register DMAC_IRQ_STAT */
+#define DMAC_DMAC_IRQ_STAT_OFFSET 0x000C
+#define DMAC_DMAC_IRQ_STAT_STRIDE 32
+#define DMAC_DMAC_IRQ_STAT_NO_ENTRIES 6
+
+/* register DMAC_PERIPHERAL_ADDR */
+#define DMAC_DMAC_PERIPH_ADDR_OFFSET 0x0014
+#define DMAC_DMAC_PERIPH_ADDR_STRIDE 32
+#define DMAC_DMAC_PERIPH_ADDR_NO_ENTRIES 6
+
+#define DMAC_DMAC_PERIPH_ADDR_ADDR_MASK 0x007FFFFF
+#define DMAC_DMAC_PERIPH_ADDR_ADDR_SHIFT 0
+
+/* register DMAC_PER_HOLD */
+#define DMAC_DMAC_PER_HOLD_OFFSET 0x0018
+#define DMAC_DMAC_PER_HOLD_STRIDE 32
+#define DMAC_DMAC_PER_HOLD_NO_ENTRIES 6
+
+#define DMAC_DMAC_PER_HOLD_PER_HOLD_MASK 0x0000001F
+#define DMAC_DMAC_PER_HOLD_PER_HOLD_SHIFT 0
+
+#define DMAC_DMAC_SOFT_RESET_OFFSET 0x00C0
+
+/* ************************** DMAC-related values *************************** */
+
+/*
+ * This type defines whether the peripheral address is static or
+ * auto-incremented. (see the TRM "Transfer Sequence Linked-list - INCR")
+ */
+enum {
+ DMAC_INCR_OFF = 0, /* No action, no increment. */
+ DMAC_INCR_ON = 1, /* Generate address increment. */
+ DMAC_INCR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Burst size settings (see the TRM "Transfer Sequence Linked-list - BURST"). */
+enum {
+ DMAC_BURST_0 = 0x0, /* burst size of 0 */
+ DMAC_BURST_1 = 0x1, /* burst size of 1 */
+ DMAC_BURST_2 = 0x2, /* burst size of 2 */
+ DMAC_BURST_3 = 0x3, /* burst size of 3 */
+ DMAC_BURST_4 = 0x4, /* burst size of 4 */
+ DMAC_BURST_5 = 0x5, /* burst size of 5 */
+ DMAC_BURST_6 = 0x6, /* burst size of 6 */
+ DMAC_BURST_7 = 0x7, /* burst size of 7 */
+ DMAC_BURST_8 = 0x8, /* burst size of 8 */
+ DMAC_BURST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Extended burst size settings (see TRM "Transfer Sequence Linked-list -
+ * EXT_BURST").
+ */
+enum {
+ DMAC_EXT_BURST_0 = 0x0, /* no extension */
+ DMAC_EXT_BURST_1 = 0x1, /* extension of 8 */
+ DMAC_EXT_BURST_2 = 0x2, /* extension of 16 */
+ DMAC_EXT_BURST_3 = 0x3, /* extension of 24 */
+ DMAC_EXT_BURST_4 = 0x4, /* extension of 32 */
+ DMAC_EXT_BURST_5 = 0x5, /* extension of 40 */
+ DMAC_EXT_BURST_6 = 0x6, /* extension of 48 */
+ DMAC_EXT_BURST_7 = 0x7, /* extension of 56 */
+ DMAC_EXT_BURST_8 = 0x8, /* extension of 64 */
+ DMAC_EXT_BURST_9 = 0x9, /* extension of 72 */
+ DMAC_EXT_BURST_10 = 0xa, /* extension of 80 */
+ DMAC_EXT_BURST_11 = 0xb, /* extension of 88 */
+ DMAC_EXT_BURST_12 = 0xc, /* extension of 96 */
+ DMAC_EXT_BURST_13 = 0xd, /* extension of 104 */
+ DMAC_EXT_BURST_14 = 0xe, /* extension of 112 */
+ DMAC_EXT_BURST_15 = 0xf, /* extension of 120 */
+ DMAC_EXT_BURST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Transfer direction. */
+enum {
+ DMAC_MEM_TO_VXD = 0x0,
+ DMAC_VXD_TO_MEM = 0x1,
+ DMAC_VXD_TO_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* How much to increment the peripheral address. */
+enum {
+ DMAC_PI_1 = 0x2, /* increment by 1 */
+ DMAC_PI_2 = 0x1, /* increment by 2 */
+ DMAC_PI_4 = 0x0, /* increment by 4 */
+ DMAC_PI_FORCE32BITS = 0x7FFFFFFFU
+};
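+
+/*
+ * Note: the PI field value is an encoding, not the increment amount itself
+ * (an increment of 4 is encoded as 0x0, an increment of 1 as 0x2).
+ */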
+
+/* Peripheral width settings (see TRM "Transfer Sequence Linked-list - PW"). */
+enum {
+ DMAC_PWIDTH_32_BIT = 0x0, /* Peripheral width 32-bit. */
+ DMAC_PWIDTH_16_BIT = 0x1, /* Peripheral width 16-bit. */
+ DMAC_PWIDTH_8_BIT = 0x2, /* Peripheral width 8-bit. */
+ DMAC_PWIDTH_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* ******************************* macros ********************************** */
+
+#ifdef PVDEC_SINGLETHREADED_IO
+/* Write to the register */
+#define VXD_WR_REG_ABS(base, addr, val) \
+	({ spin_lock_irqsave(&pvdec_irq_lock, pvdec_irq_flags); \
+	   iowrite32((val), (addr) + (base)); \
+	   spin_unlock_irqrestore(&pvdec_irq_lock, pvdec_irq_flags); })
+
+/* Read the register */
+#define VXD_RD_REG_ABS(base, addr) \
+	({ unsigned int reg; \
+	   spin_lock_irqsave(&pvdec_irq_lock, pvdec_irq_flags); \
+	   reg = ioread32((addr) + (base)); \
+	   spin_unlock_irqrestore(&pvdec_irq_lock, pvdec_irq_flags); \
+	   reg; })
+#else /* ndef PVDEC_SINGLETHREADED_IO */
+
+/* Write to the register */
+#define VXD_WR_REG_ABS(base, addr, val) \
+ (iowrite32((val), (addr) + (base)))
+
+/* Read the register */
+#define VXD_RD_REG_ABS(base, addr) \
+ (ioread32((addr) + (base)))
+
+#endif
+
+/* Get offset of a register */
+#define VXD_GET_REG_OFF(group, reg) \
+ (group ## _OFFSET + group ## _ ## reg ## _OFFSET)
+
+/* Get offset of a repeated register */
+#define VXD_GET_RPT_REG_OFF(group, reg, index) \
+ (VXD_GET_REG_OFF(group, reg) + ((index) * group ## _ ## reg ## _STRIDE))
+
+/* Extract field from a register */
+#define VXD_RD_REG_FIELD(val, group, reg, field) \
+ (((val) & group ## _ ## reg ## _ ## field ## _MASK) >> \
+ group ## _ ## reg ## _ ## field ## _SHIFT)
+
+/* Shift provided value by number of bits relevant to register specification */
+#define VXD_ENC_REG_FIELD(group, reg, field, val) \
+ ((unsigned int)(val) << (group ## _ ## reg ## _ ## field ## _SHIFT))
+
+/* Update the field in a register */
+#define VXD_WR_REG_FIELD(reg_val, group, reg, field, val) \
+ (((reg_val) & ~(group ## _ ## reg ## _ ## field ## _MASK)) | \
+ (VXD_ENC_REG_FIELD(group, reg, field, val) & \
+ (group ## _ ## reg ## _ ## field ## _MASK)))
+
+/* Write to a register */
+#define VXD_WR_REG(base, group, reg, val) \
+ VXD_WR_REG_ABS(base, VXD_GET_REG_OFF(group, reg), val)
+
+/* Write to a repeated register */
+#define VXD_WR_RPT_REG(base, group, reg, val, index) \
+ VXD_WR_REG_ABS(base, VXD_GET_RPT_REG_OFF(group, reg, index), val)
+
+/* Read a register */
+#define VXD_RD_REG(base, group, reg) \
+ VXD_RD_REG_ABS(base, VXD_GET_REG_OFF(group, reg))
+
+/* Read a repeated register */
+#define VXD_RD_RPT_REG(base, group, reg, index) \
+ VXD_RD_REG_ABS(base, VXD_GET_RPT_REG_OFF(group, reg, index))
+
+/* Insert word into the message buffer */
+#define VXD_WR_MSG_WRD(buf, msg_type, wrd, val) \
+ (((unsigned int *)buf)[(msg_type ## _ ## wrd ## _OFFSET) / sizeof(unsigned int)] = \
+ val)
+
+/* Get a word from the message buffer */
+#define VXD_RD_MSG_WRD(buf, msg_type, wrd) \
+ (((unsigned int *)buf)[(msg_type ## _ ## wrd ## _OFFSET) / sizeof(unsigned int)])
+
+/* Get offset for pipe register */
+#define VXD_GET_PIPE_OFF(num_pipes, pipe) \
+ ((num_pipes) > 1 ? ((pipe) << 16) : 0)
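+
+/*
+ * Illustrative use of the accessors above (a sketch, not driver code; it
+ * assumes the DMAC group base offset, DMAC_OFFSET, is defined earlier in
+ * this header): rewrite the CNT field of DMA channel @chan while keeping
+ * the other DMAC_COUNT fields intact:
+ *
+ *	unsigned int v = VXD_RD_RPT_REG(base, DMAC, DMAC_COUNT, chan);
+ *
+ *	v = VXD_WR_REG_FIELD(v, DMAC, DMAC_COUNT, CNT, new_cnt);
+ *	VXD_WR_RPT_REG(base, DMAC, DMAC_COUNT, v, chan);
+ */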
+
+#endif /* VXD_PVDEC_REGS_H */
diff --git a/drivers/media/platform/vxe-vxd/decoder/vxd_v4l2.c b/drivers/media/platform/vxe-vxd/decoder/vxd_v4l2.c
new file mode 100644
index 000000000000..3bf46aec2f22
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/decoder/vxd_v4l2.c
@@ -0,0 +1,2232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC V4L2 Interface function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Angela Stegmaier <angelabaker@ti.com>
+ * David Huang <d-huang@ti.com>
+ *
+ * Re-written for upstreaming
+ * Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+
+#ifdef ERROR_RECOVERY_SIMULATION
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/types.h>
+#endif
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-sg.h>
+#ifdef CAPTURE_CONTIG_ALLOC
+#include <media/videobuf2-dma-contig.h>
+#endif
+
+#include "core.h"
+#include "h264fw_data.h"
+#include "hevcfw_data.h"
+#include "img_dec_common.h"
+#include "vxd_pvdec_priv.h"
+#include "vxd_dec.h"
+#include "img_errors.h"
+
+#define VXD_DEC_SPIN_LOCK_NAME "vxd-dec"
+#define IMG_VXD_DEC_MODULE_NAME "vxd-dec"
+
+#ifdef ERROR_RECOVERY_SIMULATION
+/* This code is compiled in only when the ERROR_RECOVERY_SIMULATION debug flag is set */
+/*
+ * vxd decoder kernel object to create sysfs to debug error recovery and firmware
+ * watchdog timer. This kernel object will create a directory under /sys/kernel,
+ * containing two files fw_error_value and disable_fw_irq.
+ */
+struct kobject *vxd_dec_kobject;
+
+/* fw_error_value is the variable used to handle fw_error_attr */
+int fw_error_value = VDEC_ERROR_MAX;
+
+/* irq for the module, stored globally so it can be accessed from sysfs */
+int g_module_irq;
+
+/*
+ * fw_error_attr. Application can set the value of this attribute, based on the
+ * firmware error that needs to be reproduced.
+ */
+struct kobj_attribute fw_error_attr =
+ __ATTR(fw_error_value, 0660, vxd_sysfs_show, vxd_sysfs_store);
+
+/* disable_fw_irq_value is the variable used to handle disable_fw_irq_attr */
+int disable_fw_irq_value;
+
+/*
+ * disable_fw_irq_attr. Application can set the value of this attribute. 1 to
+ * disable irq. 0 to enable irq.
+ */
+struct kobj_attribute disable_fw_irq_attr =
+ __ATTR(disable_fw_irq_value, 0660, vxd_sysfs_show, vxd_sysfs_store);
+
+/*
+ * Group attribute so that we can create and destroy all of them at once.
+ */
+struct attribute *attrs[] = {
+ &fw_error_attr.attr,
+ &disable_fw_irq_attr.attr,
+ NULL, /* Terminate list of attributes with NULL */
+};
+
+/*
+ * An unnamed attribute group will put all of the attributes directly in
+ * the kobject directory. If we specify a name, a sub directory will be
+ * created for the attributes with the directory being the name of the
+ * attribute group
+ */
+struct attribute_group attr_group = {
+ .attrs = attrs,
+};
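+
+/*
+ * Registration sketch (the real call site is in the probe path, not shown
+ * here); with the standard kobject/sysfs API this amounts to:
+ *
+ *	vxd_dec_kobject = kobject_create_and_add("vxd_dec", kernel_kobj);
+ *	if (vxd_dec_kobject)
+ *		WARN_ON(sysfs_create_group(vxd_dec_kobject, &attr_group));
+ */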
+
+#endif
+
+static struct heap_config vxd_dec_heap_configs[] = {
+ {
+ .type = MEM_HEAP_TYPE_UNIFIED,
+ .options.unified = {
+ .gfp_type = __GFP_DMA32 | __GFP_ZERO,
+ },
+ .to_dev_addr = NULL,
+ },
+};
+
+static struct vxd_dec_fmt vxd_dec_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+ .pixfmt = IMG_PIXFMT_420PL12YUV8,
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_420,
+ .size_num = 3,
+ .size_den = 2,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+ .pixfmt = IMG_PIXFMT_422PL12YUV8,
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_422,
+ .size_num = 2,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_TI1210,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+ .pixfmt = IMG_PIXFMT_420PL12YUV10_MSB,
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_420,
+ .size_num = 3,
+ .size_den = 2,
+ .bytes_pp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_TI1610,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+ .pixfmt = IMG_PIXFMT_422PL12YUV10_MSB,
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_422,
+ .size_num = 2,
+ .size_den = 1,
+ .bytes_pp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_OUTPUT,
+ .std = VDEC_STD_H264,
+ .pixfmt = IMG_PIXFMT_UNDEFINED,
+ .interleave = PIXEL_INVALID_CI,
+ .idc = PIXEL_FORMAT_INVALID,
+ .size_num = 1,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_OUTPUT,
+ .std = VDEC_STD_HEVC,
+ .pixfmt = IMG_PIXFMT_UNDEFINED,
+ .interleave = PIXEL_INVALID_CI,
+ .idc = PIXEL_FORMAT_INVALID,
+ .size_num = 1,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .num_planes = 1,
+ .type = IMG_DEC_FMT_TYPE_OUTPUT,
+ .std = VDEC_STD_JPEG,
+ .pixfmt = IMG_PIXFMT_UNDEFINED,
+ .interleave = PIXEL_INVALID_CI,
+ .idc = PIXEL_FORMAT_INVALID,
+ .size_num = 1,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .num_planes = 3,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+		.pixfmt = 86031, /* raw pixel format code: 3-plane 4:2:0 */
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_420,
+ .size_num = 2,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .num_planes = 3,
+ .type = IMG_DEC_FMT_TYPE_CAPTURE,
+ .std = VDEC_STD_UNDEFINED,
+		.pixfmt = 81935, /* raw pixel format code: 3-plane 4:2:2 */
+ .interleave = PIXEL_UV_ORDER,
+ .idc = PIXEL_FORMAT_422,
+ .size_num = 3,
+ .size_den = 1,
+ .bytes_pp = 1,
+ },
+};
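+
+/*
+ * In the table above, size_num/size_den express the total frame payload as a
+ * fraction of width * height (e.g. 3/2 for NV12) and bytes_pp is the byte
+ * width of one sample; get_stride() and get_sizeimage() below consume them.
+ */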
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ssize_t vxd_sysfs_show(struct kobject *vxd_dec_kobject,
+ struct kobj_attribute *attr, char *buf)
+{
+ int var = 0;
+
+	if (strcmp(attr->attr.name, "fw_error_value") == 0)
+		var = fw_error_value;
+	else
+		var = disable_fw_irq_value;
+
+ return sprintf(buf, "%d\n", var);
+}
+
+ssize_t vxd_sysfs_store(struct kobject *vxd_dec_kobject,
+ struct kobj_attribute *attr,
+			const char *buf, size_t count)
+{
+	int var = 0;
+
+	if (sscanf(buf, "%d", &var) != 1)
+		return -EINVAL;
+
+ if (strcmp(attr->attr.name, "fw_error_value") == 0) {
+ fw_error_value = var;
+ } else {
+ disable_fw_irq_value = var;
+ /*
+ * if disable_fw_irq_value is not zero, disable the irq to reproduce
+ * firmware non responsiveness in vxd_worker.
+ */
+ if (disable_fw_irq_value != 0) {
+ /* just ignore the irq */
+ disable_irq(g_module_irq);
+ }
+ }
+	return count;
+}
+#endif
+
+static struct vxd_dec_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vxd_dec_ctx, fh);
+}
+
+static irqreturn_t soft_thread_irq(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device *)dev_id;
+
+ if (!pdev)
+ return IRQ_NONE;
+
+ return vxd_handle_thread_irq(&pdev->dev);
+}
+
+static irqreturn_t hard_isrcb(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device *)dev_id;
+
+ if (!pdev)
+ return IRQ_NONE;
+
+ return vxd_handle_irq(&pdev->dev);
+}
+
+static struct vxd_mapping *find_mapping(unsigned int buf_map_id, struct list_head *head)
+{
+ struct list_head *list;
+ struct vxd_mapping *mapping = NULL;
+
+ list_for_each(list, head) {
+ mapping = list_entry(list, struct vxd_mapping, list);
+ if (mapping->buf_map_id == buf_map_id)
+ break;
+ mapping = NULL;
+ }
+ return mapping;
+}
+
+static struct vxd_buffer *find_buffer(unsigned int buf_map_id, struct list_head *head)
+{
+ struct list_head *list;
+ struct vxd_buffer *buf = NULL;
+
+ list_for_each(list, head) {
+ buf = list_entry(list, struct vxd_buffer, list);
+ if (buf->buf_map_id == buf_map_id)
+ break;
+ buf = NULL;
+ }
+ return buf;
+}
+
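+/*
+ * Deferred handler for VXD_CB_PICT_DECODED: finishes the m2m job and looks
+ * the buffer up in the driver's timing table (vxd_dev->time_drv) to report
+ * the per-picture decode latency.
+ */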
+static void return_worker(void *work)
+{
+ struct vxd_dec_ctx *ctx;
+ struct vxd_return *res;
+ struct device *dev;
+ struct timespec64 time;
+ int loop;
+
+ work = get_work_buff(work, TRUE);
+
+ res = container_of(work, struct vxd_return, work);
+ ctx = res->ctx;
+ dev = ctx->dev->dev;
+ switch (res->type) {
+ case VXD_CB_PICT_DECODED:
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ ktime_get_real_ts64(&time);
+ for (loop = 0; loop < ARRAY_SIZE(ctx->dev->time_drv); loop++) {
+ if (ctx->dev->time_drv[loop].id == res->buf_map_id) {
+ ctx->dev->time_drv[loop].end_time =
+ timespec64_to_ns(&time);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(dev, "picture buf decode time is %llu us for buf_map_id 0x%x\n",
+ div_s64(ctx->dev->time_drv[loop].end_time -
+ ctx->dev->time_drv[loop].start_time, 1000),
+ res->buf_map_id);
+#endif
+ break;
+ }
+ }
+
+ if (loop == ARRAY_SIZE(ctx->dev->time_drv))
+			dev_err(dev, "picture buf decode for buf_map_id 0x%x is not measured\n",
+ res->buf_map_id);
+ break;
+
+ default:
+ break;
+ }
+ kfree(res->work);
+ kfree(res);
+}
+
+static void vxd_error_recovery(struct vxd_dec_ctx *ctx)
+{
+ int ret = -1;
+
+ /*
+ * In the previous frame decoding fatal error has been detected
+ * so we need to reload the firmware to make it alive.
+ */
+ pr_debug("Reloading the firmware because of previous error\n");
+ vxd_clean_fw_resources(ctx->dev);
+ ret = vxd_prepare_fw(ctx->dev);
+ if (ret)
+		pr_err("Reloading the firmware failed!\n");
+}
+
+static struct vxd_dec_q_data *get_q_data(struct vxd_dec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->q_data[Q_DATA_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->q_data[Q_DATA_DST];
+ default:
+ return NULL;
+ }
+}
+
+static void vxd_return_resource(void *ctx_handle, enum vxd_cb_type type,
+ unsigned int buf_map_id)
+{
+ struct vxd_return *res;
+ struct vxd_buffer *buf = NULL;
+ struct vb2_v4l2_buffer *vb;
+ struct vxd_mapping *mapping = NULL;
+ struct vxd_dec_ctx *ctx = (struct vxd_dec_ctx *)ctx_handle;
+ struct v4l2_event event = {};
+ struct device *dev = ctx->dev->dev;
+ int i;
+ struct vxd_dec_q_data *q_data;
+
+ switch (type) {
+ case VXD_CB_STRUNIT_PROCESSED:
+
+ buf = find_buffer(buf_map_id, &ctx->out_buffers);
+ if (!buf) {
+ dev_err(dev, "Could not locate buf_map_id=0x%x in OUTPUT buffers list\n",
+ buf_map_id);
+ break;
+ }
+ buf->buffer.vb.field = V4L2_FIELD_NONE;
+ q_data = get_q_data(ctx, buf->buffer.vb.vb2_buf.vb2_queue->type);
+ if (!q_data)
+ return;
+
+ for (i = 0; i < q_data->fmt->num_planes; i++)
+ vb2_set_plane_payload(&buf->buffer.vb.vb2_buf, i,
+ ctx->pict_bufcfg.plane_size[i]);
+
+ v4l2_m2m_buf_done(&buf->buffer.vb, VB2_BUF_STATE_DONE);
+ break;
+ case VXD_CB_SPS_RELEASE:
+ break;
+ case VXD_CB_PPS_RELEASE:
+ break;
+ case VXD_CB_PICT_DECODED:
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+ res->ctx = ctx;
+ res->type = type;
+ res->buf_map_id = buf_map_id;
+
+ init_work(&res->work, return_worker, HWA_DECODER);
+		if (!res->work) {
+			kfree(res);
+			return;
+		}
+
+ schedule_work(res->work);
+
+ break;
+ case VXD_CB_PICT_DISPLAY:
+ buf = find_buffer(buf_map_id, &ctx->cap_buffers);
+ if (!buf) {
+ dev_err(dev, "Could not locate buf_map_id=0x%x in CAPTURE buffers list\n",
+ buf_map_id);
+ break;
+ }
+ buf->mapping->reuse = FALSE;
+ buf->buffer.vb.field = V4L2_FIELD_NONE;
+ q_data = get_q_data(ctx, buf->buffer.vb.vb2_buf.vb2_queue->type);
+ if (!q_data)
+ return;
+
+ for (i = 0; i < q_data->fmt->num_planes; i++)
+ vb2_set_plane_payload(&buf->buffer.vb.vb2_buf, i,
+ ctx->pict_bufcfg.plane_size[i]);
+
+ v4l2_m2m_buf_done(&buf->buffer.vb, VB2_BUF_STATE_DONE);
+ break;
+ case VXD_CB_PICT_RELEASE:
+ buf = find_buffer(buf_map_id, &ctx->reuse_queue);
+ if (buf) {
+ buf->mapping->reuse = TRUE;
+ list_move_tail(&buf->list, &ctx->cap_buffers);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, &buf->buffer.vb);
+ break;
+ }
+ mapping = find_mapping(buf_map_id, &ctx->cap_mappings);
+ if (!mapping) {
+ dev_err(dev, "Could not locate buf_map_id=0x%x in CAPTURE buffers list\n",
+ buf_map_id);
+ break;
+ }
+ mapping->reuse = TRUE;
+
+ break;
+ case VXD_CB_PICT_END:
+ break;
+ case VXD_CB_STR_END:
+ event.type = V4L2_EVENT_EOS;
+ v4l2_event_queue_fh(&ctx->fh, &event);
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ vb->flags |= V4L2_BUF_FLAG_LAST;
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!q_data)
+ break;
+
+ for (i = 0; i < q_data->fmt->num_planes; i++)
+ vb2_set_plane_payload(&vb->vb2_buf, i, 0);
+
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ } else {
+ ctx->flag_last = TRUE;
+ }
+ break;
+ case VXD_CB_ERROR_FATAL:
+ /*
+ * There has been FW error, so we need to reload the firmware.
+ */
+#ifdef ERROR_RECOVERY_SIMULATION
+ vxd_error_recovery(ctx);
+#endif
+
+ if (ctx->dev->emergency)
+ ctx->eos = TRUE;
+ /*
+ * Just send zero size buffer to v4l2 application,
+ * informing the error condition.
+ */
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ vb->flags |= V4L2_BUF_FLAG_LAST;
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!q_data)
+ break;
+
+ for (i = 0; i < q_data->fmt->num_planes; i++)
+ vb2_set_plane_payload(&vb->vb2_buf, i, 0);
+
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ } else {
+ ctx->flag_last = TRUE;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int vxd_dec_submit_opconfig(struct vxd_dec_ctx *ctx)
+{
+ int ret = 0;
+
+ if (ctx->stream_created) {
+ ret = core_stream_set_output_config(ctx->res_str_id,
+ &ctx->str_opcfg,
+ &ctx->pict_bufcfg);
+ if (ret) {
+ dev_err(ctx->dev->dev, "core_stream_set_output_config failed\n");
+ ctx->opconfig_pending = TRUE;
+ return ret;
+ }
+ ctx->opconfig_pending = FALSE;
+ ctx->stream_configured = TRUE;
+ } else {
+ ctx->opconfig_pending = TRUE;
+ }
+ return ret;
+}
+
+static int vxd_dec_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vxd_dec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vxd_dec_q_data *q_data;
+ struct vxd_dec_q_data *src_q_data;
+ int i;
+ unsigned int hw_nbuffers = 0;
+
+ q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (*nplanes) {
+ /* This is being called from CREATEBUFS, perform validation */
+ if (*nplanes != q_data->fmt->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] != q_data->size_image[i])
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ *nplanes = q_data->fmt->num_planes;
+
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		src_q_data = &ctx->q_data[Q_DATA_SRC];
+		hw_nbuffers = get_nbuffers(src_q_data->fmt->std,
+					   q_data->width,
+					   q_data->height,
+					   ctx->max_num_ref_frames);
+ }
+
+ *nbuffers = max(*nbuffers, hw_nbuffers);
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->size_image[i];
+
+ return 0;
+}
+
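+/*
+ * CAPTURE buffers are mapped into the device once and the mapping is cached
+ * in ctx->cap_mappings, keyed by the plane 0 DMA address; a buffer that is
+ * re-prepared with an unchanged address reuses the cached buf_map_id instead
+ * of being remapped.
+ */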
+static int vxd_dec_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vxd_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = ctx->dev->dev;
+ struct vxd_dec_q_data *q_data;
+ void *sgt;
+#ifdef CAPTURE_CONTIG_ALLOC
+ struct page *new_page;
+#else
+ void *sgl;
+#endif
+ struct sg_table *sgt_new;
+ void *sgl_new;
+ int nents = 0;
+ int size = 0;
+ int plane, num_planes, ret = 0;
+ unsigned long dma_addr;
+	struct vxd_mapping *mapping = NULL;
+ struct list_head *list;
+ struct vxd_buffer *buf =
+ container_of(vb, struct vxd_buffer, buffer.vb.vb2_buf);
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+
+ num_planes = q_data->fmt->num_planes;
+
+ for (plane = 0; plane < num_planes; plane++) {
+ if (vb2_plane_size(vb, plane) < q_data->size_image[plane]) {
+ dev_err(dev, "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, plane),
+ (long)q_data->size_image[plane]);
+ return -EINVAL;
+ }
+ }
+
+ if (buf->mapped && !V4L2_TYPE_IS_OUTPUT(vb->type)) {
+#ifdef CAPTURE_CONTIG_ALLOC
+ dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+#else
+ sgt = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sgt)
+ return -EFAULT;
+
+ dma_addr = sg_phys(img_mmu_get_sgl(sgt));
+#endif
+ if (buf->buf_info.dma_addr != dma_addr) {
+ list_for_each(list, &ctx->cap_mappings) {
+ mapping = list_entry(list, struct vxd_mapping, list);
+ if (dma_addr == mapping->dma_addr)
+ break;
+
+ mapping = NULL;
+ }
+ if (mapping) {
+				/* Reuse the cached mapping; just update this buffer's IDs */
+ buf->buf_info.dma_addr = mapping->dma_addr;
+ buf->buf_map_id = mapping->buf_map_id;
+ buf->buf_info.cpu_linear_addr = vb2_plane_vaddr(vb, 0);
+ buf->mapping = mapping;
+ return 0;
+ }
+ } else {
+ return 0;
+ }
+ } else if (buf->mapped) {
+ return 0;
+ }
+
+ buf->buf_info.cpu_linear_addr = vb2_plane_vaddr(vb, 0);
+ buf->buf_info.buf_size = vb2_plane_size(vb, 0);
+ buf->buf_info.fd = -1;
+ sgt = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sgt) {
+ dev_err(dev, "Could not get sg_table from plane 0\n");
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->type)) {
+ buf->buf_info.dma_addr = sg_phys(img_mmu_get_sgl(sgt));
+ ret = core_stream_map_buf_sg(ctx->res_str_id,
+ VDEC_BUFTYPE_BITSTREAM,
+ &buf->buf_info, sgt,
+ &buf->buf_map_id);
+ if (ret) {
+ dev_err(dev, "OUTPUT core_stream_map_buf_sg failed\n");
+ return ret;
+ }
+
+ buf->bstr_info.buf_size = q_data->size_image[0];
+ buf->bstr_info.cpu_virt_addr = buf->buf_info.cpu_linear_addr;
+ buf->bstr_info.mem_attrib =
+ SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+ SYS_MEMATTRIB_INPUT | SYS_MEMATTRIB_CPU_WRITE;
+ buf->bstr_info.bufmap_id = buf->buf_map_id;
+ lst_init(&buf->seq_unit.bstr_seg_list);
+ lst_init(&buf->pic_unit.bstr_seg_list);
+ lst_init(&buf->end_unit.bstr_seg_list);
+
+ list_add_tail(&buf->list, &ctx->out_buffers);
+ } else {
+#ifdef CAPTURE_CONTIG_ALLOC
+ buf->buf_info.dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+#else
+ buf->buf_info.dma_addr = sg_phys(img_mmu_get_sgl(sgt));
+#endif
+ /* Create a single sgt from the plane(s) */
+ sgt_new = kmalloc(sizeof(*sgt_new), GFP_KERNEL);
+ if (!sgt_new)
+ return -EINVAL;
+
+ for (plane = 0; plane < num_planes; plane++) {
+ size += ALIGN(vb2_plane_size(vb, plane), PAGE_SIZE);
+ sgt = vb2_dma_sg_plane_desc(vb, plane);
+ if (!sgt) {
+ dev_err(dev, "Could not get sg_table from plane %d\n", plane);
+ kfree(sgt_new);
+ return -EINVAL;
+ }
+#ifdef CAPTURE_CONTIG_ALLOC
+ nents += 1;
+#else
+ nents += sg_nents(img_mmu_get_sgl(sgt));
+#endif
+ }
+ buf->buf_info.buf_size = size;
+
+ ret = sg_alloc_table(sgt_new, nents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt_new);
+ return -EINVAL;
+ }
+ sgl_new = img_mmu_get_sgl(sgt_new);
+
+ for (plane = 0; plane < num_planes; plane++) {
+ sgt = vb2_dma_sg_plane_desc(vb, plane);
+ if (!sgt) {
+ dev_err(dev, "Could not get sg_table from plane %d\n", plane);
+ sg_free_table(sgt_new);
+ kfree(sgt_new);
+ return -EINVAL;
+ }
+#ifdef CAPTURE_CONTIG_ALLOC
+ new_page = phys_to_page(vb2_dma_contig_plane_dma_addr(vb, plane));
+ sg_set_page(sgl_new, new_page, ALIGN(vb2_plane_size(vb, plane),
+ PAGE_SIZE), 0);
+ sgl_new = sg_next(sgl_new);
+#else
+ sgl = img_mmu_get_sgl(sgt);
+
+ while (sgl) {
+ sg_set_page(sgl_new, sg_page(sgl), img_mmu_get_sgl_length(sgl), 0);
+ sgl = sg_next(sgl);
+ sgl_new = sg_next(sgl_new);
+ }
+#endif
+ }
+
+ buf->buf_info.pictbuf_cfg = ctx->pict_bufcfg;
+ ret = core_stream_map_buf_sg(ctx->res_str_id,
+ VDEC_BUFTYPE_PICTURE,
+ &buf->buf_info, sgt_new,
+ &buf->buf_map_id);
+ sg_free_table(sgt_new);
+ kfree(sgt_new);
+ if (ret) {
+ dev_err(dev, "CAPTURE core_stream_map_buf_sg failed\n");
+ return ret;
+ }
+ if (buf->mapped == FALSE)
+ list_add_tail(&buf->list, &ctx->cap_buffers);
+
+ /* Add this to the mappings */
+		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping)
+			return -ENOMEM;
+
+ mapping->reuse = TRUE;
+ mapping->dma_addr = buf->buf_info.dma_addr;
+ mapping->buf_map_id = buf->buf_map_id;
+ list_add_tail(&mapping->list, &ctx->cap_mappings);
+ buf->mapping = mapping;
+ }
+ buf->mapped = TRUE;
+
+ return 0;
+}
+
+static void vxd_dec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vxd_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vxd_buffer *buf =
+ container_of(vb, struct vxd_buffer, buffer.vb.vb2_buf);
+ struct vxd_dec_q_data *q_data;
+ int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->type)) {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ } else {
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_V4L2);
+ if (buf->mapping->reuse) {
+ mutex_unlock(ctx->mutex);
+ if (ctx->flag_last) {
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+
+ for (i = 0; i < q_data->fmt->num_planes; i++)
+ vb2_set_plane_payload(&vbuf->vb2_buf, i, 0);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ } else {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+ } else {
+ list_move_tail(&buf->list, &ctx->reuse_queue);
+ mutex_unlock(ctx->mutex);
+ }
+ }
+}
+
+static void vxd_dec_return_all_buffers(struct vxd_dec_ctx *ctx,
+ struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vb)
+ break;
+
+ spin_lock_irqsave(ctx->dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+		spin_unlock_irqrestore(ctx->dev->lock, flags);
+ }
+}
+
+static int vxd_dec_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ int ret = 0;
+ struct vxd_dec_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_streaming = TRUE;
+ else
+ ctx->src_streaming = TRUE;
+
+ if (ctx->dst_streaming && ctx->src_streaming && !ctx->core_streaming) {
+ if (!ctx->stream_configured) {
+ vxd_dec_return_all_buffers(ctx, vq, VB2_BUF_STATE_ERROR);
+ return -EINVAL;
+ }
+ ctx->eos = FALSE;
+ ctx->stop_initiated = FALSE;
+ ctx->flag_last = FALSE;
+ ret = core_stream_play(ctx->res_str_id);
+ if (ret) {
+ vxd_dec_return_all_buffers(ctx, vq, VB2_BUF_STATE_ERROR);
+ return ret;
+ }
+ ctx->core_streaming = TRUE;
+ ctx->aborting = 0;
+ }
+
+ return 0;
+}
+
+static void vxd_dec_stop_streaming(struct vb2_queue *vq)
+{
+ struct vxd_dec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct list_head *list;
+ struct list_head *temp;
+ struct vxd_buffer *buf = NULL;
+ struct vxd_mapping *mapping = NULL;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_streaming = FALSE;
+ else
+ ctx->src_streaming = FALSE;
+
+ if (!ctx->stream_created) {
+ vxd_dec_return_all_buffers(ctx, vq, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ if (ctx->core_streaming) {
+ core_stream_stop(ctx->res_str_id);
+ ctx->core_streaming = FALSE;
+
+ core_stream_flush(ctx->res_str_id, TRUE);
+ }
+ /* unmap all the output and capture plane buffers */
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+		list_for_each_safe(list, temp, &ctx->out_buffers) {
+			buf = list_entry(list, struct vxd_buffer, list);
+			core_stream_unmap_buf_sg(buf->buf_map_id);
+			buf->mapped = FALSE;
+			__list_del_entry(&buf->list);
+		}
+ } else {
+ list_for_each_safe(list, temp, &ctx->reuse_queue) {
+ buf = list_entry(list, struct vxd_buffer, list);
+ list_move_tail(&buf->list, &ctx->cap_buffers);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, &buf->buffer.vb);
+ }
+
+		list_for_each_safe(list, temp, &ctx->cap_mappings) {
+			mapping = list_entry(list, struct vxd_mapping, list);
+			core_stream_unmap_buf_sg(mapping->buf_map_id);
+			__list_del_entry(&mapping->list);
+		}
+ }
+
+ ctx->flag_last = FALSE;
+ vxd_dec_return_all_buffers(ctx, vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops vxd_dec_video_ops = {
+ .queue_setup = vxd_dec_queue_setup,
+ .buf_prepare = vxd_dec_buf_prepare,
+ .buf_queue = vxd_dec_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vxd_dec_start_streaming,
+ .stop_streaming = vxd_dec_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vxd_dec_ctx *ctx = priv;
+ struct vxd_dev *vxd = ctx->dev;
+ int ret = 0;
+
+ /* src_vq */
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct vxd_buffer);
+ src_vq->ops = &vxd_dec_video_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = vxd->mutex_queue;
+ src_vq->dev = vxd->v4l2_dev.dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ /* dst_vq */
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct vxd_buffer);
+ dst_vq->ops = &vxd_dec_video_ops;
+#ifdef CAPTURE_CONTIG_ALLOC
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+#else
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+#endif
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = vxd->mutex_queue;
+ dst_vq->dev = vxd->v4l2_dev.dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vxd_dec_open(struct file *file)
+{
+ struct vxd_dev *vxd = video_drvdata(file);
+ struct vxd_dec_ctx *ctx;
+ struct vxd_dec_q_data *s_q_data;
+ int i, ret = 0;
+
+ dev_dbg(vxd->dev, "%s:%d vxd %p\n", __func__, __LINE__, vxd);
+
+ if (vxd->no_fw) {
+		dev_err(vxd->dev, "Error: fw binary is not present\n");
+		return -ENOENT;
+ }
+
+ mutex_lock_nested(vxd->mutex, SUBCLASS_BASE);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(vxd->mutex);
+ return -ENOMEM;
+ }
+ ctx->dev = vxd;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ s_q_data->fmt = &vxd_dec_formats[0];
+ s_q_data->width = 1920;
+ s_q_data->height = 1080;
+ for (i = 0; i < s_q_data->fmt->num_planes; i++) {
+ s_q_data->bytesperline[i] = s_q_data->width;
+ s_q_data->size_image[i] = s_q_data->bytesperline[i] * s_q_data->height;
+ }
+
+ ctx->q_data[Q_DATA_DST] = *s_q_data;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vxd->m2m_dev, ctx, &queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto exit;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ ret = idr_alloc_cyclic(vxd->streams, &ctx->stream, VXD_MIN_STREAM_ID, VXD_MAX_STREAM_ID,
+ GFP_KERNEL);
+ if (ret < VXD_MIN_STREAM_ID || ret > VXD_MAX_STREAM_ID) {
+ dev_err(vxd->dev, "%s: stream id creation failed!\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ctx->stream.id = ret;
+ ctx->stream.ctx = ctx;
+
+ ctx->stream_created = FALSE;
+ ctx->stream_configured = FALSE;
+ ctx->src_streaming = FALSE;
+ ctx->dst_streaming = FALSE;
+ ctx->core_streaming = FALSE;
+ ctx->eos = FALSE;
+ ctx->stop_initiated = FALSE;
+ ctx->flag_last = FALSE;
+
+ lst_init(&ctx->seg_list);
+ for (i = 0; i < MAX_SEGMENTS; i++)
+ lst_add(&ctx->seg_list, &ctx->bstr_segments[i]);
+
+	ret = vxd_create_ctx(vxd, ctx);
+	if (ret)
+		goto out_idr_remove;
+
+ ctx->stream.mmu_ctx = ctx->mmu_ctx;
+ ctx->stream.ptd = ctx->ptd;
+
+ ctx->mutex = kzalloc(sizeof(*ctx->mutex), GFP_KERNEL);
+ if (!ctx->mutex) {
+ ret = -ENOMEM;
+ goto out_idr_remove;
+ }
+ mutex_init(ctx->mutex);
+
+ INIT_LIST_HEAD(&ctx->items_done);
+ INIT_LIST_HEAD(&ctx->reuse_queue);
+ INIT_LIST_HEAD(&ctx->return_queue);
+ INIT_LIST_HEAD(&ctx->out_buffers);
+ INIT_LIST_HEAD(&ctx->cap_buffers);
+ INIT_LIST_HEAD(&ctx->cap_mappings);
+
+ mutex_unlock(vxd->mutex);
+
+ return 0;
+
+out_idr_remove:
+ idr_remove(vxd->streams, ctx->stream.id);
+
+exit:
+ v4l2_fh_exit(&ctx->fh);
+ get_work_buff(ctx->work, TRUE);
+ kfree(ctx->work);
+ kfree(ctx);
+ mutex_unlock(vxd->mutex);
+ return ret;
+}
+
+static int vxd_dec_release(struct file *file)
+{
+ struct vxd_dev *vxd = video_drvdata(file);
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+ struct bspp_ddbuf_array_info *fw_sequ = ctx->fw_sequ;
+ struct bspp_ddbuf_array_info *fw_pps = ctx->fw_pps;
+ int i, ret = 0;
+ struct vxd_dec_q_data *s_q_data;
+ struct list_head *list;
+ struct list_head *temp;
+ struct vxd_buffer *buf = NULL;
+ struct vxd_mapping *mapping = NULL;
+
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+ if (ctx->core_streaming) {
+ core_stream_stop(ctx->res_str_id);
+ ctx->core_streaming = FALSE;
+
+ core_stream_flush(ctx->res_str_id, TRUE);
+ }
+
+	list_for_each_safe(list, temp, &ctx->out_buffers) {
+		buf = list_entry(list, struct vxd_buffer, list);
+		core_stream_unmap_buf_sg(buf->buf_map_id);
+		buf->mapped = FALSE;
+		__list_del_entry(&buf->list);
+	}
+
+ list_for_each_safe(list, temp, &ctx->reuse_queue) {
+ buf = list_entry(list, struct vxd_buffer, list);
+ core_stream_unmap_buf_sg(buf->buf_map_id);
+ buf->mapped = FALSE;
+ __list_del_entry(&buf->list);
+ }
+
+	list_for_each_safe(list, temp, &ctx->cap_mappings) {
+		mapping = list_entry(list, struct vxd_mapping, list);
+		core_stream_unmap_buf_sg(mapping->buf_map_id);
+		__list_del_entry(&mapping->list);
+	}
+ if (ctx->stream_created) {
+ bspp_stream_destroy(ctx->bspp_context);
+
+ for (i = 0; i < MAX_SEQUENCES; i++) {
+ core_stream_unmap_buf(fw_sequ[i].ddbuf_info.bufmap_id);
+ img_mem_free(ctx->mem_ctx, fw_sequ[i].ddbuf_info.buf_id);
+ }
+
+ if (s_q_data->fmt->std != VDEC_STD_JPEG) {
+ for (i = 0; i < MAX_PPSS; i++) {
+ core_stream_unmap_buf(fw_pps[i].ddbuf_info.bufmap_id);
+ img_mem_free(ctx->mem_ctx, fw_pps[i].ddbuf_info.buf_id);
+ }
+ }
+ core_stream_destroy(ctx->res_str_id);
+ ctx->stream_created = FALSE;
+ }
+
+ mutex_lock_nested(vxd->mutex, SUBCLASS_BASE);
+
+ vxd_destroy_ctx(vxd, ctx);
+
+ idr_remove(vxd->streams, ctx->stream.id);
+
+ v4l2_fh_del(&ctx->fh);
+
+ v4l2_fh_exit(&ctx->fh);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ mutex_destroy(ctx->mutex);
+ kfree(ctx->mutex);
+ ctx->mutex = NULL;
+
+ get_work_buff(ctx->work, TRUE);
+ kfree(ctx->work);
+ kfree(ctx);
+
+ mutex_unlock(vxd->mutex);
+
+ return ret;
+}
+
+static int vxd_dec_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+	strscpy(cap->driver, IMG_VXD_DEC_MODULE_NAME, sizeof(cap->driver));
+	strscpy(cap->card, IMG_VXD_DEC_MODULE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", IMG_VXD_DEC_MODULE_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int __enum_fmt(struct v4l2_fmtdesc *f, unsigned int type)
+{
+ int i, index;
+ struct vxd_dec_fmt *fmt = NULL;
+
+ index = 0;
+ for (i = 0; i < ARRAY_SIZE(vxd_dec_formats); ++i) {
+ if (vxd_dec_formats[i].type & type) {
+ if (index == f->index) {
+ fmt = &vxd_dec_formats[i];
+ break;
+ }
+ index++;
+ }
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vxd_dec_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
+{
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ return __enum_fmt(f, IMG_DEC_FMT_TYPE_OUTPUT);
+
+ return __enum_fmt(f, IMG_DEC_FMT_TYPE_CAPTURE);
+}
+
+static struct vxd_dec_fmt *find_format(struct v4l2_format *f, unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vxd_dec_formats); ++i) {
+ if (vxd_dec_formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ vxd_dec_formats[i].type == type)
+ return &vxd_dec_formats[i];
+ }
+ return NULL;
+}
+
+static unsigned int get_sizeimage(int w, int h, struct vxd_dec_fmt *fmt, int plane)
+{
+ switch (fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420M:
+ return ((plane == 0) ? (w * h) : (w * h / 2));
+ case V4L2_PIX_FMT_YUV422M:
+ return (w * h);
+ default:
+ return (w * h * fmt->size_num / fmt->size_den);
+ }
+}
+
+static unsigned int get_stride(int w, struct vxd_dec_fmt *fmt)
+{
+ return (ALIGN(w, HW_ALIGN) * fmt->bytes_pp);
+}
+
+/*
+ * vxd_get_header_info - submit and preparse one bitstream buffer ahead of
+ * device_run() so that the stream header information (frame height, maximum
+ * number of reference frames) can be retrieved.
+ */
+static int vxd_get_header_info(void *priv)
+{
+ struct vxd_dec_ctx *ctx = priv;
+ struct vxd_dev *vxd_dev = ctx->dev;
+ struct device *dev = vxd_dev->v4l2_dev.dev;
+ struct vb2_v4l2_buffer *src_vb;
+ struct vxd_buffer *src_vxdb;
+ struct vxd_buffer *dst_vxdb;
+ struct bspp_preparsed_data *preparsed_data;
+ unsigned int data_size;
+ int ret;
+
+ /*
+ * Checking for queued buffer.
+ * If no next buffer present, do not get information from header.
+ * Else, get header information and store for later use.
+ */
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!src_vb) {
+ dev_warn(dev, "get_header_info Next src buffer is null\n");
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_V4L2);
+
+ src_vxdb = container_of(src_vb, struct vxd_buffer, buffer.vb);
+ /* Setting dst_vxdb to arbitrary value (using src_vb) for now */
+ dst_vxdb = container_of(src_vb, struct vxd_buffer, buffer.vb);
+
+ preparsed_data = &dst_vxdb->preparsed_data;
+
+ data_size = vb2_get_plane_payload(&src_vxdb->buffer.vb.vb2_buf, 0);
+
+ ret = bspp_stream_submit_buffer(ctx->bspp_context,
+ &src_vxdb->bstr_info,
+ src_vxdb->buf_map_id,
+ data_size, NULL,
+ VDEC_BSTRELEMENT_UNSPECIFIED);
+	if (ret) {
+		dev_err(dev, "get_header_info bspp_stream_submit_buffer failed %d\n", ret);
+		mutex_unlock(ctx->mutex);
+		return ret;
+	}
+ mutex_unlock(ctx->mutex);
+
+ ret = bspp_stream_preparse_buffers(ctx->bspp_context, NULL, 0,
+ &ctx->seg_list,
+ preparsed_data, ctx->eos);
+ if (ret) {
+ dev_err(dev, "get_header_info bspp_stream_preparse_buffers failed %d\n", ret);
+ return ret;
+ }
+
+ if (preparsed_data->sequ_hdr_info.com_sequ_hdr_info.max_frame_size.height &&
+ preparsed_data->sequ_hdr_info.com_sequ_hdr_info.max_ref_frame_num) {
+ ctx->height = preparsed_data->sequ_hdr_info.com_sequ_hdr_info.max_frame_size.height;
+ ctx->max_num_ref_frames =
+ preparsed_data->sequ_hdr_info.com_sequ_hdr_info.max_ref_frame_num;
+	} else {
+		dev_err(dev, "get_header_info: no valid sequence header was parsed\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+ return 0;
+}
+
+static int vxd_dec_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+ struct vxd_dec_q_data *q_data;
+ struct vxd_dev *vxd_dev = ctx->dev;
+ unsigned int i = 0;
+ int ret = 0;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->num_planes;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* The buffer contains compressed image. */
+ pix_mp->width = ctx->width;
+ pix_mp->height = ctx->height;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage = q_data->size_image[0];
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* The buffer contains decoded YUV image. */
+ pix_mp->width = ctx->width;
+ pix_mp->height = ctx->height;
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ pix_mp->plane_fmt[i].bytesperline = get_stride(pix_mp->width, q_data->fmt);
+ pix_mp->plane_fmt[i].sizeimage = get_sizeimage
+ (pix_mp->plane_fmt[i].bytesperline,
+ ctx->height, q_data->fmt, i);
+ }
+ } else {
+ dev_err(vxd_dev->v4l2_dev.dev, "Wrong V4L2_format type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vxd_dec_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+ struct vxd_dev *vxd_dev = ctx->dev;
+ struct vxd_dec_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = pix_mp->plane_fmt;
+ unsigned int i = 0;
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ fmt = find_format(f, IMG_DEC_FMT_TYPE_OUTPUT);
+ if (!fmt) {
+ dev_err(vxd_dev->v4l2_dev.dev, "Unsupported format for source.\n");
+ return -EINVAL;
+ }
+		/*
+		 * Worst-case allocation for the compressed bitstream buffer:
+		 * the size of an uncompressed NV12 frame.
+		 */
+ plane_fmt[0].sizeimage = (ALIGN(pix_mp->width, HW_ALIGN) *
+ ALIGN(pix_mp->height, HW_ALIGN) * 3)/2;
+ } else {
+ fmt = find_format(f, IMG_DEC_FMT_TYPE_CAPTURE);
+ if (!fmt) {
+ dev_err(vxd_dev->v4l2_dev.dev, "Unsupported format for dest.\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fmt->num_planes; i++) {
+ plane_fmt[i].bytesperline = get_stride(pix_mp->width, fmt);
+ plane_fmt[i].sizeimage = get_sizeimage(plane_fmt[i].bytesperline,
+ pix_mp->height, fmt, i);
+ }
+ pix_mp->num_planes = fmt->num_planes;
+ pix_mp->flags = 0;
+ }
+
+ if (pix_mp->field == V4L2_FIELD_ANY)
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ return ret;
+}
+
+static int vxd_dec_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+ struct vxd_dev *vxd_dev = ctx->dev;
+ struct device *dev = vxd_dev->v4l2_dev.dev;
+ struct vxd_dec_q_data *q_data;
+ struct vb2_queue *vq;
+ struct vdec_str_configdata strcfgdata;
+ int ret = 0;
+ unsigned char i = 0, j = 0;
+
+ pix_mp = &f->fmt.pix_mp;
+
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+ int res = vxd_get_header_info(ctx);
+
+ if (res == 0)
+ pix_mp->height = ctx->height;
+ }
+
+ ret = vxd_dec_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ dev_err(dev, "Queue is busy\n");
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+
+ if (!q_data)
+ return -EINVAL;
+
+ /*
+ * saving the original dimensions to pass to gstreamer (to remove the green
+ * padding on kmsink)
+ */
+ ctx->width_orig = pix_mp->width;
+ ctx->height_orig = pix_mp->height;
+
+ ctx->width = pix_mp->width;
+ ctx->height = pix_mp->height;
+
+ q_data->width = pix_mp->width;
+ q_data->height = pix_mp->height;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ q_data->fmt = find_format(f, IMG_DEC_FMT_TYPE_OUTPUT);
+ q_data->size_image[0] = pix_mp->plane_fmt[0].sizeimage;
+
+ if (!ctx->stream_created) {
+ strcfgdata.vid_std = q_data->fmt->std;
+
+ if (strcfgdata.vid_std == VDEC_STD_UNDEFINED) {
+ dev_err(dev, "Invalid input format\n");
+ return -EINVAL;
+ }
+ strcfgdata.bstr_format = VDEC_BSTRFORMAT_ELEMENTARY;
+ strcfgdata.user_str_id = ctx->stream.id;
+ strcfgdata.update_yuv = FALSE;
+ strcfgdata.bandwidth_efficient = FALSE;
+ strcfgdata.disable_mvc = FALSE;
+ strcfgdata.full_scan = FALSE;
+ strcfgdata.immediate_decode = TRUE;
+ strcfgdata.intra_frame_closed_gop = TRUE;
+
+ ret = core_stream_create(ctx, &strcfgdata, &ctx->res_str_id);
+ if (ret) {
+ dev_err(dev, "Core stream create failed\n");
+ return -EINVAL;
+ }
+ ctx->stream_created = TRUE;
+ if (ctx->opconfig_pending) {
+ ret = vxd_dec_submit_opconfig(ctx);
+ if (ret) {
+ dev_err(dev, "Output config failed\n");
+ return -EINVAL;
+ }
+ }
+
+ vxd_dec_alloc_bspp_resource(ctx, strcfgdata.vid_std);
+ ret = bspp_stream_create(&strcfgdata,
+ &ctx->bspp_context,
+ ctx->fw_sequ,
+ ctx->fw_pps);
+ if (ret) {
+ dev_err(dev, "BSPP stream create failed %d\n", ret);
+ return ret;
+ }
+ } else if (q_data->fmt !=
+ find_format(f, IMG_DEC_FMT_TYPE_OUTPUT)) {
+ dev_err(dev, "Input format already set\n");
+ return -EBUSY;
+ }
+ } else {
+ q_data->fmt = find_format(f, IMG_DEC_FMT_TYPE_CAPTURE);
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ q_data->size_image[i] =
+ get_sizeimage(get_stride(pix_mp->width, q_data->fmt),
+ ctx->height, q_data->fmt, i);
+ }
+
+ ctx->str_opcfg.pixel_info.pixfmt = q_data->fmt->pixfmt;
+ ctx->str_opcfg.pixel_info.chroma_interleave = q_data->fmt->interleave;
+ ctx->str_opcfg.pixel_info.chroma_fmt = TRUE;
+ ctx->str_opcfg.pixel_info.chroma_fmt_idc = q_data->fmt->idc;
+
+ if (q_data->fmt->pixfmt == IMG_PIXFMT_420PL12YUV10_MSB ||
+ q_data->fmt->pixfmt == IMG_PIXFMT_422PL12YUV10_MSB) {
+ ctx->str_opcfg.pixel_info.mem_pkg = PIXEL_BIT10_MSB_MP;
+ ctx->str_opcfg.pixel_info.bitdepth_y = 10;
+ ctx->str_opcfg.pixel_info.bitdepth_c = 10;
+ } else {
+ ctx->str_opcfg.pixel_info.mem_pkg = PIXEL_BIT8_MP;
+ ctx->str_opcfg.pixel_info.bitdepth_y = 8;
+ ctx->str_opcfg.pixel_info.bitdepth_c = 8;
+ }
+
+ ctx->str_opcfg.force_oold = FALSE;
+
+ ctx->pict_bufcfg.coded_width = pix_mp->width;
+ ctx->pict_bufcfg.coded_height = pix_mp->height;
+ ctx->pict_bufcfg.pixel_fmt = q_data->fmt->pixfmt;
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ q_data->bytesperline[i] = get_stride(q_data->width, q_data->fmt);
+ if (q_data->bytesperline[i] <
+ pix_mp->plane_fmt[0].bytesperline)
+ q_data->bytesperline[i] =
+ ALIGN(pix_mp->plane_fmt[0].bytesperline, HW_ALIGN);
+ pix_mp->plane_fmt[0].bytesperline =
+ q_data->bytesperline[i];
+ ctx->pict_bufcfg.stride[i] = q_data->bytesperline[i];
+ }
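+		/* Replicate the last configured stride across the remaining planes */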
+ for (j = i; j < IMG_MAX_NUM_PLANES; j++) {
+ if ((i - 1) < 0)
+ i++;
+ ctx->pict_bufcfg.stride[j] =
+ q_data->bytesperline[i - 1];
+ }
+ ctx->pict_bufcfg.stride_alignment = HW_ALIGN;
+		ctx->pict_bufcfg.byte_interleave = FALSE;
+		ctx->pict_bufcfg.buf_size = 0;
+		for (i = 0; i < pix_mp->num_planes; i++) {
+ unsigned int plane_size =
+ get_sizeimage(ctx->pict_bufcfg.stride[i],
+ ctx->pict_bufcfg.coded_height,
+ q_data->fmt, i);
+ ctx->pict_bufcfg.buf_size += ALIGN(plane_size, PAGE_SIZE);
+ ctx->pict_bufcfg.plane_size[i] = plane_size;
+ pix_mp->plane_fmt[i].sizeimage = plane_size;
+ }
+ if (q_data->fmt->pixfmt == 86031 ||
+ q_data->fmt->pixfmt == 81935) {
+ /* Handle the v4l2 multi-planar formats */
+ ctx->str_opcfg.pixel_info.num_planes = 3;
+ ctx->pict_bufcfg.packed = FALSE;
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ ctx->pict_bufcfg.chroma_offset[i] =
+ ALIGN(pix_mp->plane_fmt[i].sizeimage, PAGE_SIZE);
+ ctx->pict_bufcfg.chroma_offset[i] +=
+ (i ? ctx->pict_bufcfg.chroma_offset[i - 1] : 0);
+ }
+ } else {
+ /* IMG Decoders support only multi-planar formats */
+ ctx->str_opcfg.pixel_info.num_planes = 2;
+ ctx->pict_bufcfg.packed = TRUE;
+ ctx->pict_bufcfg.chroma_offset[0] = 0;
+ ctx->pict_bufcfg.chroma_offset[1] = 0;
+ }
+
+ vxd_dec_submit_opconfig(ctx);
+ }
+
+ return ret;
+}
+
+static int vxd_dec_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_EOS)
+ return -EINVAL;
+
+	return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int vxd_dec_try_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vxd_dec_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("%s CMD_STOP\n", __func__);
+#endif
+ /*
+ * When stop command is received, notify device_run if it is
+ * scheduled to run, or tell the decoder that eos has
+ * happened.
+ */
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_V4L2);
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("V4L2 src bufs not empty, set a flag to notify device_run\n");
+#endif
+ ctx->stop_initiated = TRUE;
+ mutex_unlock(ctx->mutex);
+ } else {
+ if (ctx->num_decoding) {
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("buffers are still being decoded, so just set eos flag\n");
+#endif
+ ctx->eos = TRUE;
+ mutex_unlock(ctx->mutex);
+ } else {
+ mutex_unlock(ctx->mutex);
+#ifdef DEBUG_DECODER_DRIVER
+ pr_info("All buffers are decoded, so issue dummy stream end\n");
+#endif
+ vxd_return_resource((void *)ctx, VXD_CB_STR_END, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int vxd_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vxd_dec_ctx *ctx = file2ctx(file);
+ bool def_bounds = true;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ def_bounds = false;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ def_bounds = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (def_bounds) {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->width_orig;
+ s->r.height = ctx->height_orig;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vxd_dec_ioctl_ops = {
+ .vidioc_querycap = vxd_dec_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vxd_dec_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vxd_dec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vxd_dec_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vxd_dec_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vxd_dec_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vxd_dec_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vxd_dec_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vxd_dec_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = vxd_dec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = vxd_dec_try_cmd,
+ .vidioc_decoder_cmd = vxd_dec_cmd,
+
+ .vidioc_g_selection = vxd_g_selection,
+};
+
+static const struct v4l2_file_operations vxd_dec_fops = {
+ .owner = THIS_MODULE,
+ .open = vxd_dec_open,
+ .release = vxd_dec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
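+/*
+ * device_run() submits one access unit to the core as up to three stream
+ * units: a SEQUENCE_START when a new sequence was preparsed, a PICTURE_START
+ * carrying the preparsed bitstream segments, and a PICTURE_END.
+ */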
+static void device_run(void *priv)
+{
+ struct vxd_dec_ctx *ctx = priv;
+ struct vxd_dev *vxd_dev = ctx->dev;
+ struct device *dev = vxd_dev->v4l2_dev.dev;
+ struct vb2_v4l2_buffer *src_vb;
+ struct vb2_v4l2_buffer *dst_vb;
+ struct vxd_buffer *src_vxdb;
+ struct vxd_buffer *dst_vxdb;
+ struct bspp_bitstr_seg *item = NULL, *next = NULL;
+ struct bspp_preparsed_data *preparsed_data;
+ unsigned int data_size;
+ int ret;
+ struct timespec64 time;
+	static int cnt; /* rolling index into vxd_dev->time_drv[] */
+ int i;
+
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_V4L2);
+ ctx->num_decoding++;
+
+	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	if (!src_vb) {
+		dev_err(dev, "Next src buffer is null\n");
+		ctx->num_decoding--;
+		mutex_unlock(ctx->mutex);
+		return;
+	}
+
+	dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+	if (!dst_vb) {
+		dev_err(dev, "Next dst buffer is null\n");
+		ctx->num_decoding--;
+		mutex_unlock(ctx->mutex);
+		return;
+	}
+
+ src_vxdb = container_of(src_vb, struct vxd_buffer, buffer.vb);
+ dst_vxdb = container_of(dst_vb, struct vxd_buffer, buffer.vb);
+
+ preparsed_data = &dst_vxdb->preparsed_data;
+
+ data_size = vb2_get_plane_payload(&src_vxdb->buffer.vb.vb2_buf, 0);
+
+ ret = bspp_stream_submit_buffer(ctx->bspp_context,
+ &src_vxdb->bstr_info,
+ src_vxdb->buf_map_id,
+ data_size, NULL,
+ VDEC_BSTRELEMENT_UNSPECIFIED);
+ if (ret)
+ dev_err(dev, "bspp_stream_submit_buffer failed %d\n", ret);
+
+ if (ctx->stop_initiated &&
+ (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) == 0))
+ ctx->eos = TRUE;
+
+ mutex_unlock(ctx->mutex);
+
+ ret = bspp_stream_preparse_buffers(ctx->bspp_context, NULL, 0, &ctx->seg_list,
+ preparsed_data, ctx->eos);
+ if (ret)
+ dev_err(dev, "bspp_stream_preparse_buffers failed %d\n", ret);
+
+ ktime_get_real_ts64(&time);
+ vxd_dev->time_drv[cnt].start_time = timespec64_to_ns(&time);
+ vxd_dev->time_drv[cnt].id = dst_vxdb->buf_map_id;
+ cnt++;
+
+ if (cnt >= ARRAY_SIZE(vxd_dev->time_drv))
+ cnt = 0;
+
+ core_stream_fill_pictbuf(dst_vxdb->buf_map_id);
+
+ if (preparsed_data->new_sequence) {
+ src_vxdb->seq_unit.str_unit_type =
+ VDECDD_STRUNIT_SEQUENCE_START;
+ src_vxdb->seq_unit.str_unit_handle = ctx;
+ src_vxdb->seq_unit.err_flags = 0;
+ src_vxdb->seq_unit.dd_data = NULL;
+ src_vxdb->seq_unit.seq_hdr_info =
+ &preparsed_data->sequ_hdr_info;
+ src_vxdb->seq_unit.seq_hdr_id = 0;
+ src_vxdb->seq_unit.closed_gop = TRUE;
+ src_vxdb->seq_unit.eop = FALSE;
+ src_vxdb->seq_unit.pict_hdr_info = NULL;
+ src_vxdb->seq_unit.dd_pict_data = NULL;
+ src_vxdb->seq_unit.last_pict_in_seq = FALSE;
+ src_vxdb->seq_unit.str_unit_tag = NULL;
+ src_vxdb->seq_unit.decode = FALSE;
+ src_vxdb->seq_unit.features = 0;
+ core_stream_submit_unit(ctx->res_str_id, &src_vxdb->seq_unit);
+ }
+
+ src_vxdb->pic_unit.str_unit_type = VDECDD_STRUNIT_PICTURE_START;
+ src_vxdb->pic_unit.str_unit_handle = ctx;
+ src_vxdb->pic_unit.err_flags = 0;
+ /* Move the processed segments to the submission buffer */
+ for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+ item = lst_first(&preparsed_data->picture_data.pre_pict_seg_list[i]);
+ while (item) {
+ next = lst_next(item);
+ lst_remove(&preparsed_data->picture_data.pre_pict_seg_list[i], item);
+ lst_add(&src_vxdb->pic_unit.bstr_seg_list, item);
+ item = next;
+ }
+		/* Likewise move the picture segments */
+ item = lst_first(&preparsed_data->picture_data.pict_seg_list[i]);
+ while (item) {
+ next = lst_next(item);
+ lst_remove(&preparsed_data->picture_data.pict_seg_list[i], item);
+ lst_add(&src_vxdb->pic_unit.bstr_seg_list, item);
+ item = next;
+ }
+ }
+
+ src_vxdb->pic_unit.dd_data = NULL;
+ src_vxdb->pic_unit.seq_hdr_info = NULL;
+ src_vxdb->pic_unit.seq_hdr_id = 0;
+ if (preparsed_data->new_sequence)
+ src_vxdb->pic_unit.closed_gop = TRUE;
+ else
+ src_vxdb->pic_unit.closed_gop = FALSE;
+ src_vxdb->pic_unit.eop = TRUE;
+ src_vxdb->pic_unit.eos = ctx->eos;
+ src_vxdb->pic_unit.pict_hdr_info =
+ &preparsed_data->picture_data.pict_hdr_info;
+ src_vxdb->pic_unit.dd_pict_data = NULL;
+ src_vxdb->pic_unit.last_pict_in_seq = FALSE;
+ src_vxdb->pic_unit.str_unit_tag = NULL;
+ src_vxdb->pic_unit.decode = FALSE;
+ src_vxdb->pic_unit.features = 0;
+ core_stream_submit_unit(ctx->res_str_id, &src_vxdb->pic_unit);
+
+ src_vxdb->end_unit.str_unit_type = VDECDD_STRUNIT_PICTURE_END;
+ src_vxdb->end_unit.str_unit_handle = ctx;
+ src_vxdb->end_unit.err_flags = 0;
+ src_vxdb->end_unit.dd_data = NULL;
+ src_vxdb->end_unit.seq_hdr_info = NULL;
+ src_vxdb->end_unit.seq_hdr_id = 0;
+ src_vxdb->end_unit.closed_gop = FALSE;
+ src_vxdb->end_unit.eop = FALSE;
+ src_vxdb->end_unit.eos = ctx->eos;
+ src_vxdb->end_unit.pict_hdr_info = NULL;
+ src_vxdb->end_unit.dd_pict_data = NULL;
+ src_vxdb->end_unit.last_pict_in_seq = FALSE;
+ src_vxdb->end_unit.str_unit_tag = NULL;
+ src_vxdb->end_unit.decode = FALSE;
+ src_vxdb->end_unit.features = 0;
+ core_stream_submit_unit(ctx->res_str_id, &src_vxdb->end_unit);
+}
+
+static int job_ready(void *priv)
+{
+ struct vxd_dec_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < 1 ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < 1 ||
+ !ctx->core_streaming)
+ return 0;
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct vxd_dec_ctx *ctx = priv;
+
+ /* Cancel the transaction at next callback */
+ ctx->aborting = 1;
+}
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static const struct of_device_id vxd_dec_of_match[] = {
+ {.compatible = "img,d5500-vxd"},
+ { /* end */},
+};
+MODULE_DEVICE_TABLE(of, vxd_dec_of_match);
+
+static int vxd_dec_probe(struct platform_device *pdev)
+{
+ struct vxd_dev *vxd;
+ struct resource *res;
+ const struct of_device_id *of_dev_id;
+ int ret;
+ int module_irq;
+ struct video_device *vfd;
+
+ struct heap_config *heap_configs;
+ int num_heaps;
+ int i_heap_id;
+ /* Protect structure fields */
+ spinlock_t **lock;
+
+ of_dev_id = of_match_device(vxd_dec_of_match, &pdev->dev);
+ if (!of_dev_id) {
+ dev_err(&pdev->dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+
+ vxd = devm_kzalloc(&pdev->dev, sizeof(*vxd), GFP_KERNEL);
+ if (!vxd)
+ return -ENOMEM;
+
+ vxd->dev = &pdev->dev;
+ vxd->plat_dev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vxd->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vxd->reg_base))
+ return PTR_ERR(vxd->reg_base);
+
+ module_irq = platform_get_irq(pdev, 0);
+ if (module_irq < 0)
+ return module_irq;
+ vxd->module_irq = module_irq;
+#ifdef ERROR_RECOVERY_SIMULATION
+ g_module_irq = module_irq;
+#endif
+
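+ /* The decoder memory heaps come from a static configuration table */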
+ heap_configs = vxd_dec_heap_configs;
+ num_heaps = ARRAY_SIZE(vxd_dec_heap_configs);
+
+ vxd->mutex = kzalloc(sizeof(*vxd->mutex), GFP_KERNEL);
+ if (!vxd->mutex)
+ return -ENOMEM;
+
+ mutex_init(vxd->mutex);
+
+ vxd->mutex_queue = kzalloc(sizeof(*vxd->mutex_queue), GFP_KERNEL);
+ if (!vxd->mutex_queue)
+ return -ENOMEM;
+
+ mutex_init(vxd->mutex_queue);
+
+ platform_set_drvdata(pdev, vxd);
+
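+ /* Power up the core before reading its hardware properties */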
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to enable clock, status = %d\n", __func__, ret);
+ pm_runtime_put_noidle(&pdev->dev);
+ goto exit;
+ }
+
+ /* Read HW properties */
+ ret = vxd_pvdec_get_props(vxd->dev, vxd->reg_base, &vxd->props);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to fetch core properties!\n", __func__);
+ ret = -ENXIO;
+ goto out_put_sync;
+ }
+ vxd->mmu_config_addr_width = VXD_EXTRN_ADDR_WIDTH(vxd->props);
+#ifdef DEBUG_DECODER_DRIVER
+ dev_info(&pdev->dev, "hw:%u.%u.%u, num_pix: %d, num_ent: %d, mmu: %d, MTX RAM: %d\n",
+ VXD_MAJ_REV(vxd->props),
+ VXD_MIN_REV(vxd->props),
+ VXD_MAINT_REV(vxd->props),
+ VXD_NUM_PIX_PIPES(vxd->props),
+ VXD_NUM_ENT_PIPES(vxd->props),
+ VXD_EXTRN_ADDR_WIDTH(vxd->props),
+ vxd->props.mtx_ram_size);
+#endif
+
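+ /* No firmware messages or pending work yet */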
+ INIT_LIST_HEAD(&vxd->msgs);
+ INIT_LIST_HEAD(&vxd->pend);
+
+ /* initialize memory manager */
+ ret = img_mem_init(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize memory\n");
+ ret = -ENOMEM;
+ goto out_put_sync;
+ }
+ vxd->streams = kzalloc(sizeof(*vxd->streams), GFP_KERNEL);
+ if (!vxd->streams) {
+ ret = -ENOMEM;
+ goto out_init;
+ }
+
+ idr_init(vxd->streams);
+
+ ret = vxd_init(&pdev->dev, vxd, heap_configs, num_heaps);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: main component initialisation failed!\n", __func__);
+ goto out_idr_init;
+ }
+
+ /* initialize core */
+ i_heap_id = vxd_g_internal_heap_id();
+ if (i_heap_id < 0) {
+ dev_err(&pdev->dev, "%s: invalid internal heap id\n", __func__);
+ ret = -EINVAL;
+ goto out_vxd_init;
+ }
+ ret = core_initialise(vxd, i_heap_id, vxd_return_resource);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: core initialisation failed!\n", __func__);
+ goto out_vxd_init;
+ }
+
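+ /* No firmware loaded and the hardware core is off at this point */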
+ vxd->fw_refcnt = 0;
+ vxd->hw_on = 0;
+
+#ifdef DEBUG_DECODER_DRIVER
+ vxd->hw_pm_delay = 10000;
+ vxd->hw_dwr_period = 10000;
+#else
+ vxd->hw_pm_delay = 1000;
+ vxd->hw_dwr_period = 1000;
+#endif
+ ret = vxd_prepare_fw(vxd);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: fw acquire failed!\n", __func__);
+ goto out_core_init;
+ }
+
+ if (vxd->no_fw) {
+ dev_err(&pdev->dev, "%s: firmware is not available\n", __func__);
+ ret = -ENOENT;
+ goto out_core_init;
+ }
+
+ lock = (spinlock_t **)&vxd->lock;
+ *lock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (!(*lock)) {
+ dev_err(&pdev->dev, "memory allocation failed for spinlock\n");
+ ret = -ENOMEM;
+ goto out_core_init;
+ }
+ spin_lock_init(*lock);
+
+ ret = v4l2_device_register(&pdev->dev, &vxd->v4l2_dev);
+ if (ret)
+ goto out_clean_fw;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+ /* Create a sysfs entry to debug firmware error recovery */
+ vxd_dec_kobject = kobject_create_and_add("vxd_decoder", kernel_kobj);
+ if (!vxd_dec_kobject) {
+ dev_err(&pdev->dev, "Failed to create kernel object\n");
+ ret = -ENOMEM;
+ goto out_v4l2_device;
+ }
+
+ ret = sysfs_create_group(vxd_dec_kobject, &attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to create sysfs files\n");
+ kobject_put(vxd_dec_kobject);
+ }
+#endif
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ dev_err(&pdev->dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto out_v4l2_device;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s", IMG_VXD_DEC_MODULE_NAME);
+ vfd->fops = &vxd_dec_fops;
+ vfd->ioctl_ops = &vxd_dec_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->v4l2_dev = &vxd->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vfd->lock = vxd->mutex;
+
+ vxd->vfd_dec = vfd;
+ video_set_drvdata(vfd, vxd);
+
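+ /*
+ * hard_isrcb runs in hard interrupt context; the bulk of the interrupt
+ * handling is deferred to soft_thread_irq in a kernel thread.
+ */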
+ ret = devm_request_threaded_irq(&pdev->dev, module_irq, (irq_handler_t)hard_isrcb,
+ (irq_handler_t)soft_thread_irq, IRQF_SHARED,
+ IMG_VXD_DEC_MODULE_NAME, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ goto out_vid_dev;
+ }
+
+ vxd->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(vxd->m2m_dev)) {
+ dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vxd->m2m_dev);
+ goto out_vid_dev;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto out_vid_reg;
+ }
+ v4l2_info(&vxd->v4l2_dev, "decoder registered as /dev/video%d\n", vfd->num);
+
+ return 0;
+
+out_vid_reg:
+ v4l2_m2m_release(vxd->m2m_dev);
+
+out_vid_dev:
+ video_device_release(vfd);
+
+out_v4l2_device:
+ v4l2_device_unregister(&vxd->v4l2_dev);
+
+out_clean_fw:
+ vxd_clean_fw_resources(vxd);
+
+out_core_init:
+ core_deinitialise();
+
+out_vxd_init:
+ vxd_deinit(vxd);
+
+out_idr_init:
+ idr_destroy(vxd->streams);
+ kfree(vxd->streams);
+
+out_init:
+ img_mem_exit();
+
+out_put_sync:
+ pm_runtime_put_sync(&pdev->dev);
+
+exit:
+ pm_runtime_disable(&pdev->dev);
+ mutex_destroy(vxd->mutex_queue);
+ kfree(vxd->mutex_queue);
+ vxd->mutex_queue = NULL;
+ mutex_destroy(vxd->mutex);
+ kfree(vxd->mutex);
+ vxd->mutex = NULL;
+
+ return ret;
+}
+
+static int vxd_dec_remove(struct platform_device *pdev)
+{
+ struct vxd_dev *vxd = platform_get_drvdata(pdev);
+
+ video_unregister_device(vxd->vfd_dec);
+ v4l2_m2m_release(vxd->m2m_dev);
+ v4l2_device_unregister(&vxd->v4l2_dev);
+
+ core_deinitialise();
+
+ vxd_clean_fw_resources(vxd);
+ vxd_deinit(vxd);
+ idr_destroy(vxd->streams);
+ kfree(vxd->streams);
+ get_delayed_work_buff(&vxd->dwork, TRUE);
+ kfree(vxd->lock);
+ img_mem_exit();
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ kfree(vxd->dwork);
+ mutex_destroy(vxd->mutex);
+ mutex_destroy(vxd->mutex_queue);
+ kfree(vxd->mutex);
+ kfree(vxd->mutex_queue);
+ vxd->mutex = NULL;
+ vxd->mutex_queue = NULL;
+
+ return 0;
+}
+
+static int __maybe_unused vxd_dec_suspend(struct device *dev)
+{
+ int ret = 0;
+
+ ret = vxd_suspend_dev(dev);
+ if (ret)
+ dev_err(dev, "failed to suspend core hw!\n");
+
+ return ret;
+}
+
+static int __maybe_unused vxd_dec_resume(struct device *dev)
+{
+ int ret = 0;
+
+ ret = vxd_resume_dev(dev);
+ if (ret)
+ dev_err(dev, "failed to resume core hw!\n");
+
+ return ret;
+}
+
+static UNIVERSAL_DEV_PM_OPS(vxd_dec_pm_ops,
+ vxd_dec_suspend, vxd_dec_resume, NULL);
+
+static struct platform_driver vxd_dec_driver = {
+ .probe = vxd_dec_probe,
+ .remove = vxd_dec_remove,
+ .driver = {
+ .name = "img_dec",
+ .pm = &vxd_dec_pm_ops,
+ .of_match_table = vxd_dec_of_match,
+ },
+};
+module_platform_driver(vxd_dec_driver);
+
+MODULE_AUTHOR("Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com> Sidraya Jayagond <sidraya.bj@pathpartnertech.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IMG D5520 video decoder driver");
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_binaries/ALL_CODECS_FW_ALL_pipes_2_contexts_8_hwconfig_1_bin.c b/drivers/media/platform/vxe-vxd/encoder/fw_binaries/ALL_CODECS_FW_ALL_pipes_2_contexts_8_hwconfig_1_bin.c
new file mode 100644
index 000000000000..187fb6330276
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_binaries/ALL_CODECS_FW_ALL_pipes_2_contexts_8_hwconfig_1_bin.c
@@ -0,0 +1,29013 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Encoder FW binary file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Rewritten for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+/*
+ * Note that the order of the defines has to match the structure declaration!
+ */
+unsigned char *all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_define_names_array[] = {
+ "TOPAZHP_NUM_PIPES",
+ "TOPAZHP_MAX_BU_SUPPORT",
+ "MAX_REF_B_LEVELS_FW",
+ "SEI_INSERTION",
+ "TOPAZHP_MAX_NUM_STREAMS",
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_define_values_array[] = {
+ 2,
+ (TOPAZHP_MAX_BU_SUPPORT_HD),
+ 0,
+ 1,
+ 8,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_text[] = {
+ 0x9040c001,
+ 0xc80993fe,
+ 0xc0000e42,
+ 0xc8290e00,
+ 0xcc3e8426,
+ 0xc8298420,
+ 0xceea8622,
+ 0x9e838660,
+ 0xc8099e43,
+ 0xcdd40d46,
+ 0xc8090d40,
+ 0xcdd60946,
+ 0xc8090900,
+ 0xc00a0e42,
+ 0xc8090e40,
+ 0xc00e87c2,
+ 0x9c1887c0,
+ 0x0c020802,
+ 0x09820d82,
+ 0x09020d02,
+ 0x08820c82,
+ 0x9320fffe,
+ 0xa401c838,
+ 0x0dc6c809,
+ 0x0d80cdd4,
+ 0x0e42c809,
+ 0x0c66b080,
+ 0x0882a992,
+ 0x9ff3a48d,
+ 0x93e0ffff,
+ 0x80819d13,
+ 0xa205f839,
+ 0x03070707,
+ 0x9e970685,
+ 0xc8090383,
+ 0xcdd60ac6,
+ 0xc8090aa0,
+ 0xcdd61ac4,
+ 0x060f1a80,
+ 0x07fac101,
+ 0x018d058d,
+ 0x9c62008f,
+ 0x9320ffff,
+ 0xc101060b,
+ 0x9c6206da,
+ 0x9380ffff,
+ 0x018d058d,
+ 0x460cb700,
+ 0x4594b780,
+ 0xa6059c01,
+ 0xc8090687,
+ 0xcdd60ac6,
+ 0xc8090aa0,
+ 0xcdd61ac4,
+ 0x060b1a80,
+ 0x06dac101,
+ 0xffff9c62,
+ 0xf9f89380,
+ 0xf9f8aa9d,
+ 0x9c22aa1d,
+ 0x420cb700,
+ 0xc000587c,
+ 0xe0003800,
+ 0xc0003800,
+ 0x9c22901a,
+ 0x9c8fc127,
+ 0x080a9c22,
+ 0x9c81c017,
+ 0x9c80c071,
+ 0x9c80c017,
+ 0x0d849c22,
+ 0x9e5a5db0,
+ 0x4018b960,
+ 0x0900c021,
+ 0x0940c00e,
+ 0xaa45f031,
+ 0xf0009dad,
+ 0x0910a261,
+ 0x9341ffff,
+ 0xc3fe9e5c,
+ 0xc02129c0,
+ 0xc0010a00,
+ 0xc00e3988,
+ 0x9dcd0a30,
+ 0xa1e1f000,
+ 0xc0219c22,
+ 0xd1100d80,
+ 0x9d3d05b7,
+ 0x2244aa61,
+ 0xffff7115,
+ 0x9c229384,
+ 0xd011a605,
+ 0xc3fe0eb2,
+ 0xc28029c0,
+ 0x020b5ab0,
+ 0x0a00c021,
+ 0x398cc001,
+ 0xc00e0685,
+ 0x9dcd0a30,
+ 0xa1e1f000,
+ 0xc00e9eab,
+ 0x0d020992,
+ 0x0902cff0,
+ 0x0a80c021,
+ 0x9bdbfff4,
+ 0x0ac0c00e,
+ 0x4018b960,
+ 0xaa619d5d,
+ 0xa225f231,
+ 0xffff0a90,
+ 0xb79f9361,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xfffd9c22,
+ 0xf0129040,
+ 0x9e582d36,
+ 0xc0009e5a,
+ 0xc18093a4,
+ 0x9e515a0b,
+ 0xd2247500,
+ 0x9e825988,
+ 0x9c83c810,
+ 0x0106c101,
+ 0x4418b313,
+ 0x9080c000,
+ 0xa0c5f031,
+ 0x93c1ffff,
+ 0x11b6c101,
+ 0x90c0c000,
+ 0xa146d029,
+ 0x9120c000,
+ 0x9e540d02,
+ 0xb33474c0,
+ 0xc8104436,
+ 0xffff9c83,
+ 0x9c2292a1,
+ 0xa285f839,
+ 0xc00272d7,
+ 0x70d79022,
+ 0xc0009e59,
+ 0x0a8690a6,
+ 0x9100c000,
+ 0xd0101d04,
+ 0xc10104b4,
+ 0x0aff01b4,
+ 0xc0017c46,
+ 0xf0139124,
+ 0xc0012936,
+ 0xd12290a4,
+ 0x9e885e0b,
+ 0x5808c200,
+ 0x9e99610b,
+ 0x00947500,
+ 0xc81001b4,
+ 0xc2809c83,
+ 0xb3235908,
+ 0xc0004c18,
+ 0x9e9590e0,
+ 0xaa29e059,
+ 0xa209e059,
+ 0x9361ffff,
+ 0x1520c101,
+ 0x9100c000,
+ 0xa966c059,
+ 0xa126c059,
+ 0x9100c000,
+ 0x0a029e52,
+ 0x7088d012,
+ 0x9c83c810,
+ 0x9281ffff,
+ 0xf9f89e58,
+ 0x9c22aa9d,
+ 0x59300904,
+ 0x9d299da9,
+ 0x9e919e90,
+ 0x76c00005,
+ 0x8700c021,
+ 0x0da2c020,
+ 0x8500c021,
+ 0x0800c021,
+ 0x0c00c021,
+ 0x0c80c021,
+ 0x1db0d021,
+ 0x08820d08,
+ 0x8730c00e,
+ 0x8540c00e,
+ 0x0850c00e,
+ 0x0c60c00e,
+ 0x0cf0c00e,
+ 0x92a0c001,
+ 0xaa41d9d0,
+ 0xa95dd990,
+ 0x5a40c200,
+ 0x9e2e3244,
+ 0xa261f000,
+ 0xaa49d9d0,
+ 0xa945d9d0,
+ 0x5a40c200,
+ 0x9d8d3244,
+ 0xa261f000,
+ 0xaa51d9d0,
+ 0xa94dd9d0,
+ 0x5a40c200,
+ 0x9d8e3244,
+ 0xa261f000,
+ 0xaa59d9d0,
+ 0xa955d9d0,
+ 0x5a40c200,
+ 0x9d9e3244,
+ 0xa261f000,
+ 0x5e10d1a2,
+ 0xc3fe08a0,
+ 0xc0012a40,
+ 0x83853a08,
+ 0xa261f000,
+ 0x70460d84,
+ 0xfffe0d40,
+ 0x9c229166,
+ 0x8420a61d,
+ 0x0b820307,
+ 0xc001a19a,
+ 0xf04892a0,
+ 0xa91aaac6,
+ 0x9e6b7740,
+ 0xc001672b,
+ 0xf0489162,
+ 0x05d6a9ce,
+ 0xc3b41d84,
+ 0xc1019924,
+ 0xf208628b,
+ 0x018ba34a,
+ 0xc3b4058d,
+ 0x628b991c,
+ 0x6979d031,
+ 0x16ebd110,
+ 0xa041f208,
+ 0xa2c5f208,
+ 0x430cb780,
+ 0x0679d110,
+ 0xaa09f248,
+ 0xf2086009,
+ 0xb780a041,
+ 0x0128430c,
+ 0xa945f008,
+ 0x000a6005,
+ 0xa049f208,
+ 0x0b300b84,
+ 0x430cb780,
+ 0x5b90d3a4,
+ 0x0579d110,
+ 0xaa15f288,
+ 0xfffe71c8,
+ 0xb79f9086,
+ 0xb7bf7dee,
+ 0xb7df7e6e,
+ 0xb7ff7eee,
+ 0xc0027f6e,
+ 0x9c228c20,
+ 0xb7a0a60d,
+ 0x9e5e430c,
+ 0xf2489e9e,
+ 0xf248a9ae,
+ 0xd120aaa5,
+ 0x018b01d7,
+ 0xc3b41d84,
+ 0x9e8598da,
+ 0xc101636b,
+ 0x9eb366db,
+ 0xf2109e6b,
+ 0xc3b4a349,
+ 0xd13298d0,
+ 0xf210628b,
+ 0xc101a041,
+ 0xf210136a,
+ 0xb780a345,
+ 0xf248430c,
+ 0x6009aa09,
+ 0xa041f210,
+ 0x430cb780,
+ 0xaa05f208,
+ 0x000c6009,
+ 0xa049f210,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x5db00d84,
+ 0xc0219e5c,
+ 0xc0100a00,
+ 0x9dcd0a00,
+ 0xa162f000,
+ 0x592809bc,
+ 0xcff05990,
+ 0xc00f2980,
+ 0xc021297c,
+ 0x31260d80,
+ 0x0d80c00e,
+ 0xf0009dbe,
+ 0x9c22a161,
+ 0x5db00d84,
+ 0x0992c00e,
+ 0xcff00d02,
+ 0xfff20902,
+ 0xaa1d91c0,
+ 0x09bc0405,
+ 0xd0117500,
+ 0xd0120e32,
+ 0xd0a2299e,
+ 0xc0015cc0,
+ 0xd2249004,
+ 0x9e545930,
+ 0x0a00c021,
+ 0x0a00c010,
+ 0xf0009dcd,
+ 0xc100a062,
+ 0x9e525a18,
+ 0x2a00cffc,
+ 0x0900c021,
+ 0x0930c010,
+ 0xf0009dad,
+ 0xc180a261,
+ 0xcff05a10,
+ 0xc0112a00,
+ 0xc0003a00,
+ 0xd22492e0,
+ 0x9e545930,
+ 0x0a00c021,
+ 0x0a10c010,
+ 0xf0009dcd,
+ 0x5958a062,
+ 0x9dcd0a20,
+ 0xa161f000,
+ 0x5a10c180,
+ 0x2a00cff0,
+ 0x3a00c031,
+ 0xc00f9e5a,
+ 0x324428fc,
+ 0x0d00c021,
+ 0xc00e3242,
+ 0x9dae0d00,
+ 0xa261f000,
+ 0xa61d9c22,
+ 0x8400c00a,
+ 0x430cb7c0,
+ 0x0802c004,
+ 0xb55fa011,
+ 0x74807dec,
+ 0xa945f208,
+ 0x7decb79f,
+ 0x7d74b57f,
+ 0x5b99c100,
+ 0x4422b340,
+ 0x5a18c380,
+ 0xa0117104,
+ 0x7c74b55f,
+ 0x7cecb53f,
+ 0xf248000d,
+ 0xf248a94d,
+ 0x9e4dab46,
+ 0xb7809e9f,
+ 0xb7a040cd,
+ 0xd0125eb4,
+ 0xc006136c,
+ 0xd0209244,
+ 0x9e7311a8,
+ 0x9811c3b4,
+ 0x701b9e6c,
+ 0x4434b304,
+ 0xb347711f,
+ 0xf2084454,
+ 0xf208a9c1,
+ 0x657daa29,
+ 0xb740008d,
+ 0xd0f2572b,
+ 0xd0200eae,
+ 0xb77f0138,
+ 0x74887468,
+ 0x2e81cffc,
+ 0x293ed3f1,
+ 0x9172c000,
+ 0x9e93a892,
+ 0x0a029e6b,
+ 0x0892010f,
+ 0xc000a21d,
+ 0xa8929120,
+ 0x9e6b0a02,
+ 0xa21d9e93,
+ 0x0896010f,
+ 0x9b57fff4,
+ 0x7468b79f,
+ 0x29ced3f2,
+ 0x9b48fff4,
+ 0xaa25f208,
+ 0x0659d110,
+ 0xa225f208,
+ 0xa94df248,
+ 0xc0007104,
+ 0x0a029186,
+ 0xa225f208,
+ 0xa949f248,
+ 0xaa21f208,
+ 0xf2080244,
+ 0xb740a221,
+ 0xf208430c,
+ 0xf208aa21,
+ 0xf008a926,
+ 0x6129a945,
+ 0x0124c101,
+ 0xa129f208,
+ 0x4314b700,
+ 0x574ab780,
+ 0xc0007506,
+ 0x750a90a2,
+ 0x9144c000,
+ 0xaa11a911,
+ 0x0a08090c,
+ 0x00899e91,
+ 0x9100c000,
+ 0xa911aa11,
+ 0x090c0a08,
+ 0x00859ea1,
+ 0x7decb71f,
+ 0x5dccb780,
+ 0x7c6cb77f,
+ 0xa8110100,
+ 0x6679d131,
+ 0xcffe9e83,
+ 0xcffe28fc,
+ 0xd0202cfc,
+ 0xd0200334,
+ 0xd0101110,
+ 0xda101516,
+ 0x5d18a241,
+ 0xcffe5930,
+ 0xc2002a7c,
+ 0xc1015a48,
+ 0xd1100524,
+ 0xb79f0729,
+ 0x71887cec,
+ 0x9322c001,
+ 0x8c80f292,
+ 0x8c84f271,
+ 0x0c029e68,
+ 0x91a0c000,
+ 0x7c6ab77f,
+ 0xa1b1da31,
+ 0x7eb2b57f,
+ 0x7f32b55f,
+ 0xa231da29,
+ 0x9e44a111,
+ 0xa9c1da10,
+ 0xcffea911,
+ 0xd0202a7c,
+ 0x09100142,
+ 0x0242c101,
+ 0xcffe7217,
+ 0xcffe2a7c,
+ 0xcffe2d7c,
+ 0x0c10297c,
+ 0xc0200820,
+ 0xc0103a00,
+ 0xd0123d00,
+ 0xfffe19a6,
+ 0xb77f9366,
+ 0xb75f7df4,
+ 0xe2927d6c,
+ 0xfff48d00,
+ 0x000d99b3,
+ 0x746eb79f,
+ 0x74eeb7bf,
+ 0x756eb7df,
+ 0x75eeb7ff,
+ 0x8c00c00c,
+ 0xa61d9c22,
+ 0x8400c00c,
+ 0x430cb7c0,
+ 0x40d5b700,
+ 0x000d9e4f,
+ 0x5732b720,
+ 0xaa4df248,
+ 0xaac5f248,
+ 0x76400f86,
+ 0x79f4b57f,
+ 0x11c1d120,
+ 0xd002040f,
+ 0x9e9d0c72,
+ 0xb51f018b,
+ 0xb55f7874,
+ 0xb55f7d6c,
+ 0xb53f78f4,
+ 0xb51f796c,
+ 0xc3947b74,
+ 0x9eb09b14,
+ 0xa946f208,
+ 0x5eccb780,
+ 0xa941f208,
+ 0x7d6cb77f,
+ 0xb3047008,
+ 0x711b4434,
+ 0x4454b345,
+ 0x9e695d19,
+ 0x7bf4b5bf,
+ 0xaa69f208,
+ 0xb71f629b,
+ 0xd0326670,
+ 0x74c00ade,
+ 0xb55f0128,
+ 0xc0047af4,
+ 0xb3300802,
+ 0x9e924422,
+ 0x7a6cb55f,
+ 0xb75f2ec1,
+ 0xc00e7aec,
+ 0x9e6b2c7c,
+ 0x9e799e81,
+ 0x05810a02,
+ 0xa015a21d,
+ 0xfff4a012,
+ 0xa9929a54,
+ 0x5a8dc280,
+ 0x9a46fff4,
+ 0xaa49f248,
+ 0xc0017510,
+ 0xf2089114,
+ 0xf248aa61,
+ 0x0a20a951,
+ 0xc0017104,
+ 0xa8159008,
+ 0x4314b740,
+ 0x7a74b71f,
+ 0xf010020a,
+ 0x9ea6a946,
+ 0x7aecb75f,
+ 0xcffea992,
+ 0x9e792f7c,
+ 0x5d0c9e6b,
+ 0x05200a02,
+ 0xa21d048d,
+ 0x9a2bfff4,
+ 0xfff4a992,
+ 0xb5df9a1f,
+ 0xc0007df4,
+ 0xab1690a0,
+ 0x7df4b5df,
+ 0xaa65f208,
+ 0x0659d110,
+ 0xa265f208,
+ 0xa94df248,
+ 0xc0007104,
+ 0x0a029186,
+ 0xa265f208,
+ 0xa949f248,
+ 0xaa61f208,
+ 0xf2080244,
+ 0xb780a261,
+ 0xf208430c,
+ 0xf208a961,
+ 0xf208a966,
+ 0x6245aa05,
+ 0x0244c101,
+ 0xa269f208,
+ 0x4314b7e0,
+ 0xa971f210,
+ 0x5819d124,
+ 0xb51f9e44,
+ 0xc2007cf4,
+ 0x71045a18,
+ 0x90e2c000,
+ 0xb53f0882,
+ 0xc00a7c6c,
+ 0xf2509140,
+ 0xb77faa75,
+ 0xd0327bec,
+ 0xd1100af8,
+ 0x6239056b,
+ 0xa92af210,
+ 0xa9edf210,
+ 0xd0319e96,
+ 0xb75f0b4e,
+ 0xa9927cec,
+ 0x7b6cb73f,
+ 0xd1202b41,
+ 0xcffe0135,
+ 0x0a062f7c,
+ 0x048d018d,
+ 0xfff4a21d,
+ 0xf21099cc,
+ 0x0268aa25,
+ 0xa225f210,
+ 0xa97df250,
+ 0xc0007104,
+ 0x0a029186,
+ 0xa225f210,
+ 0xa979f250,
+ 0xaa21f210,
+ 0xf2100244,
+ 0xb720a221,
+ 0xf210430c,
+ 0xf210aa21,
+ 0xb740a926,
+ 0x6129422d,
+ 0x0124c101,
+ 0xa129f210,
+ 0x572bb780,
+ 0xc0007502,
+ 0xd0119144,
+ 0x040d0e62,
+ 0xcffe9ea1,
+ 0xc0022cfc,
+ 0x750491a0,
+ 0x9144c000,
+ 0x0e62d011,
+ 0x9ea0048d,
+ 0x2c7ccffe,
+ 0x9040c002,
+ 0x0af0d072,
+ 0x5a0dc300,
+ 0x0669d110,
+ 0x432db7a0,
+ 0xa929f210,
+ 0x0389a992,
+ 0xcffe02d4,
+ 0xfff42bfc,
+ 0xb75f9979,
+ 0xb73f7cec,
+ 0xa9927b6c,
+ 0x9eb99eaa,
+ 0x018d8506,
+ 0xfff4a11f,
+ 0xf2109976,
+ 0x0268aa25,
+ 0xa225f210,
+ 0xa97df250,
+ 0xc0007104,
+ 0x0a029186,
+ 0xa225f210,
+ 0xa979f250,
+ 0xaa21f210,
+ 0xf2100244,
+ 0xb740a221,
+ 0xf210430c,
+ 0xf210aa21,
+ 0xf008a926,
+ 0x9eb9a95d,
+ 0x6129040d,
+ 0x0124c101,
+ 0xa129f210,
+ 0x7d6cb71f,
+ 0x78ecb75f,
+ 0x430cb720,
+ 0x02a80200,
+ 0x5dadb720,
+ 0x7becb79f,
+ 0x7df4b77f,
+ 0xd01060c3,
+ 0xda081516,
+ 0x0203a0a1,
+ 0xcffea815,
+ 0xc2002a7c,
+ 0x9e595a48,
+ 0x12401506,
+ 0x797cb75f,
+ 0x02425d18,
+ 0x0244c101,
+ 0x02445930,
+ 0xb59f7135,
+ 0xc0007c6c,
+ 0xb71f9182,
+ 0xb740706a,
+ 0x0d824304,
+ 0x287ccffe,
+ 0xc0029d8d,
+ 0xa9929020,
+ 0x9914fff4,
+ 0x9320c003,
+ 0x7d6ab75f,
+ 0x0d34d012,
+ 0x0624d010,
+ 0xa121da10,
+ 0xa041da08,
+ 0xa949f040,
+ 0xcffe9d4d,
+ 0xe31328fc,
+ 0x74908d00,
+ 0xf010a095,
+ 0xc0008026,
+ 0xb71f9254,
+ 0xd0127b6a,
+ 0xda080d38,
+ 0xda10a001,
+ 0xcffea1c1,
+ 0xd01129fc,
+ 0xcffe0a32,
+ 0xb59f2a7c,
+ 0xd0107dec,
+ 0xd0110524,
+ 0x9d2d0e22,
+ 0xe3130248,
+ 0xe0108d00,
+ 0x9d4d8126,
+ 0xa3c1d808,
+ 0x8026f010,
+ 0x0da4d012,
+ 0xa382da08,
+ 0x0536d010,
+ 0x0e32d011,
+ 0xe3139d2d,
+ 0x02488d00,
+ 0x7decb75f,
+ 0x80a6f020,
+ 0xd0119d4d,
+ 0x09040e36,
+ 0xf0100248,
+ 0x01858126,
+ 0xda089d4d,
+ 0xd010a922,
+ 0xd0100607,
+ 0xa8150517,
+ 0x9e47a895,
+ 0x9ea00783,
+ 0x72c49e91,
+ 0xf0200804,
+ 0x08888126,
+ 0x3b80c010,
+ 0x3f80c020,
+ 0x2c7ccffe,
+ 0x2cfccffe,
+ 0x9306fffc,
+ 0xfff4a992,
+ 0xb77f98a5,
+ 0xda087d74,
+ 0xb75fa9a1,
+ 0xe31279ec,
+ 0xffd48d00,
+ 0xb71f9b9d,
+ 0xb79f7c6c,
+ 0xb7bf726e,
+ 0xb7df72ee,
+ 0xb7ff736e,
+ 0xc00e73ee,
+ 0x9c228c00,
+ 0xa6059c22,
+ 0x4c8cb7a0,
+ 0x6da2c146,
+ 0x06bbd110,
+ 0x008b099a,
+ 0x6031b76d,
+ 0xc3940d84,
+ 0x008b9961,
+ 0x60a9b78d,
+ 0x287cc00e,
+ 0x08027100,
+ 0x0802d001,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xb7409e5c,
+ 0xc1464c8c,
+ 0x02446a22,
+ 0x0902c021,
+ 0x430cb580,
+ 0x4490b560,
+ 0x0920c030,
+ 0xaa619d2d,
+ 0xc0015dc0,
+ 0x9e5b2dbc,
+ 0x2a42cfff,
+ 0x9dad3246,
+ 0xa261f000,
+ 0xb7209c22,
+ 0x9e58430c,
+ 0x4829b78a,
+ 0x71172208,
+ 0x90c4c000,
+ 0x48a9b78a,
+ 0x9c222008,
+ 0x4588b780,
+ 0x9c222008,
+ 0xb720a60d,
+ 0x0b06430c,
+ 0x4731b7aa,
+ 0x46a9b7aa,
+ 0x90e0c000,
+ 0x0a52d011,
+ 0xc00e0289,
+ 0xd2242afc,
+ 0x715b51d4,
+ 0x2dfcc00e,
+ 0x90e8c000,
+ 0x9bd9fff4,
+ 0xffff7400,
+ 0x000b9224,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x430cb720,
+ 0x4cadb782,
+ 0x5b2db743,
+ 0xb5823244,
+ 0xb7204cad,
+ 0xb782430c,
+ 0xc0104cad,
+ 0xb5822a04,
+ 0xb7204cad,
+ 0xb782430c,
+ 0xc0204cad,
+ 0xb3407d00,
+ 0x3a0448a2,
+ 0x4cadb582,
+ 0x0d849c22,
+ 0x9e5c5db0,
+ 0x0a00c021,
+ 0x0a00c010,
+ 0xf0009dcd,
+ 0x09bca162,
+ 0x59285990,
+ 0x2980cff0,
+ 0x297cc00f,
+ 0x3980c011,
+ 0x0d80c021,
+ 0xc00e31b4,
+ 0x9dbe0d80,
+ 0xa1e1f000,
+ 0xa61d9c22,
+ 0x8400c002,
+ 0x0abed031,
+ 0x2ac19e9f,
+ 0xa119a192,
+ 0xc001a116,
+ 0xc1fc9300,
+ 0xc0fc7562,
+ 0xb3560f02,
+ 0xa9164446,
+ 0x9e73a992,
+ 0xfff40902,
+ 0xa9929bce,
+ 0x068d0b82,
+ 0x9bcaffd4,
+ 0x010fab19,
+ 0x92e0c000,
+ 0xc0007dde,
+ 0xa9929144,
+ 0x8d00e092,
+ 0xffd4018f,
+ 0xe0919a1d,
+ 0x77c08d00,
+ 0xc0000b84,
+ 0xd02990e2,
+ 0x1f84aa45,
+ 0xa241d208,
+ 0x1e840b04,
+ 0xffff777f,
+ 0xa91a9124,
+ 0x12dcc101,
+ 0xa11a052c,
+ 0x052ca916,
+ 0x7540a116,
+ 0x9124fffe,
+ 0x7c6eb79f,
+ 0x7ceeb7bf,
+ 0x7d6eb7df,
+ 0x7deeb7ff,
+ 0x8c00c004,
+ 0xa61d9c22,
+ 0x8400c002,
+ 0x9e9f9e5c,
+ 0x5a30c200,
+ 0x0a00c021,
+ 0xc110a19a,
+ 0x09820a30,
+ 0xf0009dcd,
+ 0xd032a1e1,
+ 0x2ec10efe,
+ 0xa115a112,
+ 0x9180c002,
+ 0x7762c1fc,
+ 0x0f02c0fc,
+ 0x4846b356,
+ 0x9e760d02,
+ 0x7df4b55f,
+ 0x7d74b55f,
+ 0x7cf4b55f,
+ 0x7c74b55f,
+ 0xe0920b82,
+ 0x77c08d80,
+ 0x0534d010,
+ 0x9162c000,
+ 0x1f84a991,
+ 0xaa65d029,
+ 0xd008a191,
+ 0xc000a241,
+ 0xd0089080,
+ 0x0b84a3c2,
+ 0x2afef031,
+ 0xc0000d04,
+ 0xa99a9244,
+ 0x8d00e092,
+ 0x19f0d051,
+ 0x9987ffd4,
+ 0xb5bf9eaa,
+ 0xb5bf7dec,
+ 0xb5bf7d6c,
+ 0xb5bf7cec,
+ 0xe0007c6c,
+ 0xfffe1b04,
+ 0xa91692c4,
+ 0x9e73a99a,
+ 0xffd4010d,
+ 0xa99a9b23,
+ 0xffd416dc,
+ 0xa9969b39,
+ 0xa19605bc,
+ 0xfffd7740,
+ 0xb79f92a4,
+ 0xb7bf7c6e,
+ 0xb7df7cee,
+ 0xb7ff7d6e,
+ 0xc0047dee,
+ 0x9c228c00,
+ 0x430cb720,
+ 0x0992c144,
+ 0x5badb743,
+ 0x6c31b76d,
+ 0xfffc9e8a,
+ 0xf8399100,
+ 0x9e90a285,
+ 0xaa4df008,
+ 0x0caed091,
+ 0xa949f008,
+ 0x5815c080,
+ 0x0244c101,
+ 0x76400120,
+ 0x41ccb580,
+ 0x414cb540,
+ 0x9202c000,
+ 0x40ccb780,
+ 0x404cb740,
+ 0x4254b740,
+ 0x0a061128,
+ 0x5208c200,
+ 0x35229ea1,
+ 0x4254b540,
+ 0x5e30d1a2,
+ 0x08fec00f,
+ 0xcff19ea3,
+ 0xc0210d02,
+ 0xc0100d80,
+ 0xc1409c83,
+ 0xc00e0de0,
+ 0xcff008fc,
+ 0xc0000d00,
+ 0xf0299200,
+ 0x9e55aa65,
+ 0x2242214a,
+ 0xc2005921,
+ 0x32445a20,
+ 0x4006ba24,
+ 0xf0009dbe,
+ 0xffffa261,
+ 0xb7809241,
+ 0x0a04404c,
+ 0x404cb580,
+ 0xaa9df9f8,
+ 0xa6859c22,
+ 0x87029e91,
+ 0x85220685,
+ 0x0b7ec00e,
+ 0x882bf210,
+ 0xc2007640,
+ 0xc0025153,
+ 0xf0089182,
+ 0x9e69aa61,
+ 0x2a1ce000,
+ 0xc28022a2,
+ 0xb4235920,
+ 0xf0084442,
+ 0xd010aa61,
+ 0x2a1ca962,
+ 0x8821f310,
+ 0x1242c101,
+ 0x400aba0c,
+ 0x52411003,
+ 0x5921c200,
+ 0xc00e9e95,
+ 0xd0312a7c,
+ 0xc2000910,
+ 0x3454520b,
+ 0x2aced3f2,
+ 0x92eac000,
+ 0x5224c200,
+ 0x32429e51,
+ 0xa261d010,
+ 0xa961f008,
+ 0x2a2ed011,
+ 0x0619d110,
+ 0xd110750e,
+ 0xd0090615,
+ 0xf0080db2,
+ 0xc000a261,
+ 0xd0109240,
+ 0xf008a062,
+ 0xd110aa61,
+ 0x9e810619,
+ 0xf0080242,
+ 0xcffea261,
+ 0xd0322cfc,
+ 0xfffda164,
+ 0x9e589220,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c40,
+ 0x9e58a60d,
+ 0x9e569e9e,
+ 0x709b0ee2,
+ 0x923cc000,
+ 0x9eb39e6d,
+ 0x2afccffe,
+ 0x112a5575,
+ 0x9e739e83,
+ 0x297ccffe,
+ 0x2d7cc00e,
+ 0x9b8dfff4,
+ 0x1ea0010b,
+ 0xffff7771,
+ 0xb79f9184,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa60d9c22,
+ 0x9e589e56,
+ 0x09069e9e,
+ 0xc0000d02,
+ 0x13649080,
+ 0xd0110124,
+ 0x710c1a22,
+ 0x2eaed3f2,
+ 0xffff0d04,
+ 0x9e6d9306,
+ 0x9080c000,
+ 0x9b69fff4,
+ 0x0a52d011,
+ 0x9e837510,
+ 0x0d029e73,
+ 0x1aa00922,
+ 0x92d2ffff,
+ 0x294ed3f1,
+ 0xfff40d04,
+ 0xc0009b5a,
+ 0xd3f291e0,
+ 0x9e6c2ace,
+ 0x52d1c200,
+ 0x295ed3f2,
+ 0x9b4ffff4,
+ 0xc2009e6c,
+ 0x136a52b0,
+ 0xd0317750,
+ 0x9e831e50,
+ 0x09229e73,
+ 0x91b2ffff,
+ 0x296ed3f2,
+ 0xb79f9e6a,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0x92e0fff9,
+ 0x1d20f011,
+ 0xd01d9e9a,
+ 0xd00d0224,
+ 0xc0001a42,
+ 0x1205909a,
+ 0x9e531244,
+ 0xfffd9ea2,
+ 0x02079140,
+ 0xa1c1d010,
+ 0x9ea29e53,
+ 0xfff60c82,
+ 0xa6059240,
+ 0x02870d0a,
+ 0x9b9dfff4,
+ 0x430cb720,
+ 0x7ea9b785,
+ 0xc0007500,
+ 0x02039122,
+ 0x0a00c098,
+ 0xaa01d208,
+ 0x3a040248,
+ 0x9ea29e83,
+ 0xb79f018b,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x90a0fffc,
+ 0xb720a605,
+ 0xf011430c,
+ 0xc0020ea0,
+ 0xb7450a02,
+ 0xb3547ea9,
+ 0x02894424,
+ 0x9e9d7480,
+ 0x90a2c000,
+ 0xba2c0248,
+ 0x75404000,
+ 0x9116c000,
+ 0x0d02120b,
+ 0x4000ba2c,
+ 0x9060c000,
+ 0xfff40d06,
+ 0x9e839b66,
+ 0xd0129e6b,
+ 0xb79f1952,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x9360fffa,
+ 0x8460a60d,
+ 0x8c88e051,
+ 0xa2190a02,
+ 0xa211a215,
+ 0x7d6cb59f,
+ 0x402db580,
+ 0xa9e1f008,
+ 0x07059e5e,
+ 0xc004198c,
+ 0xd01274c4,
+ 0xc0020a92,
+ 0xd1a691b2,
+ 0x8520590c,
+ 0x8044e050,
+ 0x9120c002,
+ 0x91c0c002,
+ 0x9360c002,
+ 0x9140c003,
+ 0x91e0c003,
+ 0x9300c003,
+ 0x90c0c017,
+ 0x9280c016,
+ 0x93a0c015,
+ 0x9280c001,
+ 0x9120c011,
+ 0x9220c011,
+ 0x9320c011,
+ 0x9060c013,
+ 0x91a0c013,
+ 0x93c0c013,
+ 0x90c0c014,
+ 0x9340c011,
+ 0x9020c019,
+ 0x9280c018,
+ 0x9000c017,
+ 0x9180c017,
+ 0x9300c017,
+ 0x90e0c003,
+ 0x92a0c003,
+ 0x9360c006,
+ 0x9120c007,
+ 0x93c0c007,
+ 0x9200c00d,
+ 0x9180c000,
+ 0x9140c000,
+ 0x9240c018,
+ 0x9240c01a,
+ 0x9320c01a,
+ 0x92e0c01b,
+ 0xc01d0802,
+ 0xd0729000,
+ 0xe0711924,
+ 0x058b8d80,
+ 0x91a0c000,
+ 0x430cb780,
+ 0x8d80e071,
+ 0xc09c058b,
+ 0xd2080a1c,
+ 0xd120a902,
+ 0xfff41125,
+ 0xc01c9b2b,
+ 0xd3f19020,
+ 0xe0712aae,
+ 0x9eaa8d80,
+ 0x0916058b,
+ 0x9a55fff4,
+ 0x430cb780,
+ 0x0a1cc09c,
+ 0xa281d208,
+ 0x9200c01b,
+ 0x292ed3f2,
+ 0x8d80e071,
+ 0xc016058b,
+ 0xf01091a0,
+ 0x0d06aacd,
+ 0x8d80e071,
+ 0x9e524afd,
+ 0x2a9c058b,
+ 0x93e0c013,
+ 0xaacdf010,
+ 0x8d80e071,
+ 0x0d02058b,
+ 0xfff40906,
+ 0x4afd9a32,
+ 0x9e832a9c,
+ 0x8d80e071,
+ 0xc00e010b,
+ 0xc01a0d7e,
+ 0xb7809120,
+ 0x0d02430c,
+ 0x8d80e071,
+ 0x0a40c09a,
+ 0xaa01d208,
+ 0x090a058b,
+ 0xd0027500,
+ 0xc0190d22,
+ 0xb7209320,
+ 0x7640430c,
+ 0xb7840a82,
+ 0xd002752d,
+ 0x75000ad2,
+ 0x9204c000,
+ 0xc0007540,
+ 0xb74491a2,
+ 0xb78475ad,
+ 0xe0716fad,
+ 0x058b8d80,
+ 0x090a7104,
+ 0x9060c013,
+ 0x6fadb744,
+ 0x75adb784,
+ 0xc0027088,
+ 0x0d069004,
+ 0x8d80e071,
+ 0x058b9e52,
+ 0x99f1fff4,
+ 0xc0007540,
+ 0xb72093a2,
+ 0xb785430c,
+ 0x75007ea9,
+ 0x91c2c000,
+ 0x6cadb784,
+ 0xc0007502,
+ 0x9e839122,
+ 0x8d80e071,
+ 0x0d2ec002,
+ 0x90e0c000,
+ 0xe0719e83,
+ 0xc0028d80,
+ 0x09220d2a,
+ 0x99d3fff4,
+ 0xe0719e83,
+ 0x09168d80,
+ 0xfff40d1e,
+ 0xb78099cc,
+ 0x9e83430c,
+ 0x8d80e071,
+ 0x0a04c098,
+ 0xa902d208,
+ 0x9a43fff4,
+ 0x9e830d06,
+ 0x8d80e071,
+ 0xc0169e52,
+ 0x02039360,
+ 0x0a40c09a,
+ 0xaa01d208,
+ 0xc0167500,
+ 0xc00992a2,
+ 0xb7809180,
+ 0x0d02430c,
+ 0x8d80e071,
+ 0x0a14c098,
+ 0xaa01d208,
+ 0x0906058b,
+ 0xd0017500,
+ 0xc0160d22,
+ 0xb7809020,
+ 0xc098430c,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x9122c000,
+ 0x8d80e071,
+ 0x0d2a058b,
+ 0xc0150912,
+ 0x9ea29220,
+ 0x8d80e071,
+ 0x0906058b,
+ 0x9140c015,
+ 0x4314b720,
+ 0x6ccdb784,
+ 0xd0227504,
+ 0xc0011524,
+ 0x9e4c93a4,
+ 0xc0969e4a,
+ 0xd2080a54,
+ 0xb744aa01,
+ 0xc09875d5,
+ 0xc2000910,
+ 0xc1015a08,
+ 0xb76400c2,
+ 0xd008722d,
+ 0xd011a941,
+ 0x72990a32,
+ 0xd0020d02,
+ 0x74800d22,
+ 0x9042c001,
+ 0x7ec9b785,
+ 0xd0127500,
+ 0xc0001124,
+ 0x9e4c9244,
+ 0xc0960902,
+ 0xd2080a58,
+ 0xc200aa01,
+ 0xc1015a08,
+ 0xb72400c2,
+ 0x0884722d,
+ 0xd00270c2,
+ 0x9e4c0922,
+ 0xc0989e51,
+ 0xd2080a0c,
+ 0x3218aa01,
+ 0x3144d020,
+ 0x9e529e4c,
+ 0x0a08c098,
+ 0xaa01d208,
+ 0x3228e000,
+ 0x91a2fffd,
+ 0xe0710d06,
+ 0x9e528d80,
+ 0xfff4058b,
+ 0xb7209934,
+ 0x0203430c,
+ 0x0a08c098,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x9e839122,
+ 0x8d80e071,
+ 0x9a07fff4,
+ 0x92e0c000,
+ 0xc0960203,
+ 0xd2080a54,
+ 0xb744aa01,
+ 0x9e8375b5,
+ 0x5a08c200,
+ 0xb72400c2,
+ 0xe071722d,
+ 0xd1208d80,
+ 0xba121115,
+ 0xfff44001,
+ 0xb7209a0d,
+ 0x0203430c,
+ 0x0a10c098,
+ 0xaa01d208,
+ 0xc0037500,
+ 0x02039342,
+ 0x0a0cc098,
+ 0xaa01d208,
+ 0xc0027500,
+ 0x020392c4,
+ 0x0a08c098,
+ 0xaa01d208,
+ 0xc0960103,
+ 0x75000958,
+ 0x90c2c000,
+ 0xaa41d008,
+ 0x9260c002,
+ 0xc0960203,
+ 0xd2080a54,
+ 0xd008aa01,
+ 0x9e83a941,
+ 0x5a08c200,
+ 0x00c2d020,
+ 0x00a25908,
+ 0x722db724,
+ 0x7255b724,
+ 0x8d80e071,
+ 0x1113d120,
+ 0x90e0c002,
+ 0x430cb720,
+ 0xc0980203,
+ 0xd2080a0c,
+ 0x7500aa01,
+ 0x9284c000,
+ 0x6f2db784,
+ 0xc0027500,
+ 0x02039044,
+ 0x0a54c096,
+ 0xaa01d208,
+ 0x5a08c200,
+ 0xb78400c2,
+ 0x7500722d,
+ 0x92a4c001,
+ 0xe0710d06,
+ 0x9e528d80,
+ 0xfff4058b,
+ 0xb72098b2,
+ 0x0203430c,
+ 0x0a0cc098,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x9e839122,
+ 0x8d80e071,
+ 0x9985fff4,
+ 0x92e0c000,
+ 0xc0960203,
+ 0xd2080a58,
+ 0xc200aa01,
+ 0xb7445a08,
+ 0x00c275b5,
+ 0x722db724,
+ 0xe0719e83,
+ 0xd1208d80,
+ 0xba121115,
+ 0xfff44001,
+ 0x9e83998b,
+ 0x8d80e071,
+ 0xc0020d0e,
+ 0xe0719200,
+ 0x058b8d80,
+ 0xfff70d02,
+ 0xb72092c0,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x75b5b744,
+ 0xc0060916,
+ 0xb72091a0,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x75b5b744,
+ 0xc0060922,
+ 0xb7209060,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x75b5b744,
+ 0x5d21090a,
+ 0x9300c005,
+ 0x430cb720,
+ 0x67adb784,
+ 0xd01a7502,
+ 0xc0001248,
+ 0xb7619174,
+ 0x9ea34d2d,
+ 0x9811c374,
+ 0x0a027400,
+ 0x0a42d001,
+ 0x09c0f012,
+ 0x04850d0a,
+ 0x1ca2d001,
+ 0x76c09e4a,
+ 0x8d80e071,
+ 0x4842b332,
+ 0xc00b058b,
+ 0xb7209020,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x6d35b744,
+ 0xfff42d04,
+ 0xc00a98b8,
+ 0xb72092e0,
+ 0x7640430c,
+ 0xb7240d02,
+ 0xd001752d,
+ 0xe0710d22,
+ 0x00928d80,
+ 0x0522c101,
+ 0xc00e058b,
+ 0x091a2d7c,
+ 0x9040c00a,
+ 0x430cb720,
+ 0x8d80e071,
+ 0xb744058b,
+ 0x090a7535,
+ 0x91c0c003,
+ 0x430cb720,
+ 0x4d2db761,
+ 0x67b5b764,
+ 0xc00074c0,
+ 0x0a8690a4,
+ 0x9220c000,
+ 0xc0029e9a,
+ 0xc0000902,
+ 0xd0119100,
+ 0x01091a22,
+ 0xc00e0524,
+ 0x7680297c,
+ 0x9336ffff,
+ 0xc3540285,
+ 0x9e829bb8,
+ 0x8d80e071,
+ 0xc00e058b,
+ 0xd3f12d7c,
+ 0xc008295e,
+ 0xb7209220,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x75adb7a4,
+ 0xc03e090a,
+ 0xd2a42afc,
+ 0xffd45921,
+ 0x9e839be2,
+ 0x8d80e071,
+ 0x295ed3f2,
+ 0x91e0c000,
+ 0x8d80e071,
+ 0xc00e058b,
+ 0x09220d7e,
+ 0x9bd3ffd4,
+ 0xe0719e83,
+ 0xc00e8d80,
+ 0x09220d7e,
+ 0x9180c007,
+ 0xaa4df010,
+ 0xd0114a7d,
+ 0x754e2ace,
+ 0x90c2c007,
+ 0x8d80e071,
+ 0x0d02058b,
+ 0xffd40906,
+ 0x9e839bbc,
+ 0x8d80e071,
+ 0x0d02010b,
+ 0x92c0c006,
+ 0x5614b740,
+ 0x8d80e071,
+ 0x9e54058b,
+ 0xc2400922,
+ 0x0d04a946,
+ 0x2d7cc00e,
+ 0x9100c006,
+ 0x430cb720,
+ 0x8d80e071,
+ 0xb784058b,
+ 0x09066cad,
+ 0x0d027502,
+ 0x0d22d001,
+ 0x9340c005,
+ 0x430cb720,
+ 0x7ea9b785,
+ 0xd0017500,
+ 0xc0000a46,
+ 0xb78590e2,
+ 0x9e497f29,
+ 0x0a044242,
+ 0x294ed3f2,
+ 0x8d80e071,
+ 0x090a058b,
+ 0x9080c005,
+ 0x5614b740,
+ 0x8d80e071,
+ 0x9e52058b,
+ 0xa946c220,
+ 0xc0040916,
+ 0xb7209320,
+ 0xe071430c,
+ 0x058b8d80,
+ 0x7f31b745,
+ 0x45220906,
+ 0x91c0c004,
+ 0x430cb720,
+ 0x67adb780,
+ 0xc0007500,
+ 0xb78690c4,
+ 0xc00050ad,
+ 0xb7819380,
+ 0x75045dad,
+ 0x9164c000,
+ 0xb7860902,
+ 0xd0126bad,
+ 0xc8127088,
+ 0xb5407088,
+ 0xb72067ad,
+ 0xb781430c,
+ 0x75045dad,
+ 0x9122c000,
+ 0xb7460a02,
+ 0xd0126dad,
+ 0xb5807104,
+ 0xb7a067ad,
+ 0x0d82430c,
+ 0xb760008b,
+ 0xfa0867ad,
+ 0xc354a921,
+ 0x008b9a64,
+ 0x67b5b500,
+ 0x430cb720,
+ 0x8d80e071,
+ 0xb740058b,
+ 0xb72067b5,
+ 0x095e682d,
+ 0x1115d120,
+ 0x90e0c001,
+ 0x430cb720,
+ 0x8d80e071,
+ 0xb740058b,
+ 0xc00067b5,
+ 0xb78093a0,
+ 0xb720558c,
+ 0xc240430c,
+ 0x0103aa45,
+ 0x4ab5b746,
+ 0xc0c47500,
+ 0xc0000960,
+ 0xd00890c2,
+ 0xc000aa41,
+ 0xd00890c0,
+ 0xc101aa41,
+ 0xe0710244,
+ 0xb5868d80,
+ 0x058b4aad,
+ 0xffd4095e,
+ 0xc0019b63,
+ 0xb72090a0,
+ 0xb784430c,
+ 0x75066cad,
+ 0x1124d011,
+ 0x9282c000,
+ 0x67adb784,
+ 0xc0c40103,
+ 0x75000960,
+ 0x9142c000,
+ 0x4badb781,
+ 0xa941d008,
+ 0x61290a04,
+ 0x9080c000,
+ 0xa941d008,
+ 0x8d80e071,
+ 0x292ed3f2,
+ 0x091e058b,
+ 0x9adfffd4,
+ 0x7d6cb77f,
+ 0x9e729eb3,
+ 0x8d08e052,
+ 0x9bb1ffd4,
+ 0xb79f0806,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xc0027e6e,
+ 0x9c228c40,
+ 0xc002a61d,
+ 0x9e5f8440,
+ 0xabc6f031,
+ 0x0a72d011,
+ 0x5a30c200,
+ 0x0d829ea6,
+ 0x0a00c040,
+ 0x0f00c021,
+ 0xa191a215,
+ 0x7decb55f,
+ 0x7d74b53f,
+ 0xb57fa099,
+ 0x9e567c74,
+ 0x7a74b57f,
+ 0x7af4b57f,
+ 0x7b74b57f,
+ 0x7bf4b57f,
+ 0x0f50c040,
+ 0x0a02c801,
+ 0xf0009dee,
+ 0x0687a261,
+ 0xc002a19e,
+ 0xf2089200,
+ 0x028daa41,
+ 0x75000b10,
+ 0x9102c000,
+ 0xc0007504,
+ 0x750290a2,
+ 0x9284c000,
+ 0x09d8d011,
+ 0xa962d008,
+ 0x0c827504,
+ 0xe0d19ebb,
+ 0xd0028d00,
+ 0xffd40c92,
+ 0x58089a38,
+ 0x08060360,
+ 0x9200c000,
+ 0x7decb75f,
+ 0x7d74b73f,
+ 0xe0d29ebb,
+ 0x018b8d00,
+ 0x9ba0ffd4,
+ 0x1e72d011,
+ 0x03c0d020,
+ 0xa9a1f208,
+ 0xc00074c0,
+ 0xa19d90a4,
+ 0x91e0c000,
+ 0xc00074d8,
+ 0xa9199124,
+ 0x0e860d06,
+ 0xa142d808,
+ 0x9220c000,
+ 0xc0007740,
+ 0xa91d9144,
+ 0x02200e82,
+ 0x2a7cc00e,
+ 0xc000a21d,
+ 0xd11090c0,
+ 0xd3f20651,
+ 0xb79f2ace,
+ 0x711f7a6c,
+ 0x91e6fffd,
+ 0xc0007740,
+ 0xa91d9322,
+ 0x1e52d011,
+ 0xc1f05920,
+ 0x2a7c2900,
+ 0xa9113244,
+ 0x32445940,
+ 0xc021a915,
+ 0xc0080900,
+ 0x9dad0900,
+ 0xa261f000,
+ 0x0a06a91a,
+ 0xa241d810,
+ 0xd808a919,
+ 0x7500aa41,
+ 0x9284c000,
+ 0xb79fa915,
+ 0xc0217c6c,
+ 0x09700900,
+ 0x2a7ec7ff,
+ 0xf0009dad,
+ 0xb79fa261,
+ 0xd1107aec,
+ 0x9dee1679,
+ 0xa261f000,
+ 0x7becb71f,
+ 0x7b6eb79f,
+ 0x7beeb7bf,
+ 0x7c6eb7df,
+ 0x7ceeb7ff,
+ 0x8c40c004,
+ 0xa61d9c22,
+ 0xc1800d84,
+ 0x9e745f30,
+ 0xc0409e76,
+ 0x9ea50a00,
+ 0xc021078d,
+ 0xc0210b00,
+ 0x02870f80,
+ 0x0e80c021,
+ 0x0b82c008,
+ 0xc0400b40,
+ 0xc0000fd0,
+ 0xc03c93e0,
+ 0xc01e757e,
+ 0xb3540a7e,
+ 0xc2004434,
+ 0x12d85914,
+ 0x2900c3fe,
+ 0xf0009dfe,
+ 0x9d5ea161,
+ 0x9dedaa61,
+ 0xa3e1f000,
+ 0x9dde3a40,
+ 0xa261f000,
+ 0x09929eba,
+ 0x010f058d,
+ 0x9afbff94,
+ 0xffff7540,
+ 0xc0089044,
+ 0x9ded0a02,
+ 0xa261f000,
+ 0x7e6eb79f,
+ 0x7eeeb7bf,
+ 0x7f6eb7df,
+ 0x7feeb7ff,
+ 0x8c00c002,
+ 0xa6059c22,
+ 0x5db00d84,
+ 0xc0089e5d,
+ 0xc0210e82,
+ 0x0ac00a80,
+ 0xf0009ddd,
+ 0x9e5aa2e2,
+ 0x0900c021,
+ 0x0900c040,
+ 0xa9629d2d,
+ 0xd05174c0,
+ 0xc0003e20,
+ 0xd0d19082,
+ 0x9dad3e20,
+ 0xa261f000,
+ 0x09929e6a,
+ 0xff94050b,
+ 0x9ddd9ac6,
+ 0xa2e2f000,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xc008a61d,
+ 0xf2128420,
+ 0x9e5e8c88,
+ 0x03850705,
+ 0x9e6a9e9a,
+ 0xc0080a82,
+ 0xb5bf0982,
+ 0xffd45e6a,
+ 0x9eb39872,
+ 0x9eb99e72,
+ 0x8c80e231,
+ 0x050b018b,
+ 0x9ab6fff4,
+ 0x018b9eb3,
+ 0x9bb9fff4,
+ 0x75eeb79f,
+ 0x766eb7bf,
+ 0x76eeb7df,
+ 0x776eb7ff,
+ 0x8c20c00a,
+ 0xb7209c22,
+ 0x09824f0c,
+ 0xb9600c96,
+ 0x9e8a4038,
+ 0x01030203,
+ 0x0a39cfca,
+ 0x0929cfdc,
+ 0x0d09cfac,
+ 0x40a9b57f,
+ 0x4129b57f,
+ 0x4029b560,
+ 0x64a9b575,
+ 0xa082f208,
+ 0xa0c2f008,
+ 0xa1c1f010,
+ 0x08a0c146,
+ 0x90e1ffff,
+ 0x0c7f9e99,
+ 0x4608b560,
+ 0x91c0c000,
+ 0x430cb740,
+ 0x04a5d110,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xa012d208,
+ 0x2e1ed3f1,
+ 0x71170c84,
+ 0x5908d224,
+ 0x91d8ffff,
+ 0x588cb740,
+ 0xb9600a02,
+ 0x9ea24058,
+ 0xa106c220,
+ 0xffff0a04,
+ 0x9c229381,
+ 0x588cb720,
+ 0x09029e5b,
+ 0x0d34d012,
+ 0x4038b960,
+ 0xd2080222,
+ 0x7115aa09,
+ 0x90c4c000,
+ 0x297cc00e,
+ 0x90c0c000,
+ 0xffff0904,
+ 0x090292a1,
+ 0x588cb780,
+ 0x01280904,
+ 0x0a049ea2,
+ 0x11a8d000,
+ 0x90e0c000,
+ 0xaa41d008,
+ 0xa245d008,
+ 0xffff1904,
+ 0xd0119361,
+ 0xd0100a34,
+ 0x9c22a249,
+ 0xc912a61d,
+ 0xc9129cc4,
+ 0xb7a09cce,
+ 0xb7a05814,
+ 0xf010478c,
+ 0x0507a1ed,
+ 0x1fd6d032,
+ 0xd0510b86,
+ 0x0f020b50,
+ 0x167ad010,
+ 0x4a7d9e6a,
+ 0x9c83c810,
+ 0xaa5dd1e9,
+ 0xd0206a52,
+ 0xf01001ca,
+ 0x7500aa6d,
+ 0x93a4c000,
+ 0x0e20d051,
+ 0xa003f208,
+ 0xa3edf010,
+ 0x0e38d011,
+ 0xa803f208,
+ 0xaa61d050,
+ 0xa961f010,
+ 0x4508b580,
+ 0x9c629e94,
+ 0xa843f208,
+ 0xb5c09eaa,
+ 0xf2084510,
+ 0xfffea3ad,
+ 0x750292e0,
+ 0x91c4c000,
+ 0x0e20d051,
+ 0xa003f208,
+ 0x0e30d051,
+ 0xa803f208,
+ 0xaa61d050,
+ 0x9240c000,
+ 0x9181fffe,
+ 0x9200c003,
+ 0x0e20d051,
+ 0xa003f208,
+ 0x4b8cb780,
+ 0xa803f208,
+ 0x478cb780,
+ 0xaa01d248,
+ 0x4508b580,
+ 0x9120c003,
+ 0x0a02c021,
+ 0x9d4d0a10,
+ 0xdfe0aa61,
+ 0xc0007d00,
+ 0xc10090c2,
+ 0xc0010a82,
+ 0xb7609120,
+ 0x0b0a4688,
+ 0x74c0010d,
+ 0x0a02c021,
+ 0x0a00c030,
+ 0x1962d001,
+ 0xa9629d4d,
+ 0x9e512d71,
+ 0x3122290c,
+ 0xf0009dcd,
+ 0xc100a161,
+ 0xc017080a,
+ 0x74c09c81,
+ 0x90a4c000,
+ 0xff949e9b,
+ 0xc171996d,
+ 0xc8179c81,
+ 0x9eb39c84,
+ 0x2afccffe,
+ 0x9964ff94,
+ 0xff940d8a,
+ 0xc0219961,
+ 0xc0300902,
+ 0x9d2d0900,
+ 0x3a0caa61,
+ 0xf0009dad,
+ 0xb740a261,
+ 0x018b4a8c,
+ 0x2954d012,
+ 0x2980c100,
+ 0xb9600d86,
+ 0xf0084050,
+ 0x7506aa41,
+ 0x90a4c000,
+ 0xc00074c0,
+ 0x750490c0,
+ 0x90a4c000,
+ 0xb41a7680,
+ 0x19504a24,
+ 0x9221ffff,
+ 0x9160c000,
+ 0x478cb780,
+ 0xaa0df208,
+ 0xfffc7502,
+ 0xfffc93a4,
+ 0xb79f9160,
+ 0xb7bf7e6e,
+ 0xb7df7eee,
+ 0xb7ff7f6e,
+ 0xc0027fee,
+ 0x9c228c00,
+ 0xb780a605,
+ 0x9e5b4508,
+ 0x4794b760,
+ 0xb7a06a52,
+ 0xd1204488,
+ 0xfff401c7,
+ 0x9eab9b23,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0xffb38c40,
+ 0xa60592e0,
+ 0x9080c000,
+ 0x9be8fff4,
+ 0x430cb720,
+ 0x47a9b7aa,
+ 0xffb49eab,
+ 0x700a9a86,
+ 0xffff0d92,
+ 0xb79f92a2,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x9180ffb4,
+ 0xc002a61d,
+ 0x9e5e8400,
+ 0xa116a199,
+ 0xc000a111,
+ 0xfff49080,
+ 0xb7809bcb,
+ 0x0d924408,
+ 0x7d3ec01c,
+ 0x9324ffff,
+ 0x4408b780,
+ 0x0d02c021,
+ 0x0d10c010,
+ 0xb5800a04,
+ 0x9d2e4408,
+ 0xc021ab62,
+ 0xc0100902,
+ 0xd0720920,
+ 0x9d2d2eee,
+ 0x9e55aa61,
+ 0x29ced071,
+ 0xc0000385,
+ 0xfff49180,
+ 0x9d5d9ba9,
+ 0xd072ab62,
+ 0x9d7d2eee,
+ 0xd071aa61,
+ 0xd01229ce,
+ 0xd0710fd2,
+ 0x0d8a2e7e,
+ 0xffff7106,
+ 0x0a029202,
+ 0x7c6cb59f,
+ 0x7cecb59f,
+ 0x7d6cb59f,
+ 0x7decb59f,
+ 0xb9600389,
+ 0xaa154078,
+ 0xe0939d7d,
+ 0xc2708d00,
+ 0xf010a905,
+ 0x0b848026,
+ 0xa101d208,
+ 0x92a1ffff,
+ 0x9eb39dcb,
+ 0xff940982,
+ 0x9eb398aa,
+ 0x8d00e092,
+ 0xff940982,
+ 0xaa9198cd,
+ 0x4c8cb780,
+ 0x5e88c280,
+ 0x6aa2c146,
+ 0x02d89eb3,
+ 0x0aa0c050,
+ 0xa922f208,
+ 0x7decb79f,
+ 0xe050018f,
+ 0x0902a94a,
+ 0xff94a21d,
+ 0x9eb39a3d,
+ 0x2f01cffe,
+ 0x9a52ff94,
+ 0x9eb3aa1d,
+ 0x8d00e092,
+ 0x09824a7d,
+ 0x7decb59f,
+ 0x987fff94,
+ 0xaaa1f208,
+ 0x9eb39e6c,
+ 0xa926e240,
+ 0x0902018f,
+ 0x9a3dffb4,
+ 0xff949eb3,
+ 0xc0049a3b,
+ 0x0a0277c0,
+ 0x4824b374,
+ 0x2a7c9e72,
+ 0xc0213244,
+ 0xc0100902,
+ 0x9dad0910,
+ 0xa261f000,
+ 0xe0929eb3,
+ 0x09828d00,
+ 0x9886ff94,
+ 0xb79fa91f,
+ 0x71357dec,
+ 0x92c4ffff,
+ 0x4408b780,
+ 0x1a04a919,
+ 0xb5807480,
+ 0xc0004408,
+ 0xb7809164,
+ 0xc0a4430c,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x9102c000,
+ 0xff940d8e,
+ 0x0d8a9837,
+ 0x9834ff94,
+ 0x7c6eb79f,
+ 0x7ceeb7bf,
+ 0x7d6eb7df,
+ 0x7deeb7ff,
+ 0x8c00c004,
+ 0xf8399c22,
+ 0x8440a205,
+ 0x9e9b9e5a,
+ 0x5a20c100,
+ 0xc7f09e53,
+ 0xe0522a00,
+ 0xc0018d00,
+ 0xa2113a04,
+ 0xfff4a096,
+ 0xf9f89b1f,
+ 0x9c22aa15,
+ 0xa205f839,
+ 0x0a028440,
+ 0x0a20c146,
+ 0x4c8cb720,
+ 0x00c26229,
+ 0x5aadb783,
+ 0x5ca0c180,
+ 0x5b35b703,
+ 0x2a7cc006,
+ 0xb7039ea3,
+ 0xc7f05bad,
+ 0xb7232c80,
+ 0x34965c2d,
+ 0x9e539e9b,
+ 0x8d00e052,
+ 0xa019a016,
+ 0xa092a09d,
+ 0x9af8fff4,
+ 0xaa15f9f8,
+ 0xa6059c22,
+ 0x0d80c021,
+ 0x06b7d110,
+ 0xc0000685,
+ 0xb78091c0,
+ 0x0a044688,
+ 0x4688b580,
+ 0x9abafff4,
+ 0x4688b780,
+ 0xb5801a04,
+ 0x9d5d4688,
+ 0x791baa61,
+ 0xffff0d8e,
+ 0xb79f9202,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa6059c22,
+ 0xc4019e5c,
+ 0xc0210902,
+ 0x9e5d0a00,
+ 0x0900c008,
+ 0x9dcd0a20,
+ 0xa161f000,
+ 0xc0080992,
+ 0xc0210d02,
+ 0xfff40a80,
+ 0x0ac09bce,
+ 0x0a02c008,
+ 0xf0009ddd,
+ 0xb79fa261,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa61d9c22,
+ 0xb7208440,
+ 0xd132430c,
+ 0x06876f31,
+ 0x46b1b76a,
+ 0x04e3d110,
+ 0x05d60b02,
+ 0x4294b560,
+ 0x4c2bb5c1,
+ 0x430cb780,
+ 0xd0120c06,
+ 0xd1100d32,
+ 0x5d300669,
+ 0x0a48c012,
+ 0xa301da08,
+ 0x430cb720,
+ 0xd1109e52,
+ 0xc02104e3,
+ 0xb5c00900,
+ 0xb78066ad,
+ 0xc01a430c,
+ 0xd1100920,
+ 0xc0120669,
+ 0xf2080a60,
+ 0xb720a301,
+ 0xd110430c,
+ 0xb50004e3,
+ 0xb78065b5,
+ 0xd110430c,
+ 0xc0120669,
+ 0xd2080a20,
+ 0x9dada301,
+ 0xa361f000,
+ 0xc0089e54,
+ 0xc0210902,
+ 0x0a400a00,
+ 0xf0009dcd,
+ 0xb7a0a161,
+ 0x9e51430c,
+ 0x0d00c040,
+ 0x046ac101,
+ 0x0d80d3f1,
+ 0x604cb780,
+ 0xa965f008,
+ 0x0880c021,
+ 0x08c0c046,
+ 0x9d9d0244,
+ 0xa261f000,
+ 0x0d00c021,
+ 0xf0009dae,
+ 0xb702a361,
+ 0xd0105450,
+ 0xe0530636,
+ 0xc1018d00,
+ 0xc2000240,
+ 0x9d4d5a08,
+ 0xf010008b,
+ 0x050d81a6,
+ 0x5cadb700,
+ 0xa9e1f008,
+ 0xa8e1f208,
+ 0x00ddd120,
+ 0x0d40c012,
+ 0x0155d120,
+ 0xc0109e42,
+ 0x9e840c90,
+ 0xb7209c62,
+ 0x7740430c,
+ 0x04e3d110,
+ 0x67adb5c0,
+ 0x4314b720,
+ 0x04e2d010,
+ 0x714db780,
+ 0x5f2db720,
+ 0xb5800242,
+ 0xc000714d,
+ 0x9eb790e4,
+ 0xa061f208,
+ 0x9240c008,
+ 0x4294b760,
+ 0xd0110d7f,
+ 0xc2000e32,
+ 0x01095a30,
+ 0x0900c021,
+ 0x0940c00e,
+ 0xf0009dad,
+ 0x0910a162,
+ 0xf0009dad,
+ 0x0910a361,
+ 0xf0009dad,
+ 0xc021a361,
+ 0xc00e0a00,
+ 0x9dcd0a70,
+ 0xa361f000,
+ 0x430cb720,
+ 0x2dfcc00e,
+ 0xd110010d,
+ 0x09c204e3,
+ 0x5fadb720,
+ 0x1910d052,
+ 0x7d6eb79f,
+ 0x7deeb7bf,
+ 0x7e6eb7df,
+ 0x7eeeb7ff,
+ 0x8c40c002,
+ 0x9280ff84,
+ 0x4729b7ea,
+ 0xc380070f,
+ 0x02895a30,
+ 0x0a80c021,
+ 0x0a80c116,
+ 0x9100c002,
+ 0x5f4db780,
+ 0xc0027500,
+ 0xb7809002,
+ 0x750067cd,
+ 0x9162c000,
+ 0xb5e09eab,
+ 0xcfdf67d5,
+ 0xcfea0dfc,
+ 0xfff40d80,
+ 0xb7409af0,
+ 0xd0914314,
+ 0xc1016e59,
+ 0xb74200e4,
+ 0x02445429,
+ 0x74800248,
+ 0x00c5d120,
+ 0x91e2c000,
+ 0x4c4bb781,
+ 0x0902c201,
+ 0x0900c004,
+ 0x5a40c200,
+ 0x2a7cc00f,
+ 0xc0003244,
+ 0xb7819140,
+ 0xc2004c2b,
+ 0xc00f5a40,
+ 0xc2012a7c,
+ 0xb5803a00,
+ 0xb78066ad,
+ 0xd131430c,
+ 0x00986cd1,
+ 0x66adb720,
+ 0xf0009ddd,
+ 0x0f04a0e1,
+ 0x0a81cf00,
+ 0x430cb720,
+ 0x117cc101,
+ 0x46a9b78a,
+ 0x12a8d020,
+ 0xd1317088,
+ 0xd0206f51,
+ 0xfffd00e2,
+ 0xb7ca91b6,
+ 0xc0034729,
+ 0xd0209200,
+ 0xd1321368,
+ 0xd0916ee1,
+ 0xd0106d69,
+ 0x9ea806d2,
+ 0x5f4cb780,
+ 0xc0037500,
+ 0xb7229022,
+ 0xb7605448,
+ 0x00a25e4d,
+ 0xc1010092,
+ 0xb7610092,
+ 0xc3344c33,
+ 0x008b98c4,
+ 0x67adb500,
+ 0x430cb720,
+ 0x04d3d110,
+ 0x0910d3f1,
+ 0xaa41f008,
+ 0x67adb720,
+ 0xf0081242,
+ 0xb720a241,
+ 0xd0104314,
+ 0xb78004d2,
+ 0xb720714d,
+ 0x124267ad,
+ 0x714db580,
+ 0x430cb780,
+ 0x0659d110,
+ 0xc0120089,
+ 0xb78008a0,
+ 0x75004029,
+ 0x90c2c000,
+ 0x4031b5e0,
+ 0x90a0c000,
+ 0xb5400906,
+ 0xb7004029,
+ 0xd1324314,
+ 0xd0106d61,
+ 0xb76004a0,
+ 0x74c05f2d,
+ 0x9382c000,
+ 0x5429b742,
+ 0xe053026c,
+ 0x02448d00,
+ 0x5a08c200,
+ 0xb7009d4d,
+ 0xf0105ccc,
+ 0x048480a6,
+ 0xa8a1f208,
+ 0x0d40c012,
+ 0xc0109eb3,
+ 0x05040c90,
+ 0x9c629e84,
+ 0xa021f208,
+ 0xb7201b04,
+ 0xb78a4314,
+ 0x718846c9,
+ 0x91b6fffc,
+ 0x430cb720,
+ 0x712db740,
+ 0xfff97480,
+ 0xb78a9024,
+ 0x028546a9,
+ 0x9220c000,
+ 0x67adb780,
+ 0xc0007500,
+ 0xd0129122,
+ 0xb5a009a2,
+ 0x5db067ad,
+ 0x9a1dfff4,
+ 0x428cb780,
+ 0xb7200a04,
+ 0xb580430c,
+ 0xb740428c,
+ 0xb78a428c,
+ 0xb74a4729,
+ 0x708846b1,
+ 0x2e2ed3f1,
+ 0xc0041228,
+ 0x00c26a22,
+ 0x9014ffff,
+ 0x2e2ed3f1,
+ 0x0f8ec001,
+ 0xc0020b02,
+ 0xc0010bc2,
+ 0xce000f0a,
+ 0xb5800f80,
+ 0xc005428c,
+ 0xb78a93a0,
+ 0x122846a9,
+ 0x6ac1d132,
+ 0x04d3d110,
+ 0x65adb780,
+ 0xc0057500,
+ 0xd0129182,
+ 0xb5c009a2,
+ 0x5db067ad,
+ 0xc0219e5c,
+ 0xc0400a00,
+ 0x9dcd0a00,
+ 0xa3e1f000,
+ 0x99ddfff4,
+ 0x428cb7a0,
+ 0x0952d011,
+ 0x02055930,
+ 0x0a00c021,
+ 0x0a20c040,
+ 0xaa619d4d,
+ 0x430cb720,
+ 0x9e929e7b,
+ 0x04d3d110,
+ 0x0d00c021,
+ 0x65adb580,
+ 0x430cb720,
+ 0x0d40c046,
+ 0x04d3d110,
+ 0x65adb780,
+ 0xc2002246,
+ 0xb5805a35,
+ 0x9d2e672d,
+ 0xb720a962,
+ 0x9e91430c,
+ 0x04d3d110,
+ 0x0c80c021,
+ 0x65b5b540,
+ 0x430cb720,
+ 0x0cc0c00e,
+ 0x04d3d110,
+ 0xb7409e8b,
+ 0xc01065b5,
+ 0xf0100d80,
+ 0xb760aa61,
+ 0xb720672d,
+ 0xc1015fad,
+ 0x12460244,
+ 0xf0101242,
+ 0xb720a261,
+ 0xd110430c,
+ 0xb78004d3,
+ 0x9d9e602d,
+ 0xa261f000,
+ 0xc0210205,
+ 0xc00e0a00,
+ 0x9dcd0a50,
+ 0xa361f000,
+ 0x9dcd0a10,
+ 0xa361f000,
+ 0x9dcd0a10,
+ 0xa361f000,
+ 0x0900c021,
+ 0x0930c00e,
+ 0xf0009dad,
+ 0xb720a362,
+ 0xc00e5fad,
+ 0x9eab2afc,
+ 0x1910d052,
+ 0x010d09c2,
+ 0x9af2ff74,
+ 0xff749eab,
+ 0xb7809b09,
+ 0xd110430c,
+ 0xc0100659,
+ 0xf2080a00,
+ 0xb780a301,
+ 0xd110430c,
+ 0xc00e0659,
+ 0xf2080a60,
+ 0xb780a305,
+ 0xd110430c,
+ 0xc00e0659,
+ 0xf2080a60,
+ 0xb720a301,
+ 0xd110430c,
+ 0xb5c004d3,
+ 0xb72067ad,
+ 0xb5c0430c,
+ 0xb720712d,
+ 0xd110430c,
+ 0xb5c004d3,
+ 0xb78065ad,
+ 0x0a04428c,
+ 0x428cb580,
+ 0x430cb720,
+ 0x428cb740,
+ 0x4729b78a,
+ 0xfff97088,
+ 0xb79f93d4,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xb7ff7e6e,
+ 0xc0027eee,
+ 0x9c228c40,
+ 0xb720a605,
+ 0x9e5d430c,
+ 0x4829b78a,
+ 0x22d8d020,
+ 0xc0007346,
+ 0xc0009244,
+ 0xffd49080,
+ 0xb7209bc1,
+ 0x0d92430c,
+ 0x48a9b78a,
+ 0xffff7b59,
+ 0xc0009304,
+ 0xffd49380,
+ 0xb7209bb5,
+ 0xb740430c,
+ 0x0d924588,
+ 0x4829b78a,
+ 0x22444a7d,
+ 0xffff790a,
+ 0x322a9284,
+ 0x4588b580,
+ 0x4829b78a,
+ 0xb58a3258,
+ 0xb7204829,
+ 0xb78a430c,
+ 0x325848a9,
+ 0x0d82c021,
+ 0x48a9b58a,
+ 0x0da0c030,
+ 0xa9629d3e,
+ 0x4488b740,
+ 0x59080a3e,
+ 0x5208c200,
+ 0x51a8c200,
+ 0x4a7d21b8,
+ 0x25229ea1,
+ 0x35229e99,
+ 0xf0009dbe,
+ 0xb79fa162,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa6059c22,
+ 0x478cb780,
+ 0xb7400906,
+ 0xf2084c14,
+ 0xf010a10d,
+ 0x0a10a043,
+ 0xa803f208,
+ 0x4a8cb7a0,
+ 0xb5800a02,
+ 0xd3724508,
+ 0x010b1ad8,
+ 0xaa4df1e9,
+ 0xc000750c,
+ 0x0d9290e2,
+ 0x9b60ffd4,
+ 0x92e0ffff,
+ 0xffff709b,
+ 0xb78092a4,
+ 0xf2084c0c,
+ 0xb79fa803,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xb7409c22,
+ 0x0d064b0c,
+ 0x4058b960,
+ 0xaa41f008,
+ 0xb4127508,
+ 0x09504a22,
+ 0x9341ffff,
+ 0xb7209c22,
+ 0xb785430c,
+ 0x750044ab,
+ 0x9142c000,
+ 0x48a9b78a,
+ 0x42289e5a,
+ 0x48a9b58a,
+ 0x9300c000,
+ 0xb74a9e5c,
+ 0x4a7d4829,
+ 0x4000ba24,
+ 0xb54a2144,
+ 0xb7204829,
+ 0xb74a430c,
+ 0x224448a9,
+ 0x48a9b58a,
+ 0x4588b780,
+ 0x42289e5a,
+ 0x4588b580,
+ 0x0982c021,
+ 0x09a0c030,
+ 0xa9629d3d,
+ 0x4488b740,
+ 0x2e3ed031,
+ 0xc2005908,
+ 0x4a7d5208,
+ 0x25269ea3,
+ 0xf0009dbd,
+ 0xfffda162,
+ 0xf8399340,
+ 0xb720a205,
+ 0xb783430c,
+ 0xb58a5b2d,
+ 0xb72047a9,
+ 0xb783430c,
+ 0xc2005b2d,
+ 0xb58a5a21,
+ 0xb7204729,
+ 0xb786430c,
+ 0xb58a76ab,
+ 0xb72046a9,
+ 0xb78d430c,
+ 0xb58a6da9,
+ 0xb7204629,
+ 0xb76a430c,
+ 0xfff447b1,
+ 0xb7209b19,
+ 0x0982430c,
+ 0xb74a0dff,
+ 0xba1246b1,
+ 0xc0004001,
+ 0xb7899140,
+ 0x6a326fcd,
+ 0xc1380242,
+ 0xd2080a20,
+ 0xb720a192,
+ 0xc101430c,
+ 0xc2000234,
+ 0xd0205908,
+ 0xb74a00a2,
+ 0x09844729,
+ 0xffff7104,
+ 0xb76a919c,
+ 0xf9f847b1,
+ 0xfffcaa1d,
+ 0xa60d9000,
+ 0x8400c004,
+ 0xc0210705,
+ 0x03070d02,
+ 0xc0309e95,
+ 0x9d2e0d00,
+ 0x0205a961,
+ 0x2a70c03e,
+ 0xcfc00a10,
+ 0xc03e290d,
+ 0x32442a70,
+ 0xf0009dae,
+ 0xc021a261,
+ 0xc0300902,
+ 0x9d2d0920,
+ 0xc180aa61,
+ 0x9e515d60,
+ 0x2a7ec00f,
+ 0x9dad3242,
+ 0xa261f000,
+ 0x76d6097f,
+ 0xa121d210,
+ 0x90a4c002,
+ 0x2a3ed3f1,
+ 0xd1a4750c,
+ 0xc0105921,
+ 0xd2269312,
+ 0xb720590c,
+ 0x8520430c,
+ 0x8044e050,
+ 0x91c0c000,
+ 0x91a0c010,
+ 0x91c0c000,
+ 0x9340c000,
+ 0x9380c000,
+ 0x90a0c010,
+ 0x9380c000,
+ 0x4835b545,
+ 0x93e0c00f,
+ 0x0a060103,
+ 0x0970c0a2,
+ 0xa241d008,
+ 0x430cb780,
+ 0x0a74c0a2,
+ 0xa102d208,
+ 0x9220c00f,
+ 0x6735b544,
+ 0x91a0c00f,
+ 0x68b5b544,
+ 0x9120c00f,
+ 0x5e30d122,
+ 0x2d7cc03e,
+ 0x5d18d122,
+ 0x2a00cffd,
+ 0xb5813244,
+ 0xc00e482d,
+ 0x76d89380,
+ 0x9084c002,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0017500,
+ 0xd3f191a2,
+ 0xc01c2a3e,
+ 0xb59f753e,
+ 0xc0006268,
+ 0xc18091c2,
+ 0xc2005a21,
+ 0xb59f5a0c,
+ 0xb55f786c,
+ 0xb55f6368,
+ 0xc00062e8,
+ 0xd1a491e0,
+ 0x09025941,
+ 0x5a21c180,
+ 0x62e8b59f,
+ 0x6370b55f,
+ 0x6268b55f,
+ 0x786cb55f,
+ 0x430cb720,
+ 0x8d80e112,
+ 0x6cadb729,
+ 0x9c629e8c,
+ 0x90e0c00d,
+ 0x67a9b566,
+ 0x430cb720,
+ 0x5a21c180,
+ 0x5941c180,
+ 0x6829b586,
+ 0x430cb720,
+ 0x68a9b546,
+ 0x92e0c00c,
+ 0xc00476da,
+ 0xffd49124,
+ 0x9e839a1c,
+ 0x9eaa0a86,
+ 0x2dfcc00e,
+ 0xd21055cc,
+ 0xc00ea021,
+ 0xfff42dfc,
+ 0xd2109a2d,
+ 0xe111a9a2,
+ 0x050d8d00,
+ 0x09a2c002,
+ 0x9905ff94,
+ 0xaa21d210,
+ 0x52b0c200,
+ 0x29ded3f2,
+ 0x9aa9fff4,
+ 0x8c80e111,
+ 0x430cb780,
+ 0x50a9b740,
+ 0xb75f0244,
+ 0xc0987068,
+ 0xd2080a60,
+ 0xb780a115,
+ 0xb72050a9,
+ 0xc200430c,
+ 0x00c25a08,
+ 0x796cb79f,
+ 0x63adb582,
+ 0x8c80e111,
+ 0x50a9b780,
+ 0x430cb720,
+ 0x5a08c200,
+ 0xb79f00c2,
+ 0xb58279ec,
+ 0xe111682d,
+ 0xb7808c80,
+ 0xb72050a9,
+ 0xc200430c,
+ 0x00c25a08,
+ 0x7a6cb79f,
+ 0x6cadb582,
+ 0x8c80e111,
+ 0x50a9b780,
+ 0x430cb720,
+ 0x5a08c200,
+ 0xb79f00c2,
+ 0xb5827aec,
+ 0xe111712d,
+ 0xb7808c80,
+ 0xb72050a9,
+ 0xc200430c,
+ 0x00c25a08,
+ 0x7b6cb79f,
+ 0x75adb582,
+ 0x8c80e111,
+ 0x50a9b780,
+ 0x430cb720,
+ 0x5a08c200,
+ 0xb79f00c2,
+ 0xb5827bec,
+ 0xe1117a2d,
+ 0xb75f8c80,
+ 0xb7807c6e,
+ 0xb72050a9,
+ 0xc200430c,
+ 0x00c25a0c,
+ 0x61afb541,
+ 0x91a0c008,
+ 0xc00176dc,
+ 0xd3f192e4,
+ 0x75022a3e,
+ 0x90c2c001,
+ 0xc0007502,
+ 0x750492e6,
+ 0x93e4c007,
+ 0x430cb720,
+ 0x5a21c180,
+ 0x5941c180,
+ 0x4535b5c2,
+ 0x430cb720,
+ 0x6c29b588,
+ 0x430cb720,
+ 0x6ca9b548,
+ 0x91a0c007,
+ 0x430cb720,
+ 0x5a41c180,
+ 0x45b5b5c2,
+ 0x430cb720,
+ 0x6d29b588,
+ 0x9020c007,
+ 0x430cb720,
+ 0x5a41c180,
+ 0x4635b5c2,
+ 0x430cb720,
+ 0x6da9b588,
+ 0x92a0c006,
+ 0xc00376de,
+ 0xffd49164,
+ 0x9e85995a,
+ 0xc00e0a86,
+ 0x9e6b2efc,
+ 0x52acc200,
+ 0x2afcc00e,
+ 0xfff49eab,
+ 0xe111996b,
+ 0x050d8d00,
+ 0x0982c004,
+ 0xff94058b,
+ 0x058b9844,
+ 0x9826ff74,
+ 0xfff49eab,
+ 0xb78099ea,
+ 0xd032430c,
+ 0x0982296e,
+ 0x0629d110,
+ 0xc1369e9b,
+ 0xd2080a20,
+ 0xb720a18d,
+ 0xc300430c,
+ 0xcfc05a18,
+ 0xb5822a01,
+ 0xb7807ead,
+ 0xd031430c,
+ 0x00986ca9,
+ 0x78ecb79f,
+ 0x712db589,
+ 0x0624d010,
+ 0x796cb75f,
+ 0x0045d120,
+ 0x430cb780,
+ 0x2900c01e,
+ 0xc1365915,
+ 0xd2080a28,
+ 0xc000a101,
+ 0xb78090c0,
+ 0xb589404d,
+ 0xd3f170ad,
+ 0xb7602d3e,
+ 0xc100430c,
+ 0x9d4d5a08,
+ 0xc1360207,
+ 0xd2080a28,
+ 0xd110a902,
+ 0xe1130605,
+ 0xc2008d00,
+ 0x72955a08,
+ 0x80a6e020,
+ 0x0d8400c6,
+ 0x90b2ffff,
+ 0x9120c003,
+ 0xc00176ea,
+ 0xf01190a4,
+ 0xc0002a32,
+ 0xffd49362,
+ 0x9e8398ea,
+ 0x9eaa0a86,
+ 0x2dfcc00e,
+ 0xc00e55cc,
+ 0xd2102dfc,
+ 0xfff4a021,
+ 0xb74098fb,
+ 0xd210430c,
+ 0x050da9a2,
+ 0x0920c046,
+ 0x09d2c002,
+ 0x9040c001,
+ 0x430cb720,
+ 0x4f2db582,
+ 0x9060c002,
+ 0xc00176ec,
+ 0xffd490a4,
+ 0x9e8398c8,
+ 0x9eaa0a86,
+ 0x2dfcc00e,
+ 0xc00e55cc,
+ 0xd2102dfc,
+ 0xfff4a021,
+ 0xb74098d9,
+ 0xd210430c,
+ 0x050da9a2,
+ 0x0970c048,
+ 0x0982c002,
+ 0x9bafff74,
+ 0xaa21d210,
+ 0x52b0c200,
+ 0x29ded3f2,
+ 0x9953fff4,
+ 0x93a0c000,
+ 0xc00076e0,
+ 0xb7809164,
+ 0x0906430c,
+ 0x0a10c0a4,
+ 0xa101d208,
+ 0x9220c000,
+ 0xc00076c6,
+ 0xc2d490c4,
+ 0xc0009a1f,
+ 0xb7409140,
+ 0xd1a2598c,
+ 0xe2205e08,
+ 0x9ea4aa05,
+ 0xb7209c62,
+ 0x0203430c,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x0a029222,
+ 0x44abb585,
+ 0x430cb720,
+ 0x4829b78a,
+ 0x48b1b76a,
+ 0x45b49ea2,
+ 0x2dfcc00e,
+ 0x991dfff4,
+ 0x7aeeb79f,
+ 0x7b6eb7bf,
+ 0x7beeb7df,
+ 0x8c60c004,
+ 0xa61d9c22,
+ 0x8400c002,
+ 0xd050a192,
+ 0x0c82aa61,
+ 0x0ca0c146,
+ 0x9ea51a08,
+ 0x4c8cb780,
+ 0x2efcc00e,
+ 0xb7e064d3,
+ 0x9e4a4794,
+ 0x03a8a096,
+ 0x9eba9eb9,
+ 0x0882010f,
+ 0x0c80c06c,
+ 0x0960c06c,
+ 0x0d00c06e,
+ 0xa09d028f,
+ 0xb55fa09a,
+ 0xb55f7dec,
+ 0x058b7d74,
+ 0x9ab6ff74,
+ 0x430cb780,
+ 0x0a38c0a2,
+ 0xab01d208,
+ 0x90a0c000,
+ 0xffd40d92,
+ 0xa99a9821,
+ 0xb78d008f,
+ 0xd01060a9,
+ 0xb7c0a961,
+ 0x70884c94,
+ 0x9282ffff,
+ 0xc0007580,
+ 0xb7209164,
+ 0xb76a430c,
+ 0xff7447b1,
+ 0x74009ab2,
+ 0x9104ffff,
+ 0xff74058b,
+ 0xa9199a91,
+ 0xd0089ea9,
+ 0xb76daa41,
+ 0x710660c9,
+ 0x9102c001,
+ 0xc180aa15,
+ 0xb73f5890,
+ 0x00987df4,
+ 0x009cc101,
+ 0x4cadb783,
+ 0x4db5b743,
+ 0x4c2db743,
+ 0x4d2db723,
+ 0x404db580,
+ 0xd0129ea9,
+ 0xb52309b2,
+ 0xb79f5bcd,
+ 0x099a7d6c,
+ 0x5acdb543,
+ 0xa102f208,
+ 0x297cc006,
+ 0xc2f4a11d,
+ 0x008b9bb5,
+ 0x60a9b50d,
+ 0xff74058b,
+ 0xb7209a5f,
+ 0xa99e430c,
+ 0x8d00e091,
+ 0x5bb5b743,
+ 0x5b2db763,
+ 0x990bfff4,
+ 0x7068b79f,
+ 0x753ec01c,
+ 0x90c4c000,
+ 0x9a78ff74,
+ 0x7068b51f,
+ 0x7070b77f,
+ 0x55ac0c86,
+ 0x2dfcc00e,
+ 0x9beaffd4,
+ 0x4490b760,
+ 0x4c8cb740,
+ 0x7068b77f,
+ 0xc1469e5c,
+ 0x00c46a22,
+ 0x5ab5b743,
+ 0x5d3d9e5a,
+ 0xffd42d04,
+ 0xb77f98b9,
+ 0x0d067070,
+ 0xc00e55cc,
+ 0xfff42dfc,
+ 0xf250985e,
+ 0x7508aa61,
+ 0x90a4c000,
+ 0xf2500d86,
+ 0xb720a1e2,
+ 0xb740430c,
+ 0xb7604914,
+ 0xf0104e8c,
+ 0x7508aa41,
+ 0x91c4c000,
+ 0x47a9b78a,
+ 0xa961d008,
+ 0xc01c2244,
+ 0xc0007d3e,
+ 0x0a0690a2,
+ 0xa241f010,
+ 0x4894b720,
+ 0xc1460d50,
+ 0x728209a0,
+ 0x9124ffff,
+ 0x7482a91d,
+ 0x90e4fffa,
+ 0x4794b740,
+ 0xa141f050,
+ 0x4608b780,
+ 0x0a04a992,
+ 0x4608b580,
+ 0xf0100a1a,
+ 0xb79fa26d,
+ 0xb7bf7c6e,
+ 0xb7df7cee,
+ 0xb7ff7d6e,
+ 0xc0047dee,
+ 0x9c228c00,
+ 0xc006a61d,
+ 0xb7208440,
+ 0xf1d2430c,
+ 0xb7838d00,
+ 0xb7c35c2d,
+ 0xa2155b2d,
+ 0x5badb783,
+ 0x0a06a219,
+ 0x5218c200,
+ 0xb7239ea5,
+ 0xc00e5aad,
+ 0x058b2efc,
+ 0xffd4a091,
+ 0xb7209b73,
+ 0xe1d14314,
+ 0xb9608d00,
+ 0xb7ed4028,
+ 0xb7ed6049,
+ 0x9e4960d1,
+ 0x4c2db783,
+ 0xa241f008,
+ 0x4cadb783,
+ 0xa245f008,
+ 0x4d2db783,
+ 0xa249f008,
+ 0x4dadb783,
+ 0xf00808c0,
+ 0x0940a24d,
+ 0x91c1ffff,
+ 0xb5aa0a82,
+ 0xb7204849,
+ 0xd3f2430c,
+ 0xc00629ee,
+ 0xb5aa0992,
+ 0xa91a48a9,
+ 0x430cb740,
+ 0x9a25ff74,
+ 0x430cb720,
+ 0xb5aa058b,
+ 0xb7204831,
+ 0xb5aa430c,
+ 0xb78048b1,
+ 0xc0a4430c,
+ 0xd2080a10,
+ 0xb720a281,
+ 0xb5a5430c,
+ 0xffd444ab,
+ 0xb7209bbc,
+ 0x9eaa430c,
+ 0x4028b960,
+ 0x6029b5ed,
+ 0x430cb720,
+ 0x60b1b5ed,
+ 0x430cb720,
+ 0xaa41f210,
+ 0x5c90c100,
+ 0x0493d110,
+ 0xb5830d04,
+ 0xb7204c2d,
+ 0xf210430c,
+ 0xd110aa45,
+ 0xb5830493,
+ 0xb7204cad,
+ 0xf210430c,
+ 0xd110aa49,
+ 0xb5830493,
+ 0xb7804d2d,
+ 0xf210430c,
+ 0x0f40a94d,
+ 0x0498c101,
+ 0x4dcdb543,
+ 0x9381fffe,
+ 0x430cb720,
+ 0xb583aa11,
+ 0xb7205aad,
+ 0xb5c3430c,
+ 0xb7205b2d,
+ 0xaa19430c,
+ 0x5badb583,
+ 0x430cb720,
+ 0xb583aa15,
+ 0xb7205c2d,
+ 0xb780430c,
+ 0xb58a70a9,
+ 0xb72047a9,
+ 0xb780430c,
+ 0xb58a7029,
+ 0xb7204729,
+ 0xb780430c,
+ 0xb58a6fa9,
+ 0xb72046a9,
+ 0xb780430c,
+ 0xb58a6f29,
+ 0xb7204629,
+ 0xb780430c,
+ 0x7504572b,
+ 0x91d2c001,
+ 0x0a42c809,
+ 0x0a30c0ec,
+ 0x5cadb580,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a40c06c,
+ 0x5d2db580,
+ 0x430cb720,
+ 0x44b5b760,
+ 0x46b5b740,
+ 0x452db740,
+ 0x472db780,
+ 0x572bb760,
+ 0x6127c101,
+ 0x6245c101,
+ 0xd02074c0,
+ 0xc00102c4,
+ 0xb7409024,
+ 0xb78048ad,
+ 0x6245492d,
+ 0x06d8c101,
+ 0x9300c000,
+ 0x0a42c809,
+ 0x0a30c0aa,
+ 0x5cadb580,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a40c080,
+ 0x5d2db580,
+ 0x430cb780,
+ 0xa905f248,
+ 0xaa09f248,
+ 0x62c4d032,
+ 0x430cb720,
+ 0x0a02c100,
+ 0xb5809e6b,
+ 0xb7a05ead,
+ 0x008b430c,
+ 0x5eb5b760,
+ 0x99d7c2f4,
+ 0xb500008b,
+ 0xb7a05ead,
+ 0xf248430c,
+ 0x7500aa35,
+ 0x9242c000,
+ 0xb760008b,
+ 0x01895eb5,
+ 0x61c7d132,
+ 0x2d81cffc,
+ 0x99c3c2f4,
+ 0xc0007400,
+ 0x008b90a2,
+ 0x5eadb500,
+ 0x430cb720,
+ 0x5e19d2a2,
+ 0x5dadb580,
+ 0x4314b740,
+ 0xaa4df050,
+ 0xa945f050,
+ 0x1a040244,
+ 0xa24df050,
+ 0x430cb7a0,
+ 0xaaa6f248,
+ 0xa9aef248,
+ 0xc2f49e6b,
+ 0xc10199a4,
+ 0xf248600b,
+ 0xb720a02d,
+ 0xb720430c,
+ 0xb78046b5,
+ 0xb740472d,
+ 0xb7604535,
+ 0xb74044b5,
+ 0xb760492d,
+ 0xc10148ad,
+ 0x65276243,
+ 0xc1016127,
+ 0x02440244,
+ 0x5a19c200,
+ 0x5e2db580,
+ 0x430cb740,
+ 0xaa45f008,
+ 0x7d3ec004,
+ 0x90e4c001,
+ 0xaa51f008,
+ 0x2a7ce002,
+ 0x9024c001,
+ 0x01899ea2,
+ 0x91c0c000,
+ 0x5f4db560,
+ 0x430cb720,
+ 0xb56000a2,
+ 0xb72067ad,
+ 0x00a2430c,
+ 0x65adb560,
+ 0x430cb720,
+ 0x6d21d131,
+ 0x4629b78a,
+ 0x00a2d020,
+ 0x0d047299,
+ 0x9146ffff,
+ 0xb5800a02,
+ 0xb79f712d,
+ 0xb7bf776e,
+ 0xb7df77ee,
+ 0xb7ff786e,
+ 0xc00878ee,
+ 0x9c228c40,
+ 0xb7a0a605,
+ 0x0e86430c,
+ 0xb763008b,
+ 0xb7a35b35,
+ 0xc2005bad,
+ 0xc00e55ac,
+ 0xffd42dfc,
+ 0xb72099e5,
+ 0x9eaa430c,
+ 0x0982c008,
+ 0x6c31b76d,
+ 0x0918d191,
+ 0x98bbff74,
+ 0x430cb720,
+ 0x5b35b743,
+ 0x56a8c200,
+ 0x2dded3f2,
+ 0x9a5dffd4,
+ 0x430cb720,
+ 0x0a18d191,
+ 0x612db582,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xb7a0a605,
+ 0x0e86430c,
+ 0xb763008b,
+ 0xb7a35b35,
+ 0xc2005bad,
+ 0xc00e55ac,
+ 0xffd42dfc,
+ 0xb72099b5,
+ 0x9eaa430c,
+ 0x0982c008,
+ 0x6c31b76d,
+ 0xc0180103,
+ 0xff740930,
+ 0xb720988a,
+ 0xb743430c,
+ 0xc2005b35,
+ 0xd3f256a8,
+ 0xb79f2dde,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x90e0ffd1,
+ 0xb7a0a60d,
+ 0xb7204994,
+ 0x09024c8c,
+ 0x0d96050b,
+ 0xb9600185,
+ 0x0a024038,
+ 0x0a20c146,
+ 0x02426229,
+ 0x0a18c144,
+ 0xa182f208,
+ 0xf0310904,
+ 0xffffa1d5,
+ 0xb7409281,
+ 0xc809478c,
+ 0xcbdc0a46,
+ 0xf0480a30,
+ 0xf008a1c1,
+ 0x0287a255,
+ 0xffd40b06,
+ 0xf23199c6,
+ 0xd3f1aa35,
+ 0x0d82295e,
+ 0x9eb27500,
+ 0xc0000a84,
+ 0x9e5b90e2,
+ 0x4508b5c0,
+ 0x9a44ffb4,
+ 0xffff7550,
+ 0xb79f9204,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0x5db09c22,
+ 0x0d80c021,
+ 0x0da0c102,
+ 0xaa619d3e,
+ 0xc40059b8,
+ 0xcbfe2980,
+ 0x32462a7d,
+ 0xf0009dbe,
+ 0x9c22a261,
+ 0x430cb720,
+ 0x9e5b0926,
+ 0x6735b744,
+ 0xd0110802,
+ 0x75101e22,
+ 0x4826b322,
+ 0x9c8bc010,
+ 0x9160c000,
+ 0xaa01d208,
+ 0xc0007106,
+ 0xc00e90a4,
+ 0x9c22287c,
+ 0x02030804,
+ 0x0a74c098,
+ 0xffff0884,
+ 0xc00e9261,
+ 0x9c22087e,
+ 0xb740a605,
+ 0xd1a2430c,
+ 0x00d45e88,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xa911d208,
+ 0xd2080904,
+ 0xb740a111,
+ 0x00d4430c,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xaa11d208,
+ 0xc0007502,
+ 0xc00e90d4,
+ 0xc001087e,
+ 0xc10192c0,
+ 0x090200a6,
+ 0x4829b54c,
+ 0x430cb720,
+ 0x5e18d1a2,
+ 0x0982c004,
+ 0x04b3d110,
+ 0x5914b760,
+ 0x4929b54c,
+ 0x01c7d120,
+ 0x430cb780,
+ 0xc05e0258,
+ 0xf2080a60,
+ 0xb720a101,
+ 0xb543430c,
+ 0xb720402d,
+ 0x00d2430c,
+ 0x40adb543,
+ 0x430cb720,
+ 0x41adb543,
+ 0x9a2fff34,
+ 0x430cb740,
+ 0xb78900d4,
+ 0x6a326fad,
+ 0xc1380244,
+ 0xd2080a20,
+ 0xb79fa811,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xb7609c22,
+ 0x0d02430c,
+ 0x4078b960,
+ 0x0134c101,
+ 0xc1360205,
+ 0xd2080a2c,
+ 0x7500aa01,
+ 0x92a4c000,
+ 0x0920c136,
+ 0xd0080a04,
+ 0xb740a24d,
+ 0xd031430c,
+ 0xc00e6e29,
+ 0x02442d7c,
+ 0x0a20c138,
+ 0xd208097f,
+ 0xc000a111,
+ 0x0d0490e0,
+ 0x9041ffff,
+ 0x0d7ec00e,
+ 0x9c229e50,
+ 0xb720a61d,
+ 0x9e5f430c,
+ 0xc0a40203,
+ 0xd2080a10,
+ 0xb7caaa01,
+ 0x75004729,
+ 0x93a4c005,
+ 0x5a88c300,
+ 0x02d2d020,
+ 0xb7899e69,
+ 0xc01c6fad,
+ 0xc001753e,
+ 0x9eb39004,
+ 0xfff49eae,
+ 0x9e699bbc,
+ 0x9180c000,
+ 0x984cffb4,
+ 0x430cb7a0,
+ 0xd1109eb3,
+ 0xfff406eb,
+ 0x008b9bb0,
+ 0x6fadb509,
+ 0x430cb720,
+ 0xd1100d92,
+ 0xb78904e3,
+ 0xc01c6fad,
+ 0xffff753e,
+ 0xb7209162,
+ 0xc300430c,
+ 0xd0205908,
+ 0x9e8a00a2,
+ 0x6fcdb789,
+ 0x0d10c09c,
+ 0x00c26a32,
+ 0x712db729,
+ 0x568cb780,
+ 0xe0409e8b,
+ 0xd010a1c6,
+ 0x7500aa41,
+ 0x9284c003,
+ 0xc0009eb3,
+ 0xffb491c0,
+ 0xb7809819,
+ 0x9eb3430c,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc0037500,
+ 0xfff49184,
+ 0xc01c9b1b,
+ 0x0d92743e,
+ 0x91e2ffff,
+ 0xc00175c0,
+ 0xb7209062,
+ 0x0982430c,
+ 0xb76a9e9a,
+ 0xb72a46b1,
+ 0xd1a24729,
+ 0x01095e30,
+ 0x0900c021,
+ 0x0930c140,
+ 0x91a0c000,
+ 0xf0009dad,
+ 0xd011a162,
+ 0x9dcd0a28,
+ 0xa162f000,
+ 0xc1000984,
+ 0xc1010900,
+ 0x71020236,
+ 0x925cffff,
+ 0x4314b720,
+ 0x5988d324,
+ 0x59b0c300,
+ 0x04b2d010,
+ 0x0980c021,
+ 0x6fb5b749,
+ 0x09c0c146,
+ 0x6d29d031,
+ 0x0624d010,
+ 0x0122c101,
+ 0x0920c138,
+ 0xa951d008,
+ 0x0244c101,
+ 0xc2000244,
+ 0xc1015a08,
+ 0xb78000c2,
+ 0xb7294f8c,
+ 0x9ea270ad,
+ 0xa0e9e020,
+ 0xf0009dbd,
+ 0xb786a0e1,
+ 0x750040c9,
+ 0x9184c000,
+ 0x53cbb78c,
+ 0xc0007500,
+ 0xb76690e2,
+ 0x9eb34a4d,
+ 0x99cbff94,
+ 0x430cb780,
+ 0xc09c0906,
+ 0xd2080a10,
+ 0xb79fa101,
+ 0xb7bf7e6e,
+ 0xb7df7eee,
+ 0xb7ff7f6e,
+ 0xc0027fee,
+ 0x9c228c00,
+ 0xb720a605,
+ 0x9e5d430c,
+ 0x63adb744,
+ 0xc0017ca0,
+ 0xb7819202,
+ 0x75045dad,
+ 0x90e4c000,
+ 0x5a21c100,
+ 0xb5842a04,
+ 0xb7207e2b,
+ 0xb781430c,
+ 0x1a105dad,
+ 0xc0007502,
+ 0x0a0690b2,
+ 0x7f2bb584,
+ 0x430cb720,
+ 0x6e29b788,
+ 0x9e8b0183,
+ 0x5333b741,
+ 0x6aadb749,
+ 0x098cc026,
+ 0xc04a01b8,
+ 0x9e940df0,
+ 0xb7209c62,
+ 0xb784430c,
+ 0xcffe63ad,
+ 0xb5842a3d,
+ 0xb72063ad,
+ 0xb78c430c,
+ 0x7500532b,
+ 0x9222c001,
+ 0x6cadb744,
+ 0xc0007484,
+ 0xb7819124,
+ 0x75004bad,
+ 0x9084c001,
+ 0x9140c000,
+ 0xc0007482,
+ 0xb78190e4,
+ 0x75024b2d,
+ 0x9344c000,
+ 0xc09c0203,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0x9244c000,
+ 0xc0007486,
+ 0x02039144,
+ 0x0a40c09a,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xb78190e4,
+ 0x750a5dad,
+ 0x90a4c000,
+ 0xb58c0a02,
+ 0xb720532b,
+ 0x0203430c,
+ 0x0a18c0a4,
+ 0xaa01d208,
+ 0x6cb5b764,
+ 0xc0007500,
+ 0xb74193e2,
+ 0xb70953ab,
+ 0xb76c6c2d,
+ 0x9eaa532b,
+ 0x9c629e84,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x75021a10,
+ 0x90f2c000,
+ 0x532bb78c,
+ 0xc0017500,
+ 0x02039064,
+ 0x0a20c09c,
+ 0xa001d208,
+ 0x9380c000,
+ 0xc00076c4,
+ 0xb7499164,
+ 0x020375a9,
+ 0x0a20c09c,
+ 0xa101d208,
+ 0x9200c000,
+ 0x76c60103,
+ 0x0920c09c,
+ 0x90c4c000,
+ 0x7629b789,
+ 0x9080c000,
+ 0x7529b789,
+ 0xa241d008,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x75021a10,
+ 0x90f2c000,
+ 0x532bb78c,
+ 0xc0007500,
+ 0xb74a9344,
+ 0xb76046a9,
+ 0x01835514,
+ 0x09a0c09c,
+ 0x9140c000,
+ 0xaa61d008,
+ 0x9e529e91,
+ 0xa229c030,
+ 0x297cc00e,
+ 0x4729b78a,
+ 0x0922d012,
+ 0xffff7104,
+ 0xb79f9268,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xb7209c22,
+ 0x05964314,
+ 0x0da0c098,
+ 0xa8edd010,
+ 0x6e4db704,
+ 0x4000ba09,
+ 0xd001747f,
+ 0xb3401802,
+ 0x009248a2,
+ 0x0092c101,
+ 0x532bb789,
+ 0x2a60c00e,
+ 0x5a0fc200,
+ 0x9c220040,
+ 0xb7a0a61d,
+ 0x0687430c,
+ 0x020b9e9e,
+ 0x0a58c096,
+ 0xa982d208,
+ 0xfff49e57,
+ 0x020b9bda,
+ 0x0a54c096,
+ 0xa982d208,
+ 0xfff40301,
+ 0x020b9bd2,
+ 0x0a50c096,
+ 0xaa01d208,
+ 0x9e819ea8,
+ 0xc0980258,
+ 0xd2080a20,
+ 0xb744aa0d,
+ 0xba246e4c,
+ 0x753f4000,
+ 0x19a2d001,
+ 0x9202c000,
+ 0xc096020b,
+ 0xd2080a30,
+ 0x0092a881,
+ 0xb789009a,
+ 0xc00e532b,
+ 0xc2002a60,
+ 0x01c45a0f,
+ 0xc0007780,
+ 0x9eaa9102,
+ 0xc0260d82,
+ 0xc0010d50,
+ 0x75c69320,
+ 0x91e2c000,
+ 0xd01d7186,
+ 0xd01e1266,
+ 0xd012123c,
+ 0xd03119c2,
+ 0xc0286e39,
+ 0xc0010a00,
+ 0x020b90e0,
+ 0x0a08c098,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x9e699222,
+ 0x0a02c409,
+ 0x622db580,
+ 0x0a42c001,
+ 0x0a00c010,
+ 0x6335b5c0,
+ 0x62adb580,
+ 0x9220c001,
+ 0x0c0270d3,
+ 0x1617d11e,
+ 0x1233d11d,
+ 0x1942d012,
+ 0x7280cc12,
+ 0x6e39d031,
+ 0x0a60c09c,
+ 0x0158d020,
+ 0xaa49f010,
+ 0x76c09e69,
+ 0x622db580,
+ 0xaa45f010,
+ 0x62adb580,
+ 0xaa41f010,
+ 0x632db580,
+ 0x9236c000,
+ 0x5d60d0a2,
+ 0x2a6ed3f1,
+ 0x5a20c200,
+ 0xd3f13244,
+ 0xc00e293e,
+ 0x59403a7c,
+ 0xb5803244,
+ 0xb79f622d,
+ 0xb7bf7e6e,
+ 0xb7df7eee,
+ 0xb7ff7f6e,
+ 0xc0027fee,
+ 0x9c228c00,
+ 0x8420a605,
+ 0x430cb720,
+ 0xb5bf0a82,
+ 0xb72a7e6a,
+ 0xb72046a9,
+ 0x04075514,
+ 0x02079e8b,
+ 0xa969c010,
+ 0x04859e43,
+ 0x8c80e031,
+ 0xff749ea2,
+ 0xb7209b4b,
+ 0x018b430c,
+ 0x46b1b76a,
+ 0x984bff94,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x9c228c60,
+ 0x4314b720,
+ 0x5cb0d1a2,
+ 0xb7859e9a,
+ 0xc1407ec9,
+ 0xc0210880,
+ 0xc2000880,
+ 0xc0205a24,
+ 0x3a042a00,
+ 0xf0009d9d,
+ 0x0185a261,
+ 0xff820906,
+ 0xf8399320,
+ 0x8420a205,
+ 0x430cb720,
+ 0xb59f0a02,
+ 0xb7417e6a,
+ 0x02035f2b,
+ 0x0a20c09c,
+ 0xd2087480,
+ 0xc000a981,
+ 0xb7409104,
+ 0xe0326ead,
+ 0xfff48d00,
+ 0xf9f89bd3,
+ 0x9c22aa19,
+ 0xa205f839,
+ 0xb7208420,
+ 0x0a02430c,
+ 0x7e6ab59f,
+ 0x5f2bb741,
+ 0x74809ea2,
+ 0x9204c000,
+ 0xc0a40203,
+ 0xd2080a14,
+ 0xb720a102,
+ 0xe032430c,
+ 0x09ea8d00,
+ 0x6cadb740,
+ 0x9bb4fff4,
+ 0xaa19f9f8,
+ 0xa6059c22,
+ 0x430cb720,
+ 0xb7810687,
+ 0x75045dad,
+ 0x93c4c000,
+ 0x558cb780,
+ 0xc0409e5a,
+ 0x7500aa45,
+ 0x9142c000,
+ 0x2ebed3f1,
+ 0xfff49eab,
+ 0x9eab9bb2,
+ 0x9180c000,
+ 0xc0a40203,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0x90c2c000,
+ 0x2dfcc00e,
+ 0x9bbefff4,
+ 0x430cb720,
+ 0xc09c0203,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9162c001,
+ 0x5dadb781,
+ 0xc0017504,
+ 0xb78690c4,
+ 0x75004129,
+ 0x9022c001,
+ 0x558cb780,
+ 0xc0409e6a,
+ 0x7500aa45,
+ 0x9142c000,
+ 0x68adb760,
+ 0x2dded3f2,
+ 0x09060d02,
+ 0x9bd6ff74,
+ 0x430cb720,
+ 0x2dded3f2,
+ 0xb7600d02,
+ 0x0906692d,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0xff7e8c40,
+ 0xb79f90e0,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa61d9c22,
+ 0x558cb740,
+ 0x4314b720,
+ 0x9e920a02,
+ 0xc0200687,
+ 0xb741a269,
+ 0xd0115dcd,
+ 0x75021a28,
+ 0x9172c000,
+ 0x534bb78c,
+ 0xc0007500,
+ 0xb74090c2,
+ 0xc0006e55,
+ 0xb7849140,
+ 0xc2006ccd,
+ 0xc1015a08,
+ 0xb74000c2,
+ 0x748a6cb5,
+ 0x90a2c000,
+ 0xc0000906,
+ 0xb78090e0,
+ 0x9e6b500c,
+ 0xa965c040,
+ 0x46d1b76a,
+ 0xb7409e53,
+ 0xc0205514,
+ 0xff74a96a,
+ 0xb7209b8b,
+ 0xb7814314,
+ 0x1a105dcd,
+ 0xc0037502,
+ 0xb78c91b2,
+ 0x7500534b,
+ 0x9102c003,
+ 0x0ed2d011,
+ 0x0b02c008,
+ 0x5ab0c280,
+ 0xc0219eae,
+ 0x0f400f00,
+ 0xf0009dee,
+ 0x010ba361,
+ 0x0900c040,
+ 0x0900c021,
+ 0xaa619d2d,
+ 0x3a40c002,
+ 0xf0009dad,
+ 0xd2a2a261,
+ 0x9ead5c88,
+ 0x0092c101,
+ 0x6fb5b749,
+ 0x0e80c021,
+ 0xd0310ea0,
+ 0xd0106d29,
+ 0xc1010624,
+ 0xc1380122,
+ 0xd0080920,
+ 0xc101a951,
+ 0x02440244,
+ 0x5a08c200,
+ 0x00c2c101,
+ 0x0a02c401,
+ 0x70adb7e9,
+ 0x0a00c008,
+ 0xf0009dde,
+ 0x9eaba261,
+ 0x09929eb2,
+ 0xff14010d,
+ 0x0a0299f6,
+ 0xf0009dde,
+ 0x9deea261,
+ 0xa361f000,
+ 0xc021020b,
+ 0xc0400a00,
+ 0x9d4d0a20,
+ 0x1a10a962,
+ 0xaa619d4d,
+ 0x0a80c021,
+ 0x0ac0c046,
+ 0xa9619d5d,
+ 0xc0007480,
+ 0xd12290e2,
+ 0x112e5e35,
+ 0x11282a7c,
+ 0x430cb720,
+ 0x76adb544,
+ 0x7e6eb79f,
+ 0x7eeeb7bf,
+ 0x7f6eb7df,
+ 0x7feeb7ff,
+ 0x8c00c002,
+ 0xa6059c22,
+ 0x068774c0,
+ 0x9324c000,
+ 0x500cb780,
+ 0xc0409e5a,
+ 0x7500aa45,
+ 0x9222c000,
+ 0x430cb720,
+ 0xc09c0203,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x90e2c000,
+ 0x4729b78a,
+ 0xc0017117,
+ 0xb7209302,
+ 0x0203430c,
+ 0x0a38c0a2,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x010393c4,
+ 0x4729b78a,
+ 0x091cc0a4,
+ 0xa941d008,
+ 0x4629b76a,
+ 0x124ac101,
+ 0xd0126245,
+ 0xc10119b2,
+ 0xc0a2029a,
+ 0xc1010ac0,
+ 0xd20805b8,
+ 0xc2b4aaa5,
+ 0x08049bcf,
+ 0xc0007140,
+ 0xb7409244,
+ 0xd2a24f94,
+ 0xd2a25e30,
+ 0x9e535d08,
+ 0x0a00c021,
+ 0xa945e030,
+ 0x0a40c146,
+ 0xf0009dcd,
+ 0xb79fa161,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa60d9c22,
+ 0x030774c6,
+ 0xc0009e5d,
+ 0x74c490a2,
+ 0x90a4c000,
+ 0xc0009eb6,
+ 0xb74092c0,
+ 0x0205430c,
+ 0x0a14c09c,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x0f0690a4,
+ 0x9120c000,
+ 0xc06e0205,
+ 0x71480a10,
+ 0xd0020f02,
+ 0xf2080f62,
+ 0xb720aa31,
+ 0x73994314,
+ 0x9344c000,
+ 0x58cdb783,
+ 0xa221f208,
+ 0x430cb720,
+ 0x592db723,
+ 0xa0a5f208,
+ 0x430cb720,
+ 0x59adb723,
+ 0xa0a9f208,
+ 0x430cb720,
+ 0x5a2db723,
+ 0xa0adf208,
+ 0x90e0c001,
+ 0x5c88d322,
+ 0x46d1b76a,
+ 0x0092c101,
+ 0x6a35b740,
+ 0x09c2c010,
+ 0xff34010b,
+ 0xb7209aea,
+ 0xf208430c,
+ 0xb583aa21,
+ 0xb72058ad,
+ 0xf208430c,
+ 0xb583aa25,
+ 0xb720592d,
+ 0xf208430c,
+ 0xb583aa29,
+ 0xb72059ad,
+ 0xf208430c,
+ 0xb583aa2d,
+ 0xb7205a2d,
+ 0xd0524314,
+ 0xb7880ad8,
+ 0xba0c6c49,
+ 0xc0004008,
+ 0x5888922a,
+ 0x0092c101,
+ 0x46d1b76a,
+ 0x46b5b742,
+ 0xc00e9e6a,
+ 0xff3409f2,
+ 0x0a169abc,
+ 0xa231f208,
+ 0xaa21d210,
+ 0x0982c400,
+ 0x0902c200,
+ 0xa221f210,
+ 0xa92ef208,
+ 0x2d7dc9fe,
+ 0xa12ef208,
+ 0x430cb720,
+ 0x752db784,
+ 0xb3437500,
+ 0x75004422,
+ 0x4422b342,
+ 0x9e533126,
+ 0xf2083126,
+ 0xb720a12d,
+ 0xb784430c,
+ 0x75026cad,
+ 0x90c2c000,
+ 0x3900c020,
+ 0xa12df208,
+ 0xaa2df208,
+ 0x3a00c010,
+ 0xa22df208,
+ 0x430cb720,
+ 0x6cadb784,
+ 0xc0007506,
+ 0xf20891c4,
+ 0xc080aa21,
+ 0xc0007d00,
+ 0x010391c2,
+ 0x0914c098,
+ 0xc0000a02,
+ 0x010390c0,
+ 0x0914c098,
+ 0xd0080a0a,
+ 0xf208a241,
+ 0xc040aa21,
+ 0xc0027d00,
+ 0xb72093a2,
+ 0xb784430c,
+ 0x75046cad,
+ 0x92c4c002,
+ 0xc0980183,
+ 0xd0080990,
+ 0x7480a961,
+ 0x91c4c002,
+ 0x702db784,
+ 0x7d3edffd,
+ 0x92c2c000,
+ 0xc0980203,
+ 0xd2080a08,
+ 0x7500aa01,
+ 0x9082c001,
+ 0xc0980203,
+ 0xd2080a0c,
+ 0x7500aa01,
+ 0x9382c000,
+ 0xa161d008,
+ 0x90a0c001,
+ 0x01030203,
+ 0x0a58c096,
+ 0x0954c096,
+ 0xa902d208,
+ 0xaa41d008,
+ 0xc0007299,
+ 0xb7859164,
+ 0x75007ea9,
+ 0x90c4c000,
+ 0xa261d008,
+ 0x91e0c000,
+ 0xc0980103,
+ 0x0a060910,
+ 0xa241d008,
+ 0x430cb780,
+ 0xc0980902,
+ 0xd2080a14,
+ 0xb780a101,
+ 0xc098430c,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x9102c000,
+ 0xaa2df208,
+ 0x2a7dcfbe,
+ 0xa22df208,
+ 0x430cb720,
+ 0x5dadb781,
+ 0xc0007504,
+ 0xf2089102,
+ 0xcfdeaa2d,
+ 0xf2082a7d,
+ 0xb720a22d,
+ 0xb781430c,
+ 0x75045dad,
+ 0x9304c001,
+ 0xa92df208,
+ 0x0a5ecfff,
+ 0x0a7cc7fe,
+ 0x2128d020,
+ 0xa12ef208,
+ 0x430cb720,
+ 0xc09a0203,
+ 0xd2080a40,
+ 0x7500aa01,
+ 0x90e2c000,
+ 0x4b2db781,
+ 0xc0007502,
+ 0xb78690e4,
+ 0x75004029,
+ 0x9142c000,
+ 0xc8009e53,
+ 0x32262900,
+ 0x3a20c001,
+ 0xa22df208,
+ 0x430cb720,
+ 0x4b2db781,
+ 0xc0007502,
+ 0xf2089164,
+ 0xcfffaa2d,
+ 0xc8fe0962,
+ 0x2244097c,
+ 0xa22df208,
+ 0x430cb780,
+ 0xa9adf208,
+ 0x0d02c011,
+ 0x0a08c098,
+ 0xa901d208,
+ 0xc0215964,
+ 0x31262900,
+ 0xa12df208,
+ 0x430cb780,
+ 0x0a0cc098,
+ 0xaa01d208,
+ 0x5a68c200,
+ 0x2a00c041,
+ 0xf2083128,
+ 0xb780a12d,
+ 0xcfef430c,
+ 0xc098297e,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0xb3420a02,
+ 0x9e544444,
+ 0xf2083144,
+ 0xb720a12d,
+ 0xb781430c,
+ 0x7500612d,
+ 0x90c2c000,
+ 0x3940c001,
+ 0xa12df208,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0007508,
+ 0xb78191a4,
+ 0x75045dad,
+ 0x9104c000,
+ 0xaa2df208,
+ 0x3a00c201,
+ 0xa22df208,
+ 0x9eab7780,
+ 0x09829eb2,
+ 0x09b2d001,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0xffdd8c60,
+ 0xf8399320,
+ 0x04b6a285,
+ 0xb7600407,
+ 0x9e485114,
+ 0x518cb780,
+ 0xa8aac030,
+ 0x5094b760,
+ 0xc00f59c0,
+ 0xc84029fc,
+ 0xc030aa05,
+ 0x9e9da98a,
+ 0x5a20c200,
+ 0x76c0349a,
+ 0xe0329e48,
+ 0xcff08d80,
+ 0x32082a00,
+ 0xa9f9d9d0,
+ 0x90c2c000,
+ 0x3a00c201,
+ 0x90e0c000,
+ 0xc0007680,
+ 0xc1019082,
+ 0x74803a00,
+ 0x9082c000,
+ 0x3a00c011,
+ 0xc0007440,
+ 0x74c09122,
+ 0x3a00c021,
+ 0x9082c000,
+ 0x3a00c041,
+ 0x5d08d028,
+ 0x549cb740,
+ 0xe0209e2d,
+ 0xf9f8a26d,
+ 0x9c22aa9d,
+ 0x8420a61d,
+ 0x558cb780,
+ 0xb7c09e5e,
+ 0xc2404314,
+ 0xa199aa45,
+ 0x9e8f9e73,
+ 0x8c80e0b1,
+ 0x09c4c09a,
+ 0x9e937500,
+ 0xabe1d008,
+ 0x7e29b7bf,
+ 0x7e33b7bf,
+ 0x7d33b73f,
+ 0x9244c000,
+ 0xb7849e71,
+ 0x7500752d,
+ 0x90e4c000,
+ 0x6cadb784,
+ 0xc0007502,
+ 0x9e7190e2,
+ 0x6cadb704,
+ 0x9060c000,
+ 0x77400802,
+ 0x9142c000,
+ 0xb7849e71,
+ 0xd1106c2d,
+ 0x9ea30639,
+ 0x2dfccffe,
+ 0xc09c9e74,
+ 0xd2080a18,
+ 0x7480a901,
+ 0x90a4c000,
+ 0xc0019e90,
+ 0x9e749100,
+ 0x0a38c0a2,
+ 0xaa01d208,
+ 0xc0007500,
+ 0x0c0690a2,
+ 0x93a0c000,
+ 0xc1019e71,
+ 0xb78a01ec,
+ 0x710c4729,
+ 0x9124c000,
+ 0xc0a20207,
+ 0xd2080a20,
+ 0xc000a812,
+ 0x010791c0,
+ 0xc0a20207,
+ 0xc0a20924,
+ 0xd2080a20,
+ 0xd008aa11,
+ 0xd020a951,
+ 0x75421044,
+ 0x90d2c000,
+ 0x00ecc101,
+ 0x4a31b56a,
+ 0xc0007640,
+ 0x9e719104,
+ 0x6729b786,
+ 0xc0067500,
+ 0xb7209062,
+ 0x5d184314,
+ 0x5908c380,
+ 0x049dd110,
+ 0x684db784,
+ 0x4a31b76a,
+ 0x2d00c03c,
+ 0x5a38c200,
+ 0x2a00cc00,
+ 0x4b29b76a,
+ 0x2970c002,
+ 0x31289e51,
+ 0xb78c5de0,
+ 0x3122534b,
+ 0x59c09e59,
+ 0x31263122,
+ 0xd0117500,
+ 0xd0202a06,
+ 0xc00031a8,
+ 0xc0809082,
+ 0x9e4c3d80,
+ 0x0a18c09c,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xc0409082,
+ 0x77c03d80,
+ 0x9082c000,
+ 0x3d80c100,
+ 0x570cb780,
+ 0x5938d324,
+ 0x5d40d022,
+ 0xaa45c240,
+ 0x2d00cc00,
+ 0xc2009e51,
+ 0xc2805a08,
+ 0x324259e0,
+ 0xc00f3246,
+ 0xd020297c,
+ 0x9e4c3144,
+ 0xc0967740,
+ 0xd2080a50,
+ 0xb740aa01,
+ 0xd002590c,
+ 0xc2003d24,
+ 0xc1015a08,
+ 0xf208024c,
+ 0xc380aa11,
+ 0xc101588c,
+ 0x75000092,
+ 0x5a18c300,
+ 0xa9190244,
+ 0x3d22d002,
+ 0xf2080289,
+ 0xb721a109,
+ 0xc12461af,
+ 0xf2080e82,
+ 0xf208a18e,
+ 0xfa08a112,
+ 0x9e6ba081,
+ 0x9eb39eaa,
+ 0xfef40ec0,
+ 0xc2509a52,
+ 0x0ac07740,
+ 0x92e4ffff,
+ 0x568cb780,
+ 0x5a88d324,
+ 0xd1109eb3,
+ 0xc00406d9,
+ 0xf2080982,
+ 0xc124a922,
+ 0xfef40902,
+ 0x9eb39bed,
+ 0x9804ff14,
+ 0xb7869e71,
+ 0x75006729,
+ 0x92c2c000,
+ 0x4f8cb740,
+ 0xf208aa19,
+ 0xd110a922,
+ 0x0a3c0555,
+ 0xa9c1f008,
+ 0x0d00c004,
+ 0xa122f208,
+ 0x2a410246,
+ 0xa241f008,
+ 0x9300c000,
+ 0xa922f208,
+ 0xc0049eb3,
+ 0xc1240982,
+ 0xff140902,
+ 0x9eb39be0,
+ 0x7deeb79f,
+ 0x7e6eb7bf,
+ 0x7eeeb7df,
+ 0x7f6eb7ff,
+ 0x8c20c002,
+ 0x9280fefe,
+ 0x7deeb79f,
+ 0x7e6eb7bf,
+ 0x7eeeb7df,
+ 0x7f6eb7ff,
+ 0x8c20c002,
+ 0xb7209c22,
+ 0x9e5a430c,
+ 0xc0a20203,
+ 0xd2080a38,
+ 0x7500aa01,
+ 0x9222c000,
+ 0x02039e8a,
+ 0x0d20c0a4,
+ 0x0a40c0a2,
+ 0xa901d208,
+ 0xaa41d010,
+ 0x70880802,
+ 0x0802d003,
+ 0xb7859c22,
+ 0x75007ea9,
+ 0x9162c000,
+ 0x500cb780,
+ 0xaa45c040,
+ 0xc0007500,
+ 0x08069082,
+ 0x02229c22,
+ 0xc0a20109,
+ 0xc0a20920,
+ 0xd2080a40,
+ 0xd008aa05,
+ 0x0802a951,
+ 0xd0037104,
+ 0x9c220802,
+ 0x430cb720,
+ 0x02030c06,
+ 0x0a20c136,
+ 0xa902d208,
+ 0x46b1b76a,
+ 0xa886d228,
+ 0x19049e52,
+ 0xc0007097,
+ 0xb74a9096,
+ 0x9e404729,
+ 0x29aed3f1,
+ 0x7913520c,
+ 0x90a4c000,
+ 0xffff7095,
+ 0xd0319244,
+ 0x6a520a24,
+ 0x478cb740,
+ 0xd0310244,
+ 0xf0080948,
+ 0x7508aa41,
+ 0xc1360203,
+ 0xb4020a20,
+ 0xd2084a22,
+ 0x9c22a181,
+ 0x430cb740,
+ 0xc1360205,
+ 0xd2080a24,
+ 0x7500aa01,
+ 0x90e4c000,
+ 0xc1360205,
+ 0xd2080a20,
+ 0x9e5aa182,
+ 0xc2000a06,
+ 0xb7405208,
+ 0xc136430c,
+ 0xd0080924,
+ 0x9e53a942,
+ 0xd0083246,
+ 0x9c22a241,
+ 0x9e5da605,
+ 0x9080c000,
+ 0x9846ff74,
+ 0x430cb780,
+ 0xc1360d92,
+ 0xd2080a20,
+ 0x710aaa01,
+ 0x92c4ffff,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x550cb780,
+ 0xb7209e5a,
+ 0xc040430c,
+ 0x5db0aa45,
+ 0x67adb741,
+ 0x2a7cc002,
+ 0x0d80c021,
+ 0xc1163244,
+ 0x9dbe0df0,
+ 0xa261f000,
+ 0xa60d9c22,
+ 0x430cb720,
+ 0x02039e5e,
+ 0x0a30c096,
+ 0xaa01d208,
+ 0xc0047500,
+ 0xb7a49344,
+ 0xb76468ad,
+ 0x7540672d,
+ 0x9222c000,
+ 0x67b5b744,
+ 0xc000729b,
+ 0x02039188,
+ 0x0a34c096,
+ 0xa901d208,
+ 0x1254c101,
+ 0xc0007104,
+ 0x74c090c6,
+ 0x9022c004,
+ 0x01030287,
+ 0x67adb784,
+ 0x0934c096,
+ 0xa941d008,
+ 0x71041258,
+ 0x92a8c003,
+ 0x46b1b76a,
+ 0xc2000e86,
+ 0xc00e55ac,
+ 0xff942dfc,
+ 0xb7209819,
+ 0xc004430c,
+ 0xb78409d2,
+ 0xc09667ad,
+ 0x125808b4,
+ 0x4029b580,
+ 0x430cb720,
+ 0xc0960203,
+ 0xd2080a34,
+ 0x1a04aa01,
+ 0x4badb581,
+ 0x430cb720,
+ 0xc0960203,
+ 0xb7410a34,
+ 0xd2085c35,
+ 0xb76aaa01,
+ 0x010346b1,
+ 0x5a1cc200,
+ 0x0d01cff0,
+ 0x0528c101,
+ 0x0960c09c,
+ 0x9acfff14,
+ 0x430cb720,
+ 0x46b1b76a,
+ 0x55acc200,
+ 0x2dfcc00e,
+ 0x9871ff94,
+ 0x478cb740,
+ 0xaa41f048,
+ 0xc0007508,
+ 0xf0489084,
+ 0xb720a2c2,
+ 0x0203430c,
+ 0x0a34c096,
+ 0xaa01d208,
+ 0x532bb749,
+ 0xc2001a04,
+ 0xcff05a0c,
+ 0xc00e291d,
+ 0x32442a60,
+ 0x532bb589,
+ 0x430cb740,
+ 0xc09c0205,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0x9224c000,
+ 0xc0960205,
+ 0xd2080a30,
+ 0x0092a881,
+ 0xb7890094,
+ 0xc00e532b,
+ 0xc2002a60,
+ 0xd2085a0f,
+ 0xb720a241,
+ 0xd208430c,
+ 0xb744aa41,
+ 0xba246e2d,
+ 0x02444000,
+ 0x752db584,
+ 0x430cb720,
+ 0x752db784,
+ 0x08f0c098,
+ 0x4029b580,
+ 0x430cb720,
+ 0x752db784,
+ 0x6d2db744,
+ 0xb5841244,
+ 0xb79f752d,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa61d9c22,
+ 0x8460c010,
+ 0x4314b7a0,
+ 0x9e699e6c,
+ 0x0a10c0a4,
+ 0xa902d208,
+ 0x5e35b7e1,
+ 0xc0337680,
+ 0x0a069184,
+ 0x44abb585,
+ 0x430cb780,
+ 0x0585096a,
+ 0x0a1cc09c,
+ 0xa101d208,
+ 0x430cb720,
+ 0xb5440405,
+ 0xb72079b5,
+ 0xb76a430c,
+ 0xba1b46a9,
+ 0xc0004000,
+ 0xb5049080,
+ 0xb7207a55,
+ 0xd110430c,
+ 0x0d840637,
+ 0x4731b74a,
+ 0x5908c200,
+ 0x00a2d020,
+ 0xffff7115,
+ 0x0203925c,
+ 0x0a40c09a,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xb78490e2,
+ 0x0a0475ad,
+ 0x75adb584,
+ 0x430cb740,
+ 0x0930c096,
+ 0xaa41d008,
+ 0xd0080a04,
+ 0xb780a241,
+ 0x9ea2430c,
+ 0xc0960109,
+ 0xc0960d30,
+ 0xd0100934,
+ 0xd008aa41,
+ 0x7104a941,
+ 0x1248d011,
+ 0x4442b422,
+ 0x430cb720,
+ 0x692db784,
+ 0xb5841a04,
+ 0xb740692d,
+ 0x0205430c,
+ 0x0a30c096,
+ 0xa881d208,
+ 0x00929e92,
+ 0xb7890094,
+ 0xc09a532b,
+ 0x2a100d40,
+ 0x5a0bc200,
+ 0xa241d010,
+ 0x4314b720,
+ 0xc0969e4c,
+ 0xd2080a30,
+ 0x0092a881,
+ 0x0092c101,
+ 0x532bb789,
+ 0xc0f00109,
+ 0xd1242900,
+ 0xc2005821,
+ 0x9e435831,
+ 0x70c0c812,
+ 0xb5842a0c,
+ 0xb7206ccd,
+ 0x09044314,
+ 0x9e4a0385,
+ 0x0930c096,
+ 0xa8c1d008,
+ 0xc1010092,
+ 0xb7890092,
+ 0xc00e532b,
+ 0xc2002a60,
+ 0xb59d5a0f,
+ 0xd0087468,
+ 0x7500aa41,
+ 0x9084c006,
+ 0x67cdb784,
+ 0x6e4db584,
+ 0x430cb720,
+ 0x4835b745,
+ 0xc0027684,
+ 0xb7449182,
+ 0x748067ad,
+ 0x9242c000,
+ 0x4b2db781,
+ 0xc0007500,
+ 0xb78490e2,
+ 0x7500692d,
+ 0x911cc000,
+ 0xc0007682,
+ 0x768090a2,
+ 0x92e4c001,
+ 0x6d2db784,
+ 0x1128e000,
+ 0x9142c000,
+ 0x4aadb781,
+ 0xc0007088,
+ 0x768090a8,
+ 0x93c4c000,
+ 0xc09c0103,
+ 0x0a060914,
+ 0xa241d008,
+ 0x430cb720,
+ 0xb5440902,
+ 0xb72075ad,
+ 0xb544430c,
+ 0xb720752d,
+ 0xb784430c,
+ 0xb58467ad,
+ 0xb7206d2d,
+ 0x0a7f430c,
+ 0x7468b55d,
+ 0x6fadb584,
+ 0x430cb720,
+ 0xb5840a06,
+ 0xb7206cad,
+ 0xb781430c,
+ 0xb5844b2d,
+ 0xb720692d,
+ 0xc00e430c,
+ 0xb5850a7e,
+ 0xb720482d,
+ 0xb781430c,
+ 0x75065dad,
+ 0x9144c000,
+ 0xc09c0203,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9342c002,
+ 0xc0980103,
+ 0xd008091c,
+ 0x0d86aa41,
+ 0xd0080a04,
+ 0xb780a241,
+ 0x9ea2430c,
+ 0xc0980109,
+ 0xc0980d1c,
+ 0xd0100918,
+ 0xd008aa41,
+ 0x7104a941,
+ 0x1244d014,
+ 0x4448b422,
+ 0x93a0c000,
+ 0xaa41d010,
+ 0x0639d110,
+ 0xa25dd008,
+ 0x430cb780,
+ 0x0146c101,
+ 0xc0980185,
+ 0xc0980a18,
+ 0xd2080980,
+ 0xd008a901,
+ 0x0d84aa7d,
+ 0xd0207104,
+ 0xc0001144,
+ 0xd0089086,
+ 0xb720a17e,
+ 0xc101430c,
+ 0x01090216,
+ 0xc0980203,
+ 0xd2080a18,
+ 0x9e8aaa01,
+ 0x0900c098,
+ 0x0d1cc098,
+ 0xfffe72d9,
+ 0xb78892b8,
+ 0x02186e29,
+ 0x0a00c098,
+ 0xaa1dd208,
+ 0xc0980218,
+ 0xd2080a20,
+ 0x1904a90d,
+ 0xa10dd208,
+ 0x4314b740,
+ 0xc09a9e54,
+ 0xd2080a40,
+ 0x7500aa01,
+ 0x91c2c001,
+ 0x2bfcc00e,
+ 0x062fd110,
+ 0xc0989e52,
+ 0xd2080a00,
+ 0xc096aa1d,
+ 0xd0080950,
+ 0xb780a241,
+ 0x0109430c,
+ 0x0950c096,
+ 0xa942d008,
+ 0xa941d1e8,
+ 0x0244c101,
+ 0x0a20c098,
+ 0xa10dd208,
+ 0x430cb720,
+ 0xc0960203,
+ 0xd2080a50,
+ 0xb744aa01,
+ 0xc20075ad,
+ 0x00c25a08,
+ 0x722db544,
+ 0x92a0c001,
+ 0xc0989e54,
+ 0xd2080a18,
+ 0xd110a901,
+ 0xc0980525,
+ 0xd0080900,
+ 0x0a04aa5d,
+ 0xa25dd008,
+ 0x430cb780,
+ 0xc0980109,
+ 0xd0080918,
+ 0x0228a941,
+ 0x09089ea2,
+ 0x0d00c098,
+ 0xaa5dd010,
+ 0xc0007104,
+ 0x1a0890b8,
+ 0xa25dd010,
+ 0x430cb780,
+ 0xc0980109,
+ 0xd0080918,
+ 0x9ea2a941,
+ 0xc0980244,
+ 0xd2080a00,
+ 0xc096aa1d,
+ 0xd0100d50,
+ 0xb740a241,
+ 0xc101430c,
+ 0xc0980220,
+ 0xd2080a00,
+ 0xc096aa1d,
+ 0xd0080954,
+ 0xb720a241,
+ 0xb748430c,
+ 0xb7846e29,
+ 0x9e8a75ad,
+ 0xc0967104,
+ 0xc0000d58,
+ 0x02109134,
+ 0x0a00c098,
+ 0xaa1dd208,
+ 0x90e0c000,
+ 0xc0960203,
+ 0xd2080a54,
+ 0xd010aa01,
+ 0xb740a241,
+ 0x9e69430c,
+ 0xc0980d02,
+ 0xb7850910,
+ 0xd0087e29,
+ 0xb720a142,
+ 0x75004314,
+ 0x6ccdb764,
+ 0x90c2c000,
+ 0x6acdb781,
+ 0x9080c000,
+ 0x6b4db781,
+ 0xb58474c6,
+ 0xc00063cd,
+ 0x74c29182,
+ 0x9222c000,
+ 0x430cb720,
+ 0x63adb784,
+ 0xc0003a04,
+ 0xb7209100,
+ 0xb784430c,
+ 0xc00863ad,
+ 0xb5843a00,
+ 0xb78063ad,
+ 0xc096430c,
+ 0xd2080a50,
+ 0xc200aa01,
+ 0xc1015a08,
+ 0xf2080242,
+ 0x7480a911,
+ 0x9222c000,
+ 0xa111f248,
+ 0x430cb780,
+ 0xc0960902,
+ 0xd2080a50,
+ 0xc200aa01,
+ 0xc1015a08,
+ 0xf2080242,
+ 0xb742a111,
+ 0x76804555,
+ 0x9102c002,
+ 0x430cb780,
+ 0x6cc9b748,
+ 0x0a50c096,
+ 0xaa01d208,
+ 0xc2007480,
+ 0xc1015a08,
+ 0xf2480242,
+ 0xf208a911,
+ 0xb780a111,
+ 0x0902430c,
+ 0x454db542,
+ 0x0a50c096,
+ 0xaa01d208,
+ 0x5a08c200,
+ 0x0242c101,
+ 0xa112f248,
+ 0x90c2c001,
+ 0x430cb720,
+ 0x7ea9b785,
+ 0xc0007500,
+ 0x74c29202,
+ 0x91a2c000,
+ 0xc0980203,
+ 0xd2080a00,
+ 0x0103aa01,
+ 0x0904c098,
+ 0xc0004a04,
+ 0x010390c0,
+ 0x0904c098,
+ 0xd0080a02,
+ 0xb720a241,
+ 0x0a02430c,
+ 0x6cc9b588,
+ 0x75adb784,
+ 0x6fadb584,
+ 0x430cb720,
+ 0x45cdb742,
+ 0xc0960203,
+ 0xd2080a50,
+ 0x7480aa01,
+ 0x5a08c200,
+ 0x0242c101,
+ 0xaa11f248,
+ 0x642db584,
+ 0x430cb720,
+ 0x9262c000,
+ 0xb5440a02,
+ 0xb582652d,
+ 0xb74045cd,
+ 0xb748430c,
+ 0xb5886d51,
+ 0xc0986d49,
+ 0xd0080908,
+ 0xc001a142,
+ 0x020390c0,
+ 0x0a54c096,
+ 0xaa01d208,
+ 0x5a08c200,
+ 0x0242c101,
+ 0xaa11f248,
+ 0x652db584,
+ 0x430cb720,
+ 0xc0960203,
+ 0xd2080a54,
+ 0xb744aa01,
+ 0x9e8a6f2d,
+ 0x5a08c200,
+ 0xb72400c2,
+ 0x0a02722d,
+ 0x0d08c098,
+ 0xd0017082,
+ 0xd0100a42,
+ 0xb742a241,
+ 0xb720464d,
+ 0x7480430c,
+ 0x9262c000,
+ 0xb5440a02,
+ 0xb582662d,
+ 0xb740464d,
+ 0xb748430c,
+ 0xb5886dd1,
+ 0xc0986dc9,
+ 0xd008090c,
+ 0xc001a142,
+ 0x020390c0,
+ 0x0a58c096,
+ 0xaa01d208,
+ 0x5a08c200,
+ 0x0242c101,
+ 0xaa11f248,
+ 0x662db584,
+ 0x430cb720,
+ 0xc0960203,
+ 0xd2080a58,
+ 0xb744aa01,
+ 0x9e8a6f2d,
+ 0x5a08c200,
+ 0xb72400c2,
+ 0x0a02722d,
+ 0x0d0cc098,
+ 0xd0017082,
+ 0xd0100a42,
+ 0xb720a241,
+ 0xb740430c,
+ 0x048b4155,
+ 0x6badb744,
+ 0xb7845d10,
+ 0x5910642d,
+ 0xcffe097c,
+ 0xc1012901,
+ 0xb7426125,
+ 0x02285b53,
+ 0x64adb584,
+ 0x430cb720,
+ 0x5bcbb762,
+ 0x652db784,
+ 0x61b4c101,
+ 0x193ed012,
+ 0xb5840228,
+ 0xb72065ad,
+ 0x769c430c,
+ 0x662db784,
+ 0xb5440128,
+ 0xc00066ad,
+ 0x0d3a90bc,
+ 0x90a0c000,
+ 0xc8120d82,
+ 0xb7207286,
+ 0xb780430c,
+ 0xd031500c,
+ 0xb70a2dae,
+ 0x0b0246a9,
+ 0xba000f06,
+ 0x02884000,
+ 0x5a30c000,
+ 0xc0219ea3,
+ 0xc1c80d80,
+ 0xc0050d90,
+ 0x9e699140,
+ 0xa322d208,
+ 0x4f35b702,
+ 0xc0037600,
+ 0xb7429022,
+ 0xd2114dad,
+ 0x9dcd1e38,
+ 0xa161f000,
+ 0x4e2db742,
+ 0x1e38d1f1,
+ 0xf0009dcd,
+ 0xb742a161,
+ 0xd2114ead,
+ 0x9dcd1e30,
+ 0xa161f000,
+ 0x4018b960,
+ 0xd152048b,
+ 0xb7821d38,
+ 0x9dae4fad,
+ 0xa261f000,
+ 0x0d20d051,
+ 0xf0009dad,
+ 0xb742a261,
+ 0xd09151ad,
+ 0x9dcd1e20,
+ 0xa161f000,
+ 0x1e20d051,
+ 0xf0009dcd,
+ 0x0890a161,
+ 0xffff0d10,
+ 0x9e699101,
+ 0x1e38d071,
+ 0x4d2db742,
+ 0xf0009dcd,
+ 0x7e04a161,
+ 0x9042c001,
+ 0x1d30d072,
+ 0x53cdb782,
+ 0xf0009dae,
+ 0xb742a261,
+ 0xd011564d,
+ 0x9dcd0e28,
+ 0xa161f000,
+ 0x72860d20,
+ 0xffff0c90,
+ 0x048b9224,
+ 0x554db782,
+ 0xf0009dbe,
+ 0xb742a261,
+ 0xd01155cd,
+ 0x9dcd0e38,
+ 0xa161f000,
+ 0x1e30d091,
+ 0xf0009dcd,
+ 0xb720a062,
+ 0xf210430c,
+ 0x0507a929,
+ 0x6badb724,
+ 0xc00e1904,
+ 0xd011297c,
+ 0xc2001a12,
+ 0xc00f5a40,
+ 0xcf4a2a7c,
+ 0x32440d01,
+ 0xf0009dae,
+ 0x9e69a261,
+ 0xb7419e5c,
+ 0xcf8068ad,
+ 0x9dcd0a01,
+ 0xa161f000,
+ 0x692db741,
+ 0x9dcd0a20,
+ 0xa161f000,
+ 0x682db741,
+ 0x1a30c008,
+ 0xf0009dcd,
+ 0xb781a161,
+ 0x1a0c5dad,
+ 0xc0007504,
+ 0x9e5c9112,
+ 0x0a41cf6a,
+ 0xf0009dcd,
+ 0x0b04a1e1,
+ 0xc1000a84,
+ 0xb7200d80,
+ 0x0260430c,
+ 0x4729b72a,
+ 0xfffa7102,
+ 0x77c0925c,
+ 0x9182c000,
+ 0xb785048b,
+ 0x75007e49,
+ 0x9024c001,
+ 0x59cdb582,
+ 0x93a0c000,
+ 0x048b9e69,
+ 0x67a9b786,
+ 0x6829b746,
+ 0x68b1b746,
+ 0x7529b589,
+ 0x75a9b549,
+ 0x7631b549,
+ 0x430cb720,
+ 0x5bcbb782,
+ 0x5b4bb742,
+ 0x6c2db724,
+ 0xb5246244,
+ 0xb582714b,
+ 0xb740584d,
+ 0xf048478c,
+ 0x7508aa41,
+ 0x90a4c000,
+ 0xf0481a0c,
+ 0xb720a241,
+ 0x9d8f430c,
+ 0x68b5b7c4,
+ 0x672db7c4,
+ 0x0da1cfee,
+ 0x9360c000,
+ 0x9b08ff34,
+ 0x430cb720,
+ 0x02039d8f,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0x0da1cfee,
+ 0xc0107500,
+ 0xb78493c4,
+ 0x7188672d,
+ 0x90e4c000,
+ 0x68adb784,
+ 0xc0007399,
+ 0xffd49082,
+ 0xb7809ad0,
+ 0xc098430c,
+ 0xd2080a70,
+ 0xff94a982,
+ 0xc01c99d3,
+ 0x0281743e,
+ 0xfffe0d92,
+ 0xb7809322,
+ 0x0906430c,
+ 0x0a18c09c,
+ 0xa101d208,
+ 0x430cb720,
+ 0xc0a20203,
+ 0xd2080a70,
+ 0x7500aa01,
+ 0x9082c000,
+ 0x532bb54c,
+ 0x430cb720,
+ 0x2bfcc00e,
+ 0x6e29b728,
+ 0xd01210f2,
+ 0xcffe1992,
+ 0xff942dfc,
+ 0x9e699b29,
+ 0x7e29b785,
+ 0xc0007500,
+ 0xb7209162,
+ 0x0a16430c,
+ 0x5eadb583,
+ 0x430cb720,
+ 0x422db584,
+ 0x048b0d02,
+ 0x7e51b545,
+ 0x430cb720,
+ 0x532bb78c,
+ 0xc0007500,
+ 0x010392e2,
+ 0x0970c0a2,
+ 0xaa41d008,
+ 0xc0007500,
+ 0xd00891e2,
+ 0xb740a142,
+ 0x0205430c,
+ 0x0a74c0a2,
+ 0xaa01d208,
+ 0x0918c09c,
+ 0xa241d008,
+ 0x430cb720,
+ 0x532bb78c,
+ 0xc0007500,
+ 0xb78191a2,
+ 0x75065dad,
+ 0x9104c000,
+ 0xc09c0103,
+ 0x1a0c0918,
+ 0xa241d008,
+ 0x430cb720,
+ 0xc09c0203,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9302c001,
+ 0x67adb784,
+ 0xc0007500,
+ 0x9ea190a4,
+ 0x9080c000,
+ 0x6135b721,
+ 0x4badb786,
+ 0x61adb741,
+ 0x46b1b74a,
+ 0x2a7cc00e,
+ 0x4731b70a,
+ 0x31c4d020,
+ 0xc0000802,
+ 0x9dcd9260,
+ 0xa0e2f000,
+ 0xc0210205,
+ 0xc0320a00,
+ 0x9dcd0a70,
+ 0xa1e2f000,
+ 0x9dcd0a10,
+ 0xa061f000,
+ 0x293ed3f2,
+ 0x0da2d011,
+ 0xc1807280,
+ 0x02055930,
+ 0x0a00c021,
+ 0x0a60c032,
+ 0x90b4ffff,
+ 0xb5840a02,
+ 0xb72079ad,
+ 0x9ea3430c,
+ 0xb76a9ea1,
+ 0xba1b46a9,
+ 0xc0004000,
+ 0xf0089080,
+ 0xb720a0c2,
+ 0xd110430c,
+ 0x0d840637,
+ 0x4731b74a,
+ 0x5908c200,
+ 0xc09c0122,
+ 0x71150940,
+ 0x923cffff,
+ 0x47b1b76a,
+ 0x9a50ff54,
+ 0xb7809e69,
+ 0xb743430c,
+ 0xc09a572b,
+ 0x74800a44,
+ 0xa281d208,
+ 0x9262c008,
+ 0x4314b7c0,
+ 0xc09c9e74,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9182c007,
+ 0x048d9e74,
+ 0x0a44c09a,
+ 0xaa01d208,
+ 0xb76a9d17,
+ 0xc20046d1,
+ 0xc1015a08,
+ 0xb74000cc,
+ 0xcfee7035,
+ 0xc0100ab1,
+ 0x010b0986,
+ 0x9905fef4,
+ 0x430cb780,
+ 0xa921d208,
+ 0xc0a4030b,
+ 0xd2080a1c,
+ 0xb720a101,
+ 0xd229430c,
+ 0xb785a945,
+ 0xc0a47ea9,
+ 0x750008a0,
+ 0xd0020a02,
+ 0x51500a42,
+ 0x4029b540,
+ 0x430cb720,
+ 0x582db762,
+ 0x4135b760,
+ 0x995fc274,
+ 0x9ea80982,
+ 0xc00e9e9b,
+ 0xc001287c,
+ 0xd20893c0,
+ 0x7480aac1,
+ 0xc10100da,
+ 0xb56a0092,
+ 0xd030532b,
+ 0xb720a90a,
+ 0xc000430c,
+ 0xb78a9164,
+ 0xba2452ab,
+ 0x62094002,
+ 0x0629d110,
+ 0x9240c000,
+ 0xc0a40203,
+ 0xd2080a1c,
+ 0x1a04aa01,
+ 0xc0007088,
+ 0xb78a9184,
+ 0xba2452ab,
+ 0x62094002,
+ 0x1629d110,
+ 0xc00e9ea2,
+ 0x9e712d7c,
+ 0x430cb780,
+ 0x5babb742,
+ 0x29fccffe,
+ 0x009800da,
+ 0x0627d110,
+ 0x6124c101,
+ 0x532bb54b,
+ 0x0d840189,
+ 0xb7200b08,
+ 0xd3f14314,
+ 0x9e4c2d3e,
+ 0x0a1cc0a4,
+ 0xaa01d208,
+ 0xfffd7104,
+ 0xb7829352,
+ 0xb7425bcb,
+ 0x61465c53,
+ 0xc0007295,
+ 0x00da9188,
+ 0x0092c101,
+ 0x532bb78b,
+ 0x1124c101,
+ 0xb58b1244,
+ 0x048b532b,
+ 0x5dcdb781,
+ 0xc0007508,
+ 0x750a90a2,
+ 0x91e4c000,
+ 0x430cb720,
+ 0x532bb78c,
+ 0xc0007500,
+ 0x9e8b9102,
+ 0xc06e098e,
+ 0xc0000d90,
+ 0xb7209120,
+ 0x9e8b430c,
+ 0x6cadb764,
+ 0x0d90c06e,
+ 0x9931ffb4,
+ 0xb7859e69,
+ 0x75007ea9,
+ 0x9082c001,
+ 0x430cb720,
+ 0xc09c0203,
+ 0xd2080a14,
+ 0x9e8baa01,
+ 0xc0807500,
+ 0xc0000d80,
+ 0xb7649102,
+ 0xffb46cad,
+ 0xc000991a,
+ 0x9e8a9220,
+ 0xc06e0109,
+ 0xb9600d10,
+ 0x9e5348d8,
+ 0xc0209e91,
+ 0x0904aa65,
+ 0xa269c010,
+ 0x9321ffff,
+ 0x430cb720,
+ 0x09020a7f,
+ 0x6c29b588,
+ 0x430cb780,
+ 0x0a3cc0a2,
+ 0xa101d208,
+ 0x430cb720,
+ 0x47b1b76a,
+ 0x99d7ff54,
+ 0x430cb720,
+ 0xb78a0902,
+ 0xc1364729,
+ 0xb58008a0,
+ 0xb7804029,
+ 0xc136430c,
+ 0xd2080a24,
+ 0xb780a101,
+ 0xc0a2430c,
+ 0xd2080a38,
+ 0xc001a101,
+ 0x9e929080,
+ 0x4030b960,
+ 0x430cb720,
+ 0x5c8cc100,
+ 0xd1100d04,
+ 0xb5430493,
+ 0xb7207bab,
+ 0xd110430c,
+ 0xb5430493,
+ 0xb7207c2b,
+ 0xd110430c,
+ 0xb5430493,
+ 0xb7807cab,
+ 0xc101430c,
+ 0xb5430498,
+ 0xffff7d4b,
+ 0xfff690a1,
+ 0xb79f91e0,
+ 0xb7bf6cee,
+ 0xb7df6d6e,
+ 0xb7ff6dee,
+ 0xcfec6e6e,
+ 0x9c228421,
+ 0xa205f839,
+ 0x430cb720,
+ 0x0d860902,
+ 0x6e29b788,
+ 0x0898c098,
+ 0xb5800a08,
+ 0xb7804029,
+ 0xc098430c,
+ 0xd2080a1c,
+ 0xc000a101,
+ 0xd0089320,
+ 0xb780a1de,
+ 0xc101430c,
+ 0x01850146,
+ 0x0a18c098,
+ 0x0980c098,
+ 0xa901d208,
+ 0xaa7dd008,
+ 0x71040d84,
+ 0x1144d020,
+ 0x9086c000,
+ 0xa17ed008,
+ 0x430cb720,
+ 0x0216c101,
+ 0x02030109,
+ 0x0a18c098,
+ 0xaa01d208,
+ 0x0900c098,
+ 0xfffe72d9,
+ 0xb7819398,
+ 0x01034bad,
+ 0x0934c096,
+ 0xd0080a04,
+ 0xb780a241,
+ 0x097f430c,
+ 0xc09609c2,
+ 0xd2080a30,
+ 0xb720a101,
+ 0xb76a430c,
+ 0xb74046b1,
+ 0x010374b5,
+ 0x0930c094,
+ 0x9b91fed4,
+ 0x430cb720,
+ 0x4badb781,
+ 0x532bb749,
+ 0x5a0cc200,
+ 0x291dcff0,
+ 0x2a60c00e,
+ 0xb5893244,
+ 0xb720532b,
+ 0x0a02430c,
+ 0x68adb584,
+ 0xaa1df9f8,
+ 0xa61d9c22,
+ 0x8420c006,
+ 0x430cb720,
+ 0x0a060902,
+ 0x4829b54a,
+ 0x4314b720,
+ 0x5ab5b703,
+ 0x5b35b7a3,
+ 0xb54aa01a,
+ 0xb72048c9,
+ 0x9e6b4314,
+ 0x5bb5b7e3,
+ 0x44cbb545,
+ 0x520cc200,
+ 0xb7800389,
+ 0xb723430c,
+ 0xc00e5c2d,
+ 0xc0a42bfc,
+ 0x9ebb0a10,
+ 0xa101d208,
+ 0xff54a09d,
+ 0xb720986f,
+ 0x9e4c4314,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc0157500,
+ 0xf1b192a4,
+ 0xb7cd8c80,
+ 0xb7cd6049,
+ 0x9e4960d1,
+ 0xb960010b,
+ 0xb7834028,
+ 0xf0084c2d,
+ 0xb783a241,
+ 0xf0084cad,
+ 0xb783a245,
+ 0xf0084d2d,
+ 0xb783a249,
+ 0x08c04dad,
+ 0xa24df008,
+ 0xffff0940,
+ 0xb74391c1,
+ 0x9e4a5bd5,
+ 0x2dded3f2,
+ 0x0992c144,
+ 0x9b1ffed4,
+ 0x430cb720,
+ 0xb9600d02,
+ 0xb5cd4028,
+ 0xb7206029,
+ 0xb5cd430c,
+ 0xb72060b1,
+ 0xf208430c,
+ 0xc100aa21,
+ 0xd1105c90,
+ 0x0d040493,
+ 0x4c2db583,
+ 0x430cb720,
+ 0xaa25f208,
+ 0x0493d110,
+ 0x4cadb583,
+ 0x430cb720,
+ 0xaa29f208,
+ 0x0493d110,
+ 0x4d2db583,
+ 0x430cb780,
+ 0xa92df208,
+ 0xc1010ac0,
+ 0xb5430498,
+ 0xfffe4dcd,
+ 0xb7209381,
+ 0xaa19430c,
+ 0x5a0cb700,
+ 0x5a94b700,
+ 0x5aadb583,
+ 0x430cb720,
+ 0x0c82c021,
+ 0x0d82c021,
+ 0x5b35b5a3,
+ 0x430cb720,
+ 0x0c80c014,
+ 0x0d90c014,
+ 0x5bb5b5e3,
+ 0x430cb720,
+ 0xb960a99d,
+ 0xb5634038,
+ 0xb7a05c2d,
+ 0x9e694314,
+ 0x0dd8d2f1,
+ 0x7029b788,
+ 0x6fa9b748,
+ 0x6f31b748,
+ 0x47a9b58a,
+ 0x430cb7a0,
+ 0x6ea9b788,
+ 0xb54a008b,
+ 0xb7204729,
+ 0xb54a430c,
+ 0xb72046b1,
+ 0xb58a430c,
+ 0xb7204629,
+ 0xb562430c,
+ 0xb720612d,
+ 0xd9e9430c,
+ 0x9d9eaa1d,
+ 0xa261f000,
+ 0xaa1dd9f1,
+ 0xf0009dbe,
+ 0xffffa261,
+ 0x048b92c1,
+ 0x7e49b785,
+ 0xc0067500,
+ 0xc00e91c2,
+ 0xb5850a7e,
+ 0xb720482d,
+ 0x0a82430c,
+ 0xb5a40906,
+ 0xb72067ad,
+ 0xb5a4430c,
+ 0xb720682d,
+ 0xb5a4430c,
+ 0xb720692d,
+ 0xb544430c,
+ 0xb7206cad,
+ 0xb5a4430c,
+ 0xb7806d2d,
+ 0xc09c430c,
+ 0xd2080a10,
+ 0xb780a281,
+ 0xc09c430c,
+ 0xd2080a18,
+ 0xfff4a101,
+ 0xb7809a8d,
+ 0x0109430c,
+ 0x0918c098,
+ 0xa941d008,
+ 0xc0980244,
+ 0xd2080a00,
+ 0xb720a11d,
+ 0xb781430c,
+ 0x75004bad,
+ 0x9262c000,
+ 0x5c35b741,
+ 0x46b1b76a,
+ 0x5a1cc200,
+ 0x1d00c008,
+ 0xc1010103,
+ 0xc09c0528,
+ 0xc0040960,
+ 0xfed409d2,
+ 0xb7809a4e,
+ 0x090a430c,
+ 0xa9aef210,
+ 0x0a40c09a,
+ 0xa281d208,
+ 0x430cb720,
+ 0xb762040b,
+ 0xb5a65b4a,
+ 0xb7804aad,
+ 0xb742430c,
+ 0xf2105bd2,
+ 0xc0c4a8aa,
+ 0xd2080a60,
+ 0xb720a101,
+ 0xd111430c,
+ 0xb56465a7,
+ 0xb7a06bb5,
+ 0x008b430c,
+ 0x6bb5b764,
+ 0xc25465b3,
+ 0x048b9aa0,
+ 0x5f4bb781,
+ 0x75009ea8,
+ 0x6c4cb504,
+ 0x4314b740,
+ 0x9142c000,
+ 0x0a6a9e52,
+ 0x091cc09c,
+ 0xa241d008,
+ 0x9140c000,
+ 0x9e549e69,
+ 0x67a9b746,
+ 0x0a1cc09c,
+ 0xa101d208,
+ 0x430cb720,
+ 0x087f0d82,
+ 0x67a9b786,
+ 0x08a0c09c,
+ 0x0c7ec00e,
+ 0x4029b580,
+ 0x430cb720,
+ 0x46b1b74a,
+ 0x4001ba12,
+ 0x91e0c000,
+ 0x6fcdb789,
+ 0x02426a32,
+ 0x0a20c138,
+ 0xa011d208,
+ 0x430cb720,
+ 0xb50900b2,
+ 0xb7206fb5,
+ 0xd010430c,
+ 0x0d840534,
+ 0x4729b78a,
+ 0x5988c100,
+ 0x00b2d020,
+ 0xffff7088,
+ 0x090290fc,
+ 0xb9600d7f,
+ 0xb7804040,
+ 0x0228430c,
+ 0x0a60c098,
+ 0xa116d208,
+ 0xffff0904,
+ 0x048b9301,
+ 0x5e55b741,
+ 0x430cb780,
+ 0x76800902,
+ 0x0a18c0a4,
+ 0x0922d002,
+ 0xa101d208,
+ 0x430cb720,
+ 0x76860a02,
+ 0x52abb58a,
+ 0x9102c009,
+ 0xc0017684,
+ 0xb7819304,
+ 0x1a0c5dcd,
+ 0xc0017504,
+ 0xb7209252,
+ 0xc809430c,
+ 0xccf00a42,
+ 0xb5890a20,
+ 0xb7206aad,
+ 0xc809430c,
+ 0xc9be0a46,
+ 0xb5890a60,
+ 0xb7206b2d,
+ 0xc809430c,
+ 0xccf80a42,
+ 0xb5890a50,
+ 0xb7206a2d,
+ 0xc809430c,
+ 0xc5f80a46,
+ 0xb5890a20,
+ 0xb7206c2d,
+ 0xc809430c,
+ 0xc40a0a46,
+ 0xb5890a10,
+ 0xb7206bad,
+ 0xc809430c,
+ 0xcd0a0a42,
+ 0xb5890a10,
+ 0x040b6cad,
+ 0x5dccb781,
+ 0xc0017504,
+ 0xb78192e4,
+ 0x75045e4c,
+ 0x9244c001,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a70cf1a,
+ 0x6aadb589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a30cac6,
+ 0x6b2db589,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a70cd0c,
+ 0x6a2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a30cb92,
+ 0x6c2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a50cc94,
+ 0x6badb589,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a20cd2e,
+ 0x6cadb589,
+ 0xb7819e69,
+ 0x75025e2d,
+ 0x92e4c001,
+ 0x5dadb781,
+ 0xc0017504,
+ 0xb7209244,
+ 0xc809430c,
+ 0xcf1a0a42,
+ 0xb5890a70,
+ 0xb7206aad,
+ 0xc809430c,
+ 0xcb360a46,
+ 0xb5890a00,
+ 0xb7206b2d,
+ 0xc809430c,
+ 0xc9dc0a46,
+ 0xb5890a10,
+ 0xb7206a2d,
+ 0xc809430c,
+ 0xcb0a0a46,
+ 0xb5890a30,
+ 0xb7206c2d,
+ 0xc809430c,
+ 0xcc940a46,
+ 0xb5890a50,
+ 0xb7206bad,
+ 0xc809430c,
+ 0xcd2e0a42,
+ 0xb5890a20,
+ 0x048b6cad,
+ 0x5e4db781,
+ 0xc0017508,
+ 0xb78192e4,
+ 0x75045dcd,
+ 0x9244c001,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a40caca,
+ 0x6aadb589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a00cb36,
+ 0x6b2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a10c9dc,
+ 0x6a2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a30cb0a,
+ 0x6c2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a50cc94,
+ 0x6badb589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a70cab8,
+ 0x6cadb589,
+ 0xb781040b,
+ 0x75025e4c,
+ 0x9304c001,
+ 0x5dccb781,
+ 0x75041a0c,
+ 0x9252c001,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a30c050,
+ 0x6aadb589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a50c9c8,
+ 0x6b2db589,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a10cd38,
+ 0x6a2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a60c1a0,
+ 0x6c2db589,
+ 0x430cb720,
+ 0x0a46c809,
+ 0x0a00c0aa,
+ 0x6badb589,
+ 0x430cb720,
+ 0x0a42c809,
+ 0x0a20cd68,
+ 0x6cadb589,
+ 0x430cb720,
+ 0xb5ea9ebb,
+ 0xb7204829,
+ 0xb5ea430c,
+ 0xb79f48a9,
+ 0xb7bf77ee,
+ 0xb7df786e,
+ 0xb7ff78ee,
+ 0xc008796e,
+ 0xff318c20,
+ 0xa60d9140,
+ 0xb7208440,
+ 0x0203430c,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc0107500,
+ 0x02039104,
+ 0x0a18c09c,
+ 0xaa01d208,
+ 0xc0017500,
+ 0xb7aa90c2,
+ 0xb7c046a9,
+ 0xc000478c,
+ 0xd0319320,
+ 0xc00e0a54,
+ 0x6a522a7c,
+ 0xd032024c,
+ 0xc0000ac8,
+ 0xff149080,
+ 0xf2109945,
+ 0x0d92aa21,
+ 0xffff750c,
+ 0xd0119344,
+ 0x02890a52,
+ 0x2afcc00e,
+ 0x430cb720,
+ 0x4729b78a,
+ 0xffff710a,
+ 0xb7209088,
+ 0x0203430c,
+ 0x0a18c09c,
+ 0xaa01d208,
+ 0xc0027500,
+ 0x9ea39304,
+ 0x4731b7ca,
+ 0x98b8ff74,
+ 0x430cb780,
+ 0x0a10c0a4,
+ 0xab01d208,
+ 0xc00e7580,
+ 0x0a869084,
+ 0x56d89eaa,
+ 0x2efcc00e,
+ 0xff34058b,
+ 0x9eb29945,
+ 0x018d9eb1,
+ 0x010d008b,
+ 0xa31d058d,
+ 0xa295a319,
+ 0x9af4ff94,
+ 0x4314b700,
+ 0x5e88d322,
+ 0x4490b760,
+ 0x00d0c101,
+ 0x6fb5b729,
+ 0x6c50b74d,
+ 0xd0319e73,
+ 0xc00e6d19,
+ 0xc1012cfc,
+ 0xc1380120,
+ 0xd0080920,
+ 0x1b04a951,
+ 0x99ebff14,
+ 0x430cb740,
+ 0x00d4058b,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xa311d208,
+ 0x430cb780,
+ 0x008b02d8,
+ 0x0a7ec00e,
+ 0x6fadb589,
+ 0x9997ff34,
+ 0x560cb780,
+ 0xc2209ea2,
+ 0xb720a349,
+ 0x0203430c,
+ 0x0a18c0a4,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xb78991a2,
+ 0xb7646bad,
+ 0x018379b5,
+ 0x09b0c0c4,
+ 0x9c629ea4,
+ 0x9080c000,
+ 0x532bb58c,
+ 0x430cb720,
+ 0x67adb784,
+ 0xb5840a04,
+ 0xb7a067ad,
+ 0x008b430c,
+ 0x612db781,
+ 0xc0017500,
+ 0xb7849122,
+ 0x750067ad,
+ 0x9082c001,
+ 0x46a9b74a,
+ 0x4729b76a,
+ 0xc0000d82,
+ 0x9d4d9100,
+ 0xd3f1aa61,
+ 0xc1012d2e,
+ 0xd01205b8,
+ 0x70860922,
+ 0x5e30d122,
+ 0x0a00c021,
+ 0x0a00c034,
+ 0x9234ffff,
+ 0xaa2df208,
+ 0xa9a9f208,
+ 0xc25461c7,
+ 0x008b9816,
+ 0x4badb506,
+ 0x430cb720,
+ 0xb5a50a82,
+ 0xb72044ab,
+ 0xb76a430c,
+ 0xff344831,
+ 0xb720993a,
+ 0x0203430c,
+ 0x0a18c09c,
+ 0xaa01d208,
+ 0xc0027500,
+ 0xb7849162,
+ 0x0a04682d,
+ 0x682db584,
+ 0x430cb740,
+ 0x0914c09c,
+ 0xaa41d008,
+ 0xc0007500,
+ 0xd0089322,
+ 0xb720a2c1,
+ 0x097f430c,
+ 0x6e29b788,
+ 0xc0980218,
+ 0xd2080a04,
+ 0x0098aa1d,
+ 0x08a0c098,
+ 0x41a9b5a0,
+ 0x430cb780,
+ 0x0a30c096,
+ 0xa101d208,
+ 0x430cb720,
+ 0x6cadb784,
+ 0xc0007506,
+ 0xb7849182,
+ 0xb584782d,
+ 0xb720772d,
+ 0xb784430c,
+ 0xb58477ad,
+ 0xb720782d,
+ 0xb784430c,
+ 0xb5846fad,
+ 0xb7406f2d,
+ 0x0205430c,
+ 0x0a04c098,
+ 0xaa01d208,
+ 0x0900c098,
+ 0xa241d008,
+ 0x9180c000,
+ 0xc0c40203,
+ 0xd2080a60,
+ 0xb786a901,
+ 0x02444aad,
+ 0x4aadb586,
+ 0x430cb780,
+ 0xc09c0a82,
+ 0xd2080a10,
+ 0xb720a281,
+ 0xb744430c,
+ 0x748068ad,
+ 0x91a2c000,
+ 0x67adb784,
+ 0xc0007088,
+ 0xffd49104,
+ 0xb7209b37,
+ 0xb5a5430c,
+ 0xb720482d,
+ 0xb743430c,
+ 0x76805733,
+ 0x93c4c004,
+ 0x5eadb783,
+ 0x09020e82,
+ 0xd00a7502,
+ 0x75040ed2,
+ 0x0922d001,
+ 0x0a027508,
+ 0x0a42d001,
+ 0x31287740,
+ 0x90a2c000,
+ 0xc0009e55,
+ 0x748090c0,
+ 0xd0010a86,
+ 0x08020ad2,
+ 0x5b14b7c0,
+ 0x49049e83,
+ 0x0b7ec03e,
+ 0x4018b960,
+ 0x9000c004,
+ 0xaa49c060,
+ 0x430cb760,
+ 0x5c88c100,
+ 0x4000ba24,
+ 0x0058d020,
+ 0xd0220496,
+ 0x04925c88,
+ 0x0096c101,
+ 0x0496c101,
+ 0xb7830092,
+ 0x00967bcb,
+ 0x57abb723,
+ 0x4002ba24,
+ 0x0242e000,
+ 0x90acc000,
+ 0xc0000201,
+ 0xd0109080,
+ 0xd022710c,
+ 0xc1015c88,
+ 0x00920096,
+ 0xb5830096,
+ 0xd12257ab,
+ 0xb7805c88,
+ 0xc101430c,
+ 0x00920096,
+ 0x00980d04,
+ 0xb5037688,
+ 0xfffe7bab,
+ 0x77409124,
+ 0x9364c001,
+ 0xaa49c060,
+ 0x430cb760,
+ 0x5c88c100,
+ 0x4000ba24,
+ 0x0028d020,
+ 0xd0220496,
+ 0x04925c88,
+ 0x0096c101,
+ 0x0496c101,
+ 0xb7830092,
+ 0x00967bcb,
+ 0x57abb723,
+ 0x4002ba24,
+ 0x0242e000,
+ 0x90acc000,
+ 0xc0000201,
+ 0xd0109080,
+ 0xd022710c,
+ 0xc1015c88,
+ 0x00920096,
+ 0xb5830096,
+ 0xd12257ab,
+ 0xb7805c88,
+ 0xc101430c,
+ 0x00920096,
+ 0x00980d04,
+ 0xb503768e,
+ 0xfffe7bab,
+ 0x0d849124,
+ 0x9101c000,
+ 0x4490b760,
+ 0x9a60fef4,
+ 0x90a0c000,
+ 0xfffc9e82,
+ 0xb79f9020,
+ 0xb7bf7dee,
+ 0xb7df7e6e,
+ 0xc0027eee,
+ 0x9c228c20,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x7eadb742,
+ 0xc1007504,
+ 0xc000580c,
+ 0xcd8e90a4,
+ 0x9c220841,
+ 0x0850cfff,
+ 0x0800c480,
+ 0x9e589c22,
+ 0xe0000c86,
+ 0xd0054030,
+ 0x9e511c94,
+ 0x72c0c814,
+ 0x70c0c814,
+ 0x7480c008,
+ 0x483cb323,
+ 0x445cb323,
+ 0x927cc000,
+ 0x5a1be100,
+ 0x90ccc000,
+ 0x0a02c002,
+ 0x90e0c000,
+ 0x7100d002,
+ 0xba240a04,
+ 0x9e504000,
+ 0x51d3d024,
+ 0xc0055193,
+ 0xc00074c0,
+ 0xe180923c,
+ 0xc0005a57,
+ 0xc00290cc,
+ 0xc0000a02,
+ 0xd00290e0,
+ 0x0a047100,
+ 0x4000ba24,
+ 0x51f30098,
+ 0x470cb740,
+ 0xd0109e58,
+ 0xc0200636,
+ 0xb740a906,
+ 0x7293468c,
+ 0xaa05ca20,
+ 0xd11d6039,
+ 0xd11e1623,
+ 0xd01a1215,
+ 0xd01c5013,
+ 0xc1015010,
+ 0x9c226003,
+ 0xb7a0a60d,
+ 0x0707430c,
+ 0xb781008b,
+ 0x75045dad,
+ 0x93c4c007,
+ 0x9b94fff4,
+ 0x5c88d322,
+ 0x009a0d06,
+ 0x7f2db722,
+ 0x10029e52,
+ 0x7080d812,
+ 0xb781008b,
+ 0x9eb55e2d,
+ 0xc0007502,
+ 0x750890a2,
+ 0x93a4c000,
+ 0xb787008b,
+ 0x75004e2d,
+ 0x92e2c000,
+ 0x402db743,
+ 0xb3207104,
+ 0xc0004834,
+ 0x008b9154,
+ 0x4629b76a,
+ 0x11c4d020,
+ 0xfff40d02,
+ 0x0a069b7e,
+ 0x7100d012,
+ 0x7188c810,
+ 0xb7209e95,
+ 0xb7804314,
+ 0x0b1e448c,
+ 0x75009e4a,
+ 0x0938c0a2,
+ 0xa941d008,
+ 0x4422b346,
+ 0xc0007480,
+ 0xb7809282,
+ 0xb764414d,
+ 0xb7436bd5,
+ 0xb76a41cd,
+ 0xc1014649,
+ 0x0d0265b9,
+ 0x15b4c101,
+ 0x9b59fff4,
+ 0xc0009e82,
+ 0xd3229180,
+ 0xc1015c88,
+ 0xb7830092,
+ 0xb72540ad,
+ 0x109846ad,
+ 0xcffe9e8a,
+ 0x76802d7c,
+ 0x9382c004,
+ 0x7740c080,
+ 0x9094c000,
+ 0x0e81cfc0,
+ 0x4314b720,
+ 0x5e4db781,
+ 0xc0007508,
+ 0xb78192c4,
+ 0x1a045ecd,
+ 0xc0007502,
+ 0xb7449192,
+ 0x0a1a6ccd,
+ 0x74829e69,
+ 0x1a46d009,
+ 0xc00052b1,
+ 0xd2a29140,
+ 0xc0005e85,
+ 0xd2a290c0,
+ 0xd1105e0d,
+ 0x9e5316d9,
+ 0x15dac101,
+ 0xfff40d02,
+ 0x09869b1c,
+ 0x70c0c812,
+ 0x4314b700,
+ 0x5c88d322,
+ 0x5e4cb781,
+ 0x0090c101,
+ 0x7f35b742,
+ 0xd0127508,
+ 0xc0001248,
+ 0xc10090c4,
+ 0xd0125a05,
+ 0xc03e70c8,
+ 0xc41009fe,
+ 0x0a067106,
+ 0x9ea39e89,
+ 0x7246c412,
+ 0x7086cc10,
+ 0x70c8cc12,
+ 0x0a7ec00f,
+ 0x0a7ccffe,
+ 0x0155d120,
+ 0xc8109ea3,
+ 0x0d827286,
+ 0x7286c812,
+ 0x5c88d322,
+ 0x5918c300,
+ 0x0090c101,
+ 0x40adb723,
+ 0x9e4c5cd8,
+ 0x2900c01c,
+ 0x04877440,
+ 0xd0013904,
+ 0x31280cb4,
+ 0xc0037440,
+ 0xcf000a7e,
+ 0xd0010a00,
+ 0x59b00db8,
+ 0x9e5c21b8,
+ 0xd0113126,
+ 0x31280de2,
+ 0x59b09e4c,
+ 0x02073128,
+ 0x0a00c021,
+ 0x0a40c01e,
+ 0xf0009dcd,
+ 0xc0ffa161,
+ 0xcffe0a7e,
+ 0x9ea30a40,
+ 0x0a0a7440,
+ 0x1a44d002,
+ 0x25265d10,
+ 0x3d049ea3,
+ 0x0980c021,
+ 0xc01e3526,
+ 0x9dbd09d0,
+ 0xa162f000,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x8440a61d,
+ 0x0a32d011,
+ 0x5b30c200,
+ 0x6931d0d1,
+ 0x540cb780,
+ 0xa1969e9e,
+ 0x03a8d020,
+ 0xc021010d,
+ 0xa1120900,
+ 0x0930c03c,
+ 0xa9619d2d,
+ 0xda109e92,
+ 0xcff0aa61,
+ 0x5d212d00,
+ 0x0629d110,
+ 0x5e88d322,
+ 0xa261da10,
+ 0x430cb720,
+ 0x297cc00e,
+ 0x00d2018d,
+ 0x40adb783,
+ 0x0980c021,
+ 0x0980c03a,
+ 0x0629d110,
+ 0x40adb583,
+ 0x430cb720,
+ 0x41adb783,
+ 0x0244c101,
+ 0x41adb583,
+ 0xaa65da10,
+ 0xda100228,
+ 0xb720a265,
+ 0x00d2430c,
+ 0x40adb783,
+ 0xb5830228,
+ 0xb72040ad,
+ 0xb783430c,
+ 0x024441ad,
+ 0x41adb583,
+ 0xa9e19d3d,
+ 0xaa65f210,
+ 0x29fec7ff,
+ 0x0246010d,
+ 0x0900c021,
+ 0x0920c03a,
+ 0xa265f210,
+ 0xa9619d2d,
+ 0xaa69f210,
+ 0xc7ff9eb2,
+ 0x0244297e,
+ 0x0d00c021,
+ 0x0d10c03a,
+ 0xa269f210,
+ 0xa9629d2e,
+ 0xaa61f250,
+ 0xc021010d,
+ 0xc1010900,
+ 0xc03a0244,
+ 0xf2500930,
+ 0x9d2da261,
+ 0xf250a961,
+ 0x9eb3aa65,
+ 0xc040000d,
+ 0x02440800,
+ 0x0d80c021,
+ 0xc042a019,
+ 0xf2500dc0,
+ 0x9d3ea265,
+ 0xf210a9e2,
+ 0xc00faa71,
+ 0x018d2dfe,
+ 0x0246c101,
+ 0x0980c021,
+ 0x09d0c042,
+ 0xa271f210,
+ 0xa9e19d3d,
+ 0xaa75f210,
+ 0x29fec00f,
+ 0x0246038d,
+ 0x0b80c021,
+ 0x0ba0c042,
+ 0xa275f210,
+ 0xa8e19d7d,
+ 0xaa7df210,
+ 0x28fccffe,
+ 0x0242010d,
+ 0x0900c021,
+ 0x0930c042,
+ 0xa27df210,
+ 0xa9619d2d,
+ 0xaa79f210,
+ 0xc00f9eb2,
+ 0x0244297e,
+ 0x0d00c021,
+ 0x0d40c048,
+ 0xa279f210,
+ 0xa9629d2e,
+ 0x01b6c101,
+ 0x020d01b2,
+ 0xc00f01b4,
+ 0xc0212d7e,
+ 0xc1010a00,
+ 0xc04601b4,
+ 0x9d4d0a60,
+ 0x9ea2aa61,
+ 0xa979da10,
+ 0x2d7cc3ff,
+ 0xda105d41,
+ 0xc101a9fe,
+ 0xb7400524,
+ 0xc3fe430c,
+ 0xc1012a7c,
+ 0x01540246,
+ 0xa17ada10,
+ 0xa27dda10,
+ 0x0960c05e,
+ 0xaa41f008,
+ 0x0238058d,
+ 0xa241f008,
+ 0x430cb720,
+ 0x402db783,
+ 0xb5830246,
+ 0xb7a0402d,
+ 0xfff44314,
+ 0xc1019999,
+ 0xc05e02da,
+ 0xf2080ae0,
+ 0x7140aaa1,
+ 0x90e6c000,
+ 0x04dcd010,
+ 0xb58c0a06,
+ 0xb7204829,
+ 0xb747430c,
+ 0x74804e2d,
+ 0x9222c000,
+ 0x402db783,
+ 0xc0007104,
+ 0xb78c9186,
+ 0x750065ab,
+ 0x90e4c000,
+ 0x009cc101,
+ 0xb58c0a04,
+ 0xb7204929,
+ 0xb781430c,
+ 0x75045dad,
+ 0x92c4c002,
+ 0xc021020d,
+ 0xc01e0a00,
+ 0x9d4d0a60,
+ 0x9e79aa61,
+ 0xb7009e78,
+ 0x9ea25431,
+ 0x54b1b760,
+ 0x55b0b720,
+ 0x5529b720,
+ 0xc2000109,
+ 0xcff059e1,
+ 0xc1012900,
+ 0xc00f0240,
+ 0xb5802d7c,
+ 0x59215428,
+ 0x5d41020d,
+ 0x0126c101,
+ 0x0522c101,
+ 0x01b2c101,
+ 0x0a00c021,
+ 0x54a8b540,
+ 0x5530b540,
+ 0x55a8b560,
+ 0x0a70c01e,
+ 0xaa619d4d,
+ 0xb7009ea2,
+ 0xb7605630,
+ 0xb72056b0,
+ 0xb7205728,
+ 0x010957b0,
+ 0x2900cff0,
+ 0x2d7cc00f,
+ 0x59e1c200,
+ 0x5d415921,
+ 0x0126c101,
+ 0x0522c101,
+ 0x01b2c101,
+ 0x0240c101,
+ 0x5628b580,
+ 0x56a8b540,
+ 0x5730b540,
+ 0x57a8b560,
+ 0x2deed3f2,
+ 0x9978fff4,
+ 0x7600a812,
+ 0x9322c005,
+ 0xb780a919,
+ 0xc300590c,
+ 0xc0215d18,
+ 0xc0020900,
+ 0xd1300910,
+ 0x9d2d0529,
+ 0x9e92a961,
+ 0xa9cad858,
+ 0xa8cdd858,
+ 0xa8d2d858,
+ 0xa856d858,
+ 0xcff00205,
+ 0xc00f2a00,
+ 0xc1002d7c,
+ 0xc20059e1,
+ 0x5d415a21,
+ 0x297cc00e,
+ 0x0246c101,
+ 0x05220122,
+ 0x01b0c101,
+ 0xa249d858,
+ 0xa14dd858,
+ 0xa152d858,
+ 0xa1d5d858,
+ 0xaa619d7d,
+ 0xabf9da10,
+ 0xa87ef210,
+ 0xa941f058,
+ 0xa879f210,
+ 0xaaf1f210,
+ 0xaaf6f210,
+ 0xab7dda10,
+ 0xa9dbd858,
+ 0xa946f058,
+ 0xa9c9f058,
+ 0xa9cef058,
+ 0xa8d5f058,
+ 0xa8d2f058,
+ 0xc101a39d,
+ 0xf0580120,
+ 0xa81eabd9,
+ 0xa964f212,
+ 0x5a61c200,
+ 0x9eb80490,
+ 0x0246c301,
+ 0xc1014c7d,
+ 0x01ba0520,
+ 0x009c05ba,
+ 0xd8587205,
+ 0xf058a259,
+ 0xf058a141,
+ 0xf058a146,
+ 0xf058a1c9,
+ 0xf058a1ce,
+ 0xf058a0d5,
+ 0xc000a0d2,
+ 0xd0d891b2,
+ 0xe010aa59,
+ 0x0a048b21,
+ 0xa159f058,
+ 0xa259d0d8,
+ 0x90c0c000,
+ 0x0274c001,
+ 0xa259f058,
+ 0xaa5df058,
+ 0xa969f210,
+ 0x4d7d9ea2,
+ 0xc0007295,
+ 0xd0d891b2,
+ 0xc101aa5d,
+ 0xf0581124,
+ 0x0a04a15d,
+ 0xa25dd0d8,
+ 0x90a0c000,
+ 0xf0580244,
+ 0xb720a25d,
+ 0x0203430c,
+ 0x0a18c0a4,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xb7899122,
+ 0xa9966a2d,
+ 0x050f9e73,
+ 0x9c629ea4,
+ 0xc002058f,
+ 0xb79f09c2,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xb7ff7e6e,
+ 0xc0027eee,
+ 0xfe698c40,
+ 0xb79f9200,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xb7ff7e6e,
+ 0xc0027eee,
+ 0x9c228c40,
+ 0x430cb740,
+ 0x0d020487,
+ 0xc0c60185,
+ 0xb9600980,
+ 0xc0204258,
+ 0x9e9baa29,
+ 0xa269c020,
+ 0xffff0d04,
+ 0xc0d29341,
+ 0x0a060960,
+ 0xa241d008,
+ 0x430cb720,
+ 0x0a70c002,
+ 0x792bb58c,
+ 0x5749b780,
+ 0x430cb720,
+ 0x4aabb58d,
+ 0xb7209c22,
+ 0x9e53430c,
+ 0xa95df010,
+ 0xa956f010,
+ 0x712db786,
+ 0x0124c101,
+ 0xb5860244,
+ 0xb720712d,
+ 0xf008430c,
+ 0xf008a972,
+ 0xb786a979,
+ 0xc10170ad,
+ 0x02440124,
+ 0x70adb586,
+ 0x430cb720,
+ 0xa966f008,
+ 0xa969f008,
+ 0x71adb786,
+ 0x0124c101,
+ 0xb5860244,
+ 0xb72071ad,
+ 0xd808430c,
+ 0xd808a961,
+ 0xb786a966,
+ 0xc101702d,
+ 0x02440124,
+ 0x702db586,
+ 0x430cb720,
+ 0x64adb786,
+ 0xb3407502,
+ 0xd80848a4,
+ 0xb786a965,
+ 0x02446fad,
+ 0x6fadb586,
+ 0xf0109c22,
+ 0xb720a9e2,
+ 0x0906430c,
+ 0x6435b566,
+ 0x430cb780,
+ 0x0a70c0d0,
+ 0xa101d208,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0xaa41d810,
+ 0xa945d810,
+ 0x0685008b,
+ 0x5e35b741,
+ 0x00890244,
+ 0xf2107684,
+ 0xcffea9b1,
+ 0xc00028fc,
+ 0x9ea99224,
+ 0x5a4bb78d,
+ 0x6dcdb746,
+ 0xb58d0238,
+ 0xf2105a4b,
+ 0x0128aa35,
+ 0x6dcdb546,
+ 0x9120c000,
+ 0xb56d9ea9,
+ 0xf2105a4b,
+ 0xb586aa35,
+ 0x9ea96dcd,
+ 0xa9a1f250,
+ 0x6fcdb786,
+ 0x704db746,
+ 0x6ed5b746,
+ 0xb5860246,
+ 0xf2506fcd,
+ 0xb786a9a5,
+ 0xb7666f4d,
+ 0x01265ad5,
+ 0x704db546,
+ 0xa925da10,
+ 0xb5860242,
+ 0xc1016f4d,
+ 0xb5460524,
+ 0xf2106ed5,
+ 0xf210a931,
+ 0xd0a4aa35,
+ 0x01835905,
+ 0xc1010244,
+ 0xb56605b8,
+ 0xda105ad5,
+ 0xda10aa39,
+ 0xc101a9be,
+ 0x05b405b8,
+ 0x9a5ec214,
+ 0xb786008b,
+ 0xb74c69ad,
+ 0x02406d2b,
+ 0xb5860904,
+ 0xb54c69ad,
+ 0xb7806d2b,
+ 0x7500448c,
+ 0x90a4c000,
+ 0xc0000109,
+ 0x9e699180,
+ 0xb9600902,
+ 0xb7804030,
+ 0x088454a9,
+ 0xffff0128,
+ 0x9ea99381,
+ 0x614bb54d,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xaa61f010,
+ 0xb7209e5a,
+ 0x7500430c,
+ 0x9082c000,
+ 0x76adb586,
+ 0xa956d008,
+ 0x76bec01c,
+ 0x90e2c000,
+ 0xc0da0203,
+ 0xd2080a64,
+ 0xd008a102,
+ 0xc01ca959,
+ 0xc00074be,
+ 0x020390e2,
+ 0x0a68c0da,
+ 0xa101d208,
+ 0xc0da0103,
+ 0x0a060940,
+ 0xa241d008,
+ 0xa6059c22,
+ 0xf0109e55,
+ 0xf010a955,
+ 0xf010aa51,
+ 0xf208a95a,
+ 0xb720a9bd,
+ 0x0244430c,
+ 0x0244c101,
+ 0xb5860246,
+ 0xf208752d,
+ 0xf208a929,
+ 0xda08a9a6,
+ 0xda08aa25,
+ 0xb7a0a9a1,
+ 0xd1204314,
+ 0x01c601a7,
+ 0xffd40d02,
+ 0x9e699b22,
+ 0x712db506,
+ 0x430cb720,
+ 0xa935f208,
+ 0x702db786,
+ 0xb5860244,
+ 0xb720702d,
+ 0xf208430c,
+ 0xb786a935,
+ 0x0244722d,
+ 0x722db586,
+ 0x430cb720,
+ 0xa925f208,
+ 0x6fadb786,
+ 0xb5860244,
+ 0xb7206fad,
+ 0xda08430c,
+ 0xb78da939,
+ 0x0244612b,
+ 0x612bb58d,
+ 0x430cb720,
+ 0xa921da08,
+ 0x61abb78d,
+ 0xb58d0244,
+ 0xb72061ab,
+ 0xf208430c,
+ 0xb786a929,
+ 0x0244732d,
+ 0x732db586,
+ 0x430cb720,
+ 0xa93dda08,
+ 0x652bb78d,
+ 0xb58d0244,
+ 0xb720652b,
+ 0xda08430c,
+ 0xb78da925,
+ 0x024465ab,
+ 0x65abb58d,
+ 0x430cb720,
+ 0xaa25f208,
+ 0xa92af208,
+ 0x73adb746,
+ 0xc1010244,
+ 0xb5860244,
+ 0xb72073ad,
+ 0xf208430c,
+ 0xb786a939,
+ 0x0244742d,
+ 0x742db586,
+ 0x430cb720,
+ 0xa93df208,
+ 0x74adb786,
+ 0xb5860244,
+ 0xb72074ad,
+ 0xda08430c,
+ 0xda08a93a,
+ 0xb786a93d,
+ 0xc10175ad,
+ 0x02440124,
+ 0x75adb586,
+ 0x430cb720,
+ 0xaa21da08,
+ 0xa926da08,
+ 0x632bb74d,
+ 0xc1010244,
+ 0xb58d0244,
+ 0xb720632b,
+ 0xb787430c,
+ 0xb746462d,
+ 0x0244752d,
+ 0x462db587,
+ 0x430cb720,
+ 0xa939da08,
+ 0xaabdda08,
+ 0x492db787,
+ 0x024a02d4,
+ 0x492db587,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xa9e2f010,
+ 0x430cb720,
+ 0xb5660906,
+ 0xb7806435,
+ 0xc0d0430c,
+ 0xd2080a70,
+ 0x9c22a101,
+ 0x430cb720,
+ 0xb78e9e58,
+ 0x11095bab,
+ 0x9060c000,
+ 0x70080004,
+ 0x93d6ffff,
+ 0x9060c000,
+ 0x74000008,
+ 0x93d8ffff,
+ 0x287ccffe,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0xb78e008b,
+ 0xc2005b2b,
+ 0x009a5888,
+ 0x7eb5b7a6,
+ 0x7eb5b566,
+ 0x09c2d012,
+ 0x9bdcfff4,
+ 0xb50e008b,
+ 0x9e685b2b,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0007502,
+ 0xb78691c4,
+ 0x750040a9,
+ 0x9122c000,
+ 0x5aabb78c,
+ 0x75020802,
+ 0x0802d002,
+ 0x08029c22,
+ 0xa60d9c22,
+ 0x4314b7a0,
+ 0xb7669e69,
+ 0xfff45ab5,
+ 0x03019bca,
+ 0x9be2fff4,
+ 0xc0007400,
+ 0x9e6991c2,
+ 0x6cadb786,
+ 0x5aadb746,
+ 0x4e35b746,
+ 0xc1010128,
+ 0xb5461124,
+ 0xb7a06cad,
+ 0x008b430c,
+ 0x75adb786,
+ 0xc0017500,
+ 0xfff49264,
+ 0x74009bc9,
+ 0x91a2c000,
+ 0xb78e008b,
+ 0xb7445bab,
+ 0x708867ad,
+ 0xd0040a02,
+ 0xc0000a42,
+ 0x008b9360,
+ 0x5e2db781,
+ 0xc0007502,
+ 0x750890a2,
+ 0x9204c000,
+ 0xb784008b,
+ 0xb74667ad,
+ 0xb7464e2d,
+ 0x624550b5,
+ 0x0a027115,
+ 0x0a42d00b,
+ 0x9080c000,
+ 0x008b0a06,
+ 0x75adb586,
+ 0xb706008b,
+ 0x740075ad,
+ 0x93a2c000,
+ 0x9b98fff4,
+ 0xc0007400,
+ 0x9e6992a2,
+ 0x532db786,
+ 0x7188c010,
+ 0x5b2bb78e,
+ 0x5a08c200,
+ 0x00cac101,
+ 0x7eadb786,
+ 0x12400268,
+ 0x7eadb586,
+ 0x90a0c000,
+ 0xb7069e69,
+ 0xb79f4e2d,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xf8399c22,
+ 0xfff4a205,
+ 0x74009b73,
+ 0x430cb700,
+ 0x91e2c000,
+ 0x5b2ab78e,
+ 0x532cb746,
+ 0x5a08c200,
+ 0xb78600c0,
+ 0xc0107ead,
+ 0xc0017104,
+ 0xb74192a0,
+ 0x74885e2c,
+ 0x9164c000,
+ 0x5b2ab78e,
+ 0x5a08c200,
+ 0xb70600c0,
+ 0xc0017ead,
+ 0xb78690e0,
+ 0x750075ac,
+ 0x9004c001,
+ 0xc0007482,
+ 0x748890a2,
+ 0x91e4c000,
+ 0x67acb784,
+ 0x4e2cb746,
+ 0x50b4b746,
+ 0x71156245,
+ 0xd00b0a02,
+ 0xc0000a42,
+ 0x0a069060,
+ 0x75acb586,
+ 0x75acb786,
+ 0xc0007500,
+ 0x000990a4,
+ 0x9080c000,
+ 0x4e2cb706,
+ 0xaa1df9f8,
+ 0xf0109c22,
+ 0xf010a966,
+ 0x9e5ca9e2,
+ 0x7100c814,
+ 0x7080c802,
+ 0x0e22d011,
+ 0x753c0244,
+ 0x915cc000,
+ 0x087ec7ff,
+ 0xcffe76c0,
+ 0xd00e087c,
+ 0x9c220804,
+ 0xc0007680,
+ 0xd02290bc,
+ 0x9c225468,
+ 0xc1019e5a,
+ 0x50531205,
+ 0x76e69c22,
+ 0x1000d01e,
+ 0x48bcb340,
+ 0x76c0c380,
+ 0x1524d02e,
+ 0x913cc000,
+ 0x084ec002,
+ 0xd0119c22,
+ 0xd2240e32,
+ 0x9e525987,
+ 0x76c8c004,
+ 0x4002ba12,
+ 0xffff0d04,
+ 0xb78092da,
+ 0x9e5b5d8c,
+ 0xc040691a,
+ 0x0244aa65,
+ 0xba000009,
+ 0x9c224002,
+ 0xa205f839,
+ 0x1cb0f011,
+ 0x9e539e99,
+ 0x90d6c000,
+ 0x0902c002,
+ 0x9100c000,
+ 0x7040d002,
+ 0x01090a04,
+ 0x4000ba12,
+ 0x19400d02,
+ 0xc8129e50,
+ 0x0a067004,
+ 0xcc1251eb,
+ 0xc00270c8,
+ 0xcc100dce,
+ 0xcc127246,
+ 0xba1b72c4,
+ 0xb7804003,
+ 0x05b65d0c,
+ 0x512b9e58,
+ 0xa986c840,
+ 0x4003ba1b,
+ 0x65b5c101,
+ 0x9bc4c1f4,
+ 0xfff49e83,
+ 0xf9f89baa,
+ 0x9c22aa1d,
+ 0x0e30f011,
+ 0x4314b760,
+ 0x92e2c000,
+ 0xc10100c8,
+ 0xb74c0096,
+ 0x750273b3,
+ 0xba120912,
+ 0xd0024003,
+ 0xd0311926,
+ 0xd0100e20,
+ 0xc10170c8,
+ 0xd0120124,
+ 0xba1c7104,
+ 0x9e5c4002,
+ 0x0a48c0c8,
+ 0xa801d208,
+ 0x70c0c010,
+ 0xaa1dd3e8,
+ 0x7008c012,
+ 0x287ccffe,
+ 0xf8399c22,
+ 0xb720a205,
+ 0x0a02430c,
+ 0x9e8b0992,
+ 0x702bb58c,
+ 0x0db0c0d4,
+ 0x6d2bb58c,
+ 0x5aadb586,
+ 0x542bb58d,
+ 0x672db586,
+ 0x67adb586,
+ 0x692db586,
+ 0x64adb586,
+ 0x996ffe54,
+ 0x4314b760,
+ 0x0982c002,
+ 0x0da0c0d6,
+ 0xaa1df9f8,
+ 0x90a0fe4b,
+ 0x430cb780,
+ 0xd11005b6,
+ 0xb78c04b9,
+ 0x01b87cab,
+ 0x4002ba1b,
+ 0x70c0d014,
+ 0x753ec7fc,
+ 0x48bab340,
+ 0x7cabb56c,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0xb781008b,
+ 0xb7a65e2d,
+ 0x75026bb5,
+ 0x9184c000,
+ 0x9a40fff4,
+ 0xc0007400,
+ 0x008b90e4,
+ 0x50adb706,
+ 0x9060c000,
+ 0xd1100802,
+ 0xb79f1451,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0xb766008b,
+ 0xfff45ab5,
+ 0x008b9a0c,
+ 0x6cadb746,
+ 0x5aadb786,
+ 0x12400244,
+ 0x6cadb586,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xb7a0a60d,
+ 0x097a430c,
+ 0xb786008b,
+ 0xd010502d,
+ 0x196c7104,
+ 0x7104cc12,
+ 0x4e2db786,
+ 0xb76c9e9d,
+ 0x9ea35a2b,
+ 0x6a7ac006,
+ 0x5bb3b5ae,
+ 0x0d025988,
+ 0x5b1fc200,
+ 0x983fffd4,
+ 0x7740008b,
+ 0x4eadb506,
+ 0x9c87c820,
+ 0x9116c000,
+ 0x4000b960,
+ 0x9080c000,
+ 0x7e2db5c6,
+ 0xffff0890,
+ 0x008b93a1,
+ 0x4e2db786,
+ 0x655dd131,
+ 0x6659d131,
+ 0x6cadb546,
+ 0x4fadb586,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x8420a61d,
+ 0x430cb7a0,
+ 0x0787a199,
+ 0xb7869ea8,
+ 0x7502714c,
+ 0x1092d019,
+ 0x9132c000,
+ 0x08829ea8,
+ 0x634ab78d,
+ 0xd00a7502,
+ 0x9ea80892,
+ 0xb786a09d,
+ 0x7504564c,
+ 0x9364c001,
+ 0x5e4cb781,
+ 0x734ab7cc,
+ 0x594cb7e6,
+ 0xc0007502,
+ 0xfff49284,
+ 0x740099b1,
+ 0x91e4c000,
+ 0xb786008b,
+ 0xb74650ad,
+ 0xc2006bad,
+ 0x70885a08,
+ 0x90bcc000,
+ 0xc0000f32,
+ 0x0f129060,
+ 0xb7819ea8,
+ 0x75025e4c,
+ 0x9104c006,
+ 0x4e4cb786,
+ 0x4fccb746,
+ 0x70886a1e,
+ 0x9016c006,
+ 0x5e05d322,
+ 0x9ea50268,
+ 0x2efccffe,
+ 0x9180c001,
+ 0xc0c8020b,
+ 0xd2080a38,
+ 0xc001aa82,
+ 0x77c090a0,
+ 0x91e4c000,
+ 0xb78c008b,
+ 0x75006e2b,
+ 0x9122c000,
+ 0xc0c8020b,
+ 0xd2080a38,
+ 0xc003ab01,
+ 0xd01092e0,
+ 0x9ea804fe,
+ 0xc380009a,
+ 0xb78c5c88,
+ 0xb7cc654a,
+ 0xc101742b,
+ 0xb7e6049a,
+ 0x752a5b4d,
+ 0x9eb50f0e,
+ 0x1f62d009,
+ 0x7440a89d,
+ 0x9004c003,
+ 0xc00075c0,
+ 0x020b9124,
+ 0x0a48c0c8,
+ 0xa902d208,
+ 0x9100c004,
+ 0x9ebba91a,
+ 0x4002ba1e,
+ 0x9a6afff4,
+ 0x430cb720,
+ 0xb7819e82,
+ 0xcffe5e2d,
+ 0x75042d7c,
+ 0x9304c003,
+ 0xb7869ea8,
+ 0x7504564c,
+ 0x9242c003,
+ 0x5c88d3a2,
+ 0xb746009a,
+ 0xb7865f2d,
+ 0xb726602d,
+ 0xb7666135,
+ 0xe0006235,
+ 0xc0030244,
+ 0xc2009062,
+ 0x01a85905,
+ 0x6525d131,
+ 0xc18005b2,
+ 0xc1015a05,
+ 0xc10105b8,
+ 0xc1f405b4,
+ 0x9e829a2a,
+ 0x2d7ccffe,
+ 0x9200c002,
+ 0x5eadb781,
+ 0x75021a04,
+ 0x9192c000,
+ 0x066cd010,
+ 0xc1019e51,
+ 0xd010024a,
+ 0x9e6a7048,
+ 0x9120c000,
+ 0x066ad010,
+ 0xd0109e51,
+ 0xd0107048,
+ 0xd012155c,
+ 0x03097104,
+ 0x2b7ccffe,
+ 0x4314b7a0,
+ 0xb781040b,
+ 0x75025e4c,
+ 0x92a4c001,
+ 0x98f4fff4,
+ 0xc0017400,
+ 0x008b9204,
+ 0x562db786,
+ 0xc0017504,
+ 0x040b9144,
+ 0x64cab78d,
+ 0x7510c004,
+ 0x9074c001,
+ 0x50adb786,
+ 0x6badb746,
+ 0x5a08c200,
+ 0xc0007088,
+ 0x020b9356,
+ 0x0a38c0c8,
+ 0xab01d208,
+ 0x9260c000,
+ 0xb7ac008b,
+ 0x774073b3,
+ 0x9204fffb,
+ 0x9080fffa,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xfffd7508,
+ 0xfffd93a4,
+ 0x058f9160,
+ 0x4002ba1e,
+ 0x9a12fff4,
+ 0x7deeb79f,
+ 0x7e6eb7bf,
+ 0x7eeeb7df,
+ 0x7f6eb7ff,
+ 0x8c20c002,
+ 0xa60d9c22,
+ 0x430cb7a0,
+ 0xb786008b,
+ 0x750075ad,
+ 0x9264c001,
+ 0x98a8fff4,
+ 0xc0007400,
+ 0x008b91a2,
+ 0x5babb78e,
+ 0x67adb744,
+ 0x0a027088,
+ 0x0a42d004,
+ 0x9360c000,
+ 0xb781008b,
+ 0x75025e2d,
+ 0x90a2c000,
+ 0xc0007508,
+ 0x008b9204,
+ 0x67adb784,
+ 0x4e2db746,
+ 0x50b5b746,
+ 0x71156245,
+ 0xd00b0a02,
+ 0xc0000a42,
+ 0x0a069080,
+ 0xb586008b,
+ 0x008b75ad,
+ 0x75adb786,
+ 0xc0037500,
+ 0x008b9042,
+ 0x5e2db781,
+ 0xc0027508,
+ 0x75029382,
+ 0x9224c000,
+ 0x986efff4,
+ 0x0a80f012,
+ 0x9164c000,
+ 0xb786008b,
+ 0xb7464e2d,
+ 0x6a1e4fad,
+ 0xc0017088,
+ 0x008b93f6,
+ 0x53abb78c,
+ 0xc0027500,
+ 0x008b9082,
+ 0x7433b7ac,
+ 0x6badb786,
+ 0xd8120906,
+ 0xb7817104,
+ 0x75025e2d,
+ 0x9184c000,
+ 0x984cfff4,
+ 0xc0007400,
+ 0x008b90e4,
+ 0x50adb786,
+ 0x9060c000,
+ 0xba2d0a02,
+ 0x9ea24003,
+ 0x9eb39e6b,
+ 0x995afff4,
+ 0x15d1d110,
+ 0x59870a0e,
+ 0x70c8cc10,
+ 0xcc121a08,
+ 0x008b70c8,
+ 0x6db3b76c,
+ 0xba1b1187,
+ 0xb79f4002,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0x90c0ffee,
+ 0x99d7fff4,
+ 0x9e6c1001,
+ 0x7008c012,
+ 0xfffe7400,
+ 0xfffd905a,
+ 0xb79f9340,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa6059c22,
+ 0x4314b7a0,
+ 0xb78c9e69,
+ 0x75025aab,
+ 0x1a42d001,
+ 0x90c2c000,
+ 0xb78c9e69,
+ 0x0a045b2b,
+ 0x652bb58d,
+ 0xb74c9e69,
+ 0xd0115aab,
+ 0xcffe1a22,
+ 0xc07c2a7c,
+ 0xc000753e,
+ 0x097f91f4,
+ 0x0a7ecffe,
+ 0x63abb54d,
+ 0x732db586,
+ 0x642bb54d,
+ 0x64abb54d,
+ 0x9120c001,
+ 0x9e939e69,
+ 0x5b2bb7ac,
+ 0x09d2d011,
+ 0x98f0c1f4,
+ 0x75401804,
+ 0x09029e69,
+ 0x287ccffe,
+ 0x0922d002,
+ 0xb50d0104,
+ 0x0a06642b,
+ 0x7088c812,
+ 0x652bb78d,
+ 0x4e35b746,
+ 0x62400a84,
+ 0xb5ad02d8,
+ 0xcffe64ab,
+ 0xc1012afc,
+ 0xb54d62d5,
+ 0xb5a663ab,
+ 0x9e69732d,
+ 0x652bb78d,
+ 0x4e2db746,
+ 0xb5866245,
+ 0xb79f73ad,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa61d9c22,
+ 0xb7c08440,
+ 0x008d430c,
+ 0x5e2db781,
+ 0x76adb726,
+ 0xc0087508,
+ 0x744092e4,
+ 0x90c2c006,
+ 0x0d029eb1,
+ 0x654bb76d,
+ 0x64cbb78d,
+ 0x4e4db746,
+ 0x5985d1a4,
+ 0xc1011094,
+ 0xa09905b8,
+ 0x99d7ffb4,
+ 0x008da015,
+ 0x652bb7ed,
+ 0x68abb78d,
+ 0x76b5b7c6,
+ 0x6333b7ed,
+ 0x1a041278,
+ 0x7780c005,
+ 0xc0009ea5,
+ 0xb7669152,
+ 0xc0054e2d,
+ 0xd31474c0,
+ 0xc0005da8,
+ 0x9eb190f4,
+ 0xb766058d,
+ 0x59ab4e4d,
+ 0x987fc1f4,
+ 0xcffea916,
+ 0xa9992efc,
+ 0x162ed010,
+ 0xba249e6a,
+ 0x624f4002,
+ 0x0659d110,
+ 0xb766008d,
+ 0xb7466ab5,
+ 0xba246b35,
+ 0xba124002,
+ 0x62474002,
+ 0xb76c6127,
+ 0xc1015a2b,
+ 0xc10105b8,
+ 0xb5660524,
+ 0xb5466ab5,
+ 0xb5c66b35,
+ 0x058d4e35,
+ 0x02815988,
+ 0x985cc1f4,
+ 0xb786008d,
+ 0xb506532d,
+ 0xc0054ead,
+ 0xc0007500,
+ 0xc20090f4,
+ 0x61595a2b,
+ 0x90a0c000,
+ 0xc2006259,
+ 0x9eb15929,
+ 0x4fcdb786,
+ 0x534db546,
+ 0x7500c005,
+ 0x90f4c000,
+ 0x5a2bc200,
+ 0xc0006159,
+ 0x625990a0,
+ 0x5929c200,
+ 0xb786008d,
+ 0xb5466bad,
+ 0xc0054fad,
+ 0xc0007500,
+ 0xc20090f4,
+ 0x62595a2b,
+ 0x90a0c000,
+ 0xc2006259,
+ 0x9eb15a29,
+ 0x6ccdb706,
+ 0x6bcdb586,
+ 0x7400c005,
+ 0x90f4c000,
+ 0x5a2bc000,
+ 0xc0006059,
+ 0x625190a0,
+ 0x5829c200,
+ 0xb7a6008d,
+ 0xb7a64e2d,
+ 0xb5064fb5,
+ 0xd2a46cad,
+ 0xd01059fd,
+ 0xc101065a,
+ 0xc10105ba,
+ 0xc200024a,
+ 0x5d875a13,
+ 0x01c7d120,
+ 0x50adb586,
+ 0xc1d4018b,
+ 0x600b9bff,
+ 0xb5069eb1,
+ 0xc01250cd,
+ 0x0a02700a,
+ 0x16d0c101,
+ 0x5155b5a6,
+ 0x76cdb586,
+ 0x50cdb506,
+ 0xc0da9eb2,
+ 0xd0100d60,
+ 0x7480a941,
+ 0x9142c000,
+ 0xc0c8020d,
+ 0xd2080a38,
+ 0x0a02a101,
+ 0xa241d010,
+ 0xc0da9eb2,
+ 0xd0100d68,
+ 0xc01ca941,
+ 0xc00074be,
+ 0xc0029222,
+ 0xd0100a4e,
+ 0x09027088,
+ 0x7104d012,
+ 0xc0c8010d,
+ 0xd0080948,
+ 0x0a7fa241,
+ 0xa241d010,
+ 0xc0da9eb2,
+ 0xd0100d64,
+ 0xc01ca941,
+ 0xc00074be,
+ 0xc0029222,
+ 0xd0100a4e,
+ 0x09027088,
+ 0x7104d012,
+ 0xc0c8010d,
+ 0xd0080944,
+ 0x0a7fa241,
+ 0xa241d010,
+ 0x9a8ffff4,
+ 0x7d6eb79f,
+ 0x7deeb7bf,
+ 0x7e6eb7df,
+ 0x7eeeb7ff,
+ 0x8c40c002,
+ 0x9380ffe3,
+ 0xc0067440,
+ 0x9eb190c2,
+ 0xb76d0d02,
+ 0xb78d654b,
+ 0xb74664cb,
+ 0xd1a44e4d,
+ 0x10945985,
+ 0x05b8c101,
+ 0xffb4a09d,
+ 0xa01198c2,
+ 0xb7ed008d,
+ 0xb78d652b,
+ 0xb7c668ab,
+ 0xb7ed76b5,
+ 0x12786333,
+ 0xc0051a04,
+ 0x9ea57780,
+ 0x9152c000,
+ 0x4e2db766,
+ 0x74c0c005,
+ 0x5da8d314,
+ 0x90f4c000,
+ 0x058d9eb1,
+ 0x4e4db766,
+ 0xc1d459ab,
+ 0xa9129b6a,
+ 0x2efccffe,
+ 0xd010a99d,
+ 0x9e6a162e,
+ 0x4002ba24,
+ 0xd110624f,
+ 0x008d0659,
+ 0x6ab5b766,
+ 0x6b35b746,
+ 0x4002ba24,
+ 0x4002ba12,
+ 0x61276247,
+ 0x5a2bb76c,
+ 0x05b8c101,
+ 0x0524c101,
+ 0x6ab5b566,
+ 0x6b35b546,
+ 0x4e35b5c6,
+ 0x5988058d,
+ 0xc1d40281,
+ 0x008d9b47,
+ 0x532db786,
+ 0x4eadb506,
+ 0x7500c005,
+ 0x90f4c000,
+ 0x5a2bc200,
+ 0xc0006159,
+ 0x625990a0,
+ 0x5929c200,
+ 0xb7869eb1,
+ 0xb5464fcd,
+ 0xc005534d,
+ 0xc0007500,
+ 0xc20090f4,
+ 0x61595a2b,
+ 0x90a0c000,
+ 0xc2006259,
+ 0x008d5929,
+ 0x6badb786,
+ 0x4fadb546,
+ 0x7500c005,
+ 0x90f4c000,
+ 0x5a2bc200,
+ 0xc0006259,
+ 0x625990a0,
+ 0x5a29c200,
+ 0xb7069eb1,
+ 0xb5866ccd,
+ 0xc0056bcd,
+ 0xc0007400,
+ 0xc00090f4,
+ 0x60595a2b,
+ 0x90a0c000,
+ 0xc2006251,
+ 0x008d5829,
+ 0x4e2db7a6,
+ 0x4fb5b7a6,
+ 0x6cadb506,
+ 0x59fdd2a4,
+ 0x065ad010,
+ 0x05bac101,
+ 0x024ac101,
+ 0x5a13c200,
+ 0xd1205d87,
+ 0xb58601c7,
+ 0x018b50ad,
+ 0x9aeac1d4,
+ 0x9eb1600b,
+ 0x50cdb506,
+ 0x700ac012,
+ 0xc1010a02,
+ 0xb5a616d0,
+ 0xb5865155,
+ 0xb50676cd,
+ 0x9eb250cd,
+ 0x0d60c0da,
+ 0xa941d010,
+ 0xc0007480,
+ 0x020d9142,
+ 0x0a38c0c8,
+ 0xa101d208,
+ 0xd0100a02,
+ 0x9eb2a241,
+ 0x0d68c0da,
+ 0xa941d010,
+ 0x74bec01c,
+ 0x9222c000,
+ 0x0a4ec002,
+ 0x7088d010,
+ 0xd0120902,
+ 0x010d7104,
+ 0x0948c0c8,
+ 0xa241d008,
+ 0xd0100a7f,
+ 0x9eb2a241,
+ 0x0d64c0da,
+ 0xa941d010,
+ 0x74bec01c,
+ 0x9222c000,
+ 0x0a4ec002,
+ 0x7088d010,
+ 0xd0120902,
+ 0x010d7104,
+ 0x0944c0c8,
+ 0xa241d008,
+ 0xd0100a7f,
+ 0xb79fa241,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xb7ff7e6e,
+ 0xc0027eee,
+ 0xffeb8c40,
+ 0x9e5a9200,
+ 0x70c0d014,
+ 0x1a0476c0,
+ 0x0244d01b,
+ 0x1228d01c,
+ 0xff9d9ea3,
+ 0xa60d92c0,
+ 0x4314b7a0,
+ 0x03079e5d,
+ 0xc0d8058b,
+ 0xc00e0da0,
+ 0xfe3409b2,
+ 0xb760986c,
+ 0xc00a4314,
+ 0xc0ca0992,
+ 0xfe340dc0,
+ 0xb7609864,
+ 0xc0024314,
+ 0xc0d60982,
+ 0xfe340da0,
+ 0xb760985c,
+ 0x09d24314,
+ 0x0dd0c0d4,
+ 0x9855fe34,
+ 0x4314b760,
+ 0x09f2c04c,
+ 0x0dd0c0e6,
+ 0x984dfe34,
+ 0x018b058b,
+ 0x0d80c0c6,
+ 0x0d32c004,
+ 0x9870fe34,
+ 0x9e699e68,
+ 0x4facb786,
+ 0x040b097f,
+ 0x7520c001,
+ 0xc0c89e6c,
+ 0xc0da0c38,
+ 0xc0de0a64,
+ 0xd20808a0,
+ 0x0a22a101,
+ 0x4029b580,
+ 0x4050b760,
+ 0xd00d9e6b,
+ 0x0c821a48,
+ 0xb421050b,
+ 0x9e69443a,
+ 0x09e8c0da,
+ 0x0d60c0da,
+ 0xd0089e4d,
+ 0x9d0aa161,
+ 0xb56c018d,
+ 0xd0107332,
+ 0xb526a0c2,
+ 0x040b76b4,
+ 0x08e0c12a,
+ 0x0b02c010,
+ 0xa961d008,
+ 0x9e689e44,
+ 0x0a6cc0da,
+ 0xa101d208,
+ 0xaa61d008,
+ 0x6fb2b74d,
+ 0xa9c2d018,
+ 0x0a027500,
+ 0x0a42d002,
+ 0x0244c101,
+ 0x6faab58d,
+ 0x7e7edffc,
+ 0x0d38d012,
+ 0x430cb740,
+ 0x05a2d022,
+ 0xc0c80205,
+ 0xd2080a48,
+ 0x9e58aa01,
+ 0x7008d010,
+ 0x0944c0c8,
+ 0xa941d008,
+ 0x7104d012,
+ 0x76440c84,
+ 0x462bb59a,
+ 0x402bb5c0,
+ 0x412bb5a0,
+ 0x0c040984,
+ 0xfffe0888,
+ 0xb7209124,
+ 0xb78c430c,
+ 0x752a652b,
+ 0x9112c000,
+ 0x0a169e69,
+ 0x7b2bb58d,
+ 0x90a0c000,
+ 0xb52d9e68,
+ 0x9e697b32,
+ 0xb7660d02,
+ 0xb76650b5,
+ 0xfff44e2d,
+ 0xcffe9b40,
+ 0x0a7a287c,
+ 0x7008c010,
+ 0xc0121a74,
+ 0x9e697008,
+ 0x5babb50e,
+ 0x98a1fff4,
+ 0xb78c9e68,
+ 0x750065aa,
+ 0xd0010a02,
+ 0xb5800a42,
+ 0xb79f448c,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xb7209c22,
+ 0x0a02430c,
+ 0x4aabb72d,
+ 0xa271f010,
+ 0xa269f010,
+ 0xa0e1f010,
+ 0xa265f010,
+ 0xa27dd810,
+ 0xa279d810,
+ 0xa279f010,
+ 0xa271f050,
+ 0xa261f050,
+ 0xa27dd850,
+ 0xa279d850,
+ 0xa269d850,
+ 0xa26df050,
+ 0xa269f050,
+ 0xa265f050,
+ 0xa275f050,
+ 0x430cb720,
+ 0x4aabb74d,
+ 0x6f2db546,
+ 0x430cb720,
+ 0x712db586,
+ 0x430cb720,
+ 0x702db586,
+ 0x430cb720,
+ 0x6fadb586,
+ 0x430cb720,
+ 0x61abb58d,
+ 0x430cb720,
+ 0x612bb58d,
+ 0x430cb720,
+ 0x722db586,
+ 0x430cb720,
+ 0x732db586,
+ 0x430cb720,
+ 0x65abb58d,
+ 0x430cb720,
+ 0x652bb58d,
+ 0x430cb720,
+ 0x632bb58d,
+ 0x430cb720,
+ 0x74adb586,
+ 0x430cb720,
+ 0x742db586,
+ 0x430cb720,
+ 0x73adb586,
+ 0x430cb720,
+ 0x75adb586,
+ 0x430cb720,
+ 0x752db586,
+ 0x74c29c22,
+ 0xc0019e98,
+ 0xb78091a4,
+ 0x0902430c,
+ 0x0a00c0e2,
+ 0xa101d208,
+ 0x4314b720,
+ 0x684bb78c,
+ 0xc0017500,
+ 0xb7269104,
+ 0xb74d54cd,
+ 0xb7876353,
+ 0xb766464d,
+ 0xb7466dcd,
+ 0xb766514d,
+ 0xc10150d5,
+ 0x02466523,
+ 0x1244c101,
+ 0x1126c101,
+ 0xc0007104,
+ 0x9e4c925c,
+ 0x0a00c0e2,
+ 0xa002d208,
+ 0x9160c000,
+ 0xc00074c0,
+ 0xf0109104,
+ 0xb720aa61,
+ 0xb58d430c,
+ 0xb7804aab,
+ 0xc0e2430c,
+ 0xd2080a00,
+ 0x9c22a801,
+ 0x8420a61d,
+ 0xa1199e57,
+ 0x9e4a9e9f,
+ 0xcc120d02,
+ 0xd0317286,
+ 0xaa1966be,
+ 0x7540d001,
+ 0x020b134e,
+ 0x4828b334,
+ 0xc0007580,
+ 0x9ea3935c,
+ 0xff94018d,
+ 0xaa199a22,
+ 0xc2009e86,
+ 0x71885a0b,
+ 0x931ac000,
+ 0x5990d3a4,
+ 0x01fec101,
+ 0xff940d02,
+ 0xc1019a14,
+ 0xd024600d,
+ 0xc0005b13,
+ 0xb7209160,
+ 0x0d02430c,
+ 0x4c2db726,
+ 0xff941194,
+ 0x9e869a06,
+ 0x77400e82,
+ 0x913ac000,
+ 0xc0007740,
+ 0xcfff90e4,
+ 0xc000757e,
+ 0x676f9074,
+ 0xb79f9e70,
+ 0xb7bf7dee,
+ 0xb7df7e6e,
+ 0xb7ff7eee,
+ 0xc0027f6e,
+ 0x9c228c20,
+ 0xb720a61d,
+ 0x0d02430c,
+ 0x4c2db766,
+ 0x45b5b767,
+ 0x5ab3b7cc,
+ 0x632bb7ad,
+ 0x712db7e6,
+ 0x73b5b7e6,
+ 0x99dbff94,
+ 0x430cb720,
+ 0xb7860301,
+ 0xb70764ad,
+ 0x7504462d,
+ 0x9264c001,
+ 0x45adb787,
+ 0xd0207540,
+ 0xc00012c0,
+ 0xb7669282,
+ 0x018b74b5,
+ 0xff940d02,
+ 0xb72099c4,
+ 0x0a02430c,
+ 0x4c2db726,
+ 0x6003109a,
+ 0x1451d110,
+ 0x7100c812,
+ 0xb7209e95,
+ 0xb786430c,
+ 0x750265ad,
+ 0x91b4c000,
+ 0xc00077ba,
+ 0x9ea99152,
+ 0x49adb747,
+ 0x4003ba09,
+ 0xc001058b,
+ 0xb76690c0,
+ 0x058b4c2d,
+ 0xc0010d02,
+ 0xb7879120,
+ 0x754044ad,
+ 0x11c0d020,
+ 0x9142c000,
+ 0x4c2db786,
+ 0x4aadb747,
+ 0x6245124a,
+ 0x15b8c101,
+ 0x65adb786,
+ 0xc0007502,
+ 0xb78d90f2,
+ 0x75004a2b,
+ 0x91a2c000,
+ 0xb7479ea9,
+ 0xba094a2d,
+ 0x018f4003,
+ 0xfff4050f,
+ 0xc0009b45,
+ 0xb7669100,
+ 0x9ea24c2d,
+ 0xff9411ba,
+ 0x0a029976,
+ 0x7100d012,
+ 0x000c006c,
+ 0x7008c010,
+ 0x7e6eb79f,
+ 0x7eeeb7bf,
+ 0x7f6eb7df,
+ 0x7feeb7ff,
+ 0x8c00c002,
+ 0xf8399c22,
+ 0xb740a205,
+ 0xd0105c8c,
+ 0x02440624,
+ 0xaa1ddbc8,
+ 0x9e5b0107,
+ 0x70886a0c,
+ 0x4834b320,
+ 0x91b4c001,
+ 0xc00076c0,
+ 0xb760927c,
+ 0xc18040d5,
+ 0x0d025a07,
+ 0x61a7d132,
+ 0xc1015d99,
+ 0xff9405b8,
+ 0x582b9944,
+ 0x4002ba00,
+ 0x90c0c000,
+ 0x0e22d011,
+ 0x4002ba04,
+ 0x430cb740,
+ 0xc0e40205,
+ 0xd2080a14,
+ 0x7008aa01,
+ 0x443ab340,
+ 0x913ac000,
+ 0xc0e40205,
+ 0xd2080a10,
+ 0xc012aa01,
+ 0xf9f87008,
+ 0x9c22aa1d,
+ 0xa205f839,
+ 0x430cb720,
+ 0x64adb786,
+ 0xc0007504,
+ 0xc0e690c4,
+ 0xc0000890,
+ 0xc0e69080,
+ 0xb78008d0,
+ 0xdffc43ab,
+ 0xc0007d3e,
+ 0x9e5090a4,
+ 0x90a0c000,
+ 0xfff49e89,
+ 0xb7209ba8,
+ 0xb78d430c,
+ 0x75024dab,
+ 0x91c4c000,
+ 0xc0c80203,
+ 0xd2080a60,
+ 0x7500aa01,
+ 0x90c4c000,
+ 0x0a04d011,
+ 0x4002ba04,
+ 0xc0d20203,
+ 0xd2080a60,
+ 0x7502aa01,
+ 0x90c4c000,
+ 0x0a02d011,
+ 0x4002ba04,
+ 0x522bb74d,
+ 0x0a28d011,
+ 0xc0007008,
+ 0xd01190fa,
+ 0x70081a28,
+ 0x9096c000,
+ 0x4002ba04,
+ 0xc0e40203,
+ 0xd2080a0c,
+ 0x097ba902,
+ 0xc1010183,
+ 0xba241204,
+ 0xd0124002,
+ 0x09107104,
+ 0x7104d010,
+ 0x0994c0e4,
+ 0x0528c101,
+ 0xa961d008,
+ 0xba009e50,
+ 0x70044002,
+ 0x443ab320,
+ 0x913ac000,
+ 0xc0e40203,
+ 0xd2080a10,
+ 0xc012aa01,
+ 0xf9f87008,
+ 0x9c22aa1d,
+ 0x76c0a61d,
+ 0x430cb720,
+ 0x9264c000,
+ 0x65abb74d,
+ 0x61abb78d,
+ 0xb7a69e8d,
+ 0xb766702d,
+ 0xb72d6fad,
+ 0xd0206133,
+ 0xc0e60128,
+ 0xc0000ed0,
+ 0x9e8d91a0,
+ 0x65b3b74d,
+ 0x722db7a6,
+ 0x732db766,
+ 0x6533b72d,
+ 0x0e90c0e6,
+ 0x7680c008,
+ 0x92bcc000,
+ 0x5e1bf122,
+ 0x90ccc000,
+ 0x0a02c002,
+ 0x90e0c000,
+ 0x7100d002,
+ 0xba240a04,
+ 0x9ea34000,
+ 0x52b3c200,
+ 0x554f54af,
+ 0x768051f3,
+ 0x93a2c002,
+ 0x468cb780,
+ 0x0524d010,
+ 0xc8409e51,
+ 0xb780ab46,
+ 0xda10470c,
+ 0xc100a931,
+ 0xc0405d87,
+ 0xd131aba5,
+ 0x09046667,
+ 0xda109e53,
+ 0xc200a131,
+ 0x05b2531f,
+ 0xff940d02,
+ 0xc00c985e,
+ 0x9e877588,
+ 0xa935da10,
+ 0x937cc001,
+ 0x4002ba12,
+ 0x12440a6e,
+ 0x65ebc101,
+ 0x02899eba,
+ 0x4002ba2d,
+ 0xc07e55eb,
+ 0xcc100d7e,
+ 0x9eaa72c4,
+ 0x5a07c300,
+ 0xc10155e8,
+ 0x018d05b8,
+ 0xff940d02,
+ 0x9e83983e,
+ 0xaa21f210,
+ 0xa9b1da10,
+ 0xc10165bf,
+ 0xf21005b8,
+ 0xba1ba1a2,
+ 0x0d024002,
+ 0x982fff94,
+ 0xf210755e,
+ 0xd04da025,
+ 0xd01a1a50,
+ 0xc0005213,
+ 0x0a4290ba,
+ 0x5210124a,
+ 0xa225f210,
+ 0xda100a06,
+ 0xb79fa23d,
+ 0xb7bf7e6e,
+ 0xb7df7eee,
+ 0xb7ff7f6e,
+ 0xc0027fee,
+ 0x9c228c00,
+ 0x76c2a605,
+ 0x91e4c001,
+ 0x430cb720,
+ 0x64adb786,
+ 0xd0227504,
+ 0xfff415b6,
+ 0xb7209b53,
+ 0xb74d430c,
+ 0xb786632b,
+ 0x70884c2d,
+ 0x93a2c000,
+ 0x712db7a6,
+ 0x9a0efff4,
+ 0x430cb780,
+ 0x018b9e83,
+ 0xc0c80109,
+ 0xd0080938,
+ 0xc0e4a941,
+ 0x9e920a0c,
+ 0xa101d208,
+ 0xfff40916,
+ 0xb7809ac5,
+ 0xc0c8430c,
+ 0xd2080a38,
+ 0xb780a001,
+ 0xc0c8430c,
+ 0xd2080a38,
+ 0xb79fa801,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa6059c22,
+ 0x4314b720,
+ 0x05839e5b,
+ 0x0d88c0e2,
+ 0xa961d010,
+ 0x4655b747,
+ 0x6dcdb786,
+ 0xd1107480,
+ 0xc0000529,
+ 0xb7869102,
+ 0x02484e4d,
+ 0xc00110c4,
+ 0xb7809060,
+ 0xc101440c,
+ 0xb5400528,
+ 0xb7864414,
+ 0x729950cd,
+ 0x91b8c000,
+ 0xd0100a06,
+ 0xb720a261,
+ 0xb726430c,
+ 0x00924e2d,
+ 0xc0001094,
+ 0xb78d91a0,
+ 0xb7464a4b,
+ 0x0a084e4d,
+ 0x729360c5,
+ 0x9216c000,
+ 0x1094c101,
+ 0xc0007440,
+ 0xd071917c,
+ 0xc2000a1e,
+ 0xf0085a17,
+ 0x0a06a265,
+ 0x90a0c000,
+ 0xf0080a02,
+ 0xd808a265,
+ 0xb7a0a265,
+ 0x0d02430c,
+ 0xb78d008b,
+ 0xb76d65ab,
+ 0xb74761ab,
+ 0x01b8492d,
+ 0x5985d1a4,
+ 0x05b4c101,
+ 0x9b7bff74,
+ 0xb50c008b,
+ 0xb7206a2b,
+ 0xb78c430c,
+ 0xc0c86a2b,
+ 0xb58008b8,
+ 0xb7204029,
+ 0xb787430c,
+ 0xb587452d,
+ 0xb72044ad,
+ 0xb786430c,
+ 0x750464ad,
+ 0x9024c001,
+ 0x65adb786,
+ 0xc0007502,
+ 0xb7879284,
+ 0x09964b2d,
+ 0xc2000d02,
+ 0xb5875a0b,
+ 0xb7a04bad,
+ 0x008b430c,
+ 0x7d35b766,
+ 0x9b4fff74,
+ 0xb506008b,
+ 0xb7207d2d,
+ 0xb786430c,
+ 0xb58773ad,
+ 0xc00149ad,
+ 0xb7869040,
+ 0xb587742d,
+ 0xb7204bad,
+ 0xb786430c,
+ 0xb58773ad,
+ 0xb7204a2d,
+ 0xb787430c,
+ 0xb58644ad,
+ 0xb7207d2d,
+ 0xb78d430c,
+ 0xb74c52ab,
+ 0x02446a2b,
+ 0x52abb58d,
+ 0x430cb720,
+ 0x4a2bb78d,
+ 0xb58d0a04,
+ 0xb79f4a2b,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x9e9a9c22,
+ 0xd1220906,
+ 0x52445c87,
+ 0x72d91a04,
+ 0x933cc000,
+ 0x1a32d011,
+ 0x19045250,
+ 0x9160c000,
+ 0xc0007917,
+ 0xd11090c2,
+ 0xc0001627,
+ 0xc2009360,
+ 0xba1a5a07,
+ 0x70c24002,
+ 0xffff0904,
+ 0x08029278,
+ 0xd0119c22,
+ 0x75001a12,
+ 0xc8100d02,
+ 0x51509c83,
+ 0x4818b323,
+ 0x9160c000,
+ 0xc0007897,
+ 0x121890e2,
+ 0xba000009,
+ 0x9c224002,
+ 0x9e545907,
+ 0xba240d04,
+ 0xffff4002,
+ 0x08069261,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0x09020687,
+ 0xb9609eaa,
+ 0xc0c64258,
+ 0x9e690d00,
+ 0xc0209e93,
+ 0x0904aa25,
+ 0xa249c030,
+ 0x9321ffff,
+ 0xc0020d82,
+ 0xfff40982,
+ 0x74009bae,
+ 0x0a02010b,
+ 0x0a42d002,
+ 0x0964c0c8,
+ 0xa241d008,
+ 0x430cb7a0,
+ 0x008b09ba,
+ 0x4c35b766,
+ 0x9b9dfff4,
+ 0xb50e008b,
+ 0xb7205bab,
+ 0x0902430c,
+ 0x5babb78e,
+ 0x5fabb58e,
+ 0x430cb720,
+ 0x0a06c001,
+ 0x5cabb54e,
+ 0x430cb720,
+ 0x60abb54e,
+ 0x430cb720,
+ 0x4d2db587,
+ 0x430cb720,
+ 0x4e2db786,
+ 0x6e2db586,
+ 0xb7809e69,
+ 0xb7205729,
+ 0xb58d430c,
+ 0xb79f4aab,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x0a029c22,
+ 0xa261d810,
+ 0x430cb720,
+ 0x632bb78c,
+ 0xb3407500,
+ 0xb74648a4,
+ 0xb7664e2d,
+ 0xb7464fad,
+ 0x02246db5,
+ 0xc1010244,
+ 0xc20011b4,
+ 0xb7465a07,
+ 0xd0126e2d,
+ 0xb7477104,
+ 0xb74c46b5,
+ 0x70c85aab,
+ 0x1124c101,
+ 0xc0001904,
+ 0x0a0690b6,
+ 0xa261d810,
+ 0x430cb720,
+ 0x6035b746,
+ 0xc0007680,
+ 0x748691e2,
+ 0x919ac000,
+ 0x4e2db786,
+ 0x02466229,
+ 0xc0007115,
+ 0x0a0690b6,
+ 0xa261d810,
+ 0x430cb720,
+ 0x4e2db786,
+ 0x6dadb746,
+ 0x70880248,
+ 0x1248d01c,
+ 0x4459b423,
+ 0xa6059c22,
+ 0x430cb720,
+ 0xb7870687,
+ 0x1a04472d,
+ 0x472db587,
+ 0xa961d810,
+ 0x430cb720,
+ 0xc0007482,
+ 0x0a029164,
+ 0x44adb587,
+ 0x430cb720,
+ 0x4dabb54d,
+ 0x9160c000,
+ 0xb58d0a02,
+ 0xb7204dab,
+ 0xb787430c,
+ 0xb58746ad,
+ 0xb72047ad,
+ 0x9e8b430c,
+ 0x4e35b746,
+ 0x0d84c0e2,
+ 0xa9e1d010,
+ 0xc00074c0,
+ 0x058590a2,
+ 0x9280c000,
+ 0x67adb784,
+ 0x50adb746,
+ 0x6629d131,
+ 0xb3337104,
+ 0xc0004458,
+ 0x0a069138,
+ 0xa261d010,
+ 0x430cb720,
+ 0x4e35b766,
+ 0x430cb720,
+ 0x0d02091e,
+ 0x6dadb766,
+ 0x462db787,
+ 0xc1010246,
+ 0xb5861246,
+ 0xb7206dad,
+ 0xb786430c,
+ 0xb76776ad,
+ 0x1246462d,
+ 0x76adb586,
+ 0x430cb720,
+ 0x4e35b726,
+ 0x462db787,
+ 0x5e2fb763,
+ 0x1419d110,
+ 0x587fd024,
+ 0x01b0e000,
+ 0xd00305b0,
+ 0xb5630db2,
+ 0xb7205e2f,
+ 0xb787430c,
+ 0xb726462d,
+ 0xb7634e35,
+ 0xd1105d2f,
+ 0xd0241419,
+ 0xe000587f,
+ 0x05b001b0,
+ 0x0db2d003,
+ 0x5d2fb563,
+ 0x430cb7a0,
+ 0xb763008b,
+ 0xc1b45d2f,
+ 0xd022984e,
+ 0x01015e74,
+ 0x3144590d,
+ 0x597fd124,
+ 0xb543008b,
+ 0xb7205daf,
+ 0xb786430c,
+ 0xb74764ad,
+ 0x75044635,
+ 0x9344c000,
+ 0x6035b546,
+ 0x430cb720,
+ 0xb7860d02,
+ 0xb5866dad,
+ 0xb7a0762d,
+ 0x008b430c,
+ 0x5aabb76c,
+ 0x6db5b766,
+ 0xff741984,
+ 0x008b999a,
+ 0x4c2db507,
+ 0x9180c000,
+ 0x6e2db746,
+ 0x02440224,
+ 0x0244c101,
+ 0x5a09c200,
+ 0x6e2db586,
+ 0xfff4058b,
+ 0xb7209afc,
+ 0x0a02430c,
+ 0x462db587,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xb7a0a60d,
+ 0x0687430c,
+ 0x03070d86,
+ 0x9964fff4,
+ 0xb506008b,
+ 0xb7606f2d,
+ 0x09864314,
+ 0x0de0c0d6,
+ 0x9af3ffd4,
+ 0x430cb720,
+ 0x4635b5a7,
+ 0xaa41da08,
+ 0xc0007500,
+ 0x9eb390a4,
+ 0x998bfff4,
+ 0x4314b700,
+ 0xc0d09e44,
+ 0xd2080a70,
+ 0x7502aa01,
+ 0x9044c002,
+ 0x6454b746,
+ 0x4e54b726,
+ 0x5acab74c,
+ 0x4e54b546,
+ 0x430cb720,
+ 0x4a52b76d,
+ 0xb7661522,
+ 0xb78c642d,
+ 0x19045aab,
+ 0x1126c101,
+ 0xc1016247,
+ 0xb5866125,
+ 0xb72053ad,
+ 0xb786430c,
+ 0x024476ad,
+ 0x76adb586,
+ 0x430cb7a0,
+ 0xb76c008b,
+ 0xb7665a2b,
+ 0x59884e35,
+ 0x9bf8c194,
+ 0xb506008b,
+ 0xb7a04ead,
+ 0x008b430c,
+ 0x4eb5b766,
+ 0x4cadb766,
+ 0x9be7c194,
+ 0xb506008b,
+ 0xb78054ad,
+ 0x0902430c,
+ 0x0a70c0d0,
+ 0xa101d208,
+ 0xb79f9eb3,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0x90e0fff6,
+ 0x6e3bd331,
+ 0xd2249e5a,
+ 0xd0115921,
+ 0x72991a26,
+ 0x90bcc000,
+ 0xba129ea2,
+ 0xd0114003,
+ 0x0d8a09a6,
+ 0xc8120124,
+ 0x70867286,
+ 0x9096c000,
+ 0x4002ba13,
+ 0xd0100a7e,
+ 0xb7407088,
+ 0xc0e4430c,
+ 0xd0080914,
+ 0xb780a241,
+ 0xc0e4430c,
+ 0xd2080a10,
+ 0x9c22a102,
+ 0xb720a60d,
+ 0x76c2430c,
+ 0x46adb787,
+ 0xb5870a04,
+ 0xb72046ad,
+ 0xb566430c,
+ 0xc00a64b5,
+ 0xb7209364,
+ 0x0d02430c,
+ 0xb7860a82,
+ 0xb747762d,
+ 0xb7674c2d,
+ 0xb76746b5,
+ 0x124447ad,
+ 0x762db586,
+ 0x4314b720,
+ 0x1737d110,
+ 0x4bb5b7a7,
+ 0x474db767,
+ 0x76d5b766,
+ 0x98b9ff74,
+ 0x430cb720,
+ 0x6dadb746,
+ 0x7635b746,
+ 0x4e2db786,
+ 0x1124c101,
+ 0x12445907,
+ 0x7148d012,
+ 0x08040008,
+ 0xb5075807,
+ 0xb72045ad,
+ 0xb787430c,
+ 0xb74645ad,
+ 0xd0127c2d,
+ 0xb5877104,
+ 0xb72045ad,
+ 0xb747430c,
+ 0xb78645ad,
+ 0xc8107b2d,
+ 0x75827088,
+ 0x45adb547,
+ 0x93dcc000,
+ 0x430cb720,
+ 0x058b9eaa,
+ 0x4c2db766,
+ 0x9887ff74,
+ 0x430cb720,
+ 0x02030936,
+ 0x0a64c0c8,
+ 0xa902d208,
+ 0x4c2db726,
+ 0xc1011d04,
+ 0x02401264,
+ 0x7104d010,
+ 0x0244c101,
+ 0x62c2d032,
+ 0x430cb720,
+ 0x45adb787,
+ 0x124ac101,
+ 0x44adb587,
+ 0x430cb720,
+ 0x4b2db787,
+ 0x44adb747,
+ 0x5a1bc200,
+ 0xc0007088,
+ 0xb5879096,
+ 0xb72044ad,
+ 0x9eaa430c,
+ 0x4c2db786,
+ 0x5a05c200,
+ 0x0659d110,
+ 0x4aadb587,
+ 0x430cb7a0,
+ 0xb767008b,
+ 0xb7664ab5,
+ 0xff744c2d,
+ 0x008b984a,
+ 0x4aadb507,
+ 0x430cb720,
+ 0x44adb787,
+ 0x0659d110,
+ 0x45adb587,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007502,
+ 0xb78d90f2,
+ 0x75004a2b,
+ 0x92a2c001,
+ 0x4d2bb74d,
+ 0x6a2bb70c,
+ 0xd0027480,
+ 0xc0010a02,
+ 0xb7869264,
+ 0xcffe7d2d,
+ 0x7500287c,
+ 0x919cc000,
+ 0x44adb767,
+ 0x9e926209,
+ 0x5987d1a4,
+ 0x01c7d120,
+ 0x9819ff74,
+ 0x430cb720,
+ 0x6a33b74c,
+ 0x1204c101,
+ 0x7100d014,
+ 0xc0007504,
+ 0xb50c90da,
+ 0xc0006a2b,
+ 0x729192a0,
+ 0x0a7acffe,
+ 0xb324090a,
+ 0xc101443c,
+ 0xc0000244,
+ 0xb74c9120,
+ 0xb78c6a2b,
+ 0x02446aab,
+ 0x5a07c200,
+ 0x6a2bb58c,
+ 0x430cb720,
+ 0x4d2bb78d,
+ 0xc0007500,
+ 0xb78c9204,
+ 0x753e6a2b,
+ 0x90b4c000,
+ 0xc0000a7e,
+ 0x750090c0,
+ 0x90a4c000,
+ 0xb58c0a04,
+ 0xb7206a2b,
+ 0xb786430c,
+ 0x750265ad,
+ 0x90f2c000,
+ 0x4a2bb78d,
+ 0xc0007500,
+ 0xb78d91e2,
+ 0x75004d2b,
+ 0x92e4c000,
+ 0x6a33b76c,
+ 0x2dfcc00e,
+ 0x9ac2fff4,
+ 0x91e0c000,
+ 0xc0e40103,
+ 0x0a0a0910,
+ 0xa241d008,
+ 0x430cb780,
+ 0xc0e4097e,
+ 0xd2080a14,
+ 0xb720a101,
+ 0xb78e430c,
+ 0x7502502b,
+ 0x91e4c000,
+ 0xc0c80203,
+ 0xd2080a60,
+ 0x7500aa01,
+ 0x90e4c000,
+ 0x6a2bb78c,
+ 0xb58c0a08,
+ 0xb7206a2b,
+ 0xb78c430c,
+ 0xc0c86a2b,
+ 0x0a0408b8,
+ 0x4029b580,
+ 0x430cb740,
+ 0x02059e92,
+ 0x0d38c0c8,
+ 0x0a14c0e4,
+ 0xaa01d208,
+ 0xa9c1d010,
+ 0xc00070c8,
+ 0x02059152,
+ 0x0a10c0e4,
+ 0xaa01d208,
+ 0xc00070c8,
+ 0xd0109088,
+ 0xb740a241,
+ 0x0205430c,
+ 0x0a38c0c8,
+ 0xaa01d208,
+ 0x090cc0e4,
+ 0xa241d008,
+ 0x430cb720,
+ 0xb5870a02,
+ 0xb7204cad,
+ 0xb58e430c,
+ 0xb7205b2b,
+ 0xb58e430c,
+ 0xb7205c2b,
+ 0xb587430c,
+ 0xb7204ead,
+ 0xb58e430c,
+ 0xb7205f2b,
+ 0xb58e430c,
+ 0xb720602b,
+ 0xb587430c,
+ 0xb720462d,
+ 0xb587430c,
+ 0xb720452d,
+ 0xb587430c,
+ 0xb720492d,
+ 0x0203430c,
+ 0x0a0cc0e4,
+ 0xaa01d208,
+ 0x522bb58d,
+ 0x430cb720,
+ 0xc0c80203,
+ 0xd2080a38,
+ 0xb58daa01,
+ 0xb79f4aab,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa6059c22,
+ 0x430cb720,
+ 0xb7860d02,
+ 0x0a0465ad,
+ 0x65adb586,
+ 0x430cb720,
+ 0x46b5b547,
+ 0x430cb720,
+ 0x5aabb78c,
+ 0x472db587,
+ 0x430cb720,
+ 0x5033b54e,
+ 0x430cb780,
+ 0x5aabb76c,
+ 0x0a60c0d2,
+ 0xa102d208,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007502,
+ 0xb5469184,
+ 0xb72076b5,
+ 0xb78c430c,
+ 0xb58d6aab,
+ 0xc00052ab,
+ 0xb7869120,
+ 0xb74650ad,
+ 0x12446dad,
+ 0x76adb586,
+ 0x430cb720,
+ 0x76adb746,
+ 0x4e2db786,
+ 0xd1240144,
+ 0xb543597f,
+ 0xb7205e2f,
+ 0xb746430c,
+ 0xb78676ad,
+ 0x0144512d,
+ 0x597fd124,
+ 0x5d2fb543,
+ 0x430cb720,
+ 0x5d2fb743,
+ 0x5dafb543,
+ 0x430cb720,
+ 0x76adb786,
+ 0x53adb746,
+ 0xb5860244,
+ 0xb72076ad,
+ 0xb786430c,
+ 0xb74653ad,
+ 0xc20076ad,
+ 0x70885a07,
+ 0x93f6c000,
+ 0xc0d20203,
+ 0x0d060a60,
+ 0xa102d208,
+ 0x430cb720,
+ 0x53adb786,
+ 0x76adb746,
+ 0xc2006a16,
+ 0x70885a13,
+ 0x91b6c000,
+ 0x5033b54e,
+ 0x430cb720,
+ 0x53adb786,
+ 0xc2006a16,
+ 0xb5865a13,
+ 0xb7a076ad,
+ 0x0990430c,
+ 0x9ea80d02,
+ 0x76d4b766,
+ 0x9aafff54,
+ 0xb507008b,
+ 0xb7204b2d,
+ 0xb787430c,
+ 0x6a164b2d,
+ 0x45adb587,
+ 0x430cb720,
+ 0x76adb786,
+ 0xc0007500,
+ 0x0a0290b6,
+ 0x45adb587,
+ 0x430cb7a0,
+ 0x094ec002,
+ 0x9ea80d02,
+ 0x4f4cb766,
+ 0x59ffd1a4,
+ 0x991dc194,
+ 0xb767008b,
+ 0xd02245ad,
+ 0xc0005e64,
+ 0x30c2589d,
+ 0xc00070c2,
+ 0x9ea890bc,
+ 0x45ccb527,
+ 0x430cb720,
+ 0x45adb787,
+ 0x7c2db746,
+ 0x7104d012,
+ 0x45adb587,
+ 0x430cb720,
+ 0x45adb787,
+ 0x7b2db746,
+ 0x7104d010,
+ 0x45adb587,
+ 0x430cb720,
+ 0x45adb787,
+ 0x44adb587,
+ 0x430cb7a0,
+ 0xb76d008b,
+ 0x74c04a2b,
+ 0x9142c000,
+ 0x52b3b76d,
+ 0xff540d02,
+ 0x9ea89a5a,
+ 0x52cab50d,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007502,
+ 0x018393b4,
+ 0x09e0c0c8,
+ 0xa962d008,
+ 0xc0007680,
+ 0xb78d92a2,
+ 0x753052ab,
+ 0x9212c000,
+ 0x53adb786,
+ 0x76adb746,
+ 0xc2006a1e,
+ 0x70885a0f,
+ 0x90dcc000,
+ 0x1e22d011,
+ 0xa261d008,
+ 0x430cb720,
+ 0x6a2bb78c,
+ 0xd0097506,
+ 0xd3e91a44,
+ 0xc0002ace,
+ 0x0a8a9072,
+ 0x65adb786,
+ 0xc0007502,
+ 0x757a9194,
+ 0xb3540a72,
+ 0x9ea34426,
+ 0xfff40289,
+ 0xc0009913,
+ 0x010391e0,
+ 0x0910c0e4,
+ 0xd0080a0a,
+ 0xb780a241,
+ 0x097e430c,
+ 0x0a14c0e4,
+ 0xa101d208,
+ 0x430cb720,
+ 0x502bb78e,
+ 0xc0007502,
+ 0x020391c4,
+ 0x0a60c0c8,
+ 0xaa01d208,
+ 0xd0017500,
+ 0xd3e10a54,
+ 0xc000294e,
+ 0x010b9062,
+ 0xc0d20203,
+ 0xd2080a60,
+ 0x7502aa01,
+ 0x0a24d001,
+ 0x294ed3e1,
+ 0xc0e40203,
+ 0xd2080a14,
+ 0x7097a982,
+ 0x4832b332,
+ 0x91f2c000,
+ 0xc0e40203,
+ 0xd2080a10,
+ 0x9e92a981,
+ 0x2a3ed3f1,
+ 0xb3327088,
+ 0xd3f14446,
+ 0xb78c2d2e,
+ 0xd0206a2b,
+ 0x76bd1128,
+ 0x1a44d00c,
+ 0x90d8c000,
+ 0xc0007684,
+ 0x0a0890bc,
+ 0x294ed3f1,
+ 0xc0007097,
+ 0x020391f2,
+ 0x0a10c0e4,
+ 0xa981d208,
+ 0xd3f19e92,
+ 0x70882a3e,
+ 0x4446b332,
+ 0x2daed3f2,
+ 0xc0e40203,
+ 0xd2080a0c,
+ 0xb780a182,
+ 0x0902430c,
+ 0x0a38c0c8,
+ 0xa182d208,
+ 0x430cb720,
+ 0x4a2bb54d,
+ 0x430cb720,
+ 0x52abb54d,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xa205f839,
+ 0x430cb720,
+ 0xb56d76c2,
+ 0xc0004d2b,
+ 0xb72091f2,
+ 0x0982430c,
+ 0xb72c9e9a,
+ 0xd0125aab,
+ 0xfff41992,
+ 0x0d8a9a52,
+ 0x90c0c000,
+ 0xc00076c4,
+ 0x0d8690a4,
+ 0x98a2fff4,
+ 0x4314b760,
+ 0x0de0c0d6,
+ 0x9a9dffb4,
+ 0x430cb700,
+ 0x6f2cb706,
+ 0xaa1df9f8,
+ 0xa60d9c22,
+ 0x4314b740,
+ 0x09760687,
+ 0x0d48c0c8,
+ 0xaa41d010,
+ 0x7104d010,
+ 0xa241d010,
+ 0x430cb720,
+ 0xd0110a02,
+ 0xb5860932,
+ 0xd01158ad,
+ 0xb7200e32,
+ 0x6129430c,
+ 0x77adb546,
+ 0x430cb720,
+ 0x6a2db786,
+ 0x5d2db586,
+ 0x430cb720,
+ 0x5b2bb56c,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007500,
+ 0xb78690e2,
+ 0xb7466dad,
+ 0x124450ad,
+ 0x59adb586,
+ 0x430cb720,
+ 0x76adb786,
+ 0x512db746,
+ 0xd1240128,
+ 0xb543597f,
+ 0xb7205d2f,
+ 0xb786430c,
+ 0xc20065ad,
+ 0x749c5908,
+ 0x90f2c000,
+ 0xc0ca0103,
+ 0xc000094c,
+ 0x02489260,
+ 0xc000751c,
+ 0x01039112,
+ 0x094cc0ca,
+ 0xc0000a0e,
+ 0xc00491a0,
+ 0x01037498,
+ 0x094cc0ca,
+ 0x90b2c000,
+ 0xc0000a0a,
+ 0x0a069060,
+ 0xa241d008,
+ 0x430cb720,
+ 0x4f35b766,
+ 0xc0ca0203,
+ 0xd2080a4c,
+ 0xc101a902,
+ 0xb7661207,
+ 0xd13159ad,
+ 0xc1806629,
+ 0x70885910,
+ 0x91f6c000,
+ 0xc0ca0203,
+ 0xd2080a50,
+ 0x7500aa01,
+ 0x90e4c000,
+ 0x792bb78c,
+ 0xc0000a04,
+ 0xd0319280,
+ 0xc1806526,
+ 0x71045a0c,
+ 0x931cc000,
+ 0xc0ca0203,
+ 0xd2080a50,
+ 0x7500aa01,
+ 0x9204c000,
+ 0x792bb78c,
+ 0xb58c1a04,
+ 0xb780792b,
+ 0x0906430c,
+ 0x0a50c0ca,
+ 0xa101d208,
+ 0x9100c000,
+ 0xc0ca0103,
+ 0x0a020950,
+ 0xa241d008,
+ 0x430cb720,
+ 0x792bb78c,
+ 0x4002ba24,
+ 0x752ac004,
+ 0x90dac000,
+ 0x0a5ac002,
+ 0x9100c000,
+ 0x7502c008,
+ 0x90dcc000,
+ 0x0a06c004,
+ 0x792bb58c,
+ 0x430cb720,
+ 0xb78c0902,
+ 0xb546792b,
+ 0xb74676ad,
+ 0xb7204e2d,
+ 0xba244314,
+ 0xb7264002,
+ 0x624577ad,
+ 0x40cbb74e,
+ 0x74826243,
+ 0x0a00c002,
+ 0x5999c200,
+ 0x9184c000,
+ 0x76cdb746,
+ 0x59cdb786,
+ 0xd2187480,
+ 0x12285a07,
+ 0x93e0c000,
+ 0x59d5b746,
+ 0x4f4db786,
+ 0x5d10d122,
+ 0x70881209,
+ 0x90b8c000,
+ 0xc0007680,
+ 0xd010921c,
+ 0xb7460624,
+ 0xc10176cd,
+ 0xc2000244,
+ 0x11285a13,
+ 0xb5460126,
+ 0xc00076cd,
+ 0xb78690e0,
+ 0x024676cd,
+ 0x76cdb586,
+ 0x430cb720,
+ 0x7735b5a6,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc000751c,
+ 0x0a0490b2,
+ 0x65adb586,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007502,
+ 0x02039224,
+ 0x0a38c0c8,
+ 0xaa01d208,
+ 0x6aabb58c,
+ 0x430cb720,
+ 0x6aabb78c,
+ 0x512bb58d,
+ 0x9380c007,
+ 0x50abb78d,
+ 0xc0007500,
+ 0xb78e9184,
+ 0x750040ab,
+ 0x9142c000,
+ 0x532bb78d,
+ 0xc0007504,
+ 0x0a0690b4,
+ 0x9060c000,
+ 0xb58d0a02,
+ 0xb7204f2b,
+ 0x0b02430c,
+ 0x662db746,
+ 0x4a2bb76d,
+ 0xc0c80203,
+ 0x11b40a4c,
+ 0xa902d208,
+ 0x5fadb726,
+ 0x5987d1a4,
+ 0xc1010524,
+ 0x55eb05b2,
+ 0x9825ff54,
+ 0x430cb720,
+ 0x5fadb5c6,
+ 0x430cb720,
+ 0x5db5b746,
+ 0x65adb786,
+ 0x1944d011,
+ 0x6125c101,
+ 0xc2001a04,
+ 0x02445a05,
+ 0xb5860240,
+ 0xb7a05dad,
+ 0x020b430c,
+ 0x0a4cc0c8,
+ 0xaa01d208,
+ 0xb766008b,
+ 0xb7665db5,
+ 0x9ea265ad,
+ 0x55eb1984,
+ 0x9bffff34,
+ 0xb5069ea9,
+ 0xb7005dcd,
+ 0xb74d430c,
+ 0x748050aa,
+ 0x9342c000,
+ 0x51aab78d,
+ 0xc0007500,
+ 0xb76d90e2,
+ 0x74c0532a,
+ 0x90c4c000,
+ 0x4aaab70d,
+ 0x92e0c000,
+ 0x5985d1a4,
+ 0xc1019eb2,
+ 0xff3405b8,
+ 0x18049be0,
+ 0x91a0c000,
+ 0x4a2ab76d,
+ 0x422ab78e,
+ 0xd1a49e92,
+ 0xc1015985,
+ 0xff3405b8,
+ 0xb7209bd2,
+ 0xb766430c,
+ 0x74c465ad,
+ 0x93d4c000,
+ 0x512bb74d,
+ 0x0a04d011,
+ 0xc0007088,
+ 0xb74c913a,
+ 0xd0116f33,
+ 0x72991a24,
+ 0x90b6c000,
+ 0xc0001804,
+ 0xd01191a0,
+ 0x70881a04,
+ 0x90f8c000,
+ 0x0a24d011,
+ 0xc0007299,
+ 0x0804907c,
+ 0x0e52d031,
+ 0x5a13c200,
+ 0x0a027502,
+ 0x0a42d00d,
+ 0x1108d020,
+ 0xc0007696,
+ 0xb74c921c,
+ 0xd0116f2b,
+ 0x72991a24,
+ 0x0d22d00e,
+ 0x90fcc000,
+ 0x0a24d011,
+ 0xd00d7299,
+ 0x74c41d22,
+ 0x9334c000,
+ 0x652bb78c,
+ 0xc000752c,
+ 0xb76c9292,
+ 0xb78d6f2b,
+ 0xd01152ab,
+ 0xba241938,
+ 0x71044002,
+ 0x9136c000,
+ 0x6a2bb74c,
+ 0x1a34d011,
+ 0xd00d7088,
+ 0x02031d24,
+ 0x0a48c0c8,
+ 0xa901d208,
+ 0xc8109e54,
+ 0x02037088,
+ 0x0a44c0c8,
+ 0xaa01d208,
+ 0x7104d012,
+ 0x6a2bb58c,
+ 0x430cb720,
+ 0x6a2bb78c,
+ 0x6aabb58c,
+ 0x430cb720,
+ 0x6aabb78c,
+ 0x512bb58d,
+ 0x430cb720,
+ 0x6a2bb78c,
+ 0x4aabb58d,
+ 0x430cb720,
+ 0xb78c0902,
+ 0xb58d6aab,
+ 0xb72052ab,
+ 0xb546430c,
+ 0xb720662d,
+ 0xb78c430c,
+ 0xc0d25b2b,
+ 0x0a0408e0,
+ 0x4029b580,
+ 0x430cb720,
+ 0x6aabb78c,
+ 0x59abb58d,
+ 0x430cb720,
+ 0x6aabb78c,
+ 0x592bb58d,
+ 0x430cb720,
+ 0x40abb54e,
+ 0x430cb720,
+ 0x502bb54d,
+ 0x430cb720,
+ 0x402bb54e,
+ 0x430cb720,
+ 0x50abb54d,
+ 0x430cb720,
+ 0x422bb54e,
+ 0x430cb720,
+ 0x4a2bb54d,
+ 0x430cb720,
+ 0x51abb54d,
+ 0x430cb720,
+ 0x532bb54d,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0xb720a605,
+ 0xb566430c,
+ 0xb72064b5,
+ 0xb786430c,
+ 0x750264ad,
+ 0x9004c006,
+ 0xb5460902,
+ 0xb7206fad,
+ 0xb786430c,
+ 0xc00477ad,
+ 0xc0037510,
+ 0xb7669092,
+ 0xb76659b5,
+ 0x9e9258ad,
+ 0xff3405b6,
+ 0x0a229af0,
+ 0x7008c010,
+ 0xcc121a40,
+ 0xb7207008,
+ 0xb78d430c,
+ 0x75004a2b,
+ 0x92e2c000,
+ 0x58adb786,
+ 0x6badb586,
+ 0x430cb720,
+ 0x4e35b746,
+ 0x6badb746,
+ 0x02246d7e,
+ 0x0a080244,
+ 0xc1015d17,
+ 0xc2000244,
+ 0xb5865a0b,
+ 0xb7206bad,
+ 0xb746430c,
+ 0xb74658ad,
+ 0x02246bb5,
+ 0xc2000244,
+ 0x9e525a0b,
+ 0x7104d012,
+ 0x6badb586,
+ 0x430cb720,
+ 0x50abb78d,
+ 0xc0007500,
+ 0xb78e91c2,
+ 0x750040ab,
+ 0x9124c000,
+ 0x6badb786,
+ 0xc2006a26,
+ 0xb5865a0f,
+ 0xb7206bad,
+ 0xb746430c,
+ 0x62356bad,
+ 0x5a17c200,
+ 0xb5461128,
+ 0xb7206bad,
+ 0xb786430c,
+ 0xb7466bad,
+ 0xd0107b2d,
+ 0xb5867104,
+ 0xc0006bad,
+ 0xb7469280,
+ 0xb78650ad,
+ 0x12446dad,
+ 0x59adb586,
+ 0x430cb720,
+ 0x59adb786,
+ 0x4e2db746,
+ 0x5a17c200,
+ 0xb5461128,
+ 0xb7206bad,
+ 0xb78c430c,
+ 0x75005b2b,
+ 0x9342c001,
+ 0x78adb786,
+ 0xc0007500,
+ 0xb7669302,
+ 0x74c0792d,
+ 0x9262c000,
+ 0x0d029ea3,
+ 0x9a6fff34,
+ 0x9e857420,
+ 0x90b4c000,
+ 0xc0000ec2,
+ 0x74029120,
+ 0x90d2c000,
+ 0xc0000e8a,
+ 0x0e969060,
+ 0x430cb7a0,
+ 0x008b0d02,
+ 0x6bb5b766,
+ 0x5b2bb76c,
+ 0xd11065d7,
+ 0xff3405d7,
+ 0x008b9a54,
+ 0x6badb506,
+ 0x430cb7a0,
+ 0x0d029e6b,
+ 0xb766008b,
+ 0xff346bb5,
+ 0x008b9a48,
+ 0x6c2db506,
+ 0x430cb720,
+ 0xb5860a02,
+ 0xb72070ad,
+ 0xb586430c,
+ 0xb720712d,
+ 0xb586430c,
+ 0xb7206fad,
+ 0xb586430c,
+ 0xb720712d,
+ 0xb586430c,
+ 0xb72070ad,
+ 0xb586430c,
+ 0xb79f7e2d,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa6059c22,
+ 0xaa61d810,
+ 0x430cb720,
+ 0xb58d0687,
+ 0xb7204dab,
+ 0xb786430c,
+ 0xb74676ad,
+ 0x12447e2d,
+ 0x76adb586,
+ 0x430cb720,
+ 0xb7469e8b,
+ 0xc0e24e35,
+ 0xd0100d84,
+ 0x74c0a9e1,
+ 0x9284c000,
+ 0x67adb784,
+ 0x50adb746,
+ 0x6629d131,
+ 0xb3327104,
+ 0xc0004458,
+ 0x0a069138,
+ 0xa261d010,
+ 0x430cb720,
+ 0x4e35b746,
+ 0x430cb720,
+ 0x6dadb746,
+ 0x7e2db786,
+ 0xc1010244,
+ 0xb5861244,
+ 0xb7206dad,
+ 0xb746430c,
+ 0xb78659b5,
+ 0xb7467e2d,
+ 0xc1014e2d,
+ 0x12440244,
+ 0x59adb586,
+ 0x430cb720,
+ 0x59adb786,
+ 0x7500de01,
+ 0x90d6c000,
+ 0x0a02cf01,
+ 0x59adb586,
+ 0x430cb720,
+ 0x0d02091e,
+ 0x7e2db786,
+ 0x4e35b726,
+ 0x5d2fb763,
+ 0x1419d110,
+ 0x587fd024,
+ 0x01b0e000,
+ 0xd00305b0,
+ 0xb5630db2,
+ 0xb7a05d2f,
+ 0x008b430c,
+ 0x5d2fb763,
+ 0x9845c174,
+ 0x5e74d022,
+ 0x590d0101,
+ 0xd1243144,
+ 0x008b597f,
+ 0x5dafb543,
+ 0x430cb720,
+ 0x64adb786,
+ 0xc0027504,
+ 0xb7869124,
+ 0xc00477ad,
+ 0xc0017510,
+ 0xb74693b2,
+ 0x768076b5,
+ 0x909cc001,
+ 0x772db786,
+ 0xc0c80103,
+ 0xd008094c,
+ 0xc200a941,
+ 0xc1015a05,
+ 0xc2000244,
+ 0xb5865209,
+ 0xb7a058ad,
+ 0x020b430c,
+ 0xc0c8008b,
+ 0xb7660a4c,
+ 0xb76658b5,
+ 0xd208772d,
+ 0xff34a902,
+ 0x008b997e,
+ 0x58adb506,
+ 0x9100c000,
+ 0x4e2db786,
+ 0x5a0bc200,
+ 0x58adb586,
+ 0x430cb720,
+ 0x4e2db786,
+ 0x58adb746,
+ 0x5a0bc200,
+ 0x7088c812,
+ 0x58adb546,
+ 0x9300c001,
+ 0x4e2db786,
+ 0x58adb586,
+ 0x9240c001,
+ 0xb58d0a02,
+ 0xb7204eab,
+ 0xb746430c,
+ 0x768264b5,
+ 0x9024c001,
+ 0x772db786,
+ 0xb5861a04,
+ 0xb720772d,
+ 0xb74c430c,
+ 0xb7865aab,
+ 0x7104772d,
+ 0x9094c000,
+ 0x7735b546,
+ 0x430cb720,
+ 0x4a2bb78d,
+ 0xb58d0a04,
+ 0xb7204a2b,
+ 0xb786430c,
+ 0xb5867e2d,
+ 0xc00078ad,
+ 0x76809120,
+ 0x90c4c000,
+ 0x7e2db786,
+ 0x792db586,
+ 0x430cb720,
+ 0x6dadb746,
+ 0xc0007480,
+ 0xb7869196,
+ 0x024476ad,
+ 0x76adb586,
+ 0x430cb720,
+ 0xb5860a02,
+ 0xb7206dad,
+ 0xb78c430c,
+ 0x7500632b,
+ 0x9184c001,
+ 0x6dadb746,
+ 0x4fadb786,
+ 0xc0017088,
+ 0x0203905c,
+ 0x0a48c0c8,
+ 0x4aabb74d,
+ 0xaa01d208,
+ 0xc0007088,
+ 0x0a069306,
+ 0xa221da10,
+ 0x430cb720,
+ 0x772db786,
+ 0xc0007500,
+ 0x02039222,
+ 0x0a48c0c8,
+ 0x422bb74e,
+ 0xaa01d208,
+ 0xb58e0244,
+ 0xc000422b,
+ 0x0a0290a0,
+ 0xa221da10,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x430cb720,
+ 0x64adb786,
+ 0xc0087502,
+ 0xb78d9284,
+ 0x75004a2b,
+ 0x90e2c000,
+ 0x4eb3b76d,
+ 0xc00076c0,
+ 0xb7869102,
+ 0xb5866fad,
+ 0xc0015c2d,
+ 0xb74691c0,
+ 0xb7664c35,
+ 0xd0106fad,
+ 0xd0110624,
+ 0xc101693b,
+ 0x70880244,
+ 0x9154c000,
+ 0x5c2db786,
+ 0x5908c180,
+ 0x70886a16,
+ 0x9132c000,
+ 0x6e27d3f1,
+ 0x5a21c200,
+ 0xc00070c8,
+ 0x0a069214,
+ 0x402bb58e,
+ 0x430cb720,
+ 0x5b2bb78c,
+ 0x08e0c0d2,
+ 0xb5800a0c,
+ 0xc0004029,
+ 0xb56e9080,
+ 0xb7204033,
+ 0xb786430c,
+ 0xc00477ad,
+ 0xc0047510,
+ 0xb78693b2,
+ 0x750265ad,
+ 0x9364c000,
+ 0x4a2bb74d,
+ 0xc0007480,
+ 0xb54d92c4,
+ 0xb7204fab,
+ 0xb786430c,
+ 0xb5867e2d,
+ 0xb7205a2d,
+ 0xb786430c,
+ 0xb5866fad,
+ 0xb720582d,
+ 0xb54d430c,
+ 0xc003502b,
+ 0xb76d93e0,
+ 0xb746502b,
+ 0x74c06fb5,
+ 0x93e2c000,
+ 0x582db746,
+ 0x0624d010,
+ 0x0244c101,
+ 0xc2000244,
+ 0xb5865a09,
+ 0xb720582d,
+ 0xb786430c,
+ 0xb5867e2d,
+ 0xb7205a2d,
+ 0x0a02430c,
+ 0x502bb58d,
+ 0x430cb720,
+ 0xb58d0a04,
+ 0xc0024fab,
+ 0xb7869360,
+ 0xd0314c2d,
+ 0x6a266d25,
+ 0xc0017088,
+ 0xb78691f4,
+ 0xb7465a2d,
+ 0x01487e35,
+ 0x05240128,
+ 0xc0017295,
+ 0x0d0690bc,
+ 0x5033b54d,
+ 0x430cb720,
+ 0x50b3b54d,
+ 0x430cb720,
+ 0x51abb56d,
+ 0x430cb720,
+ 0x532bb56d,
+ 0x430cb720,
+ 0x5aabb78c,
+ 0x4a2bb74d,
+ 0xc2006a14,
+ 0x70885a0f,
+ 0x90dac000,
+ 0x40abb56e,
+ 0x9140c001,
+ 0x40b3b54e,
+ 0x90c0c001,
+ 0xb58d0a02,
+ 0xb720502b,
+ 0xb78e430c,
+ 0x7500402b,
+ 0x9384c000,
+ 0x582db746,
+ 0x6fb5b746,
+ 0x02440224,
+ 0x0244c101,
+ 0x5a09c200,
+ 0x582db586,
+ 0x430cb720,
+ 0x5a2db746,
+ 0x7e35b746,
+ 0x02440224,
+ 0x0244c101,
+ 0x5a0bc200,
+ 0x5a2db586,
+ 0x430cb720,
+ 0x6fadb786,
+ 0x5c2db586,
+ 0x430cb720,
+ 0x50abb78d,
+ 0xc0007500,
+ 0xb78d9382,
+ 0x7500502b,
+ 0x91e2c000,
+ 0x40abb78e,
+ 0xc0007500,
+ 0xb78c9144,
+ 0x01035b2b,
+ 0x0960c0d2,
+ 0xc0000a0c,
+ 0xb78c9100,
+ 0x01035b2b,
+ 0x0960c0d2,
+ 0xd0080a08,
+ 0xb720a241,
+ 0xb78d430c,
+ 0x75004a2b,
+ 0x48a4b340,
+ 0x70adb786,
+ 0x5eadb586,
+ 0x75049c22,
+ 0x48a4b340,
+ 0x7e2db766,
+ 0x5a4be180,
+ 0x90ccc000,
+ 0x0d82c002,
+ 0x9100c000,
+ 0x7100d002,
+ 0x9ea30a04,
+ 0x4001ba1b,
+ 0x6aabb78c,
+ 0xba24097e,
+ 0xd0104002,
+ 0x0c867104,
+ 0xd0129e48,
+ 0xba247100,
+ 0x9e584002,
+ 0x5a18c200,
+ 0x65b5b746,
+ 0xba245163,
+ 0x62454002,
+ 0xc1010936,
+ 0x76841126,
+ 0x518bc200,
+ 0x9054c006,
+ 0xb58d0a02,
+ 0xb7204eab,
+ 0xb78d430c,
+ 0x75004f2b,
+ 0x92c4c005,
+ 0x592db746,
+ 0x590cd1a4,
+ 0x6a2bd031,
+ 0xc0007299,
+ 0xd01190f6,
+ 0x72996a2b,
+ 0x911ac005,
+ 0x4eb3b52d,
+ 0x430cb720,
+ 0x592db746,
+ 0x022470c4,
+ 0x921cc002,
+ 0xc00070c8,
+ 0x023690d6,
+ 0xc0017104,
+ 0xb78c90ba,
+ 0x0a18792b,
+ 0x792bb58c,
+ 0x430cb720,
+ 0x77b5b746,
+ 0x4e2db786,
+ 0x76adb746,
+ 0x6245c101,
+ 0xc2000a20,
+ 0x01285a11,
+ 0x76adb546,
+ 0x430cb780,
+ 0xc0d2091a,
+ 0xd2080a60,
+ 0xb780a101,
+ 0x190c430c,
+ 0x0a48c0ca,
+ 0x9240c003,
+ 0x792bb78c,
+ 0xb58c0a08,
+ 0xb720792b,
+ 0xb746430c,
+ 0xb78677b5,
+ 0xb7464e2d,
+ 0xc10176ad,
+ 0x0a406245,
+ 0x5a15c200,
+ 0xb5460128,
+ 0xb78076ad,
+ 0x090e430c,
+ 0x0a60c0d2,
+ 0xa101d208,
+ 0x430cb780,
+ 0x0a48c0ca,
+ 0xa082d208,
+ 0x9200c002,
+ 0xc00070c8,
+ 0x023690d6,
+ 0xc0017104,
+ 0xb78c90ba,
+ 0x1a08792b,
+ 0x792bb58c,
+ 0x430cb720,
+ 0x77b5b746,
+ 0x4e2db786,
+ 0x76adb746,
+ 0x6245c101,
+ 0xc2000a40,
+ 0x11285a15,
+ 0x76adb546,
+ 0x430cb780,
+ 0xc0d2091a,
+ 0xd2080a60,
+ 0xb780a101,
+ 0x1920430c,
+ 0x0a48c0ca,
+ 0x9080c001,
+ 0x792bb78c,
+ 0xb58c1a04,
+ 0xb720792b,
+ 0xb746430c,
+ 0xb78677b5,
+ 0xb7464e2d,
+ 0xc10176ad,
+ 0xc0026245,
+ 0xc2000a00,
+ 0x11285a19,
+ 0x76adb546,
+ 0x430cb780,
+ 0xc0d2090e,
+ 0xd2080a60,
+ 0xb780a101,
+ 0x1910430c,
+ 0x0a48c0ca,
+ 0xa101d208,
+ 0x430cb720,
+ 0x592db566,
+ 0x430cb720,
+ 0x4eabb78d,
+ 0x50abb58d,
+ 0xa60d9c22,
+ 0x430cb720,
+ 0xb7460307,
+ 0x768464b5,
+ 0x90c4c000,
+ 0x6ab3b74c,
+ 0x9020c00a,
+ 0xc0077682,
+ 0xb78d9364,
+ 0x75004a2b,
+ 0x9224c000,
+ 0x4eabb78d,
+ 0xc0007500,
+ 0xb78c9184,
+ 0xb74e6aab,
+ 0x9ea2422b,
+ 0xb58e0244,
+ 0xc009422b,
+ 0xb78d9140,
+ 0x75004eab,
+ 0x90c2c001,
+ 0x01030203,
+ 0x0a48c0ca,
+ 0xaa01d208,
+ 0x0938c0c8,
+ 0xa942d008,
+ 0x4000ba24,
+ 0xc1010910,
+ 0xd0080244,
+ 0xd010a941,
+ 0x01037104,
+ 0x0944c0c8,
+ 0xa941d008,
+ 0x7104c812,
+ 0x4dabb78d,
+ 0x4233b74e,
+ 0xd0027500,
+ 0x75000922,
+ 0x9000c001,
+ 0x502bb78d,
+ 0xc0017500,
+ 0x020392c2,
+ 0x0a38c0c8,
+ 0xaa01d208,
+ 0xc0c80103,
+ 0xd0080948,
+ 0x0a04a941,
+ 0x7104d010,
+ 0xc0c80103,
+ 0xd0080944,
+ 0xc812a941,
+ 0xb78d7104,
+ 0xb74e4dab,
+ 0x75004233,
+ 0x0a28d002,
+ 0x4422b324,
+ 0x2a7ccffe,
+ 0x0244c101,
+ 0x422bb58e,
+ 0x430cb720,
+ 0xb78d9e92,
+ 0x022851ab,
+ 0x51abb58d,
+ 0x430cb720,
+ 0x532bb78d,
+ 0xb58d0a04,
+ 0xc006532b,
+ 0xb7869100,
+ 0xb5865f2d,
+ 0xb7a05e2d,
+ 0x9e694314,
+ 0x6badb746,
+ 0xc0007480,
+ 0xb78e9316,
+ 0x750040ab,
+ 0x09e2d002,
+ 0x9104c000,
+ 0xc0d29e6c,
+ 0xd2080a60,
+ 0x01e8aa01,
+ 0xc0c89e6c,
+ 0xd2080a48,
+ 0xcc10aa01,
+ 0xc00170c8,
+ 0x9e6990e0,
+ 0x5e2db766,
+ 0x4e2db786,
+ 0x6a35b746,
+ 0xc20011a6,
+ 0xcc125a0b,
+ 0x9e6c70c8,
+ 0xc0c89e6a,
+ 0xd2080a4c,
+ 0xc0d2aa01,
+ 0xd0080960,
+ 0x0a14aac1,
+ 0x554c9ea3,
+ 0x5987d1a4,
+ 0xc15405b4,
+ 0x581998d6,
+ 0xc010026a,
+ 0x12ea7008,
+ 0x700acc12,
+ 0xb78d9e69,
+ 0x9e694dab,
+ 0xb74e7500,
+ 0xd002422b,
+ 0x750009b2,
+ 0x0a38d002,
+ 0x4422b334,
+ 0xcffe9e69,
+ 0x02442a7c,
+ 0x422bb58e,
+ 0x430cb720,
+ 0x50abb78d,
+ 0xc0007500,
+ 0xb78d91c2,
+ 0x023851ab,
+ 0x51abb58d,
+ 0x430cb720,
+ 0x532bb78d,
+ 0xb58d0a04,
+ 0xb720532b,
+ 0x0203430c,
+ 0x0a48c0c8,
+ 0xaa01d208,
+ 0x70c8d010,
+ 0xc0c80103,
+ 0xd0080944,
+ 0xd012a941,
+ 0x9ea27104,
+ 0x6f2bb58c,
+ 0x90a0c002,
+ 0xc0007680,
+ 0x000790a2,
+ 0x93c0c001,
+ 0x6c2db766,
+ 0x5aadb746,
+ 0x4e2db786,
+ 0xc20011b4,
+ 0xcc125a0b,
+ 0x020370c8,
+ 0x0a4cc0c8,
+ 0xaa01d208,
+ 0x6aadb746,
+ 0x5a05c200,
+ 0x515051f3,
+ 0x5987d1a4,
+ 0xc1015914,
+ 0xff1405b4,
+ 0xb72099a8,
+ 0x5819430c,
+ 0xb74d0810,
+ 0xb78d59ab,
+ 0x0244592b,
+ 0x5a07c200,
+ 0x094ed011,
+ 0x7004c010,
+ 0xc0120a04,
+ 0x02037008,
+ 0x0a48c0c8,
+ 0xaa01d208,
+ 0x7008c010,
+ 0x08c4c0c8,
+ 0x4029b780,
+ 0x7008c012,
+ 0x9e509e82,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0xb720a605,
+ 0x0687430c,
+ 0x64adb786,
+ 0xc00a7502,
+ 0xb7469144,
+ 0xb76d712d,
+ 0xe1004a33,
+ 0xc0005a4b,
+ 0xc00290cc,
+ 0xc0000982,
+ 0xd0029100,
+ 0x0a047100,
+ 0xba1b0189,
+ 0x9e6c4000,
+ 0x514fd024,
+ 0x4002ba24,
+ 0xd010097e,
+ 0x19787104,
+ 0x7104d012,
+ 0xc0c80103,
+ 0xba24094c,
+ 0xd0084002,
+ 0xc200a941,
+ 0xba245a18,
+ 0xc1014002,
+ 0x09146245,
+ 0x76c01126,
+ 0x510bd224,
+ 0x90e2c000,
+ 0x4eabb78d,
+ 0xc0007500,
+ 0xb78693c2,
+ 0xc1015fad,
+ 0xb5860244,
+ 0xb7205fad,
+ 0xb546430c,
+ 0xb7206a35,
+ 0xb546430c,
+ 0xb7206ab5,
+ 0xb786430c,
+ 0xb58670ad,
+ 0xb7205ead,
+ 0xb786430c,
+ 0xb5865ead,
+ 0xc0065f2d,
+ 0xb78d9260,
+ 0x7500502b,
+ 0x9124c006,
+ 0x402bb78e,
+ 0xc0067500,
+ 0xb7869084,
+ 0xc1015fad,
+ 0xb5860244,
+ 0xb7205fad,
+ 0xb78d430c,
+ 0xb7664fab,
+ 0x750070ad,
+ 0x90c2c000,
+ 0x5eadb566,
+ 0x9180c000,
+ 0x5eadb746,
+ 0x02460236,
+ 0x02446916,
+ 0x5a0dc200,
+ 0x5eadb586,
+ 0x430cb720,
+ 0x5eadb786,
+ 0x5f2db586,
+ 0x430cb720,
+ 0x4fabb78d,
+ 0xc0007500,
+ 0xb54690c2,
+ 0xc0016a35,
+ 0xb78d91e0,
+ 0xb74650ab,
+ 0x75006a2d,
+ 0x9182c001,
+ 0x532bb78d,
+ 0x62451a04,
+ 0x0629d110,
+ 0x6a2db586,
+ 0x430cb720,
+ 0x532bb78d,
+ 0x6a2db746,
+ 0x5a05c200,
+ 0xb5460128,
+ 0xb7a06a2d,
+ 0x020b430c,
+ 0x0a4cc0c8,
+ 0xaa01d208,
+ 0xb766008b,
+ 0x9ea26a35,
+ 0x532bb76d,
+ 0xff1455eb,
+ 0x9ea998c2,
+ 0x6a4db506,
+ 0x430cb720,
+ 0x9100c001,
+ 0x662db786,
+ 0x4a2bb76d,
+ 0xd01111b8,
+ 0x62451a32,
+ 0x0629d110,
+ 0x5907c180,
+ 0x6a2db586,
+ 0x430cb720,
+ 0x6a2db786,
+ 0xb5860244,
+ 0xb7a06a2d,
+ 0x008b430c,
+ 0x6a35b766,
+ 0x9b6cc134,
+ 0xb5069ea9,
+ 0xb7206a4d,
+ 0xb786430c,
+ 0x750265ad,
+ 0x90c4c000,
+ 0x6a2db786,
+ 0x9280c000,
+ 0x4a33b74d,
+ 0x5aabb74c,
+ 0x6a35b766,
+ 0x0624d010,
+ 0x71041904,
+ 0x5d2db766,
+ 0x9156c000,
+ 0x0236c101,
+ 0x5a07c200,
+ 0x6aadb586,
+ 0x9140c001,
+ 0x1224c101,
+ 0x6526d031,
+ 0x02446247,
+ 0x6aadb586,
+ 0x430cb720,
+ 0x5aabb78c,
+ 0x6aadb746,
+ 0xc2001a04,
+ 0x01285a07,
+ 0x6aadb546,
+ 0x430cb7a0,
+ 0xb76c008b,
+ 0xb7665aab,
+ 0x19846ab5,
+ 0x9b2ac134,
+ 0xb5069ea9,
+ 0xc0006acd,
+ 0xb78690e0,
+ 0x0a04662d,
+ 0x662db586,
+ 0x430cb720,
+ 0x6ab5b746,
+ 0x5dadb746,
+ 0x6b35b546,
+ 0x430cb720,
+ 0x65adb786,
+ 0xc0007502,
+ 0x022491d4,
+ 0xb7460244,
+ 0xc1016aad,
+ 0xc2000244,
+ 0xc8105a0b,
+ 0xb5467088,
+ 0xb7206aad,
+ 0xb78d430c,
+ 0xb58d592b,
+ 0xb72059ab,
+ 0xb5ad430c,
+ 0xb7205933,
+ 0xb786430c,
+ 0x750064ad,
+ 0x90c4c000,
+ 0x70adb786,
+ 0x5aadb586,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x430cb720,
+ 0x6435b746,
+ 0x4e2db746,
+ 0x5aabb78c,
+ 0x4e35b546,
+ 0x4314b720,
+ 0x4a2bb72d,
+ 0x1524c101,
+ 0xb7461a04,
+ 0x124276cd,
+ 0x6245c101,
+ 0xb5460128,
+ 0xb78076cd,
+ 0x0902430c,
+ 0x0a70c0d0,
+ 0xa101d208,
+ 0xa6059c22,
+ 0x430cb720,
+ 0xb5660287,
+ 0xd8087e35,
+ 0x7500aa61,
+ 0x9144c000,
+ 0x430cb720,
+ 0x6f35b766,
+ 0x9a6efff4,
+ 0x9b04ffd4,
+ 0x430cb780,
+ 0x0a70c0d0,
+ 0xaa01d208,
+ 0xc0007502,
+ 0xfff49084,
+ 0x9eab9bc3,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0xffcd8c40,
+ 0x01079240,
+ 0xa9e1f010,
+ 0x70c0d014,
+ 0x753ec03f,
+ 0x90dcc000,
+ 0x4226e000,
+ 0x48acb340,
+ 0xf0100226,
+ 0x9c22a261,
+ 0xb7c0a60d,
+ 0x008d430c,
+ 0x5b33b7ae,
+ 0x1dd2d012,
+ 0x9948ff34,
+ 0x028c5808,
+ 0xd0129ea9,
+ 0xb7a61dd4,
+ 0xff347ecd,
+ 0x5808993f,
+ 0xb726008c,
+ 0x9eb17ead,
+ 0xc2000252,
+ 0xb7665a05,
+ 0x9ea25ad5,
+ 0x72c4cc12,
+ 0x4e4db766,
+ 0x0d025d9c,
+ 0x9b9ffef4,
+ 0xc0dc020d,
+ 0xd2080a50,
+ 0xc006a901,
+ 0x11c00a7a,
+ 0xc0007480,
+ 0x9e9390a4,
+ 0x90e0c000,
+ 0xb786008d,
+ 0xd0207a2d,
+ 0x9eb111b8,
+ 0xb7460a02,
+ 0x592779cd,
+ 0x7088c810,
+ 0xc0dc020d,
+ 0xd2080a60,
+ 0xc180aa01,
+ 0x9eb25a8f,
+ 0xd01102d8,
+ 0x9eb36e3f,
+ 0x0d50c0dc,
+ 0x02d40e86,
+ 0x5a23c200,
+ 0xa2c2d010,
+ 0x7a4db566,
+ 0x0db0c0dc,
+ 0xfff402d8,
+ 0xc0089b96,
+ 0xd4100a02,
+ 0x9e6c7148,
+ 0x7148d412,
+ 0x2afcc00e,
+ 0xb79f000b,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa60d9c22,
+ 0x430cb7a0,
+ 0xb786008b,
+ 0xb741562d,
+ 0x75045e2d,
+ 0x92a4c005,
+ 0xc0007488,
+ 0xb78190e4,
+ 0x75045ead,
+ 0x9322c005,
+ 0xb746008b,
+ 0xb7464fad,
+ 0xb7616bb5,
+ 0xd1205e2d,
+ 0xc02111a5,
+ 0xc00076c0,
+ 0x74c29354,
+ 0x93e4c009,
+ 0x4e2db786,
+ 0x70886a1e,
+ 0x9336c009,
+ 0x5e15d1a2,
+ 0x6ac5d072,
+ 0x9060c001,
+ 0xc00074c2,
+ 0xd1a293e2,
+ 0xd0325e15,
+ 0xc0006acd,
+ 0x74c29340,
+ 0x9184c009,
+ 0xb786008b,
+ 0x6a1e4e2d,
+ 0xc0097088,
+ 0xd07190b6,
+ 0xc0006e35,
+ 0x74c290e0,
+ 0x9102c000,
+ 0x6e3dd031,
+ 0x5a97d224,
+ 0x9060c000,
+ 0x008b0e82,
+ 0x5ab3b7cc,
+ 0xc0027782,
+ 0xff3491c4,
+ 0x008b9a72,
+ 0x4e35b7a6,
+ 0xc0109e6b,
+ 0xc1017006,
+ 0xd812120b,
+ 0xb7867008,
+ 0x750075ad,
+ 0x92e4c001,
+ 0x98acff34,
+ 0xc0007400,
+ 0x008b91a2,
+ 0x5babb78e,
+ 0x67adb744,
+ 0x0a027088,
+ 0x0a42d004,
+ 0x92c0c000,
+ 0xb781008b,
+ 0x75025e2d,
+ 0x90a2c000,
+ 0xc0007508,
+ 0x008b9224,
+ 0x67adb784,
+ 0x50adb746,
+ 0x6659d131,
+ 0x0a027104,
+ 0x0a42d00b,
+ 0x75adb586,
+ 0x90a0c000,
+ 0xb5c6008b,
+ 0x008b75b5,
+ 0x75adb786,
+ 0xc0007500,
+ 0xc81290c4,
+ 0xc000710c,
+ 0x010d9060,
+ 0xb786008b,
+ 0xd0204e2d,
+ 0x008b12c4,
+ 0x5e2db781,
+ 0xc0007502,
+ 0xff349264,
+ 0x7400986b,
+ 0x91c4c000,
+ 0xb786008b,
+ 0xb74650ad,
+ 0xc2006bad,
+ 0x70885a08,
+ 0x909cc000,
+ 0x73b5b7a6,
+ 0xd4120d06,
+ 0xc0047344,
+ 0x748291e0,
+ 0x92c2c001,
+ 0xc0047488,
+ 0x008b9024,
+ 0x5eadb781,
+ 0xc0007500,
+ 0xb7a690c2,
+ 0xc0034e35,
+ 0x008b93e0,
+ 0x65abb78c,
+ 0x75009ead,
+ 0xd0010a02,
+ 0xb5800a42,
+ 0xb78c448c,
+ 0xc0dc71ab,
+ 0x75000ee0,
+ 0x90c2c000,
+ 0xa922d210,
+ 0x90e0c000,
+ 0x9ab0fff4,
+ 0xd2109e82,
+ 0x008ba021,
+ 0x4e2db786,
+ 0xd131092a,
+ 0xc2006629,
+ 0xcc125a1f,
+ 0x9e9d7104,
+ 0x9300c002,
+ 0xb78c008b,
+ 0xc2006dab,
+ 0x00ca5a08,
+ 0x5cb5b7a6,
+ 0x9816ff34,
+ 0x0b00f011,
+ 0x90e4c001,
+ 0xb786008b,
+ 0x750075ad,
+ 0x9244c000,
+ 0x67adb784,
+ 0x4e2db746,
+ 0x50b5b746,
+ 0x61450802,
+ 0xd00b7095,
+ 0x74000802,
+ 0x75adb506,
+ 0x9122c000,
+ 0x99afff34,
+ 0xc0121001,
+ 0xc000700c,
+ 0x008b9100,
+ 0x6badb786,
+ 0xc0121228,
+ 0xc1017100,
+ 0x008b06d0,
+ 0x5e2db781,
+ 0xc0007502,
+ 0xff1493e4,
+ 0x74009be5,
+ 0x9344c000,
+ 0xb78d008b,
+ 0xc00464ab,
+ 0xc0007510,
+ 0xb7869274,
+ 0xc2004e2d,
+ 0x9ea25a0f,
+ 0x7344d412,
+ 0x9140c000,
+ 0xb78c008b,
+ 0xc2006dab,
+ 0x00ca5a08,
+ 0x5cb5b7a6,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0007508,
+ 0x750290a2,
+ 0x92e4c000,
+ 0xb787008b,
+ 0x75004e2d,
+ 0x9222c000,
+ 0xd4109ea2,
+ 0xc0007344,
+ 0x72959180,
+ 0x9238fff6,
+ 0x9180fff6,
+ 0xfff77295,
+ 0xfff69078,
+ 0x9e6893c0,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0xb7a0a60d,
+ 0x9e5e430c,
+ 0xb781008b,
+ 0x75025e2d,
+ 0x91a6c004,
+ 0xc0007504,
+ 0x750890f4,
+ 0x90e4c004,
+ 0x93a0c001,
+ 0x76c2008b,
+ 0x5635b566,
+ 0x6db3b7ac,
+ 0x9084c000,
+ 0x9ad5ff34,
+ 0x9a71fff4,
+ 0xb506008b,
+ 0x01815dad,
+ 0xff34058b,
+ 0x9e8299a7,
+ 0x0902008b,
+ 0x9100c000,
+ 0x7c2bb78c,
+ 0x4002ba24,
+ 0x0528c101,
+ 0x0888709b,
+ 0xffff0904,
+ 0xb74092fc,
+ 0x9e53430c,
+ 0xc0c80205,
+ 0xd2080a48,
+ 0xd010aa01,
+ 0xc0c870c8,
+ 0xd0080944,
+ 0xc412a941,
+ 0x9e8d7104,
+ 0xb5ac008b,
+ 0xff347c33,
+ 0xc00298d4,
+ 0x008b9140,
+ 0xb56676c2,
+ 0xb7ac5635,
+ 0xc0006db3,
+ 0xff349084,
+ 0xfff49a9a,
+ 0x008b9a36,
+ 0x5dadb506,
+ 0x058b0181,
+ 0x996cff34,
+ 0x008b9e82,
+ 0xc0000902,
+ 0xb78c9100,
+ 0xba247c2b,
+ 0xc1014002,
+ 0x709b0528,
+ 0x09040888,
+ 0x92fcffff,
+ 0x430cb740,
+ 0x02059e53,
+ 0x0a48c0c8,
+ 0xaa01d208,
+ 0x70c8d010,
+ 0x0944c0c8,
+ 0xa941d008,
+ 0x7104c412,
+ 0x008b9e8d,
+ 0x7c33b5ac,
+ 0x9899ff34,
+ 0xc0007584,
+ 0xb72091c4,
+ 0xb781430c,
+ 0x75045ead,
+ 0x90e2c000,
+ 0xc0c80203,
+ 0xd2080a38,
+ 0x9e68aa82,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x8440a61d,
+ 0x4314b7a0,
+ 0x1236c101,
+ 0x9e699ea2,
+ 0xb72c9e6a,
+ 0xc0de712b,
+ 0xd0080920,
+ 0xa095ab42,
+ 0x55d89e69,
+ 0x73adb726,
+ 0xc114a099,
+ 0x9e6c9bfd,
+ 0xc0de0301,
+ 0xd2080a10,
+ 0x7480a901,
+ 0x90c2c000,
+ 0xb7869e69,
+ 0x11087c2d,
+ 0xb78d9e69,
+ 0xb7ac64ab,
+ 0xc0046e2b,
+ 0xc0007510,
+ 0xd3249114,
+ 0x0b825b93,
+ 0xc000a391,
+ 0x9e699180,
+ 0xb7866916,
+ 0x590f7bad,
+ 0x5b87d324,
+ 0x5b97c200,
+ 0x9e69a111,
+ 0xb7467780,
+ 0x0a22712d,
+ 0x4824b364,
+ 0x9ea67482,
+ 0x9332c003,
+ 0x632bb78d,
+ 0xc0037502,
+ 0xc0019292,
+ 0xa91593c0,
+ 0x3154e000,
+ 0x9324c001,
+ 0xb7869e69,
+ 0x750075ad,
+ 0x9144c002,
+ 0x9aa8ff14,
+ 0xc0007400,
+ 0x9e6991a2,
+ 0x5babb78e,
+ 0x67adb744,
+ 0x0a027088,
+ 0x0a42d004,
+ 0x9360c000,
+ 0xb7819e69,
+ 0x75025e2d,
+ 0x90a2c000,
+ 0xc0007508,
+ 0x9e699204,
+ 0x67adb784,
+ 0x4e2db746,
+ 0x50b5b746,
+ 0x71156245,
+ 0xd00b0a02,
+ 0xc0000a42,
+ 0x9e699080,
+ 0xb5860a06,
+ 0x9e6975ad,
+ 0x75adb786,
+ 0xc0007500,
+ 0x75409324,
+ 0x9104c000,
+ 0xb78d9e69,
+ 0x7500632b,
+ 0x9104c000,
+ 0x0a029e6a,
+ 0x0910c0de,
+ 0xa241d008,
+ 0xaa19a915,
+ 0x74800802,
+ 0x4422b340,
+ 0x90e0c001,
+ 0x9e699e6c,
+ 0x0a10c0de,
+ 0x058b0a86,
+ 0xa281d208,
+ 0x7c2db5c6,
+ 0x0df0c0dc,
+ 0xfff4018d,
+ 0xa91298c2,
+ 0xa9199e71,
+ 0x0674d010,
+ 0x52a4c200,
+ 0x024a024e,
+ 0xc2006245,
+ 0xc0005007,
+ 0x9e699140,
+ 0x632bb78d,
+ 0xfffe7500,
+ 0xfffc9102,
+ 0xb79f9140,
+ 0xb7bf7d6e,
+ 0xb7df7dee,
+ 0xb7ff7e6e,
+ 0xc0027eee,
+ 0x9c228c40,
+ 0xb7a0a60d,
+ 0x008b430c,
+ 0x5e2db781,
+ 0xc0047502,
+ 0xff149004,
+ 0xf0119a27,
+ 0xc0000880,
+ 0x9ea99184,
+ 0x4e55b766,
+ 0x4fcdb746,
+ 0x6e3fd011,
+ 0xc0017088,
+ 0x008b91d6,
+ 0x64abb78d,
+ 0x6cadb7c6,
+ 0x73b5b7a6,
+ 0x7510c004,
+ 0x9332c002,
+ 0x63b3b76d,
+ 0x6333b74d,
+ 0x4e2db786,
+ 0x592db746,
+ 0x15b49e5b,
+ 0xba1b7088,
+ 0x1d844003,
+ 0x1248d01a,
+ 0x1228d019,
+ 0x61c7d132,
+ 0x4002ba1b,
+ 0x5a07c180,
+ 0x05b8c101,
+ 0xfef40d02,
+ 0xc1019834,
+ 0xc00106d0,
+ 0x9ea99300,
+ 0x64cbb78d,
+ 0x6bcdb7c6,
+ 0x50d5b7a6,
+ 0x7510c004,
+ 0x9012c001,
+ 0x594db706,
+ 0x63cbb74d,
+ 0x6353b74d,
+ 0x01857017,
+ 0x1007d119,
+ 0x11a5d120,
+ 0x4434b310,
+ 0x4003ba1b,
+ 0xba1b1d84,
+ 0xd1324002,
+ 0xc1806187,
+ 0xc1015a07,
+ 0x0d0205b8,
+ 0x9809fef4,
+ 0x06d0c101,
+ 0xb78c008b,
+ 0x75006eab,
+ 0x9122c000,
+ 0x065ad010,
+ 0x024ac101,
+ 0xd224024c,
+ 0x9eb35a8b,
+ 0xfff49e6b,
+ 0x0a029ab1,
+ 0x7100d012,
+ 0xb5869ea9,
+ 0xc0016b4d,
+ 0x008b90a0,
+ 0x64abb78d,
+ 0x63b3b74d,
+ 0x632bb74d,
+ 0x7510c004,
+ 0x6aadb706,
+ 0x90d4c000,
+ 0x73adb706,
+ 0x9220c000,
+ 0x1625d110,
+ 0xba1b0189,
+ 0x74c24002,
+ 0x913cc000,
+ 0x5987d1a4,
+ 0xc1010d02,
+ 0xfed405b0,
+ 0x9ea99bce,
+ 0x6b4db506,
+ 0x430cb7a0,
+ 0x008b0902,
+ 0x632bb78d,
+ 0x7135b746,
+ 0x68abb54d,
+ 0x76820a04,
+ 0x632bb58d,
+ 0x5b2bb74c,
+ 0x9192c005,
+ 0x2a7ccffe,
+ 0xc0057502,
+ 0xb74d90f2,
+ 0x76806833,
+ 0x9144c005,
+ 0xc0017480,
+ 0xb78693c2,
+ 0x74826b2d,
+ 0x5907c200,
+ 0x5cadb546,
+ 0x9104c000,
+ 0x02440224,
+ 0x5a07c200,
+ 0x5cadb586,
+ 0x0e869ea9,
+ 0x6b55b766,
+ 0x5ccdb786,
+ 0x6fcbb76d,
+ 0x15b8c101,
+ 0xfed41984,
+ 0x03019b90,
+ 0x9200c000,
+ 0xa9cdd008,
+ 0x9b89fed4,
+ 0x0e52d011,
+ 0x5c88d2a2,
+ 0x009a9ea5,
+ 0x5cadb506,
+ 0x2efccffe,
+ 0x025ac101,
+ 0x0109008b,
+ 0x6fabb78d,
+ 0x0960c0da,
+ 0x711b9eb3,
+ 0xffff0d02,
+ 0xc00390f2,
+ 0x9ea99160,
+ 0xb76d9e92,
+ 0xb78664cb,
+ 0x19846acd,
+ 0x29fccffe,
+ 0x5985d1a4,
+ 0x05b8c101,
+ 0x9b61fed4,
+ 0xb506008b,
+ 0xc0025cad,
+ 0x9ea992e0,
+ 0x64cbb78d,
+ 0x7510c004,
+ 0x9194c000,
+ 0x4e4db786,
+ 0xb7460904,
+ 0x61296c55,
+ 0x1124c101,
+ 0x6b4db546,
+ 0x0902008b,
+ 0x6b2db786,
+ 0x7088d812,
+ 0x5b33b7ac,
+ 0xc0017740,
+ 0xb78691c2,
+ 0x75005b2d,
+ 0x9262c000,
+ 0x5badb766,
+ 0xc00074c0,
+ 0x9ea391c2,
+ 0xfed49e92,
+ 0x0a429b32,
+ 0x7008c010,
+ 0xcc121a38,
+ 0xc0007008,
+ 0x09969060,
+ 0x61bcd032,
+ 0xc1010d02,
+ 0xfed401ba,
+ 0x9ea99b22,
+ 0x11e0d020,
+ 0xb5069e6b,
+ 0x0d025ccd,
+ 0x9b19fed4,
+ 0xb506008b,
+ 0xc0005d2d,
+ 0x9ea991e0,
+ 0x5ccdb5c6,
+ 0x9140c000,
+ 0xb781008b,
+ 0x75085e2d,
+ 0x9004fffe,
+ 0x91a0fffd,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0xb7a0a605,
+ 0x008b430c,
+ 0x752db766,
+ 0xc00074c0,
+ 0x008b91c2,
+ 0x74adb786,
+ 0x5985d1a4,
+ 0x05b8c101,
+ 0x99b5c114,
+ 0xcffe0181,
+ 0x008b29fc,
+ 0x5e2db781,
+ 0x73abb56c,
+ 0xc0007504,
+ 0x75089142,
+ 0x9302c001,
+ 0xc0047502,
+ 0xc00193c4,
+ 0xff149260,
+ 0x74009895,
+ 0xb746008b,
+ 0xc0007335,
+ 0xb74690c2,
+ 0xc0006cad,
+ 0x008b90a0,
+ 0x6badb746,
+ 0xb781008b,
+ 0x75025e2d,
+ 0x9124c000,
+ 0xc0007400,
+ 0xb78690c4,
+ 0xc00050ad,
+ 0x0a029060,
+ 0x9e521228,
+ 0x7104d010,
+ 0x1105c101,
+ 0x7104d012,
+ 0x5a07c200,
+ 0x1629d110,
+ 0xb586008b,
+ 0x0a0a6aad,
+ 0x7b2bb58d,
+ 0x9160c003,
+ 0x9864ff14,
+ 0x008b7400,
+ 0x7335b746,
+ 0x9222c000,
+ 0x6cadb786,
+ 0xd0109e52,
+ 0xc1017104,
+ 0xd0121105,
+ 0xc2007104,
+ 0xd1105a07,
+ 0xc0011429,
+ 0x008b92e0,
+ 0x5e2db781,
+ 0xc0007502,
+ 0x008b90a4,
+ 0x50adb706,
+ 0xb786008b,
+ 0xb746712d,
+ 0x75006bad,
+ 0x90e4c000,
+ 0x50adb786,
+ 0xc0001040,
+ 0x008b93e0,
+ 0x64abb78d,
+ 0xc0041020,
+ 0xc0007510,
+ 0x740092f2,
+ 0x5807d018,
+ 0x9258c000,
+ 0xb786008b,
+ 0xc2004fad,
+ 0x70885a07,
+ 0x1000d01e,
+ 0x911cc000,
+ 0xb786008b,
+ 0x6209542d,
+ 0x5833c200,
+ 0x1421d110,
+ 0xb781008b,
+ 0x75025e2d,
+ 0x90a4c000,
+ 0xb5871a04,
+ 0x008b4e2d,
+ 0x5e2db781,
+ 0xc0007508,
+ 0xb7819144,
+ 0x75045ead,
+ 0x90a2c000,
+ 0xb5870a02,
+ 0x0a024e2d,
+ 0x7100d012,
+ 0xb586008b,
+ 0x008b6aad,
+ 0x712db786,
+ 0x09b29eab,
+ 0xb5860a04,
+ 0xc0ca712d,
+ 0xfd540de0,
+ 0x9eab9af6,
+ 0xc0cc0992,
+ 0xfd540d90,
+ 0x9eab9af0,
+ 0xc0ce0992,
+ 0xfd540dc8,
+ 0x9eab9aea,
+ 0xc0da09a2,
+ 0xfd540d90,
+ 0x0a029ae4,
+ 0xb58d008b,
+ 0xb58d68ab,
+ 0xb58d632b,
+ 0xb79f682b,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0x9000ffed,
+ 0xd0149e52,
+ 0xd00270c0,
+ 0xf0127100,
+ 0xc0000942,
+ 0x9e5290a4,
+ 0x9120c000,
+ 0xc1010a7a,
+ 0xd0101244,
+ 0x11287104,
+ 0xf01051f0,
+ 0xf010a165,
+ 0x9c22a1e1,
+ 0xaa65f010,
+ 0xa961f010,
+ 0xc4129e50,
+ 0xd1207100,
+ 0x12181115,
+ 0x9e545153,
+ 0x51f39e8a,
+ 0xfffe11a6,
+ 0x0d029320,
+ 0x91c0ffff,
+ 0xa966f008,
+ 0xa9e1f008,
+ 0x9100ffff,
+ 0xaa65f010,
+ 0xa8e1f010,
+ 0xc4129ea0,
+ 0xd1107204,
+ 0x15141619,
+ 0x9e5450b3,
+ 0xc81451f3,
+ 0xc8027040,
+ 0xd0147080,
+ 0xd00270c0,
+ 0x09047100,
+ 0xc8120a04,
+ 0x19747088,
+ 0x9e900d02,
+ 0x7280c812,
+ 0x5d070d04,
+ 0x05249e54,
+ 0x51f350b3,
+ 0x052201b2,
+ 0x9100fffd,
+ 0xfffe0d02,
+ 0xf01092e0,
+ 0xf010aa65,
+ 0x9ea0a8e1,
+ 0x7204c412,
+ 0x1619d110,
+ 0x50b31514,
+ 0x51f39e54,
+ 0x7040c814,
+ 0x7080c802,
+ 0x70c0d014,
+ 0x7100d002,
+ 0x196c0128,
+ 0x9e900d02,
+ 0x7280c812,
+ 0x5d070d04,
+ 0x05249e54,
+ 0x51f350b3,
+ 0x052261b3,
+ 0x9000fffc,
+ 0xa966f008,
+ 0xa9e1f008,
+ 0x92e0fffe,
+ 0xfffe0d02,
+ 0xa60d9280,
+ 0xaae5f010,
+ 0x9e5e0685,
+ 0xf0107155,
+ 0xd11ca962,
+ 0x0107165b,
+ 0x4458b343,
+ 0x4838b355,
+ 0x554fd018,
+ 0xd0120986,
+ 0x9ea370c4,
+ 0x70c4cc12,
+ 0x05b45d87,
+ 0x983ac114,
+ 0xd1209eb3,
+ 0x0181115b,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0xfffa8c60,
+ 0xf00891e0,
+ 0xf008a966,
+ 0xfffea9e1,
+ 0x0d029280,
+ 0x9220fffe,
+ 0xa966f010,
+ 0xaa61f010,
+ 0xc8109e51,
+ 0x11b47046,
+ 0x518fc200,
+ 0x1524c101,
+ 0x9300fff9,
+ 0xc002a61d,
+ 0xd1718420,
+ 0xb7c06e35,
+ 0x0246430c,
+ 0x06b6d010,
+ 0x5a08c200,
+ 0x00cca199,
+ 0xb7e702dc,
+ 0xc12a502d,
+ 0xda080ac0,
+ 0xc180aab1,
+ 0xc1015e88,
+ 0x9e6906dc,
+ 0xc00f0707,
+ 0xe0b22bfe,
+ 0x09028d80,
+ 0x4002ba2d,
+ 0xb7a6018f,
+ 0xb55f6835,
+ 0xb5bf7bec,
+ 0xfff47b6c,
+ 0xe0b29b95,
+ 0x09a28d80,
+ 0x9bc4fff4,
+ 0x0e62d011,
+ 0x8d80f092,
+ 0x5a0cc200,
+ 0xa21d01e8,
+ 0x0980c0d2,
+ 0x058f0a02,
+ 0xa2e1f210,
+ 0x7cecb59f,
+ 0x9b78fff4,
+ 0x09a2058f,
+ 0x9baefff4,
+ 0xb786008d,
+ 0xf0515dad,
+ 0x9e6b8c80,
+ 0x09029eab,
+ 0xa221f208,
+ 0xfff4a115,
+ 0x008d9b6d,
+ 0x5a2bb78c,
+ 0x6d2bb76c,
+ 0x11c69eab,
+ 0x9b95fff4,
+ 0x09a29eab,
+ 0x9b94fff4,
+ 0x7b6cb79f,
+ 0xc0007500,
+ 0xf21090fc,
+ 0x7500aa61,
+ 0x923ac000,
+ 0x0902aa1d,
+ 0x7becb55f,
+ 0xb5ff00cc,
+ 0xb7867b6c,
+ 0xb72664ad,
+ 0xb59f642d,
+ 0xf2107cec,
+ 0xb79fa0e1,
+ 0x75007b6c,
+ 0x913cc000,
+ 0x8c80f092,
+ 0xaa21f210,
+ 0xc0007500,
+ 0x008d919a,
+ 0x4eadb746,
+ 0xb59f0a06,
+ 0xb55f7dec,
+ 0xc0007d6c,
+ 0x008d9340,
+ 0x5dadb786,
+ 0x8c80f071,
+ 0x8d80e051,
+ 0xf2089eab,
+ 0x0a02a221,
+ 0x7decb59f,
+ 0x9ac6fff4,
+ 0xe0b19eab,
+ 0xfff48d80,
+ 0x9eab9b17,
+ 0xfff49e6b,
+ 0xa9199b44,
+ 0x6e65d171,
+ 0x04ecd010,
+ 0x02480244,
+ 0xc114024c,
+ 0x009c0a20,
+ 0xa991da08,
+ 0x5f2bb72e,
+ 0xd02a7046,
+ 0xd02901b6,
+ 0xd0090192,
+ 0xd00a0a32,
+ 0xd1200a12,
+ 0x098e01c7,
+ 0x9b4dc0f4,
+ 0x8d80e0b2,
+ 0xfff40181,
+ 0xe0719af7,
+ 0xe0b28d80,
+ 0xfff48d80,
+ 0xe0b29b1c,
+ 0xfef48d80,
+ 0x9e839b0a,
+ 0x9b29fef4,
+ 0x7beeb79f,
+ 0x7c6eb7bf,
+ 0x7ceeb7df,
+ 0x7d6eb7ff,
+ 0x8c20c004,
+ 0xa6059c22,
+ 0x430cb720,
+ 0x0d829e5d,
+ 0x4aabb76d,
+ 0x998dffb4,
+ 0xa021f208,
+ 0x430cb720,
+ 0x64adb786,
+ 0xc0007500,
+ 0xb50d9082,
+ 0x0a024aab,
+ 0xa225f208,
+ 0xa22df208,
+ 0xa231f208,
+ 0xa235f208,
+ 0xa229f208,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0xb720a605,
+ 0x76c2430c,
+ 0xb56d9e5c,
+ 0xc0004d2b,
+ 0xb7209292,
+ 0xb7ac430c,
+ 0xb76c5b2b,
+ 0xd0115ab3,
+ 0xc0f409d2,
+ 0x9e839afd,
+ 0x1d84018b,
+ 0x9ab7ff74,
+ 0xc0000d8a,
+ 0x76c69160,
+ 0x15b6d021,
+ 0x90c2c000,
+ 0xc0007504,
+ 0x0d8690a4,
+ 0x9910ff94,
+ 0x4314b760,
+ 0x0de0c0d6,
+ 0x9bb1fff4,
+ 0x430cb700,
+ 0x6f2cb706,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x8420a60d,
+ 0x430cb720,
+ 0xb5850a06,
+ 0xb72044ab,
+ 0xb76a430c,
+ 0xfdd447b1,
+ 0xb7209b6d,
+ 0x0982430c,
+ 0x46b1b76a,
+ 0x4731b74a,
+ 0x5e30d1a2,
+ 0xc0219e5a,
+ 0xc1120a00,
+ 0xc0000a10,
+ 0x9dcd90e0,
+ 0xa1e1f000,
+ 0x0a00c100,
+ 0x09047095,
+ 0x9334ffff,
+ 0x5badb7c3,
+ 0x8d00e031,
+ 0x9eb209a2,
+ 0x9a2dfd74,
+ 0xb720aa19,
+ 0x09024314,
+ 0x2ac6d011,
+ 0x68d1d131,
+ 0x46d1b7aa,
+ 0x0092c101,
+ 0x602db540,
+ 0x430cb720,
+ 0x9e889d29,
+ 0xb76a9e89,
+ 0xb70a46b1,
+ 0xc0184729,
+ 0xc01c0c30,
+ 0xc0010cb0,
+ 0xd1a29020,
+ 0x9dcb5e30,
+ 0xc0210189,
+ 0xc1280980,
+ 0xe0200980,
+ 0x9dbdaa09,
+ 0xa261f000,
+ 0xa929e020,
+ 0x0a30d111,
+ 0xf0009dcd,
+ 0x0d10a161,
+ 0x7680c008,
+ 0xffff0990,
+ 0xd0119204,
+ 0x9ea30e32,
+ 0x2dfcc00e,
+ 0xffff72d1,
+ 0xaa1d9014,
+ 0xc0017d02,
+ 0xd1319342,
+ 0xa99a69d1,
+ 0x5d2db780,
+ 0x5d890196,
+ 0x0990c010,
+ 0x9c629ea4,
+ 0x025ac101,
+ 0xc2000a04,
+ 0x01095a30,
+ 0x0900c021,
+ 0xc0260d02,
+ 0x9dad0970,
+ 0xa162f000,
+ 0x0a00c021,
+ 0x0a60c026,
+ 0xf0009dcd,
+ 0xb720a162,
+ 0x098e430c,
+ 0x46a9b74a,
+ 0x4731b74a,
+ 0x5a30c100,
+ 0x0a00c021,
+ 0x0a50c114,
+ 0x90e0c000,
+ 0xf0009dcd,
+ 0xc100a1e1,
+ 0x70950a00,
+ 0xffff0904,
+ 0xb7209334,
+ 0xd132430c,
+ 0xd05168d1,
+ 0xd1100a60,
+ 0x9eab0493,
+ 0x5fadb580,
+ 0x430cb780,
+ 0x0498c101,
+ 0xc200aa1d,
+ 0xb5805a05,
+ 0xfdd45f4d,
+ 0x75409802,
+ 0x90c4c000,
+ 0x430cb720,
+ 0x44abb5a5,
+ 0x430cb720,
+ 0x47b1b76a,
+ 0x9b3bfdd4,
+ 0xfd340d8e,
+ 0x0d8a9b9d,
+ 0x9b9afd34,
+ 0x7e6eb79f,
+ 0x7eeeb7bf,
+ 0x7f6eb7df,
+ 0x8c00c002,
+ 0xa60d9c22,
+ 0x430cb720,
+ 0xc0a40203,
+ 0xd2080a10,
+ 0xb7caaa82,
+ 0x77404729,
+ 0x92a4c002,
+ 0xb5a50a86,
+ 0xb72044ab,
+ 0xb76a430c,
+ 0xfdd447b1,
+ 0x9eab9a89,
+ 0x99e8fe14,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x75061a08,
+ 0x4832b353,
+ 0x92d2c000,
+ 0x5908d226,
+ 0xe0508520,
+ 0x94088044,
+ 0x940c940a,
+ 0xc1019401,
+ 0xc0000982,
+ 0x09869120,
+ 0x90c0c000,
+ 0xc000098e,
+ 0x098a9060,
+ 0x7eb1b745,
+ 0x0a02c020,
+ 0x5930c300,
+ 0xb3247680,
+ 0xc1404822,
+ 0x32460900,
+ 0x0900c021,
+ 0xf0009dad,
+ 0x0203a261,
+ 0x0a20c09c,
+ 0x5badb763,
+ 0xa902d208,
+ 0x09069eb3,
+ 0x98acfdb4,
+ 0x430cb720,
+ 0x47b1b76a,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0xfdd68c60,
+ 0xb79f9180,
+ 0xb7bf7eee,
+ 0xb7df7f6e,
+ 0x8c607fee,
+ 0xa61d9c22,
+ 0x8420c016,
+ 0x6f74b57f,
+ 0xaa61d050,
+ 0xa9e6d050,
+ 0x03091a28,
+ 0xc00e1d88,
+ 0xc00e2b7c,
+ 0xfd742dfc,
+ 0xd031987d,
+ 0xd0116869,
+ 0xd2240a62,
+ 0x02015b30,
+ 0x0a20c014,
+ 0x5808d324,
+ 0x0a00c021,
+ 0x6aecb59f,
+ 0x568cb780,
+ 0x6e6cb51f,
+ 0x9e400101,
+ 0x020801ec,
+ 0x57ecb59f,
+ 0x4f8cb780,
+ 0x0930c014,
+ 0x0900c021,
+ 0xb59f0208,
+ 0xb7806b6c,
+ 0xb55f558c,
+ 0x9e716a6c,
+ 0xb59f0268,
+ 0xb780576c,
+ 0xd0d1500c,
+ 0x02686961,
+ 0x7c6cb59f,
+ 0x518cb780,
+ 0x0880c040,
+ 0x59ecb53f,
+ 0xb59f0238,
+ 0xb78071ec,
+ 0xb51f540c,
+ 0xb7207b74,
+ 0x0128430c,
+ 0x596cb55f,
+ 0x59f4b71f,
+ 0xb7800906,
+ 0x9e91570c,
+ 0x7becb57f,
+ 0xc24000e2,
+ 0xc021a0c6,
+ 0x0a020c00,
+ 0xb51f9e72,
+ 0x9e737974,
+ 0x4c29b58a,
+ 0x9e749e70,
+ 0xc0219e71,
+ 0x050d0900,
+ 0x078d058d,
+ 0x048d040d,
+ 0x0900c01e,
+ 0x0d00c021,
+ 0x0980c021,
+ 0x0d80c021,
+ 0x0a00c021,
+ 0x0f80c021,
+ 0x0800c021,
+ 0x0c00c021,
+ 0x0880c021,
+ 0x0c80c021,
+ 0x756cb55f,
+ 0x0d10c01e,
+ 0x09a0c01e,
+ 0x0df0c014,
+ 0x0a00c010,
+ 0x0f90c010,
+ 0x0820c010,
+ 0x0c30c010,
+ 0x0880c004,
+ 0x0ca0c012,
+ 0x1950c00a,
+ 0x75f4b55f,
+ 0x776cb57f,
+ 0x5a74b57f,
+ 0x5cecb59f,
+ 0x5bf4b5ff,
+ 0x5b6cb51f,
+ 0x5af4b51f,
+ 0x61ecb53f,
+ 0x6174b53f,
+ 0x60ecb55f,
+ 0x1d50c00c,
+ 0x19d0c00c,
+ 0x1d90c004,
+ 0xc0080a70,
+ 0xc0020f90,
+ 0xc0020830,
+ 0xc00e0c30,
+ 0x0cf008f0,
+ 0xb55f0950,
+ 0xb57f72f4,
+ 0xb57f726c,
+ 0xb59f7174,
+ 0xb5ff70ec,
+ 0xc0026074,
+ 0xb51f0d60,
+ 0x09f05fec,
+ 0x7074b51f,
+ 0x78ecb53f,
+ 0x5ef4b53f,
+ 0x5e6cb55f,
+ 0x0db0c034,
+ 0x0a50c004,
+ 0xc0041fd0,
+ 0xc00c0810,
+ 0xc0240c20,
+ 0xc00e08d0,
+ 0xc0121cd0,
+ 0xb55f0900,
+ 0xb57f5df4,
+ 0xb57f5d6c,
+ 0xb59f7374,
+ 0xb5ff73ec,
+ 0xb51f69f4,
+ 0xb51f696c,
+ 0xb53f68f4,
+ 0xb53f746c,
+ 0xb55f6874,
+ 0xc03074ec,
+ 0xc0320d50,
+ 0xcfd009c0,
+ 0xc01e0da1,
+ 0x1ff00a40,
+ 0x0850c06a,
+ 0x0c50c026,
+ 0x08e1cfe0,
+ 0x0cf0c014,
+ 0x0931cfee,
+ 0x67f4b55f,
+ 0x676cb57f,
+ 0x66f4b57f,
+ 0x666cb59f,
+ 0x65f4b5ff,
+ 0x656cb51f,
+ 0x64f4b51f,
+ 0x646cb53f,
+ 0x63f4b53f,
+ 0x766cb55f,
+ 0x0d31cfd0,
+ 0x09c1cfce,
+ 0xcfe21de0,
+ 0xcfea0a01,
+ 0xcf7c0fe1,
+ 0x0c200871,
+ 0x08d0c01e,
+ 0x0cf0c024,
+ 0x0910c032,
+ 0x76f4b55f,
+ 0x77ecb57f,
+ 0x62f4b57f,
+ 0x626cb59f,
+ 0x7a74b5ff,
+ 0x79ecb51f,
+ 0x6df4b51f,
+ 0x6d6cb53f,
+ 0x6cf4b53f,
+ 0x6c6cb55f,
+ 0x9060c059,
+ 0x09829eb3,
+ 0x9b91fdf4,
+ 0x4314b740,
+ 0xb55f9e50,
+ 0xb7437af4,
+ 0x7480572a,
+ 0x9364c003,
+ 0x6cacb784,
+ 0x0e820b82,
+ 0xd00a7502,
+ 0x75040bf2,
+ 0x0ed2d001,
+ 0xc00075c0,
+ 0x774090c4,
+ 0xd0010906,
+ 0xb73f0922,
+ 0xc1007aec,
+ 0x69635a0c,
+ 0x0118d020,
+ 0xc0010802,
+ 0x04050abe,
+ 0x03a8d020,
+ 0xcfc09d09,
+ 0xc0340a80,
+ 0xb9600c78,
+ 0xd0114018,
+ 0xb75f4e52,
+ 0xc2007aec,
+ 0xd0105a08,
+ 0x024004f0,
+ 0xd0200248,
+ 0xb78000c4,
+ 0xb7405a2b,
+ 0x75c0502b,
+ 0x4002ba24,
+ 0x5a28c200,
+ 0xc03e224a,
+ 0x9dcb297c,
+ 0xb76031c4,
+ 0xb7204052,
+ 0xc000462b,
+ 0xb78392a4,
+ 0xb7436dcb,
+ 0xb74377cb,
+ 0xba2463d3,
+ 0xc2004002,
+ 0x224a5a28,
+ 0x9ea15950,
+ 0x2940c3ff,
+ 0x2d7cc03e,
+ 0x351431b4,
+ 0x2a0ed3f1,
+ 0xc0007506,
+ 0xb75f90e2,
+ 0x9dad756c,
+ 0xa1e1f000,
+ 0x4002ba21,
+ 0x2dfcc03e,
+ 0xc2009e5b,
+ 0x224a5a28,
+ 0xb77f3246,
+ 0x9dbe75f4,
+ 0xa261f000,
+ 0x776cb79f,
+ 0xf0009dcd,
+ 0x0804a162,
+ 0xfffd0c08,
+ 0x9eb391c1,
+ 0x9be6fe34,
+ 0x430cb720,
+ 0x4cadb742,
+ 0x7c80c020,
+ 0x90e2c000,
+ 0x438cb780,
+ 0xc0005158,
+ 0x7c8291e0,
+ 0x438cb740,
+ 0x91a2c000,
+ 0x0a02c010,
+ 0x5218c200,
+ 0x22444a7d,
+ 0x51580906,
+ 0xc0003244,
+ 0xc0109100,
+ 0xc2000a06,
+ 0x4a7d5218,
+ 0xb5802228,
+ 0xc021438c,
+ 0xb7400a02,
+ 0xc020438c,
+ 0x9dcd0a00,
+ 0xa161f000,
+ 0x430cb780,
+ 0x7b6cb71f,
+ 0x9ea80288,
+ 0x6fccb789,
+ 0x753ec01c,
+ 0x9164c001,
+ 0xfdf49eb3,
+ 0x008b9b5e,
+ 0x6fadb509,
+ 0x91e0c000,
+ 0x9becfd94,
+ 0xb7a09eb3,
+ 0xfdf4430c,
+ 0xb75f9b52,
+ 0x02aa7b6c,
+ 0xb5099ea8,
+ 0xb7806fcc,
+ 0xb75f430c,
+ 0x0d927b6c,
+ 0xb72900a8,
+ 0xc01c6fad,
+ 0xffff747e,
+ 0x68b29122,
+ 0x57f4b75f,
+ 0xb7290098,
+ 0xf010712d,
+ 0xb740a0c1,
+ 0xb77f430c,
+ 0x00b47b6c,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xaa11d208,
+ 0x753ec01c,
+ 0x9084c001,
+ 0xfdf49eb3,
+ 0xb7209ac7,
+ 0xb77f4314,
+ 0xd0107b74,
+ 0xb74904b2,
+ 0xd0316fb5,
+ 0xd0106d29,
+ 0xc1010624,
+ 0xc1380122,
+ 0xd0080920,
+ 0xc101a951,
+ 0x02440244,
+ 0x5a08c200,
+ 0x00c3d120,
+ 0x6b6cb79f,
+ 0x70d5b729,
+ 0xa082f208,
+ 0xfe349eb3,
+ 0x74009ac8,
+ 0x90c4c000,
+ 0x097ec00e,
+ 0x9020c004,
+ 0x430cb740,
+ 0xc0a20205,
+ 0xd2080a38,
+ 0x7680a902,
+ 0x90e2c001,
+ 0x01859e93,
+ 0x09c0c0a2,
+ 0x0d9cc0a4,
+ 0xa962d008,
+ 0xaa61d010,
+ 0xc0007115,
+ 0xb7ff91f2,
+ 0x0a027c74,
+ 0xa261d210,
+ 0xaa61d008,
+ 0xa961d010,
+ 0x9ea21244,
+ 0x2d7cc00e,
+ 0x430cb740,
+ 0x0940c0a2,
+ 0xaa41d008,
+ 0xd0080a04,
+ 0xc001a241,
+ 0x00ac9380,
+ 0x02030103,
+ 0x0940c0a2,
+ 0x0a20c0a2,
+ 0xa945d008,
+ 0xaa11d208,
+ 0xc0017088,
+ 0xb71f9064,
+ 0xb5407c6c,
+ 0xb54a4030,
+ 0xb7a04c31,
+ 0x9ea8430c,
+ 0xc0a4020b,
+ 0xb76a0a1c,
+ 0xd2084750,
+ 0xb76aaa01,
+ 0xc1014648,
+ 0x02dc15bc,
+ 0x65b9c101,
+ 0x1a32d011,
+ 0x01c7d120,
+ 0x0ac0c0a2,
+ 0x9ac6c0d4,
+ 0xa025d208,
+ 0x430cb780,
+ 0xc0a2024c,
+ 0xd2080a40,
+ 0xd011a906,
+ 0xd2080d22,
+ 0xb780a105,
+ 0xc09c430c,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0x9142c000,
+ 0x7c6cb73f,
+ 0x4029b780,
+ 0x0a027500,
+ 0x0a42d002,
+ 0x5774b73f,
+ 0xb5809e52,
+ 0xb7804049,
+ 0xc00e430c,
+ 0xb760297c,
+ 0x9e93560c,
+ 0xc23000cc,
+ 0xb78aa1c6,
+ 0x75004c29,
+ 0x9304c000,
+ 0x4b29b54a,
+ 0x7c74b7ff,
+ 0xaa61d210,
+ 0xc0007500,
+ 0xb72091e4,
+ 0x0103430c,
+ 0x091cc0a4,
+ 0xb78a009c,
+ 0xd0084b29,
+ 0x0244a941,
+ 0x4b29b58a,
+ 0x560cb700,
+ 0xaa45c200,
+ 0xc0007500,
+ 0x9eb390a4,
+ 0x9957fe14,
+ 0x7c74b71f,
+ 0x430cb7a0,
+ 0x4048b780,
+ 0xc0007500,
+ 0x9eaf90e2,
+ 0x0f90c06e,
+ 0x90a0c000,
+ 0xc0809eaf,
+ 0x9eb30f80,
+ 0x9be2fe94,
+ 0x7aecb73f,
+ 0xa9e1f210,
+ 0x6835b741,
+ 0x2d4d7cc2,
+ 0x9202c000,
+ 0x430cb780,
+ 0xc0983d10,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x92c2c000,
+ 0x3d00c200,
+ 0x9240c000,
+ 0x7cc0c010,
+ 0x91c2c000,
+ 0x430cb780,
+ 0xc0983d20,
+ 0xd2080a14,
+ 0x7500aa01,
+ 0x9084c000,
+ 0x3d00c100,
+ 0x430cb720,
+ 0x5dadb781,
+ 0xc001750a,
+ 0xb7859084,
+ 0x75007ea9,
+ 0x93e2c000,
+ 0x7f29b785,
+ 0xc0007500,
+ 0xb73f9202,
+ 0xc8007c74,
+ 0xb7800902,
+ 0x75004049,
+ 0xb3420a02,
+ 0x9e934424,
+ 0xc0003526,
+ 0xb71f9180,
+ 0xb7807c6c,
+ 0xc2004028,
+ 0xc8005a3c,
+ 0x9ea02a00,
+ 0xb73f3520,
+ 0x9d9d796c,
+ 0xa162f000,
+ 0x560cb740,
+ 0x7cc2c010,
+ 0x508cb780,
+ 0xb55f0164,
+ 0xd008636c,
+ 0x0902aac2,
+ 0x0922d001,
+ 0xb71f9e92,
+ 0xc2407aec,
+ 0xb780a146,
+ 0x06da430c,
+ 0x5baab742,
+ 0x06d8c101,
+ 0xb74a040b,
+ 0xb7825352,
+ 0xb7205b2a,
+ 0xb7a0412c,
+ 0x6244510c,
+ 0x63c5c101,
+ 0x01839ebb,
+ 0x6fecb53f,
+ 0x99d9c0d4,
+ 0x71f4b73f,
+ 0x6fecb77f,
+ 0xb5009ebb,
+ 0xb75f404b,
+ 0x02aa7bec,
+ 0x9a29c0d4,
+ 0xa021da08,
+ 0xb73f9e68,
+ 0xb74b7bec,
+ 0xb780532a,
+ 0x0d02520c,
+ 0xb7409e91,
+ 0xc840538c,
+ 0xb780a0a6,
+ 0xb77f528c,
+ 0x01145974,
+ 0xa126c840,
+ 0x5332b7aa,
+ 0x530cb780,
+ 0x09c2c002,
+ 0xa142d808,
+ 0xa2a6c840,
+ 0x9ba5fd14,
+ 0x4314b720,
+ 0xc0a49e4c,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9202c001,
+ 0x636cb75f,
+ 0x7c74b75f,
+ 0xa8c1d008,
+ 0xaa41d010,
+ 0xc1010092,
+ 0x75000092,
+ 0x532bb74a,
+ 0x9104c000,
+ 0x6c4db784,
+ 0x01090228,
+ 0x297ccffe,
+ 0x6b4db709,
+ 0x018d9e93,
+ 0x9c629e84,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x75021a10,
+ 0x90f2c000,
+ 0x532bb78c,
+ 0xc0007500,
+ 0xb78090e4,
+ 0x9e83550c,
+ 0xa1c6c240,
+ 0x7aecb71f,
+ 0x5dacb781,
+ 0xc0007506,
+ 0xb7209204,
+ 0x00e2430c,
+ 0x4829b78c,
+ 0xc0007500,
+ 0x097e9102,
+ 0x550cb780,
+ 0xc2409e90,
+ 0xb73fa046,
+ 0xb7817aec,
+ 0x1a085dad,
+ 0xc0037506,
+ 0xd22691f2,
+ 0x8520590c,
+ 0x8044e050,
+ 0x9100c000,
+ 0x9240c002,
+ 0x9260c001,
+ 0x9280c001,
+ 0x7af4b73f,
+ 0x4149b786,
+ 0xc0007500,
+ 0xb7409202,
+ 0xc220560c,
+ 0x7500aa45,
+ 0x9124c000,
+ 0x7c74b75f,
+ 0xaa41d010,
+ 0xc0007500,
+ 0x9eb390c4,
+ 0xfe14018f,
+ 0xb77f992a,
+ 0xb7407c6c,
+ 0x9d07550c,
+ 0xa8e2d008,
+ 0xa945c220,
+ 0x0a029eb3,
+ 0xd052018f,
+ 0xcfe80d78,
+ 0xb59e08e1,
+ 0xfd74666a,
+ 0xc0019aef,
+ 0x0d829260,
+ 0x9080c001,
+ 0x560cb700,
+ 0xaa45c200,
+ 0xc0007500,
+ 0x9ea390a4,
+ 0x9853fe14,
+ 0x430cb720,
+ 0xb76a018f,
+ 0xfe1446b1,
+ 0xb71f9900,
+ 0x9ebb7c74,
+ 0x0df8d051,
+ 0x4050b740,
+ 0x9280c000,
+ 0x560cb720,
+ 0xaa45c210,
+ 0xc0007500,
+ 0x9ea390e4,
+ 0x9839fe14,
+ 0x9140c000,
+ 0x430cb760,
+ 0x0d060d82,
+ 0x09e0c06e,
+ 0x9b5cfdf4,
+ 0xfe349eb3,
+ 0xb7a09925,
+ 0xb780430c,
+ 0x9ea9550c,
+ 0xaa45c240,
+ 0x70c9b744,
+ 0x2a7cc002,
+ 0xc0305920,
+ 0x32442900,
+ 0x5a6cb75f,
+ 0xf0009dad,
+ 0xb75fa261,
+ 0x020b7c74,
+ 0x5dc5b741,
+ 0xabc1d010,
+ 0x0a44c09a,
+ 0xc00075c0,
+ 0xd20891e2,
+ 0xc200aa01,
+ 0x00ca5a08,
+ 0x6cb5b742,
+ 0x63adb742,
+ 0x682db782,
+ 0x91a0c000,
+ 0xaa01d208,
+ 0x5a08c200,
+ 0xb74200ca,
+ 0xb7427a35,
+ 0xb782712d,
+ 0xb77f75ad,
+ 0x9dbd5cec,
+ 0xa161f000,
+ 0x5bf4b77f,
+ 0xf0009dbe,
+ 0xb79fa261,
+ 0x9dcd5b6c,
+ 0xa162f000,
+ 0xb781000b,
+ 0xb71f482c,
+ 0x9d8e5af4,
+ 0xa261f000,
+ 0xa8a9f208,
+ 0xb53f75c0,
+ 0xb7496eec,
+ 0x0203573a,
+ 0x5a10c200,
+ 0xc0029d4b,
+ 0xcffc0a7c,
+ 0x59102a01,
+ 0x0148d020,
+ 0xcffe097c,
+ 0xc1012901,
+ 0x02030528,
+ 0x6525c101,
+ 0x2a710a0c,
+ 0x5890c200,
+ 0xc0005d05,
+ 0xb78391a2,
+ 0x0e825e2c,
+ 0xe003048b,
+ 0xb3152a00,
+ 0x05834442,
+ 0x92e0c000,
+ 0xc3019ea9,
+ 0xb7846115,
+ 0xd0f241cd,
+ 0xc00508ae,
+ 0xcffc7d00,
+ 0xc0002c81,
+ 0x068390a2,
+ 0x9080c000,
+ 0x0692c101,
+ 0x0092c101,
+ 0x020b0585,
+ 0x0a10c098,
+ 0xd2089ea8,
+ 0xb781a801,
+ 0x09c24b4c,
+ 0xd0217400,
+ 0x750209b8,
+ 0x1124d011,
+ 0x9102c000,
+ 0xc1800207,
+ 0xc0815920,
+ 0x31443a20,
+ 0x61ecb77f,
+ 0xf0009dbd,
+ 0x9ea8a161,
+ 0x7ec8b785,
+ 0x664cb764,
+ 0x66ccb744,
+ 0xc0007500,
+ 0x740092e2,
+ 0x9282c000,
+ 0xc00075c0,
+ 0xb7649102,
+ 0xb744654c,
+ 0xc00065cc,
+ 0x9ea89100,
+ 0xb7649eba,
+ 0xb744644c,
+ 0x858664cc,
+ 0x9080c000,
+ 0x05078582,
+ 0xb7849ea8,
+ 0xb71f644c,
+ 0xd1106174,
+ 0x9d8e0639,
+ 0xa261f000,
+ 0xb7849ea8,
+ 0xb71f64cc,
+ 0xd11060f4,
+ 0x9d8e0639,
+ 0xa261f000,
+ 0xb7849ea8,
+ 0xb71f654c,
+ 0xd11072f4,
+ 0x9d8e0639,
+ 0xa261f000,
+ 0xb7849ea8,
+ 0xd11065cc,
+ 0xb77f0639,
+ 0x9dbe7274,
+ 0xa261f000,
+ 0x0627d110,
+ 0x7174b71f,
+ 0xf0009d8e,
+ 0xd110a261,
+ 0xb75f0625,
+ 0x9dad70ec,
+ 0xa261f000,
+ 0x75c09ea8,
+ 0x7f48b785,
+ 0x9102c000,
+ 0x7d3ec01c,
+ 0x9104c000,
+ 0x9140c000,
+ 0x7d3ec01c,
+ 0x90c4c000,
+ 0x0a060d02,
+ 0x9080c000,
+ 0x0a020d0a,
+ 0x74809d6b,
+ 0x9102c000,
+ 0xc0007500,
+ 0x090690a2,
+ 0x9100c000,
+ 0x09029d6f,
+ 0x31c6e000,
+ 0x0922d001,
+ 0x4a049e53,
+ 0x59083238,
+ 0xb77f3244,
+ 0x9dbe6074,
+ 0xa261f000,
+ 0xc096020b,
+ 0xd2080a58,
+ 0xb71fa901,
+ 0xc1005ff4,
+ 0x024a5a08,
+ 0xaa11f288,
+ 0x0619d110,
+ 0xf0009d8e,
+ 0x590ca261,
+ 0xf0c8012a,
+ 0x0114a951,
+ 0x706cb73f,
+ 0xf0009d9d,
+ 0xb75fa161,
+ 0x9dae78f4,
+ 0xa161f000,
+ 0xc096020b,
+ 0xd2080a50,
+ 0xb77fa901,
+ 0xd0205eec,
+ 0xd1220124,
+ 0x024a5e08,
+ 0xaa11f2c8,
+ 0x0659d110,
+ 0xf0009dbd,
+ 0x5908a261,
+ 0xf088012a,
+ 0xb77fa951,
+ 0xd1105e74,
+ 0x9dbe0515,
+ 0xa161f000,
+ 0x5d080d04,
+ 0x052ac101,
+ 0xa952f0d0,
+ 0x5decb79f,
+ 0x9dcd0554,
+ 0xa162f000,
+ 0x7b6cb75f,
+ 0x5d74b75f,
+ 0xb72100aa,
+ 0x9dae48ad,
+ 0xa0e1f000,
+ 0xb7849ea8,
+ 0xf21163cc,
+ 0xc00128c0,
+ 0x74009004,
+ 0x93a4c000,
+ 0x72f4b73f,
+ 0xaa619d1e,
+ 0x716cb75f,
+ 0xf0009dad,
+ 0xb75fa261,
+ 0x9d2e7274,
+ 0xb77faa61,
+ 0x9dbd70ec,
+ 0xa261f000,
+ 0x7074b77f,
+ 0xaa619d3e,
+ 0x78ecb71f,
+ 0xf0009d8d,
+ 0xb71fa261,
+ 0xb75f7aec,
+ 0xb7807bec,
+ 0xb722510c,
+ 0x000b5b32,
+ 0x622cb701,
+ 0xa9c7c840,
+ 0x520cb780,
+ 0xb51f9dc0,
+ 0x7604566c,
+ 0x71f4b75f,
+ 0xa846c840,
+ 0xa965f210,
+ 0xaac2d810,
+ 0x56ecb55f,
+ 0x9104c001,
+ 0x5b8cb760,
+ 0x5c14b740,
+ 0xb9600902,
+ 0xe0304018,
+ 0xb77faa45,
+ 0x9dbe6af4,
+ 0xa261f000,
+ 0xb77f9e50,
+ 0xe0006a74,
+ 0x9dbeaa45,
+ 0xa261f000,
+ 0xffff0910,
+ 0xb79f9201,
+ 0xc0746e6c,
+ 0xc0140976,
+ 0xc0210a40,
+ 0x9dcd0a00,
+ 0xa161f000,
+ 0x9060c001,
+ 0x75069d50,
+ 0x93e4c000,
+ 0x0906c001,
+ 0x0a02c021,
+ 0x0900c4d0,
+ 0x0a20c014,
+ 0xf0009dcd,
+ 0xc02ca161,
+ 0x0a100932,
+ 0xf0009dcd,
+ 0xc002a161,
+ 0x0a10097e,
+ 0xf0009dcd,
+ 0x0a02a161,
+ 0x736cb71f,
+ 0xf0009d8d,
+ 0xf011a261,
+ 0x75028e28,
+ 0x9072c001,
+ 0x0906c001,
+ 0x0a02c021,
+ 0x0900cca0,
+ 0x0a20c014,
+ 0xf0009dcd,
+ 0xc040a161,
+ 0x0a10091a,
+ 0xf0009dcd,
+ 0xc002a161,
+ 0x0a10097e,
+ 0xf0009dcd,
+ 0x000ba161,
+ 0x60acb781,
+ 0x736cb75f,
+ 0x2a1c1a04,
+ 0xf0009dad,
+ 0x9e78a261,
+ 0x73f4b75f,
+ 0x622cb740,
+ 0xc1010205,
+ 0x9dae3a00,
+ 0xa261f000,
+ 0xf0009dae,
+ 0xb780a161,
+ 0xb77f62ac,
+ 0x9dbd69ec,
+ 0xa261f000,
+ 0x632cb780,
+ 0x6974b77f,
+ 0xf0009dbe,
+ 0xf210a261,
+ 0xb71faa69,
+ 0x9d8d68ec,
+ 0xa261f000,
+ 0xc098020b,
+ 0xd2080a0c,
+ 0x7500aa01,
+ 0x9222c000,
+ 0xc0007440,
+ 0x008b91c2,
+ 0x62adb781,
+ 0x746cb75f,
+ 0x3a00c020,
+ 0xf0009dad,
+ 0xc000a261,
+ 0x000b9140,
+ 0x62acb781,
+ 0x746cb73f,
+ 0xf0009d9d,
+ 0x000ba261,
+ 0x632cb781,
+ 0x686cb73f,
+ 0xf0009d9d,
+ 0xb78ca261,
+ 0xb741532a,
+ 0x750063ac,
+ 0x90c2c001,
+ 0x7c82c001,
+ 0x90c4c000,
+ 0x9e92097e,
+ 0x9220c000,
+ 0x292ed3f2,
+ 0x9ea30a7e,
+ 0x2900cff0,
+ 0x592176be,
+ 0x4846b323,
+ 0xb32474be,
+ 0x05074426,
+ 0xc0010109,
+ 0x9e503d04,
+ 0x5a20c100,
+ 0xb73f3208,
+ 0x9d9d74ec,
+ 0xa261f000,
+ 0x90e0c000,
+ 0x74f4b75f,
+ 0xf0009dae,
+ 0x000ba161,
+ 0x652cb781,
+ 0x67ecb73f,
+ 0xf0009d9d,
+ 0xb781a261,
+ 0xb75f65ac,
+ 0x9dad676c,
+ 0xa261f000,
+ 0x6602d011,
+ 0x8db2e012,
+ 0x6fecb77f,
+ 0x05b8c101,
+ 0x7d7cb55f,
+ 0x7de4b55f,
+ 0x9a17c0b4,
+ 0x0451d110,
+ 0x566cb77f,
+ 0xcff05820,
+ 0x30062800,
+ 0x66f4b77f,
+ 0xf0009dbe,
+ 0x000ba061,
+ 0x662cb781,
+ 0x6674b71f,
+ 0xf0009d8e,
+ 0xd2a2a261,
+ 0xb73f5e20,
+ 0xcff056ec,
+ 0xb73f2a00,
+ 0x324265f4,
+ 0xf0009d9e,
+ 0xb781a261,
+ 0xb75f672c,
+ 0x9dad656c,
+ 0xa261f000,
+ 0x532ab783,
+ 0x64f4b75f,
+ 0xf0009dae,
+ 0xc021a261,
+ 0xc0120902,
+ 0x9dad0950,
+ 0xa261f000,
+ 0x7decb77f,
+ 0x7d7cb75f,
+ 0xc00074c4,
+ 0xb7819204,
+ 0xb77f642c,
+ 0x9dbe6474,
+ 0xa261f000,
+ 0x64acb781,
+ 0x63ecb71f,
+ 0xf0009d8d,
+ 0x020ba261,
+ 0xc09a75c0,
+ 0xc0000a44,
+ 0xd20893c2,
+ 0xb71faa01,
+ 0xc2007674,
+ 0x00ca5a08,
+ 0x7aadb780,
+ 0xf0009d8e,
+ 0xb780a261,
+ 0xb73f7f2d,
+ 0x9d9e76f4,
+ 0xa261f000,
+ 0x43adb721,
+ 0x77ecb75f,
+ 0xf0009dad,
+ 0xc001a0e1,
+ 0xd2089380,
+ 0xb77ea902,
+ 0xb77f7a68,
+ 0x5d087674,
+ 0x293ed3f1,
+ 0x5a0cc100,
+ 0x04abd110,
+ 0x0a7cc002,
+ 0x7ab5b740,
+ 0x2a01cffc,
+ 0x6245c301,
+ 0x0244c101,
+ 0xf0009dbe,
+ 0xd012a261,
+ 0x9d53692b,
+ 0x5d0b0d0c,
+ 0x7f2db760,
+ 0x5a18c200,
+ 0x6245c101,
+ 0x76ecb71f,
+ 0x9d8d0246,
+ 0xa261f000,
+ 0x6125c301,
+ 0x43adb721,
+ 0xc0020124,
+ 0xcffc097c,
+ 0x01222901,
+ 0x77f4b71f,
+ 0xf0009d8e,
+ 0xf210a161,
+ 0xb73fa96d,
+ 0x9d9d62ec,
+ 0xa161f000,
+ 0xb75f9ea9,
+ 0xb7816274,
+ 0x9dae66cd,
+ 0xa261f000,
+ 0x7aecb71f,
+ 0x7fa8b785,
+ 0xc0007500,
+ 0xb78a9164,
+ 0x75024649,
+ 0x90c4c000,
+ 0x3900c080,
+ 0xa16df210,
+ 0x0a7ec7fe,
+ 0x7a74b71f,
+ 0xf0009d8e,
+ 0xc401a261,
+ 0xc5000a02,
+ 0xb73f0a00,
+ 0x9d9d79ec,
+ 0xa261f000,
+ 0x991afe14,
+ 0x7becb75f,
+ 0x528cb780,
+ 0x7b6cb77f,
+ 0xb59f0228,
+ 0xb780786c,
+ 0x0228520c,
+ 0x5c6cb59f,
+ 0x510cb780,
+ 0x03a8d020,
+ 0x548cb780,
+ 0xb59f0238,
+ 0xb780586c,
+ 0x03a8538c,
+ 0x560cb780,
+ 0xb59f0268,
+ 0xc0095f6c,
+ 0x74809380,
+ 0x08021a04,
+ 0x0802d001,
+ 0x70880c02,
+ 0x0c02d001,
+ 0xb51f7400,
+ 0xc0006bec,
+ 0x008b9142,
+ 0x5b2bb762,
+ 0x5333b74c,
+ 0xc0020906,
+ 0xb73f9080,
+ 0x020b5f74,
+ 0x0a1cc0a4,
+ 0xa901d208,
+ 0x4049b780,
+ 0xc0017104,
+ 0x00dc93c8,
+ 0x4929b78c,
+ 0x08c0c060,
+ 0xc0007500,
+ 0xb7809204,
+ 0x75004029,
+ 0x9164c000,
+ 0xb78c000b,
+ 0x0d02532a,
+ 0xd0027500,
+ 0xc0000d22,
+ 0x0d069060,
+ 0xda10008b,
+ 0xb742aa61,
+ 0x02445b2b,
+ 0xa261da10,
+ 0x9160c000,
+ 0x71f4b73f,
+ 0xa1e1da10,
+ 0x404bb780,
+ 0xb5800a04,
+ 0xf208404b,
+ 0xda10a929,
+ 0x7104aa61,
+ 0xffff11c4,
+ 0x000b9228,
+ 0x5b2ab762,
+ 0x9eb30902,
+ 0x9e419e91,
+ 0xfdf4a01e,
+ 0xb71f9ada,
+ 0x9e745874,
+ 0x0a00c021,
+ 0x404cb740,
+ 0x0a00c016,
+ 0xf0009dcd,
+ 0x9eb3a161,
+ 0x98dafe14,
+ 0x0992058d,
+ 0x0d02c400,
+ 0x9a57fd74,
+ 0x0a02c400,
+ 0x7a6cb73f,
+ 0xf0009d9d,
+ 0xb73fa261,
+ 0xda087874,
+ 0x000baa61,
+ 0x404bb740,
+ 0xda080a04,
+ 0x0904a261,
+ 0x404bb540,
+ 0x5baab742,
+ 0x2a7ccffe,
+ 0xc0007088,
+ 0x0a0291e4,
+ 0xa261da08,
+ 0x7becb73f,
+ 0x530cb740,
+ 0xd8080114,
+ 0x0a04aa41,
+ 0xa241d808,
+ 0x7c74b73f,
+ 0x530cb780,
+ 0x7becb75f,
+ 0x4051b760,
+ 0xaa45c840,
+ 0xc00076c0,
+ 0x9ea590a2,
+ 0x9100c000,
+ 0x430cb720,
+ 0x6c2db724,
+ 0x02c2d020,
+ 0x6bf4b75f,
+ 0xc0017680,
+ 0xda089024,
+ 0x000ba961,
+ 0x5baab762,
+ 0x0d027482,
+ 0x0d22d001,
+ 0xd00174c2,
+ 0xb3321a44,
+ 0xc0004442,
+ 0x748290a2,
+ 0x1a42d00a,
+ 0xc00076c0,
+ 0xb72090e4,
+ 0xb724430c,
+ 0x02426c2d,
+ 0x018d9ea3,
+ 0x9adefe74,
+ 0x430cb720,
+ 0xc0a40203,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x9322c001,
+ 0x5f74b71f,
+ 0xc0a40203,
+ 0xd2080a1c,
+ 0xb780a901,
+ 0x71044048,
+ 0x91e8c002,
+ 0x7874b73f,
+ 0x5c74b75f,
+ 0x404bb740,
+ 0xaa41d810,
+ 0xc0027088,
+ 0xda089088,
+ 0x7500aa61,
+ 0x93e4c001,
+ 0x6b2db709,
+ 0x018d058b,
+ 0x9c629e84,
+ 0x430cb720,
+ 0x5dadb781,
+ 0x75021a10,
+ 0x90f2c000,
+ 0x532bb78c,
+ 0xc0017500,
+ 0xb7809184,
+ 0x9e80550c,
+ 0xc2409eb3,
+ 0xc001a046,
+ 0x009c9040,
+ 0x4829b78c,
+ 0xc0007500,
+ 0x008b93c2,
+ 0x5dadb781,
+ 0x550cb740,
+ 0xc0007504,
+ 0xc0029124,
+ 0x9ea10a44,
+ 0xa0c6c220,
+ 0x9180c000,
+ 0xd2080264,
+ 0x0d7ea901,
+ 0x9e930908,
+ 0x7286c810,
+ 0xa102d208,
+ 0xfe149eb3,
+ 0xfdf49825,
+ 0xb79f9bbf,
+ 0xb71f786c,
+ 0xb7a05c6c,
+ 0xda08430c,
+ 0xb780a901,
+ 0x7088402a,
+ 0x9366fff5,
+ 0xc0a2020b,
+ 0xd2080a38,
+ 0x7500aa01,
+ 0x90a2c000,
+ 0xc0000a06,
+ 0x025c9200,
+ 0xc0a20109,
+ 0xc0a20920,
+ 0xd2080a40,
+ 0xd008aa05,
+ 0x7104a951,
+ 0xd0010a02,
+ 0x9eb30a42,
+ 0xfdf40389,
+ 0x058d9be3,
+ 0xc1000992,
+ 0xfd740d02,
+ 0xcffe9960,
+ 0xc1002bfc,
+ 0xb71f0a02,
+ 0x9d8e7a74,
+ 0xa261f000,
+ 0xb73f0a86,
+ 0x9d9d6dec,
+ 0xa2e1f000,
+ 0x6d74b73f,
+ 0xf0009d9e,
+ 0xb77fa2e1,
+ 0xc00659f4,
+ 0x0d0209f2,
+ 0xfcf4010b,
+ 0xc0389958,
+ 0x0d0209f2,
+ 0x058d010b,
+ 0x9951fcf4,
+ 0x7c74b75f,
+ 0x530cb780,
+ 0x7becb77f,
+ 0xa941d010,
+ 0xaa65c840,
+ 0x1a047480,
+ 0x9144c000,
+ 0x430cb720,
+ 0x2a7ccffe,
+ 0x6c2db724,
+ 0x020300c2,
+ 0x2a7ccffe,
+ 0x9eaa9ea3,
+ 0xfe74018d,
+ 0x75c09a09,
+ 0x90a4c000,
+ 0xc000010f,
+ 0xb7209280,
+ 0xb785430c,
+ 0x75007ea9,
+ 0x90a4c000,
+ 0xc000010b,
+ 0xb77f9140,
+ 0x09027c74,
+ 0xaa61d010,
+ 0xd0017500,
+ 0x9e950922,
+ 0xcffe75c0,
+ 0xc0002efc,
+ 0xb7209124,
+ 0xb786430c,
+ 0x75006729,
+ 0x9042c002,
+ 0x0a82c008,
+ 0x7a6cb79f,
+ 0xf0009dcd,
+ 0xb7ffa2e1,
+ 0x9d7e7974,
+ 0xc002aa61,
+ 0x9dfe3a40,
+ 0xa261f000,
+ 0x0a02c401,
+ 0x0a00c008,
+ 0x79ecb71f,
+ 0xf0009d8d,
+ 0x9eaaa261,
+ 0x058d0992,
+ 0x98ddfd74,
+ 0xb71f0a02,
+ 0x9d8e79f4,
+ 0xa261f000,
+ 0x7a6cb73f,
+ 0xf0009d9d,
+ 0xb73fa2e1,
+ 0x9d1e6cf4,
+ 0xb75faa61,
+ 0x9d2d6c6c,
+ 0x7680a962,
+ 0x91c2c000,
+ 0x6b6cb77f,
+ 0x5a35c200,
+ 0xf0082a7c,
+ 0xd110a961,
+ 0x12a81525,
+ 0x9060c000,
+ 0xb7400a82,
+ 0x9eb3430c,
+ 0xc0a20986,
+ 0xd008093c,
+ 0x0a04aa41,
+ 0xa241d008,
+ 0x430cb740,
+ 0xc050012c,
+ 0xd0080960,
+ 0x0a04aa41,
+ 0xa241d008,
+ 0x9a45fdb4,
+ 0xc00075c0,
+ 0xb7209322,
+ 0xd2a4430c,
+ 0xb784590c,
+ 0xc10179ad,
+ 0xb5840244,
+ 0xb78079ad,
+ 0xb71f430c,
+ 0x02087b6c,
+ 0x0a40c09c,
+ 0xa901f208,
+ 0x0524c101,
+ 0xa102f208,
+ 0xc0007740,
+ 0xb7809242,
+ 0x9ea2430c,
+ 0x0a3cc0a2,
+ 0x0d20c0a4,
+ 0xa901d208,
+ 0xaa41d010,
+ 0x0a027088,
+ 0x4828b354,
+ 0xb7009ea5,
+ 0xb7814314,
+ 0x1a105dcc,
+ 0xc0007502,
+ 0xb78c9132,
+ 0x7500534a,
+ 0x9082c000,
+ 0x76ccb7a4,
+ 0x560cb740,
+ 0x7bf4b75f,
+ 0x7c6cb77f,
+ 0xa8c5c220,
+ 0xb7209e44,
+ 0x00925314,
+ 0x0a50c096,
+ 0x0090c101,
+ 0x532bb72a,
+ 0xa8cac810,
+ 0xa902d208,
+ 0xaa61d008,
+ 0x040cc101,
+ 0x4c50b70a,
+ 0xc1017500,
+ 0x01031492,
+ 0x9e699eb3,
+ 0xd0010a02,
+ 0xcffe0a42,
+ 0x018b2cfc,
+ 0xa219a01e,
+ 0xfdf4a395,
+ 0x75c098ed,
+ 0x9042c001,
+ 0x4314b720,
+ 0x7b74b7ff,
+ 0x9eb39d0b,
+ 0x04f2d010,
+ 0xb7290992,
+ 0xcfe86fad,
+ 0xd0320971,
+ 0x02126919,
+ 0xc1380522,
+ 0xd0100d20,
+ 0x0242a952,
+ 0x0244c101,
+ 0x5a08c200,
+ 0x00c3d120,
+ 0x70d5b749,
+ 0x99f1fd14,
+ 0x430cb720,
+ 0x5733b743,
+ 0xc0067680,
+ 0xb7839184,
+ 0x87825ead,
+ 0x75020902,
+ 0x87b2e00a,
+ 0x75049d60,
+ 0x0922d001,
+ 0x0a027508,
+ 0x0a42d001,
+ 0xd0407400,
+ 0xc0003128,
+ 0x9e5490a2,
+ 0x90e0c000,
+ 0x0a069dc0,
+ 0xd0017600,
+ 0xc0010a42,
+ 0xc200853e,
+ 0x0e825a08,
+ 0x8500cfc0,
+ 0x58ecb59f,
+ 0x4018b960,
+ 0x2e5ed3f1,
+ 0xd0117506,
+ 0xc00012da,
+ 0xb73f90c2,
+ 0x9d1d756c,
+ 0xb73faae1,
+ 0x9d1e75f4,
+ 0xb75fa9e1,
+ 0x9d2d776c,
+ 0xb75fa9e2,
+ 0xb72058f4,
+ 0xd0104314,
+ 0x042a045a,
+ 0xd0100400,
+ 0xd1200482,
+ 0xb7230083,
+ 0xb78357ab,
+ 0x9e9a7bcb,
+ 0xc03e9d48,
+ 0x12422d7c,
+ 0x0244c101,
+ 0x7bcbb583,
+ 0x4314b720,
+ 0x59084904,
+ 0x0482d010,
+ 0x0083d120,
+ 0x012ac101,
+ 0x7dcbb783,
+ 0x5dabb723,
+ 0x0124d020,
+ 0x12429d4b,
+ 0x59a921b4,
+ 0xb5830246,
+ 0xb7207dcb,
+ 0x010b4314,
+ 0x297cc03e,
+ 0x0482d010,
+ 0x0083d120,
+ 0x67abb723,
+ 0x7fcbb783,
+ 0xc03e0787,
+ 0x12422ffc,
+ 0xb5830244,
+ 0xb7207fcb,
+ 0x9d4f430c,
+ 0xc1019d7e,
+ 0x00820402,
+ 0xb7849ddf,
+ 0xb70341ab,
+ 0x215671d2,
+ 0x9dfc25be,
+ 0xc1015929,
+ 0xc3ff1240,
+ 0x02442ac0,
+ 0x5da977c0,
+ 0xc2800e84,
+ 0xb58459d1,
+ 0xc00141ab,
+ 0xb72091c4,
+ 0xd0104314,
+ 0xd12004a2,
+ 0xb7230083,
+ 0xb78463ab,
+ 0x124243cb,
+ 0x0246c301,
+ 0x43cbb584,
+ 0x4314b720,
+ 0x04a2d010,
+ 0x0083d120,
+ 0x6dabb723,
+ 0x45cbb784,
+ 0xc1011242,
+ 0xb5840246,
+ 0xb72045cb,
+ 0xd0104314,
+ 0xd12004a2,
+ 0xb7230083,
+ 0xb78477ab,
+ 0x124247cb,
+ 0xb5840246,
+ 0xfffa47cb,
+ 0xb70093e1,
+ 0x9e444314,
+ 0x0a38c0a2,
+ 0xaa01d208,
+ 0xc0007500,
+ 0xd1109204,
+ 0xb700060d,
+ 0xc0a2560c,
+ 0xd2080a20,
+ 0xc200aa11,
+ 0x1a04a945,
+ 0xc0027088,
+ 0x75c09038,
+ 0x9202c001,
+ 0x7b74b73f,
+ 0x6c50b74d,
+ 0x4490b760,
+ 0x0490d010,
+ 0xb729018d,
+ 0xd0316fb5,
+ 0xc00e6d19,
+ 0xc1012cfc,
+ 0xc1380120,
+ 0xd0080920,
+ 0xfd54a951,
+ 0xb7409ad8,
+ 0xb77f430c,
+ 0x00b47b6c,
+ 0x6fadb789,
+ 0x02446a32,
+ 0x0a20c138,
+ 0xd208097f,
+ 0xb720a111,
+ 0xc00e430c,
+ 0x00b20a7e,
+ 0x6fadb589,
+ 0x9200c000,
+ 0x7b74b77f,
+ 0xd010097f,
+ 0xb78904b0,
+ 0x6a326fad,
+ 0x0240c101,
+ 0x0a20c138,
+ 0xa111d208,
+ 0x430cb740,
+ 0xb7000a7f,
+ 0x9ea0560c,
+ 0xc2000205,
+ 0xc0a2a046,
+ 0xd2080a38,
+ 0x7500aa01,
+ 0x92a4c000,
+ 0x02059e92,
+ 0x0a3cc0a2,
+ 0x0d20c0a4,
+ 0xa901d208,
+ 0xaa41d010,
+ 0xc0007088,
+ 0x9eb39108,
+ 0x98c7fdf4,
+ 0xffa67400,
+ 0xb73f9364,
+ 0x0a1a6f6c,
+ 0x41adb580,
+ 0x430cb720,
+ 0xc0a20203,
+ 0xd2080a38,
+ 0x7500aa01,
+ 0x9044c001,
+ 0x4729b76a,
+ 0x46a9b74a,
+ 0x4794b720,
+ 0x0d069ea3,
+ 0x9120c000,
+ 0x5aadb780,
+ 0x750c0787,
+ 0x4842b327,
+ 0xd051050f,
+ 0x70866a29,
+ 0x00c2c101,
+ 0x0a22d011,
+ 0xc00e0109,
+ 0xffff297c,
+ 0x768091f4,
+ 0x9162c000,
+ 0x4488b780,
+ 0x478cb740,
+ 0x02446a52,
+ 0xf2480906,
+ 0x9e72a115,
+ 0x0a02c002,
+ 0x0900c021,
+ 0xf0009dad,
+ 0xc002a261,
+ 0x9dad1a00,
+ 0xa261f000,
+ 0x430cb740,
+ 0xc2000a86,
+ 0xc13652b8,
+ 0xd0080924,
+ 0x0a7fa942,
+ 0x9e50424a,
+ 0xd0082240,
+ 0xfdf4a241,
+ 0xd3f298a5,
+ 0xfd7429de,
+ 0xb79f99f8,
+ 0xb7bf67ee,
+ 0xb7df686e,
+ 0xb7ff68ee,
+ 0xcfe6696e,
+ 0x9c228461,
+ 0xfcd4a605,
+ 0xc0219a45,
+ 0x09060a02,
+ 0x0a00c010,
+ 0xf0009dcd,
+ 0x0d82a161,
+ 0x9baafcf4,
+ 0xfcd40d8a,
+ 0x09029a41,
+ 0x0a02c021,
+ 0x438cb540,
+ 0x0a00c020,
+ 0xf0009dcd,
+ 0xc01ca161,
+ 0x9d4d0a40,
+ 0xc021aa61,
+ 0xc00f0a82,
+ 0xb5802a7e,
+ 0xc004450c,
+ 0x9d5d0ae0,
+ 0x2afcaae1,
+ 0xfd349eab,
+ 0xb7809bbc,
+ 0xb9604e0c,
+ 0xd2084038,
+ 0xc146a281,
+ 0xffff0a20,
+ 0xc0219381,
+ 0xc0060a02,
+ 0x9d4d0a50,
+ 0x7d1eaa61,
+ 0x92c2c000,
+ 0x0a02c021,
+ 0x0a40c006,
+ 0xa9629d4d,
+ 0xc008090a,
+ 0x9dcd0a40,
+ 0xa161f000,
+ 0xc00076bf,
+ 0xc00c90e2,
+ 0x9dcd1a70,
+ 0xa162f000,
+ 0x0a02c401,
+ 0x0902c021,
+ 0x0a00cff0,
+ 0x9dad0920,
+ 0xa261f000,
+ 0x578cb720,
+ 0xb7400a02,
+ 0x9ea34a14,
+ 0x4588b580,
+ 0x4688b580,
+ 0x018928e1,
+ 0xc03c851a,
+ 0xc0580ac2,
+ 0xc01c0802,
+ 0xc0140c02,
+ 0xb9600c82,
+ 0xd3f14058,
+ 0xf0102d3e,
+ 0x7480a147,
+ 0xa141d010,
+ 0x90e4c000,
+ 0xf1d09e4b,
+ 0xc000a0dd,
+ 0xf1d09240,
+ 0x7482aa49,
+ 0x02460a1c,
+ 0xf1d02a61,
+ 0xb303a25d,
+ 0xc0004822,
+ 0x749290c2,
+ 0xb303018b,
+ 0x0d844432,
+ 0xffff0d50,
+ 0xc0219001,
+ 0x0a820a02,
+ 0x0a10c010,
+ 0xf0009dcd,
+ 0x0a10a2e1,
+ 0xf0009dcd,
+ 0x090aa2e1,
+ 0x0a30c104,
+ 0xf0009dcd,
+ 0xc123a161,
+ 0xc5660952,
+ 0xcefa0960,
+ 0x9dcd0a31,
+ 0xa161f000,
+ 0x99fffcd4,
+ 0x0926c829,
+ 0x0900cc3e,
+ 0xaa41f008,
+ 0xc0007500,
+ 0x0a0490a2,
+ 0xa241f008,
+ 0x9b06fd94,
+ 0xfcd49eab,
+ 0x000b998d,
+ 0x7f6eb79f,
+ 0x7feeb7bf,
+ 0x9c228c40,
+ 0x4314b720,
+ 0xd0109e9a,
+ 0xb78c0494,
+ 0x75004829,
+ 0x9342c000,
+ 0x6f4db746,
+ 0xc0c89e4c,
+ 0xd2080a48,
+ 0x0908aa01,
+ 0x7104d010,
+ 0x6f4db586,
+ 0x430cb720,
+ 0x550cb780,
+ 0x6f2db746,
+ 0xaa65c040,
+ 0x7104d012,
+ 0x6f2db586,
+ 0x430cb700,
+ 0x6f2cb706,
+ 0xa6059c22,
+ 0x430cb7a0,
+ 0x9e9d0d86,
+ 0x98e8fed4,
+ 0xb506008b,
+ 0xb7206f2d,
+ 0xd0104314,
+ 0xb78c049a,
+ 0x75004829,
+ 0x9082c001,
+ 0x6f4db786,
+ 0xc0c89e4a,
+ 0xd0080948,
+ 0x0a08a941,
+ 0x7104d010,
+ 0x6f4db586,
+ 0x430cb720,
+ 0x550cb740,
+ 0xb7869e6b,
+ 0xc0206f2d,
+ 0xd012a965,
+ 0xb5867088,
+ 0xb7206f2d,
+ 0x0908430c,
+ 0x6f2db786,
+ 0x7104d010,
+ 0x6f2db586,
+ 0x4314b760,
+ 0xc0d60986,
+ 0xfeb40de0,
+ 0xb7209a4c,
+ 0xb786430c,
+ 0xb58d6f2d,
+ 0xb7004aab,
+ 0xb706430c,
+ 0xb79f6f2c,
+ 0xb7bf7f6e,
+ 0x8c407fee,
+ 0xa61d9c22,
+ 0x8420c002,
+ 0x430cb7e0,
+ 0xaa41d810,
+ 0xa945d810,
+ 0x9eb9008f,
+ 0xb7419e55,
+ 0xb57f5e35,
+ 0xb7267c6c,
+ 0x024469d5,
+ 0x00897684,
+ 0x7d74b53f,
+ 0xcffe9e5e,
+ 0xf20828fc,
+ 0xc000a932,
+ 0x9eb99244,
+ 0x5a4bb78d,
+ 0x6dcdb746,
+ 0x0629d110,
+ 0x5a4bb58d,
+ 0xaa35f208,
+ 0xb5460128,
+ 0xc0006dcd,
+ 0x9eb99120,
+ 0x5a53b54d,
+ 0xaa35f208,
+ 0x6dcdb586,
+ 0xf2489eb9,
+ 0xb786a9a1,
+ 0xb7466fcd,
+ 0xb746704d,
+ 0x02466ed5,
+ 0x6fcdb586,
+ 0xa9a5f248,
+ 0x6f4db786,
+ 0x5ad5b766,
+ 0xb5460126,
+ 0xda08704d,
+ 0x0242a925,
+ 0x6f4db586,
+ 0x0524c101,
+ 0x6ed5b546,
+ 0xa931f208,
+ 0xaa35f208,
+ 0x5905d0a4,
+ 0x02440183,
+ 0x05b8c101,
+ 0x5ad5b566,
+ 0xaa39da08,
+ 0xa9beda08,
+ 0x05b8c101,
+ 0xc07405b4,
+ 0x008f9b19,
+ 0x69adb786,
+ 0x6d2bb74c,
+ 0x09040240,
+ 0x69adb586,
+ 0x6d2bb54c,
+ 0x448cb780,
+ 0x4314b720,
+ 0xc0007500,
+ 0x010990a4,
+ 0x9180c000,
+ 0x0902008b,
+ 0x4030b960,
+ 0x54a9b780,
+ 0x01280884,
+ 0x9381ffff,
+ 0x614bb54d,
+ 0xf2089eb9,
+ 0xb786a935,
+ 0xf2085dcd,
+ 0xb766a932,
+ 0x1244564d,
+ 0x1244c101,
+ 0x5dcdb586,
+ 0x69d5b726,
+ 0xb53f74c4,
+ 0xc0167cf4,
+ 0x008f91e2,
+ 0x71abb78c,
+ 0xc0167500,
+ 0xb7a09124,
+ 0xf2084314,
+ 0xf208a925,
+ 0x048baa29,
+ 0x4ccdb766,
+ 0x6dd3b7cc,
+ 0xd1a40244,
+ 0x9eb75985,
+ 0x01c7d120,
+ 0x2ffccffe,
+ 0x9ac3c074,
+ 0x6e65d171,
+ 0x5c88d322,
+ 0x024ec101,
+ 0x5a08c200,
+ 0x024ac101,
+ 0xc101a01d,
+ 0xc0e6009a,
+ 0xd2480a60,
+ 0xb746ab0d,
+ 0x048b682d,
+ 0x5a4bb76d,
+ 0x5c53b74d,
+ 0x6d53b72c,
+ 0xba12112c,
+ 0xa0924003,
+ 0x682db546,
+ 0x9e69a892,
+ 0x066cd010,
+ 0xa11a1514,
+ 0xb72d9e9b,
+ 0xc101612b,
+ 0xba1b024a,
+ 0xc12a4003,
+ 0x76c00a40,
+ 0xaa99da08,
+ 0x7decb53f,
+ 0x917cc001,
+ 0xb786048b,
+ 0x75006dcd,
+ 0x90bcc001,
+ 0x05b9d110,
+ 0xc0745da0,
+ 0x75409a87,
+ 0x90a4c000,
+ 0xc0000101,
+ 0x025a90e0,
+ 0x0240024a,
+ 0x590bc200,
+ 0x066cd010,
+ 0xc1017580,
+ 0xc12a024a,
+ 0xda080a40,
+ 0xc000a119,
+ 0x028190a4,
+ 0x90e0c000,
+ 0x02400200,
+ 0xc200024c,
+ 0xc00e5a8b,
+ 0xc810097e,
+ 0xd1717144,
+ 0x9e696e65,
+ 0x01cec101,
+ 0x5a08c180,
+ 0x024ac101,
+ 0x0a60c0e6,
+ 0xa10dd248,
+ 0x7df4b73f,
+ 0x5433b74d,
+ 0x76400a02,
+ 0x0a42d002,
+ 0x692db746,
+ 0x0244c101,
+ 0x012aa91e,
+ 0x692db546,
+ 0xb58d7680,
+ 0xc000542b,
+ 0xb7869122,
+ 0x9e494cad,
+ 0x70481a04,
+ 0x9326c000,
+ 0x04ecd010,
+ 0xb74d048b,
+ 0xc1015c4b,
+ 0xb72e009a,
+ 0x02365f2b,
+ 0x024ac101,
+ 0xc1140904,
+ 0x0d060a20,
+ 0xda08a116,
+ 0xb54da091,
+ 0xc0045c4b,
+ 0x9e6990e0,
+ 0x6dadb746,
+ 0xc0007480,
+ 0x0a169204,
+ 0x7b6cb59f,
+ 0x7becb55f,
+ 0x5c2bb78d,
+ 0xa0960c86,
+ 0xb58d0a04,
+ 0xc0005c2b,
+ 0x0a029100,
+ 0x7b6cb55f,
+ 0x7becb59f,
+ 0xb79fa215,
+ 0xb75f7d6c,
+ 0x11a87cec,
+ 0x4002ba1b,
+ 0x0a4ec002,
+ 0x70c8cc10,
+ 0x1a4cc002,
+ 0x70c8cc12,
+ 0x5d0cb780,
+ 0x4002ba1b,
+ 0xc84001b6,
+ 0xe0b2a9e5,
+ 0xba1b8d80,
+ 0xff744002,
+ 0xa99d9999,
+ 0x8d80e0b2,
+ 0x99c5ff74,
+ 0x8d80e0b2,
+ 0x99adfe74,
+ 0xc0120a16,
+ 0xd1717008,
+ 0xcffe6e65,
+ 0xc101287c,
+ 0x0248024e,
+ 0x024ac101,
+ 0xc1140289,
+ 0xda080aa0,
+ 0x7580ab31,
+ 0x9142c000,
+ 0x01ecd020,
+ 0x0d88098e,
+ 0x05b0c101,
+ 0x99cac074,
+ 0x6de5d172,
+ 0x5c88d322,
+ 0x05b605be,
+ 0xda0805ba,
+ 0xc101a031,
+ 0xc114009a,
+ 0xd8100da0,
+ 0xb787a9f2,
+ 0x048b4ead,
+ 0x5a4bb76c,
+ 0x05b8c101,
+ 0x15bcc101,
+ 0xcc129e9a,
+ 0xd01072c4,
+ 0xc18006ec,
+ 0xb5675a05,
+ 0xc1014eb5,
+ 0xc10102da,
+ 0xc07405b8,
+ 0x008b999e,
+ 0x5f2bb50e,
+ 0x6e65d171,
+ 0x5d8cc300,
+ 0x024ec101,
+ 0x5a08c200,
+ 0x00cac101,
+ 0x502db7a7,
+ 0xc0d205d6,
+ 0xc00f0da0,
+ 0x018b2afe,
+ 0x98d7ff74,
+ 0x7640a896,
+ 0x93e4c000,
+ 0x6e69d091,
+ 0x4038b960,
+ 0x0659d110,
+ 0xc12c9ea2,
+ 0xf0100d70,
+ 0xf090aa41,
+ 0xf010a949,
+ 0xf090a245,
+ 0x1d10a14d,
+ 0x92e1ffff,
+ 0x6ce9d091,
+ 0x009ac101,
+ 0x612db5a9,
+ 0xb549a91d,
+ 0xd171582d,
+ 0xa91d6e65,
+ 0x024ec101,
+ 0x5a08c200,
+ 0x00cac101,
+ 0x502db787,
+ 0x297ec00f,
+ 0xcff1a99d,
+ 0x32442a00,
+ 0xb587058b,
+ 0xc0d2502d,
+ 0xff740de0,
+ 0xa91a98cf,
+ 0xc0027680,
+ 0x9e69917c,
+ 0x682bb74d,
+ 0x5a2bb78c,
+ 0xa9116244,
+ 0x75080244,
+ 0x901cc002,
+ 0x0a040205,
+ 0x75062a0c,
+ 0x9344c001,
+ 0x9e510a18,
+ 0x7102d010,
+ 0x6d69d091,
+ 0x4002ba24,
+ 0x0555d110,
+ 0x75020982,
+ 0xd0109e92,
+ 0x9e9b19c2,
+ 0x0d30c130,
+ 0x4418b333,
+ 0x9140c000,
+ 0xaa41f010,
+ 0xa959f150,
+ 0x01b80d10,
+ 0x05b4c101,
+ 0x9301ffff,
+ 0xd01074c0,
+ 0xc00004ec,
+ 0xc1019184,
+ 0xc12a021a,
+ 0xc0100a40,
+ 0xda080902,
+ 0xc000a111,
+ 0xc1019160,
+ 0x5da0029a,
+ 0x0ac0c12a,
+ 0x9903c074,
+ 0xa031da08,
+ 0xb7869eb9,
+ 0x7502564d,
+ 0x9184c007,
+ 0x430cb7a0,
+ 0xb7cc0a10,
+ 0x008b6d4b,
+ 0x652bb74c,
+ 0x6f2db726,
+ 0xd00374ae,
+ 0x61441a44,
+ 0xd0137044,
+ 0xc0001124,
+ 0x9ea99146,
+ 0x4ccdb786,
+ 0x70480228,
+ 0xd0030902,
+ 0x9ea90922,
+ 0x674bb78c,
+ 0x6dd3b7ac,
+ 0xc0067500,
+ 0x74809124,
+ 0x9222c003,
+ 0x6f53b76c,
+ 0xc00276c0,
+ 0xb7669324,
+ 0xb746704d,
+ 0xb7866fd5,
+ 0xd110644d,
+ 0x02480527,
+ 0xc0027104,
+ 0xd05191a8,
+ 0x70c86e29,
+ 0x9114c002,
+ 0x6ecdb786,
+ 0x6917d051,
+ 0x71046a52,
+ 0x9006c002,
+ 0x7c4bb78c,
+ 0xc0c8010b,
+ 0xd0080938,
+ 0x0a08a941,
+ 0x19149ea2,
+ 0x2d7ccffe,
+ 0x72950a06,
+ 0x6ecbb58c,
+ 0x704bb58c,
+ 0x90f6c000,
+ 0x0e28d011,
+ 0xcffe9ea2,
+ 0xb79f2d7c,
+ 0x9e52786a,
+ 0x2a7ccffe,
+ 0x00ca0248,
+ 0x7c2bb72d,
+ 0x0992d011,
+ 0x7086cc12,
+ 0x430cb740,
+ 0xc0c80205,
+ 0xd2080a48,
+ 0xcc10aa01,
+ 0xc0c870c8,
+ 0xd0080944,
+ 0xcc12aa41,
+ 0xba1b70c8,
+ 0xfe744002,
+ 0x008b98c7,
+ 0x472bb50d,
+ 0x7c2bb50c,
+ 0xb7469ea9,
+ 0xb7866455,
+ 0xb7666fcd,
+ 0xd010704d,
+ 0x02460524,
+ 0x0124c101,
+ 0xc2000244,
+ 0xb5865a09,
+ 0xc002644d,
+ 0x008b92e0,
+ 0x702bb78c,
+ 0xc0027500,
+ 0xd2a29222,
+ 0x9ea95c88,
+ 0xb766009a,
+ 0xb76c5cb5,
+ 0xc0745a4b,
+ 0x008b985b,
+ 0xb766600d,
+ 0xc0005aad,
+ 0xb74d5a08,
+ 0x70c84733,
+ 0x91fcc000,
+ 0x04dad010,
+ 0xb72c009a,
+ 0x08a0742b,
+ 0xc0017293,
+ 0xd0119036,
+ 0xc0000e22,
+ 0x02009340,
+ 0xc00070c8,
+ 0x9ea99336,
+ 0x6bcdb786,
+ 0x514db746,
+ 0x71040238,
+ 0x9216c000,
+ 0x04dad010,
+ 0xb72c009a,
+ 0x1890742b,
+ 0xc0007293,
+ 0xd01190fc,
+ 0x9ea21e22,
+ 0x2d7ccffe,
+ 0xc0c8020b,
+ 0xd2080a48,
+ 0x9e52aa01,
+ 0x7088d010,
+ 0xc0c8010b,
+ 0xd0080944,
+ 0xd012a941,
+ 0x008b7104,
+ 0x472bb58d,
+ 0x7beeb79f,
+ 0x7c6eb7bf,
+ 0x7ceeb7df,
+ 0x7d6eb7ff,
+ 0x8c20c004,
+ 0xf0109c22,
+ 0x9e5baa61,
+ 0x430cb720,
+ 0xc0007500,
+ 0xb5869082,
+ 0xd00876ad,
+ 0xc01ca975,
+ 0xc00074be,
+ 0x020390e2,
+ 0x0a64c0da,
+ 0xa101d208,
+ 0xa979d008,
+ 0x74bec01c,
+ 0x90e2c000,
+ 0xc0da0203,
+ 0xd2080a68,
+ 0x0103a101,
+ 0xc0da9e8a,
+ 0x0a060940,
+ 0xa241d008,
+ 0x0d50c0dc,
+ 0xd0101a04,
+ 0xd008a241,
+ 0x0203a971,
+ 0x0a60c0da,
+ 0xa101d208,
+ 0xb7209c22,
+ 0xd1104314,
+ 0xb70c0497,
+ 0xb78c7c4b,
+ 0x75004829,
+ 0x48a2b340,
+ 0xc0c89e4c,
+ 0xd2080a48,
+ 0x9c22a801,
+ 0xb7a0a60d,
+ 0x9e5d4314,
+ 0x058b0307,
+ 0x0da0c0d8,
+ 0x09b2c00e,
+ 0x99affcb4,
+ 0x4314b760,
+ 0x0992c00a,
+ 0x0dc0c0ca,
+ 0x99a7fcb4,
+ 0x4314b760,
+ 0x0982c002,
+ 0x0da0c0d6,
+ 0x999ffcb4,
+ 0x4314b760,
+ 0xc0d409d2,
+ 0xfcb40dd0,
+ 0xb7609998,
+ 0xc04c4314,
+ 0xc0e609f2,
+ 0xfcb40dd0,
+ 0x058b9990,
+ 0xc0c6018b,
+ 0xc0040d80,
+ 0xfcb40d32,
+ 0x9e6899b3,
+ 0xb7869e69,
+ 0x097f4fac,
+ 0xc001040b,
+ 0x9e6c7520,
+ 0x0c38c0c8,
+ 0x0a64c0da,
+ 0x08a0c0de,
+ 0xa101d208,
+ 0xb5800a22,
+ 0xb7604029,
+ 0x9e6b4050,
+ 0x1a48d00d,
+ 0x050b0c82,
+ 0x443ab421,
+ 0xc0da9e69,
+ 0xc0da09e8,
+ 0x9e4d0d60,
+ 0xa161d008,
+ 0x018d9d0a,
+ 0x7332b56c,
+ 0xa0c2d010,
+ 0x76b4b526,
+ 0xc12a040b,
+ 0xc01008e0,
+ 0xd0080b02,
+ 0x9e44a961,
+ 0xc0da9e68,
+ 0xd2080a6c,
+ 0xd008a101,
+ 0xb74daa61,
+ 0xd0186fb2,
+ 0x7500a9c2,
+ 0xd0020a02,
+ 0xc1010a42,
+ 0xb58d0244,
+ 0xdffc6faa,
+ 0xd0127e7e,
+ 0xb7400d38,
+ 0xd022430c,
+ 0x020505a2,
+ 0x0a48c0c8,
+ 0xaa01d208,
+ 0xd0109e58,
+ 0xc0c87008,
+ 0xd0080944,
+ 0xd012a941,
+ 0x0c847104,
+ 0xb59a7644,
+ 0xb5c0462b,
+ 0xb5a0402b,
+ 0x0984412b,
+ 0x08880c04,
+ 0x9124fffe,
+ 0x430cb720,
+ 0x652bb78c,
+ 0xc000752a,
+ 0x9e699112,
+ 0xb58d0a16,
+ 0xc0007b2b,
+ 0x9e6890a0,
+ 0x7b32b52d,
+ 0x0d029e69,
+ 0x50b5b766,
+ 0x4e2db766,
+ 0x9883fe94,
+ 0x287ccffe,
+ 0xc0100a7a,
+ 0x1a747008,
+ 0x7008c012,
+ 0xb50e9e69,
+ 0xfe745bab,
+ 0x9e6899e4,
+ 0xb78c9e6a,
+ 0xc0dc65aa,
+ 0x75000960,
+ 0xd0010a02,
+ 0xb5800a42,
+ 0xc006448c,
+ 0xd0080a02,
+ 0xb720a241,
+ 0xb781430c,
+ 0x75045ead,
+ 0x93c4c000,
+ 0x5f35b761,
+ 0x4e2cb7a6,
+ 0x0992c006,
+ 0xd1320d02,
+ 0xb5a761d7,
+ 0xc0024e2c,
+ 0xfe340dc8,
+ 0x9e699810,
+ 0xb746000a,
+ 0xb5074e2d,
+ 0x02244e2d,
+ 0x7100d010,
+ 0x7088c812,
+ 0x4e2db547,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0xfe5d8c60,
+ 0xa60d9020,
+ 0x430cb7a0,
+ 0x07079e9d,
+ 0x030b008b,
+ 0x712bb78c,
+ 0x0b40c0da,
+ 0xa941d208,
+ 0x31c6e000,
+ 0xd0020a02,
+ 0x74800a42,
+ 0x6db3b54c,
+ 0x712bb58c,
+ 0x71b3b5ac,
+ 0x90e2c000,
+ 0x99e9fe74,
+ 0xd2080a02,
+ 0x7786a241,
+ 0x92d2c001,
+ 0x5d08d326,
+ 0xe0508520,
+ 0x94298044,
+ 0x94059428,
+ 0x0d829401,
+ 0x90e0c001,
+ 0xb78c008b,
+ 0xb5ac5b2b,
+ 0x75007133,
+ 0x9162c000,
+ 0x430cb720,
+ 0x67adb784,
+ 0xc0007502,
+ 0xc0009204,
+ 0x9ea99100,
+ 0x684bb78d,
+ 0xc0007500,
+ 0x008b9104,
+ 0x632bb78d,
+ 0xb58d1a04,
+ 0xff34632b,
+ 0x0d869b39,
+ 0x90a0c000,
+ 0x98aaff54,
+ 0xff340d8a,
+ 0x9e8599c1,
+ 0x430cb720,
+ 0x02039e6a,
+ 0x0a48c0c8,
+ 0xaa01d208,
+ 0x7088d010,
+ 0xc0c80103,
+ 0xd0080944,
+ 0xcc12a941,
+ 0x9ea97104,
+ 0xb74c0207,
+ 0xcffe694b,
+ 0x9e9a2a7c,
+ 0xb58d7480,
+ 0xb58d7ccb,
+ 0xc0017c4b,
+ 0x020b91a2,
+ 0x0a1cc0ca,
+ 0xaa01d208,
+ 0xc0017299,
+ 0x020390bc,
+ 0x0a1cc0a4,
+ 0xa901d208,
+ 0xc0007482,
+ 0xb78a93b4,
+ 0x71044629,
+ 0x9304c000,
+ 0x52abb78a,
+ 0xc0ca010b,
+ 0x0a040918,
+ 0x52abb58a,
+ 0x430cb720,
+ 0xa941d008,
+ 0x52abb78a,
+ 0x4002ba24,
+ 0xc0007104,
+ 0x1205911c,
+ 0x9060c000,
+ 0xb58a0a02,
+ 0x9e5052ab,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x8420a61d,
+ 0x4314b7a0,
+ 0x00b6a19d,
+ 0xb78c040b,
+ 0xc10171ca,
+ 0x9e5b009a,
+ 0xb7cd7500,
+ 0xb76c7c2b,
+ 0xb72c6dd2,
+ 0xc0005a4a,
+ 0xb7869184,
+ 0x7502714c,
+ 0x91b2c000,
+ 0x634ab78d,
+ 0xc0007502,
+ 0x048b9112,
+ 0x564db786,
+ 0xc0087504,
+ 0x040b92a4,
+ 0x564cb786,
+ 0xc0007502,
+ 0x750490f4,
+ 0x9184c008,
+ 0x9360c005,
+ 0xc00074c0,
+ 0x9e6991c4,
+ 0x5dadb786,
+ 0x7c2bb7cc,
+ 0xd00e7500,
+ 0xc0070a62,
+ 0xc007935c,
+ 0x048b9360,
+ 0x704bb78c,
+ 0xc0007500,
+ 0xb7cd90c2,
+ 0xc0014753,
+ 0x040b93e0,
+ 0x5dccb786,
+ 0xd00e7500,
+ 0xc0010a62,
+ 0xba1b929c,
+ 0xff544003,
+ 0xb72099b1,
+ 0x9e82430c,
+ 0x5e2db781,
+ 0xc0007508,
+ 0xb7819344,
+ 0x1a045ead,
+ 0xc0007502,
+ 0xcffe9292,
+ 0xd0d12d7c,
+ 0x9e510964,
+ 0x7044c810,
+ 0xb78d048b,
+ 0xba247b4b,
+ 0x12684002,
+ 0x7088c812,
+ 0xc0009e96,
+ 0x040b9220,
+ 0x7b4ab74d,
+ 0x2d7ccffe,
+ 0xba129e51,
+ 0x02644002,
+ 0x7048d010,
+ 0xd0121164,
+ 0x9ea67104,
+ 0x2f7ccffe,
+ 0x430cb720,
+ 0xb78c048b,
+ 0xb741654b,
+ 0x0b9a5e2d,
+ 0xd009752a,
+ 0x74881bf6,
+ 0x92c4c000,
+ 0x5eadb781,
+ 0x75021a04,
+ 0x9212c000,
+ 0x7c4bb78c,
+ 0x03fed020,
+ 0xd019753c,
+ 0xb374027e,
+ 0xd0194434,
+ 0x0389024e,
+ 0x9060c000,
+ 0x040b9ebf,
+ 0x4e4cb7a7,
+ 0xc0017540,
+ 0xb72c9222,
+ 0xa09a5a52,
+ 0x6d4ab76c,
+ 0xc2009e4c,
+ 0x70c85a0d,
+ 0x90d4c001,
+ 0x402db743,
+ 0x5a0cc100,
+ 0xc000710a,
+ 0xd1a493f4,
+ 0xc1015985,
+ 0xc05405b4,
+ 0xa919994e,
+ 0x700a6005,
+ 0x9286c000,
+ 0xb74c040b,
+ 0xd0117c4a,
+ 0x9ea60e68,
+ 0xcffe7184,
+ 0xc0002f7c,
+ 0xc00290d2,
+ 0xc0000fca,
+ 0x126490a0,
+ 0x0bc4d0d2,
+ 0x9e739e69,
+ 0x7c2bb74c,
+ 0x0675d110,
+ 0x70c8d010,
+ 0xc002112e,
+ 0x040b9180,
+ 0x5e4cb781,
+ 0xc0027508,
+ 0xb78d9162,
+ 0xb7465a4a,
+ 0xb74c6dcc,
+ 0xba246d52,
+ 0xe0004002,
+ 0xc10103c4,
+ 0xc0011194,
+ 0x74c093a2,
+ 0x9342c001,
+ 0x5dd4b766,
+ 0xcc120f06,
+ 0xc18072cc,
+ 0xc1015a05,
+ 0xc05405b8,
+ 0x9e82990a,
+ 0x4002ba2e,
+ 0x018b9ebb,
+ 0x9916fe54,
+ 0x6a67d011,
+ 0xc2000208,
+ 0x9ea25a0b,
+ 0x2d7ccffe,
+ 0xc000729d,
+ 0x700c91e4,
+ 0x9182c000,
+ 0x0a7ecffe,
+ 0xb346700c,
+ 0xd110444a,
+ 0x9ea2066d,
+ 0x2d7ccffe,
+ 0x9e539e69,
+ 0x7c2bb74c,
+ 0x0a26d011,
+ 0x70c8d010,
+ 0xd012190c,
+ 0x03097104,
+ 0x2b7ccffe,
+ 0x430cb760,
+ 0xc0c80207,
+ 0xd2080a48,
+ 0xd010aa01,
+ 0x01077188,
+ 0x0944c0c8,
+ 0xa941d008,
+ 0x7104d012,
+ 0x040baa9d,
+ 0x64ccb746,
+ 0x00bad020,
+ 0xb74c0009,
+ 0xcffe4851,
+ 0x00da287c,
+ 0x009ac101,
+ 0x76800120,
+ 0x64ccb546,
+ 0x7c2bb50d,
+ 0x90e4c000,
+ 0x4949b78c,
+ 0xc0007500,
+ 0x020790e2,
+ 0x0a48c0c8,
+ 0xa801d208,
+ 0x7deeb79f,
+ 0x7e6eb7bf,
+ 0x7eeeb7df,
+ 0x7f6eb7ff,
+ 0x8c20c002,
+ 0xa61d9c22,
+ 0x4314b7c0,
+ 0x07850287,
+ 0x9e769e71,
+ 0x712bb78c,
+ 0x0b40c0da,
+ 0x04a4d010,
+ 0xa941d208,
+ 0x009cc101,
+ 0x3246e000,
+ 0x742bb7ec,
+ 0x9e710a02,
+ 0x0a42d002,
+ 0xb58c7480,
+ 0x0687712b,
+ 0x71abb56c,
+ 0x6db3b54c,
+ 0x90e2c000,
+ 0x9bc1fe54,
+ 0xd2080a02,
+ 0x7746a241,
+ 0x92d2c001,
+ 0x5d08d2a6,
+ 0xe0508520,
+ 0x94298044,
+ 0x94059428,
+ 0x0d829401,
+ 0x90e0c001,
+ 0xb78c9e71,
+ 0xb5ac5b2b,
+ 0x7500712b,
+ 0x9162c000,
+ 0x430cb720,
+ 0x67adb784,
+ 0xc0007502,
+ 0xc0009204,
+ 0x9e719100,
+ 0x682bb78d,
+ 0xc0007500,
+ 0x9e719104,
+ 0x632bb78d,
+ 0xb58d1a04,
+ 0xff34632b,
+ 0x0d869911,
+ 0x90a0c000,
+ 0x9a82ff34,
+ 0xff140d8a,
+ 0x02819b99,
+ 0x4314b7a0,
+ 0xc0c89e6c,
+ 0xd2080a48,
+ 0xd010aa01,
+ 0x9e6a7148,
+ 0x0944c0c8,
+ 0xa941d008,
+ 0x7104d012,
+ 0xba349e69,
+ 0xb7464002,
+ 0x7482712d,
+ 0x9172c003,
+ 0x632bb78d,
+ 0xc0037502,
+ 0xc00390d2,
+ 0x9e6992c0,
+ 0x6cadb766,
+ 0x90a0c000,
+ 0xb7669e69,
+ 0x9e696bad,
+ 0x5e2db781,
+ 0xc0007502,
+ 0x76809124,
+ 0x90c4c000,
+ 0x50adb746,
+ 0x9060c000,
+ 0x11340902,
+ 0xcc120a02,
+ 0x9e697088,
+ 0xb78c9e9b,
+ 0xb7466dab,
+ 0xb766562d,
+ 0xc2004fad,
+ 0xc1015a08,
+ 0x748200ca,
+ 0x5b35b746,
+ 0x9172c002,
+ 0xb7869e69,
+ 0x75005dad,
+ 0x92dac000,
+ 0x4e2db746,
+ 0xc0007295,
+ 0x7540923a,
+ 0x90e2c000,
+ 0x5a07c180,
+ 0xc00072d9,
+ 0xc10090fc,
+ 0x71485a07,
+ 0x9094c000,
+ 0x4002ba37,
+ 0xb78c9e69,
+ 0x752a652b,
+ 0x9152c000,
+ 0x0a78d011,
+ 0x7188d010,
+ 0x1978d011,
+ 0x9160c000,
+ 0xb74d9e69,
+ 0xba127b2b,
+ 0x02744002,
+ 0x7188d010,
+ 0xd0121174,
+ 0xba347104,
+ 0xc0004002,
+ 0x9e699280,
+ 0xb7860902,
+ 0xb7466c2d,
+ 0xc1016bb5,
+ 0xd4121244,
+ 0xfe347088,
+ 0xf0129aab,
+ 0xfffc0900,
+ 0xfffc9204,
+ 0x9e6c9260,
+ 0x0a48c0c8,
+ 0xaa81d208,
+ 0x718ad410,
+ 0xaa1dd3e8,
+ 0x7148d412,
+ 0x4002ba2d,
+ 0x058f018b,
+ 0x9be8fe34,
+ 0x0a029e71,
+ 0x6dadb586,
+ 0x5a2bb58d,
+ 0x7c2bb50c,
+ 0x430cb740,
+ 0x2afccffe,
+ 0xc0c80205,
+ 0xd2080a48,
+ 0xd410aa01,
+ 0xc0c87148,
+ 0xd0080944,
+ 0xd412aa41,
+ 0x000b7148,
+ 0x7e6eb79f,
+ 0x7eeeb7bf,
+ 0x7f6eb7df,
+ 0x7feeb7ff,
+ 0x8c00c002,
+ 0xa61d9c22,
+ 0xb7808460,
+ 0x09024d8c,
+ 0x4038b960,
+ 0xa101d208,
+ 0xa105d208,
+ 0x0a20c146,
+ 0x9341ffff,
+ 0x0882c021,
+ 0x08d0c006,
+ 0xaa619d1d,
+ 0x2a7cc00e,
+ 0xc0007506,
+ 0x0d8a90f2,
+ 0x9bd0fcf4,
+ 0x9240ffff,
+ 0x0902c021,
+ 0x0940c006,
+ 0xab629d2d,
+ 0xabe29d2d,
+ 0xa9629d2d,
+ 0x9d2da11a,
+ 0x9e74a9e2,
+ 0x478cb720,
+ 0x2a00c7f0,
+ 0x5aa1d224,
+ 0xd011a196,
+ 0x6a520e54,
+ 0xd0310242,
+ 0xf2080b48,
+ 0x750caa41,
+ 0x9204c000,
+ 0x6e59d051,
+ 0x498cb740,
+ 0x0dc2c809,
+ 0x02440d02,
+ 0x0db0c5c2,
+ 0xa10ef208,
+ 0xa182f208,
+ 0x7f80c010,
+ 0x9322c003,
+ 0xfcb4058b,
+ 0xa91a9a23,
+ 0xe0719e7b,
+ 0xd1f28d00,
+ 0xfd342dee,
+ 0xb79f98d2,
+ 0xc01c7468,
+ 0xc000753e,
+ 0xfcf490c4,
+ 0xb51f9b9e,
+ 0xb77f7468,
+ 0x0d067470,
+ 0xc00e55cc,
+ 0xfd142dfc,
+ 0x9e6c9bb1,
+ 0x4c8cb720,
+ 0x6a22c146,
+ 0x7468b77f,
+ 0x008b02c2,
+ 0x5aadb723,
+ 0x9ead058b,
+ 0xc06ca091,
+ 0x008b0ee0,
+ 0x5ab5b5c3,
+ 0xaba1f210,
+ 0xa3a2f210,
+ 0xc300a919,
+ 0xb7c35d3d,
+ 0xb5435bb5,
+ 0xaa155bad,
+ 0xc06e030b,
+ 0x9e5a0b00,
+ 0xabc2f208,
+ 0xf2082d04,
+ 0xfd14a241,
+ 0x008b9867,
+ 0x7470b77f,
+ 0xa3a1f210,
+ 0x5bb5b5c3,
+ 0xa3c2f208,
+ 0x0d06aa11,
+ 0xc00e55cc,
+ 0xb5832dfc,
+ 0xfd345aad,
+ 0xb7409802,
+ 0xb740490c,
+ 0xf0084d14,
+ 0x7508aa41,
+ 0x9142c000,
+ 0xc0007504,
+ 0xd0109144,
+ 0x7500aa41,
+ 0x90a2c000,
+ 0xf0080886,
+ 0xb760a0c1,
+ 0x09504894,
+ 0x0d20c146,
+ 0xfffa7097,
+ 0xffff9162,
+ 0x058b9100,
+ 0x998ffcb4,
+ 0xc0017400,
+ 0x9e6c9044,
+ 0x4c8cb720,
+ 0x6a22c146,
+ 0x4c8cb740,
+ 0xc06c02c2,
+ 0xd2080a80,
+ 0xa916a9a2,
+ 0xd1a2099a,
+ 0x0d845c90,
+ 0x00940098,
+ 0x4db5b543,
+ 0x4c35b5c3,
+ 0x4cb5b5e3,
+ 0xb583aa19,
+ 0xc0344d2d,
+ 0xd2089adb,
+ 0xf208a021,
+ 0x7508aa41,
+ 0x9124c000,
+ 0xf2080886,
+ 0xc000a0c1,
+ 0xfcf49080,
+ 0x058b9af3,
+ 0x995dfcb4,
+ 0x0d927400,
+ 0x9324ffff,
+ 0x91c0fff8,
+ 0xb740a60d,
+ 0x0205430c,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc0037500,
+ 0x9e9292a4,
+ 0x0d38c0a2,
+ 0xa941d010,
+ 0xc0007480,
+ 0x0a0492e4,
+ 0xa241d010,
+ 0x430cb780,
+ 0xc0a29e93,
+ 0xd2080a40,
+ 0xfd54a101,
+ 0xb7809a5f,
+ 0xc0a4430c,
+ 0xd2080a10,
+ 0x7500aa01,
+ 0x9304c002,
+ 0x430cb720,
+ 0xc09c0203,
+ 0xd2080a18,
+ 0x7500aa01,
+ 0x91c2c002,
+ 0x4729b7aa,
+ 0x4814b7c0,
+ 0x9220c001,
+ 0xaa2df210,
+ 0xc000750c,
+ 0x9eaa93e4,
+ 0x55e80d86,
+ 0x2dfcc00e,
+ 0x9ad8fd14,
+ 0x478cb720,
+ 0x4508b780,
+ 0x29ded3f2,
+ 0xb58100e2,
+ 0xfd9468a9,
+ 0x0a029a33,
+ 0xa22df210,
+ 0x0a46c809,
+ 0x0a30c656,
+ 0xa221f210,
+ 0x90c0c001,
+ 0x46a9b78a,
+ 0x02857148,
+ 0x9124c000,
+ 0xfcf40d92,
+ 0xb7209a87,
+ 0xb7aa430c,
+ 0xb7204729,
+ 0xba2d430c,
+ 0xd0514000,
+ 0x02036b59,
+ 0xc0a20103,
+ 0xc0a40a40,
+ 0xd2080920,
+ 0xd008a902,
+ 0xd120aa41,
+ 0xd01102ed,
+ 0x72991952,
+ 0x9386fffd,
+ 0x7eeeb79f,
+ 0x7f6eb7bf,
+ 0x7feeb7df,
+ 0x9c228c60,
+ 0x8440a61d,
+ 0x430cb780,
+ 0x0a10c0a4,
+ 0xaa01d208,
+ 0xc00a7500,
+ 0xfd9490a4,
+ 0xb7209b00,
+ 0x0203430c,
+ 0x0a10c0a4,
+ 0xa901d208,
+ 0xc0097480,
+ 0xc0069324,
+ 0xd2081a78,
+ 0x7500aa01,
+ 0x91a2c005,
+ 0xc0a40203,
+ 0xb7ca0a1c,
+ 0xd2084629,
+ 0x9e97ab81,
+ 0x1962d011,
+ 0xd020627d,
+ 0x068f0328,
+ 0xba26110f,
+ 0xa1194000,
+ 0xc002a211,
+ 0xb78a9000,
+ 0xc10146a9,
+ 0x02d8029a,
+ 0x99bcc034,
+ 0x0e72d011,
+ 0x0aa0c0a2,
+ 0x75889ea7,
+ 0x4001ba3f,
+ 0xa031d208,
+ 0x9324c000,
+ 0xba249e6c,
+ 0x75044000,
+ 0x9264c000,
+ 0xc00075c4,
+ 0xb7209204,
+ 0xb78a430c,
+ 0xd11046a9,
+ 0x02420659,
+ 0x0a20c0a2,
+ 0xa911d208,
+ 0xd2080904,
+ 0xb720a111,
+ 0xb78a430c,
+ 0xf11046a9,
+ 0x0e840659,
+ 0x9ea20242,
+ 0xc0a20109,
+ 0xc0a20d20,
+ 0xc000093c,
+ 0xd01090c2,
+ 0xd008aa51,
+ 0xa911a245,
+ 0x058da91a,
+ 0x73d5018d,
+ 0xb7200764,
+ 0xfffd430c,
+ 0xb78a9344,
+ 0x09024729,
+ 0x02189e93,
+ 0x0a40c0a2,
+ 0x9c8bc810,
+ 0xd2080005,
+ 0xc002a105,
+ 0xb7209000,
+ 0xb78a430c,
+ 0xd11046a9,
+ 0xc2000639,
+ 0x00c25a08,
+ 0x46adb505,
+ 0x430cb720,
+ 0x46a9b78a,
+ 0x0096c101,
+ 0xc0a20098,
+ 0xb76008c0,
+ 0xc00040a9,
+ 0xb78b91a0,
+ 0xb742534a,
+ 0xb7455b2b,
+ 0x624446d5,
+ 0x0244c101,
+ 0x46cdb585,
+ 0x430cb720,
+ 0xd0120136,
+ 0xb78a0932,
+ 0xd02046a9,
+ 0xd1100022,
+ 0xc2000639,
+ 0x02425908,
+ 0x0a20c0a2,
+ 0xaa11d208,
+ 0x00a2d020,
+ 0x9e537106,
+ 0x29fcc00e,
+ 0x93b2fffe,
+ 0xfffe0d84,
+ 0x0d829041,
+ 0x992cfd54,
+ 0x430cb720,
+ 0xc0a40203,
+ 0xd2080a10,
+ 0x7680a902,
+ 0x9344c003,
+ 0x1a78c006,
+ 0xaa01d208,
+ 0xc0037500,
+ 0x020390e2,
+ 0x0a1cc0a4,
+ 0xaa01d208,
+ 0x4629b74a,
+ 0x4731b7aa,
+ 0x71040705,
+ 0x4001ba2d,
+ 0x0d7ecfff,
+ 0xcffe0189,
+ 0xd0510d30,
+ 0xb3236e59,
+ 0x65554432,
+ 0x478cb740,
+ 0x0528c101,
+ 0x01449e6e,
+ 0x1fa2d031,
+ 0x29fcc00e,
+ 0x0aa2d351,
+ 0x0fa6d012,
+ 0xc000a195,
+ 0xfd149360,
+ 0xb780998f,
+ 0xd3f24508,
+ 0xd20829ee,
+ 0xfd94a221,
+ 0xc80998ed,
+ 0xc6560d46,
+ 0x9e7a0d30,
+ 0xa166e250,
+ 0x1a62d011,
+ 0xe0500d02,
+ 0xba34a146,
+ 0x1ad04000,
+ 0x430cb720,
+ 0x165cd010,
+ 0xd0240906,
+ 0xb72a51d0,
+ 0xa9154729,
+ 0x2dfcc00e,
+ 0x10940f04,
+ 0xfffe7102,
+ 0xc000931a,
+ 0xfcf49080,
+ 0xb7809931,
+ 0x0d92430c,
+ 0xc0a29ea2,
+ 0xc0a40a3c,
+ 0xd2080d1c,
+ 0xd010a901,
+ 0x7088aa41,
+ 0x9226ffff,
+ 0x7d6eb79f,
+ 0x7deeb7bf,
+ 0x7e6eb7df,
+ 0x7eeeb7ff,
+ 0x8c40c002,
+ 0x91a0fddd,
+ 0x7d6eb79f,
+ 0x7deeb7bf,
+ 0x7e6eb7df,
+ 0x7eeeb7ff,
+ 0x8c40c002,
+ 0xa61d9c22,
+ 0xb7c08460,
+ 0xa1954314,
+ 0xaae1d808,
+ 0xb706040d,
+ 0x9e715654,
+ 0xa012048d,
+ 0x6dabb72c,
+ 0x7540068d,
+ 0xb72da099,
+ 0x9e5e5453,
+ 0x0ec8c0c8,
+ 0x7d74b53f,
+ 0x9102c000,
+ 0xaaa2d210,
+ 0x7df4b5bf,
+ 0x9380c000,
+ 0xb76c040d,
+ 0xb7866d4a,
+ 0xb75f69cc,
+ 0xd1a47d6c,
+ 0x59085985,
+ 0x05b8c101,
+ 0x05b4c101,
+ 0x9867c034,
+ 0xaa21d210,
+ 0x7008c010,
+ 0x700ac012,
+ 0x287ccffe,
+ 0x7decb51f,
+ 0xb5c69e71,
+ 0xb7205aad,
+ 0xb781430c,
+ 0x75025e2d,
+ 0x90c4c000,
+ 0x9b5bfe14,
+ 0x90e0c000,
+ 0xc0007508,
+ 0xfe3490e4,
+ 0x9e839912,
+ 0x90a0c000,
+ 0xb766048d,
+ 0x040d4e55,
+ 0x5accb746,
+ 0x6accb786,
+ 0x6b54b746,
+ 0x6bccb766,
+ 0xc1011244,
+ 0x01261524,
+ 0x6accb586,
+ 0x6b54b546,
+ 0x1126c101,
+ 0x6c4cb566,
+ 0x0a02c201,
+ 0x7088c810,
+ 0x0a02ce01,
+ 0x7088c812,
+ 0x6bccb546,
+ 0xb740a895,
+ 0x7480402b,
+ 0x92a4c009,
+ 0x430cb7e0,
+ 0xb7869eb9,
+ 0x7502564d,
+ 0x9024c005,
+ 0x6e4bb78c,
+ 0x6e4bb54c,
+ 0x7d3edffc,
+ 0x6f4bb58c,
+ 0x9144c004,
+ 0x674bb78c,
+ 0xc0047500,
+ 0xb76690a4,
+ 0xb78c5acd,
+ 0xd1a4744b,
+ 0xc8025949,
+ 0x0d047280,
+ 0x4002ba24,
+ 0x094cc002,
+ 0x7104d010,
+ 0x194cc002,
+ 0x7104d012,
+ 0x5d0cb740,
+ 0x4002ba24,
+ 0xca200248,
+ 0xba12aa05,
+ 0x9e524001,
+ 0x7053b76c,
+ 0xba2451e9,
+ 0x62474002,
+ 0xc1010936,
+ 0x76c01124,
+ 0x5389d224,
+ 0x9084c001,
+ 0x6fcbb78c,
+ 0xc0027506,
+ 0xb74692d4,
+ 0xb7666fd5,
+ 0xd051704d,
+ 0x70c86e27,
+ 0x90f4c000,
+ 0x5c4db786,
+ 0xc00073d9,
+ 0x9eb89232,
+ 0x5e05d3a2,
+ 0x5c4cb746,
+ 0xc0027104,
+ 0xd0109014,
+ 0xc1010624,
+ 0x70c80244,
+ 0x9334c001,
+ 0x430cb7c0,
+ 0xc0020a06,
+ 0x008d09b2,
+ 0x6eabb58c,
+ 0x6e2bb58c,
+ 0x430cb7a0,
+ 0xc0de9eab,
+ 0xc0dc0aa0,
+ 0xd2080d80,
+ 0xfc74aaa2,
+ 0x9eb199a0,
+ 0xb7869eb3,
+ 0xb74c5acd,
+ 0xd208744b,
+ 0xb586a2a2,
+ 0x0a02594d,
+ 0x6fcbb58c,
+ 0x734bb54c,
+ 0x0d90c0da,
+ 0xfc7409a2,
+ 0xb720998c,
+ 0xb781430c,
+ 0x75045e2d,
+ 0x9124c000,
+ 0x0a049eb0,
+ 0x7b4ab58d,
+ 0x9060c000,
+ 0x008f0f82,
+ 0x5c2db786,
+ 0x6d7bd012,
+ 0x01280148,
+ 0x0524c101,
+ 0xb5465d0d,
+ 0xa9195c35,
+ 0x9e9b0982,
+ 0xd1100224,
+ 0x00870669,
+ 0xc0ce9ea2,
+ 0xc0000d48,
+ 0xd8109120,
+ 0xd831aa41,
+ 0xba24a0c5,
+ 0x01b84002,
+ 0x040da89a,
+ 0x6fcab74d,
+ 0x0632d010,
+ 0x71040d84,
+ 0x9238ffff,
+ 0xc0007640,
+ 0x05839142,
+ 0xcffe1d84,
+ 0xba1b2dfc,
+ 0xfe144002,
+ 0x040d9be7,
+ 0x6e4ab78c,
+ 0xc0037500,
+ 0xa8919002,
+ 0xc0027442,
+ 0xfe149384,
+ 0x74009a39,
+ 0x430cb720,
+ 0x90c2c000,
+ 0x6cadb746,
+ 0x9080c000,
+ 0x6badb746,
+ 0x5e2db781,
+ 0xc0007502,
+ 0x74009124,
+ 0x90c4c000,
+ 0x50adb786,
+ 0x9060c000,
+ 0x12440a02,
+ 0xd0120a82,
+ 0x7500710a,
+ 0x93fcc001,
+ 0xb766048d,
+ 0xb7664fcd,
+ 0xb78d6bd5,
+ 0xb746654b,
+ 0xd1204e55,
+ 0xb74d11b7,
+ 0xc101634b,
+ 0xb78d65b9,
+ 0x65b563cb,
+ 0x12449eaa,
+ 0x65b9c101,
+ 0x9845fdf4,
+ 0xb786040d,
+ 0x9e836acc,
+ 0xc0007500,
+ 0xb5a69096,
+ 0x9e716acc,
+ 0x6aadb786,
+ 0x632bb74d,
+ 0x6b35b746,
+ 0x5aadb766,
+ 0x0246c101,
+ 0xc1011904,
+ 0xb5861526,
+ 0xb54d6aad,
+ 0xb546632b,
+ 0xfef46b35,
+ 0xc0009bb3,
+ 0xa89290c0,
+ 0xc0017640,
+ 0x040d90c2,
+ 0xb77fa891,
+ 0xb7867dec,
+ 0xb74674cc,
+ 0x7442754c,
+ 0x09040246,
+ 0x74ccb586,
+ 0x754cb546,
+ 0x9142c000,
+ 0xc0007442,
+ 0x74449246,
+ 0x9184c007,
+ 0x9300c004,
+ 0xb78d040d,
+ 0xb74c684a,
+ 0x0a046fca,
+ 0xb58d0904,
+ 0xb54c684a,
+ 0xaa196fca,
+ 0x048d9e71,
+ 0x67adb746,
+ 0x6735b746,
+ 0xc2000a04,
+ 0xc1015a0c,
+ 0xb54600cc,
+ 0xb54664ad,
+ 0xaa196435,
+ 0x694db746,
+ 0xc200040d,
+ 0xd1205a08,
+ 0x0a0200cd,
+ 0x684db546,
+ 0x694cb586,
+ 0x674cb586,
+ 0x67ccb586,
+ 0xb780a895,
+ 0x7500402b,
+ 0x9344c002,
+ 0x5accb746,
+ 0xb546a999,
+ 0xb79f5b4d,
+ 0x00b67b6a,
+ 0x009cc101,
+ 0x742bb58c,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0027504,
+ 0xb76690e4,
+ 0x74c05dcc,
+ 0x90c4c000,
+ 0x0d82c010,
+ 0x9080c001,
+ 0xc1807086,
+ 0xc0005a07,
+ 0xd02091bc,
+ 0x5da811a6,
+ 0x05b8c101,
+ 0x9a7ac014,
+ 0x0800c040,
+ 0x9180c000,
+ 0x11b4d020,
+ 0xc1015da8,
+ 0xc01405b8,
+ 0xc0409a6f,
+ 0x10400a02,
+ 0x0a02c100,
+ 0x7008d010,
+ 0xc4120902,
+ 0x9e8b7104,
+ 0xb71fa89a,
+ 0x05127df4,
+ 0xc0d0052c,
+ 0xd8100d60,
+ 0x9e49a941,
+ 0x65b0d031,
+ 0x02420224,
+ 0x5a08c200,
+ 0xc1010904,
+ 0x290400cc,
+ 0x612db566,
+ 0xa141d810,
+ 0x5f35b566,
+ 0x7640a892,
+ 0x9384c002,
+ 0xb78d040d,
+ 0x0a0468ca,
+ 0x68cab58d,
+ 0x9280c002,
+ 0x7d6cb73f,
+ 0xc0007440,
+ 0xb7209282,
+ 0xb781430c,
+ 0x75025e2d,
+ 0x91a4c000,
+ 0xb746048d,
+ 0xc1005acd,
+ 0x02445a7d,
+ 0x5a07c200,
+ 0xb5860244,
+ 0x040d5acd,
+ 0x7b6ab73f,
+ 0x5ad4b7a6,
+ 0x734ab52c,
+ 0x5954b5a6,
+ 0x430cb7a0,
+ 0xb7819ea9,
+ 0x75025e4d,
+ 0x92a4c000,
+ 0x9904fe14,
+ 0xc0007400,
+ 0x9ea89204,
+ 0x64cab78d,
+ 0x7510c004,
+ 0x9134c000,
+ 0xb5a69e71,
+ 0xb73f5b35,
+ 0xb52c7b72,
+ 0x040d7433,
+ 0x714cb786,
+ 0xc0007502,
+ 0xb7669264,
+ 0xb7665ad4,
+ 0xc0144e4c,
+ 0x741099f7,
+ 0x915cc000,
+ 0x1980d031,
+ 0x5987a99a,
+ 0x4002ba1b,
+ 0x9a7efe14,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0017508,
+ 0x9e719302,
+ 0x712db786,
+ 0xc0017502,
+ 0xb7869244,
+ 0x7504562d,
+ 0x91a2c001,
+ 0x542bb78d,
+ 0xc0007500,
+ 0xa89992a4,
+ 0xb746048d,
+ 0x58885acd,
+ 0x009cc101,
+ 0x5cadb726,
+ 0x02125908,
+ 0x70880242,
+ 0x90d6c000,
+ 0x09ffa99a,
+ 0x9260c000,
+ 0x040da899,
+ 0x5accb746,
+ 0xc1015888,
+ 0xb726009c,
+ 0x01245cad,
+ 0x02420212,
+ 0xc0007088,
+ 0xa99a90dc,
+ 0xfe140986,
+ 0xa8959a41,
+ 0xb5800a02,
+ 0xb58040ab,
+ 0xb7a040ad,
+ 0x9ea9430c,
+ 0x5e55b7a1,
+ 0xc0037744,
+ 0x774893a2,
+ 0x9342c003,
+ 0x75cdb786,
+ 0xc0017500,
+ 0xfe149284,
+ 0x74009887,
+ 0x91e2c000,
+ 0xb78e9ea8,
+ 0xb7445bca,
+ 0x708867cc,
+ 0xd0040a02,
+ 0xb5860a42,
+ 0xc00075cc,
+ 0x77429380,
+ 0x90a2c000,
+ 0xc0007748,
+ 0x008b9244,
+ 0x67adb784,
+ 0x4e2db746,
+ 0x50b5b746,
+ 0x71156245,
+ 0xd00b0a02,
+ 0xb5860a42,
+ 0xc00075ad,
+ 0x9ea990c0,
+ 0xb5860a06,
+ 0x9ea875cd,
+ 0x75ccb786,
+ 0xc0027500,
+ 0xb7209062,
+ 0xb781430c,
+ 0x75025e2d,
+ 0x91a4c008,
+ 0x4e35b746,
+ 0x4fadb746,
+ 0x6e2fd011,
+ 0xc0087088,
+ 0xd0119096,
+ 0x76800e2e,
+ 0x4458b342,
+ 0x5d0fd122,
+ 0x9140c000,
+ 0x5b2bb78c,
+ 0xc0007500,
+ 0x02249082,
+ 0x59070144,
+ 0x50adb786,
+ 0x5a07c200,
+ 0x7088c810,
+ 0x6badb786,
+ 0xc0007104,
+ 0xa89590d6,
+ 0xb5801228,
+ 0xa89640ad,
+ 0x40cdb740,
+ 0xc0007480,
+ 0x0a0691a2,
+ 0x40cbb580,
+ 0xd0121a04,
+ 0x0a7c7088,
+ 0x5a17c200,
+ 0x40cdb580,
+ 0x0902a916,
+ 0x4314b7c0,
+ 0xa141d810,
+ 0x4314b7a0,
+ 0xb721040b,
+ 0xb7cc5e54,
+ 0x76486dca,
+ 0x9084c002,
+ 0x5eccb781,
+ 0x75021a04,
+ 0x9032c001,
+ 0x4e4cb7a6,
+ 0xc0069e92,
+ 0xb5a70992,
+ 0xb7204e4c,
+ 0xb721430c,
+ 0x60d35f2d,
+ 0x0994d0d2,
+ 0x9a33fdd4,
+ 0x000a9e69,
+ 0x4e2db746,
+ 0x4e2db507,
+ 0xd0100224,
+ 0xc8127100,
+ 0xb5477088,
+ 0xc0044e2d,
+ 0x048b9100,
+ 0x6ccdb786,
+ 0x4fcdb7a6,
+ 0xfe1412d8,
+ 0x000a9864,
+ 0x0a02c040,
+ 0x7008c012,
+ 0xb786040b,
+ 0xb5074e4c,
+ 0xc2004e4c,
+ 0x70085a07,
+ 0x9228c003,
+ 0x0a06a895,
+ 0x402bb580,
+ 0x9160c003,
+ 0xb746040b,
+ 0xb74c4e54,
+ 0xba2e6e4a,
+ 0xd0104002,
+ 0x74800624,
+ 0x0244c101,
+ 0x5987c200,
+ 0x9144c000,
+ 0x5888c280,
+ 0x009ac101,
+ 0x5b2db786,
+ 0x70c8cc12,
+ 0x76429e69,
+ 0x4fb5b766,
+ 0x6badb786,
+ 0x1539d110,
+ 0x92e4c000,
+ 0x6e2fd011,
+ 0xc00072d9,
+ 0xd0119136,
+ 0x76800e26,
+ 0x4458b342,
+ 0x5d8bd122,
+ 0x0a02c040,
+ 0x7088d012,
+ 0xb587048b,
+ 0xc0004e4d,
+ 0xc1019080,
+ 0x70860124,
+ 0x91d6c000,
+ 0x448cb780,
+ 0xc0007500,
+ 0x748090a2,
+ 0x90dac000,
+ 0x0a06a915,
+ 0xa241d808,
+ 0x992ffe14,
+ 0x10010d02,
+ 0xc0129e53,
+ 0x74007006,
+ 0x92dac000,
+ 0x430cb720,
+ 0x5e2db781,
+ 0xc0007504,
+ 0x9e6c9244,
+ 0xc10100da,
+ 0xc0c8009a,
+ 0xb74c0a48,
+ 0xd208742b,
+ 0x7088aa01,
+ 0x90a8c000,
+ 0xda08aa15,
+ 0xa816a102,
+ 0x404ab780,
+ 0xc0007500,
+ 0x9eb390c2,
+ 0xfe140992,
+ 0x9e7198f3,
+ 0x632bb78c,
+ 0xc0007500,
+ 0xa8969222,
+ 0xb5800a02,
+ 0xc000404b,
+ 0xb7869160,
+ 0xb746562d,
+ 0x75024e2d,
+ 0x9124fff8,
+ 0x9000fff8,
+ 0x7ceeb79f,
+ 0x7d6eb7bf,
+ 0x7deeb7df,
+ 0x7e6eb7ff,
+ 0x8c60c002,
+ 0xa68d9c22,
+ 0x02059e9d,
+ 0x29fccffe,
+ 0x2a7ccffe,
+ 0xd1240705,
+ 0xd0325941,
+ 0x07876046,
+ 0x5dc1c280,
+ 0xd1310285,
+ 0xc1016527,
+ 0xd02260c7,
+ 0x01145e41,
+ 0x704601a8,
+ 0x6426d031,
+ 0x9094c000,
+ 0x0804c001,
+ 0x665cd031,
+ 0x615fd132,
+ 0x5941c180,
+ 0xc1010104,
+ 0xcffe0244,
+ 0xcffe29fc,
+ 0x02442c7c,
+ 0xc10159c0,
+ 0x9ea001b0,
+ 0xb7bf0007,
+ 0xb7df7eee,
+ 0xb7ff7f6e,
+ 0x8c607fee,
+ 0x9e989c22,
+ 0x09020802,
+ 0x9140c000,
+ 0x08029e98,
+ 0x4530d010,
+ 0x72c0cc14,
+ 0x7200c014,
+ 0xc18072c0,
+ 0xe0095d09,
+ 0xf0127204,
+ 0xc0005d04,
+ 0x72c49254,
+ 0x0804d004,
+ 0x15b4d024,
+ 0xd00472c0,
+ 0xd0240802,
+ 0xe00015b0,
+ 0x11813124,
+ 0x442ab330,
+ 0x72c09c22,
+ 0xffff0886,
+ 0xc40293a6,
+ 0xd06572c0,
+ 0xd0652c9e,
+ 0xc8023c9e,
+ 0xd0657200,
+ 0xd0652d2e,
+ 0xe0003d2e,
+ 0x9e531514,
+ 0x5408d01a,
+ 0x50acd01a,
+ 0xd01472c0,
+ 0xd0240002,
+ 0xe08015b0,
+ 0x5c055885,
+ 0x9304ffff,
+ 0x3124e000,
+ 0xb3301181,
+ 0x9c22442a,
+ 0x9d3a9e64,
+ 0x0e46c809,
+ 0x1e06b09b,
+ 0x9dc39ea4,
+ 0xe0009e58,
+ 0x15873400,
+ 0x482ab330,
+ 0x9e649c22,
+ 0x0e46c809,
+ 0x1d66b09b,
+ 0x9e589ea4,
+ 0x00009c22,
+ 0x87c2c809,
+ 0x0e60b060,
+ 0x87c2c809,
+ 0x0b80b060,
+ 0x87c2c809,
+ 0x0ac0b060,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x0000ff00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x809000b0,
+ 0x809198c8,
+ 0x8288f08a,
+ 0x8288f10c,
+ 0x8288f1e0,
+ 0x8288f2a8,
+ 0x8288f2b4,
+ 0x8288f214,
+ 0x8288f208,
+ 0x8288f1e8,
+ 0x8288f2c8,
+ 0x8288f1ec,
+ 0x8288f1f0,
+ 0x8288f2d0,
+ 0x8288f2d8,
+ 0x8288fd1c,
+ 0x8288f998,
+ 0x8288f7e4,
+ 0x8288f7e7,
+ 0x8288fd1e,
+ 0x82899618,
+ 0x82899620,
+ 0x82899624,
+ 0x82899626,
+ 0x8289962a,
+ 0x8289962e,
+ 0x82899632,
+ 0x82899636,
+ 0x8289963a,
+ 0x82899640,
+ 0x828996a0,
+ 0x828996a8,
+ 0x828996ac,
+ 0x828996b0,
+ 0x828996b4,
+ 0x828996bc,
+ 0x828996c7,
+ 0x8289c35b,
+ 0x8289c350,
+ 0x8289c360,
+ 0x8288efa8,
+ 0x8288f012,
+ 0x8288f022,
+ 0x8288f024,
+ 0x8288f02c,
+ 0x8288f03c,
+ 0x8288f04c,
+ 0x8288f14e,
+ 0x8288f1b8,
+ 0x80901728,
+ 0x80901728,
+ 0x809163cc,
+ 0x8091c3f0,
+ 0x8090901c,
+ 0x8091c1d8,
+ 0x8090b7d4,
+ 0x8090abe4,
+ 0x80901cc0,
+ 0x8090515c,
+ 0x809018e0,
+ 0x80901728,
+ 0x80901728,
+ 0x80901728,
+ 0x80901728,
+ 0x80901728,
+ 0x80901728,
+ 0x80906738,
+ 0x80906678,
+ 0x80916060,
+ 0x80906010,
+ 0x80901728,
+ 0x80901728,
+ 0x01030004,
+ 0x01010102,
+ 0x02010101,
+ 0x04000301,
+ 0x01030004,
+ 0x01010102,
+ 0x02010101,
+ 0x04000301,
+ 0x0d080300,
+ 0x00100b06,
+ 0x00584aaf,
+ 0x007f1410,
+ 0x005030c6,
+ 0x007f500c,
+ 0x00027dac,
+ 0x000021f5,
+ 0x000308d1,
+ 0x0000057a,
+ 0x00260019,
+ 0x003f0032,
+ 0x0058004b,
+ 0x00710064,
+ 0x008a007d,
+ 0x00a30096,
+ 0x00bc00af,
+ 0x00d500c8,
+ 0x00ee00e1,
+ 0x010700fa,
+ 0x01070107,
+ 0x01070107,
+ 0x01070107,
+ 0x01070107,
+ 0x01070107,
+ 0x00000107,
+ 0x00200040,
+ 0x001002ab,
+ 0x015500cd,
+ 0x00080249,
+ 0x00cd01c7,
+ 0x0155005d,
+ 0x0249013b,
+ 0x00040111,
+ 0x01c700f1,
+ 0x00cd01af,
+ 0x005d00c3,
+ 0x01550059,
+ 0x013b0029,
+ 0x0249025f,
+ 0x01110235,
+ 0x00020021,
+ 0x00f1001f,
+ 0x01c70075,
+ 0x01af006f,
+ 0x00cd0069,
+ 0x00c30019,
+ 0x005d017d,
+ 0x0059005b,
+ 0x015502b9,
+ 0x002900a7,
+ 0x013b0283,
+ 0x025f0135,
+ 0x02490095,
+ 0x0235023f,
+ 0x0111008b,
+ 0x00210219,
+ 0x00010041,
+ 0x0b060600,
+ 0x0c0b0a06,
+ 0x0a0b0c06,
+ 0x0c0d0c0c,
+ 0x0d0d0c06,
+ 0x0b0b0c0c,
+ 0x0e0d0a0d,
+ 0x0a0d0e0e,
+ 0x0c0d0a06,
+ 0x0c0e0c0e,
+ 0x0e0d0a0d,
+ 0x0f0c0c0c,
+ 0x0f0b0d0e,
+ 0x0d0f0e0e,
+ 0x0d0f0f0f,
+ 0x0c0b0f0e,
+ 0x00140006,
+ 0x001a0016,
+ 0x0020001c,
+ 0x00280024,
+ 0x0034002c,
+ 0x00400038,
+ 0x00500048,
+ 0x00680058,
+ 0x00800070,
+ 0x00a00090,
+ 0x00d000b0,
+ 0x010000e0,
+ 0x01400120,
+ 0x01a00160,
+ 0x020001c0,
+ 0x02800240,
+ 0x034002c0,
+ 0x04000380,
+ 0x05000480,
+ 0x06800580,
+ 0x08000700,
+ 0x0a000900,
+ 0x0d000b00,
+ 0x10000e00,
+ 0x14001200,
+ 0x1a001600,
+ 0x00001c00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x01010000,
+ 0x02020201,
+ 0x04030303,
+ 0x05040404,
+ 0x00000005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+	/* 4,572 consecutive 0x00000000 entries (zero-filled span of the added data array) */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1234baac,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textreloc[] = {
+ 0x00000000,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_datareloc[] = {
+ 0x00000000,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textrelocfulladdr[] = {
+ 0x00000000,
+};
+
+unsigned int all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textreloctype[] = {
+ 0x00000000,
+};
+
+struct IMG_COMPILED_FW_BIN_RECORD simg_compiled_all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1 = {
+ /* unsigned int text_size, data_size; */
+ 15278, 13643,
+ /* unsigned int data_origin, text_origin; */
+ 0x8288eeb8, 0x80900000,
+ /* unsigned int text_reloc_size, data_reloc_size; */
+ 0, 0,
+
+ /*
+ * unsigned int pipes;
+ * unsigned char *fmt, *rc_mode;
+ * unsigned int formats_mask, hw_config;
+ */
+ 2, "ALL_CODECS", "ALL", 31, 1,
+
+ /* unsigned int int_define_cnt; */
+ 5,
+ /* unsigned char **int_define_names; */
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_define_names_array,
+ /* unsigned int *int_defines; */
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_define_values_array,
+
+ /*
+ * unsigned int *text, *data;
+ * unsigned int *text_reloc, *data_reloc;
+ * unsigned int *text_reloc_full_addr, *text_reloc_type;
+ */
+
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_text,
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_data,
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textreloc,
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_datareloc,
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textrelocfulladdr,
+ all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1_textreloctype,
+};
+
+/* Py_Return simg_compiled_all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1 */
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_binaries/include_all_fw_variants.h b/drivers/media/platform/vxe-vxd/encoder/fw_binaries/include_all_fw_variants.h
new file mode 100644
index 000000000000..b93cc4251851
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_binaries/include_all_fw_variants.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __INCLUDE_ALL_VARIANTS_INC_INCLUDED__
+#define __INCLUDE_ALL_VARIANTS_INC_INCLUDED__
+
+#define INCLUDE_ALL_VARIANTS_TEMPLATE_VERSION (1)
+
+#define FW_BIN_FORMAT_VERSION (2)
+
+struct IMG_COMPILED_FW_BIN_RECORD {
+ unsigned int text_size, data_size;
+ unsigned int data_origin, text_origin;
+ unsigned int text_reloc_size, data_reloc_size;
+
+ unsigned int pipes;
+ unsigned char *fmt, *rc_mode;
+ unsigned int formats_mask, hw_config;
+
+ unsigned int int_define_cnt;
+ unsigned char **int_define_names;
+ unsigned int *int_defines;
+
+ unsigned int *text, *data;
+ unsigned int *text_reloc, *data_reloc;
+ unsigned int *text_reloc_full_addr, *text_reloc_type;
+};
+
+#include "ALL_CODECS_FW_ALL_pipes_2_contexts_8_hwconfig_1_bin.c"
+
+unsigned int all_fw_binaries_cnt = 1;
+struct IMG_COMPILED_FW_BIN_RECORD *all_fw_binaries[] = {
+ &simg_compiled_all_codecs_fw_all_pipes_2_contexts_8_hwconfig_1,
+};
+
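+/*
+ * A minimal selection sketch (hypothetical caller code, not part of this
+ * header): walk the table and pick the record matching the requested pipe
+ * count and hardware config.
+ *
+ *    struct IMG_COMPILED_FW_BIN_RECORD *rec = NULL;
+ *    unsigned int i;
+ *
+ *    for (i = 0; i < all_fw_binaries_cnt; i++)
+ *            if (all_fw_binaries[i]->pipes >= num_pipes &&
+ *                all_fw_binaries[i]->hw_config == hw_config)
+ *                    rec = all_fw_binaries[i];
+ */
+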
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_headers/coreflags.h b/drivers/media/platform/vxe-vxd/encoder/fw_headers/coreflags.h
new file mode 100644
index 000000000000..d07eb02d80ab
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_headers/coreflags.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _COREFLAGS_H_
+#define _COREFLAGS_H_
+
+#define SERIALIZED_PIPES (1)
+
+/* The number of TOPAZ cores present in the system */
+#define TOPAZHP_MAX_NUM_PIPES (4)
+
+#define TOPAZHP_MAX_POSSIBLE_STREAMS (8)
+#define TOPAZHP_MAX_BU_SUPPORT_HD 90
+#define TOPAZHP_MAX_BU_SUPPORT_4K 128
+
+#define USE_VCM_HW_SUPPORT (1)
+/* controls the firmware's ability to support the optional hardware input scaler */
+#define INPUT_SCALER_SUPPORTED (1)
+/* controls the firmware's ability to support secure mode firmware upload */
+#define SECURE_MODE_POSSIBLE (1)
+
+/* controls the firmware's ability to support secure input/output ports */
+#define SECURE_IO_PORTS (1)
+
+/* Line counter feature is not ready for Onyx yet
+ * (comment the define to remove the feature from builds)
+ */
+#define LINE_COUNTER_SUPPORTED (1)
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_headers/defs.h b/drivers/media/platform/vxe-vxd/encoder/fw_headers/defs.h
new file mode 100644
index 000000000000..3181e7e2cc02
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_headers/defs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef DEFS_H_
+#define DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * MACROS to insert values into fields within a word. The basename of the
+ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
+ */
+#define F_MASK(basename) (MASK_##basename)
+#define F_SHIFT(basename) (SHIFT_##basename)
+/*
+ * Extract a value from an instruction word.
+ */
+#define F_EXTRACT(val, basename) (((val) & (F_MASK(basename))) >> (F_SHIFT(basename)))
+
+/*
+ * Mask and shift a value to the position of a particular field.
+ */
+#define F_ENCODE(val, basename) (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
+#define F_DECODE(val, basename) (((val) & (F_MASK(basename))) >> (F_SHIFT(basename)))
+
+/*
+ * Insert a value into a word.
+ */
+#define F_INSERT(word, val, basename) (((word) & ~(F_MASK(basename))) | (F_ENCODE((val), basename)))
+
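+/*
+ * Worked example (hypothetical field FOO with MASK_FOO = 0xf0 and
+ * SHIFT_FOO = 4):
+ *
+ *    F_ENCODE(0x3, FOO)       == 0x30
+ *    F_EXTRACT(0x35, FOO)     == 0x3
+ *    F_INSERT(0xA5, 0x7, FOO) == 0x75
+ */
+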
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_headers/mtx_fwif.h b/drivers/media/platform/vxe-vxd/encoder/fw_headers/mtx_fwif.h
new file mode 100644
index 000000000000..7f66a80a0d1d
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_headers/mtx_fwif.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _MTX_FWIF_H_
+#define _MTX_FWIF_H_
+
+#include "vxe_common.h"
+#include "topazscfwif.h"
+
+//#define VXE_MEASURE_MTX_CLK_FREQ
+
+/*
+ * enum describing the MTX load method
+ */
+enum mtx_load_method {
+ MTX_LOADMETHOD_NONE = 0, /* don't load MTX code */
+ MTX_LOADMETHOD_BACKDOOR, /* backdoor - writes MTX load data direct to out.res */
+ MTX_LOADMETHOD_REGIF, /* load mtx code via register interface */
+ MTX_LOADMETHOD_DMA, /* load mtx code via DMA */
+ MTX_LOADMETHOD_FORCE32BITS = 0x7FFFFFFFU
+
+};
+
+/*
+ * defines that should come from auto generated headers
+ */
+#define MTX_DMA_MEMORY_BASE (0x82880000)
+#define PC_START_ADDRESS (0x80900000)
+
+#define MTX_CORE_CODE_MEM (0x10)
+#define MTX_CORE_DATA_MEM (0x18)
+
+#define MTX_PC (0x05)
+
+/*
+ * MTX Firmware Context Structure
+ */
+
+/*
+ * struct img_fw_int_defines_table - contains info for the fw int defines
+ *
+ * @length: length of the table
+ * @names: array of names of entries
+ * @values: array of values of entries
+ */
+struct img_fw_int_defines_table {
+ unsigned int length;
+ unsigned char **names;
+ unsigned int *values;
+};
+
+/*
+ * struct img_fw_context - contains info for the context of the loaded firmware
+ *
+ * @initialized: TRUE if MTX core is initialized
+ * @populated: TRUE if MTX firmware context has been populated with data
+ * @active_ctx_mask: A bit mask of active encode contexts in the firmware
+ * @dev_ctx: Pointer to the device context
+ * @load_method: method used to load this MTX
+ * @supported_codecs: Codec mask
+ * @mtx_debug_val: Value in MTX Debug register (for RAM config)
+ * @mtx_ram_size: Size of MTX RAM
+ * @mtx_bank_size: Size of MTX RAM banks
+ * @mtx_reg_mem_space_addr: Memspace ID for MTX registers
+ * @topaz_reg_mem_space_addr: Memspace ID for TOPAZ registers
+ * @topaz_multicore_reg_addr: Memspace ID for TOPAZ multicore control registers
+ * @core_rev: Hardware core revision ID
+ * @core_des1: Hardware core designer (feature bits)
+ * @drv_has_mtx_ctrl: TRUE if driver (not DASH) has control of MTX
+ * @access_control: Used to get read/write access to MTX
+ * @hw_num_pipes: Number of pipes available in hardware
+ * @num_pipes: Number of pipes supported by firmware
+ * @num_contexts: Number of contexts supported by firmware
+ * @mtx_context_data_copy: Copy of MTX Context Data during hibernate
+ * @mtx_reg_copy: Copy of MTX Register block during hibernate
+ * @mtx_topaz_fw_text_size: Size of MTX Firmware Text Section in words
+ * @mtx_topaz_fw_text: Pointer to MTX Firmware Text Section
+ * @mtx_topaz_fw_data_size: Size of MTX Firmware Data Section in words
+ * @mtx_topaz_fw_data: Pointer to MTX Firmware Data Section
+ * @mtx_topaz_fw_data_origin: Offset to location of Data section
+ * @int_defines: table of int defines
+ */
+struct img_fw_context {
+ unsigned short initialized;
+ unsigned short populated;
+ unsigned char active_ctx_mask;
+
+ void *dev_ctx;
+
+ enum mtx_load_method load_method;
+
+ unsigned int supported_codecs;
+
+ unsigned int mtx_debug_val;
+ unsigned int mtx_ram_size;
+ unsigned int mtx_bank_size;
+
+ void *mtx_reg_mem_space_addr;
+ void *topaz_reg_mem_space_addr[TOPAZHP_MAX_NUM_PIPES];
+ void *topaz_multicore_reg_addr;
+ unsigned int core_rev;
+ unsigned int core_des1;
+
+ unsigned short drv_has_mtx_ctrl;
+ unsigned int access_control;
+
+ unsigned int hw_num_pipes;
+ unsigned int num_pipes;
+ unsigned int num_contexts;
+
+ struct vidio_ddbufinfo *mtx_context_data_copy[TOPAZHP_MAX_POSSIBLE_STREAMS];
+ unsigned int *mtx_reg_copy;
+
+ unsigned int mtx_topaz_fw_text_size;
+ unsigned int *mtx_topaz_fw_text;
+
+ unsigned int mtx_topaz_fw_data_size;
+ unsigned int *mtx_topaz_fw_data;
+
+ unsigned int mtx_topaz_fw_data_origin;
+
+ struct img_fw_int_defines_table int_defines;
+};
+
+/*
+ * Populates MTX context structure
+ * @param codec : version of codec specific firmware to associate with this MTX
+ * @param fw_ctx : Output context
+ * @return int : Standard IMG_ERRORCODE
+ */
+int mtx_populate_fw_ctx(enum img_codec codec,
+ struct img_fw_context *fw_ctx);
+
+/*
+ * Initialise the hardware using the given (populated) MTX context structure
+ * @param fw_ctx : Pointer to the context of the target MTX
+ * @return None
+ */
+void mtx_initialize(void *dev_ctx, struct img_fw_context *fw_ctx);
+
+/*
+ * Return the integer define used to compile the given version of firmware.
+ * @param fw_ctx : Pointer to the context of the target MTX
+ * @param name : Name of a define (string)
+ * @return Value of define or -1 if not found.
+ */
+int mtx_get_fw_config_int(struct img_fw_context const * const fw_ctx,
+ unsigned char const * const name);
+
+/*
+ * Load text and data sections onto an MTX.
+ * @param fw_ctx : Pointer to the context of the target MTX
+ * @param load_method : Method to use for loading code
+ * @return None
+ */
+void mtx_load(void *dev_ctx, struct img_fw_context *fw_ctx,
+ enum mtx_load_method load_method);
+
+/*
+ * Deinitialises MTX and MTX control structure
+ */
+void mtx_deinitialize(struct img_fw_context *fw_ctx);
+
+/*
+ * Saves MTX State -- Registers and Data Memory
+ */
+void mtx_save_state(struct img_fw_context *fw_ctx);
+
+/*
+ * Restores MTX State -- Registers and Data Memory
+ */
+void mtx_restore_state(void *ctx, struct img_fw_context *fw_ctx);
+
+/*
+ * mtx_start
+ */
+void mtx_start(struct img_fw_context *fw_ctx);
+
+/*
+ * mtx_stop
+ */
+void mtx_stop(struct img_fw_context *fw_ctx);
+
+/*
+ * Kicks MTX
+ */
+void mtx_kick(struct img_fw_context *fw_ctx, unsigned int kick_count);
+
+/*
+ * Waits for MTX to halt
+ */
+void mtx_wait_for_completion(struct img_fw_context *fw_ctx);
+
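+/*
+ * Typical bring-up sequence (a sketch against the API above; error
+ * handling is omitted and dev_ctx is the caller's device context):
+ *
+ *    struct img_fw_context fw_ctx;
+ *
+ *    mtx_populate_fw_ctx(IMG_CODEC_H264_CBR, &fw_ctx);
+ *    mtx_initialize(dev_ctx, &fw_ctx);
+ *    mtx_load(dev_ctx, &fw_ctx, MTX_LOADMETHOD_DMA);
+ *    mtx_start(&fw_ctx);
+ *    mtx_kick(&fw_ctx, 1);
+ *    mtx_wait_for_completion(&fw_ctx);
+ */
+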
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_headers/topazscfwif.h b/drivers/media/platform/vxe-vxd/encoder/fw_headers/topazscfwif.h
new file mode 100644
index 000000000000..dc403410a66d
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_headers/topazscfwif.h
@@ -0,0 +1,1104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _TOPAZSCFWIF_H_
+#define _TOPAZSCFWIF_H_
+
+#include "coreflags.h"
+#include <linux/types.h>
+
+#define MAX_QP_H264 (51)
+
+/*
+ * The number of bytes used by each MVEA MV param & above param region
+ */
+#define MVEA_MV_PARAM_REGION_SIZE 16
+#define MVEA_ABOVE_PARAM_REGION_SIZE 96
+
+/*
+ * Macros to align to the correct number of bytes
+ */
+#define ALIGN_4(X) (((X) + 3) & ~3)
+#define ALIGN_16(X) (((X) + 15) & ~15)
+#define ALIGN_64(X) (((X) + 63) & ~63)
+#define ALIGN_128(X) (((X) + 127) & ~127)
+#define ALIGN_1024(X) (((X) + 1023) & ~1023)
+
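+/*
+ * E.g. ALIGN_64(100) rounds up to 128, while ALIGN_16(32) is already
+ * aligned and stays 32.
+ */
+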
+/*
+ * Context size allocated from host application
+ */
+#define MTX_CONTEXT_SIZE (13 * 1024)
+
+/*
+ * SEI (Buffering Period and Picture Timing) Constants shared
+ * between host and firmware
+ */
+#define BPH_SEI_NAL_INITIAL_CPB_REMOVAL_DELAY_SIZE 23
+#define PTH_SEI_NAL_CPB_REMOVAL_DELAY_SIZE 23
+#define PTH_SEI_NAL_DPB_OUTPUT_DELAY_SIZE 7
+
+/*
+ * Size of the header in output coded buffer. This varies based on
+ * whether data logging is enabled/disabled
+ */
+#if defined(INCLUDE_CRC_REGISTER_CHECKS)
+#define CRC_REGISTER_FEEDBACK_SIZE (80 * 4)
+#else
+#define CRC_REGISTER_FEEDBACK_SIZE 0
+#endif
+
+/* MUST be aligned to the DMA 64 byte boundary condition
+ * (CRC data is DMA'd after the coded buffer header)
+ */
+#define CODED_BUFFER_HEADER_SIZE 64
+#define CODED_BUFFER_INFO_SECTION_SIZE (CODED_BUFFER_HEADER_SIZE + CRC_REGISTER_FEEDBACK_SIZE)
+
+/*
+ * Mask defines for the -ui8EnableSelStatsFlags variable
+ */
+#define ESF_FIRST_STAGE_STATS 1
+#define ESF_MP_BEST_MB_DECISION_STATS 2
+#define ESF_MP_BEST_MOTION_VECTOR_STATS 4
+
+#define CUSTOM_QUANT_PARAMSIZE_8x8 2
+
+/*
+ * Combined size of H.264 quantization lists (6 * 16 + {2 or 6} * 64)
+ */
+#define QUANT_LISTS_SIZE (6 * 16 + CUSTOM_QUANT_PARAMSIZE_8x8 * 64)
+
+/*
+ * Size in bytes and words of memory to transfer partially coded header data
+ */
+#define MAX_HEADERSIZEBYTES (128)
+#define MAX_HEADERSIZEWORDS (32)
+
+/*
+ * Maximum number of slices per field
+ */
+#define MAX_SLICESPERPIC (128)
+
+/*
+ * Picture parameter flags used in the PIC_PARAM structure
+ */
+#define ISINTERP_FLAGS (0x00000001)
+#define ISRC_FLAGS (0x00000010)
+#define ISRC_I16BIAS (0x00000020)
+#define ISINTERB_FLAGS (0x00000080)
+#define ISSCENE_DISABLED (0x00000100)
+#define ISMULTIREF_FLAGS (0x00000200)
+#define SPATIALDIRECT_FLAGS (0x00000400)
+
+/*
+ * Enum describing contents of scratch registers
+ */
+enum mtx_scratch_regdata {
+ MTX_SCRATCHREG_BOOTSTATUS = 0,
+ MTX_SCRATCHREG_UNUSED = 0,
+ MTX_SCRATCHREG_TOHOST, //!< Reg for MTX->Host data
+ MTX_SCRATCHREG_TOMTX, //!< Reg for Host->MTX data
+
+ MTX_SCRATCHREG_SIZE, //!< End marker for enum
+ MTX_SCRATCHREG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * `MTX_SCRATCHREG_IDLE` register that is used for synchronous communication and debug.
+ *
+ * Current register usage:
+ * <bits, inclusive boundaries> : <usage>
+ * 2-10 : Number of executed commands (mod 255)
+ * 0-1 : FW idle status
+ */
+#define MTX_SCRATCHREG_IDLE TOPAZHP_TOP_CR_FIRMWARE_REG_4 //!< Reg for firmware IDLE status
+
+/* Flags relating to MTX_SCRATCHREG_IDLE */
+/* Bits [10-22] are used for the line information */
+/* TOPAZHP_LINE_COUNTER (see TRM 8.1.1) uses 12 bits for the line count */
+#define SHIFT_FW_IDLE_REG_STATUS (0)
+#define MASK_FW_IDLE_REG_STATUS (3)
+#define FW_IDLE_STATUS_IDLE (1)
+
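+/*
+ * A hedged polling sketch using the fields above (reg_read() stands in
+ * for whatever MMIO accessor the driver uses; F_EXTRACT() is from defs.h):
+ *
+ *    val = reg_read(MTX_SCRATCHREG_IDLE);
+ *    idle = F_EXTRACT(val, FW_IDLE_REG_STATUS) == FW_IDLE_STATUS_IDLE;
+ */
+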
+/*
+ * In secure FW mode the first value written to the command FIFO is copied to MMU_CONTROL_0
+ * by the firmware. When we don't want that to happen we can write this value instead.
+ * The firmware will know to ignore it as
+ * long as it is written BEFORE the firmware starts up
+ */
+#define TOPAZHP_NON_SECURE_FW_MARKER (0xffffffff)
+
+/*
+ * This value is an arbitrary value that the firmware will write to TOPAZHP_TOP_CR_FIRMWARE_REG_1
+ * (MTX_SCRATCHREG_BOOTSTATUS)
+ * when it has completed the boot process to indicate that it is ready
+ */
+#define TOPAZHP_FW_BOOT_SIGNAL (0x12345678)
+
+/*
+ * Sizes for arrays that depend on reference usage pattern
+ */
+#define MAX_REF_B_LEVELS 3
+#define MAX_REF_SPACING 1
+#define MAX_REF_I_OR_P_LEVELS (MAX_REF_SPACING + 2)
+#define MAX_REF_LEVELS (MAX_REF_B_LEVELS + MAX_REF_I_OR_P_LEVELS)
+#define MAX_PIC_NODES (MAX_REF_LEVELS + 2)
+#define MAX_MV (MAX_PIC_NODES * 2)
+
+#define MAX_BFRAMES 7 //B-frame count limit for Hierarchical mode
+#define MAX_GOP_SIZE (MAX_BFRAMES + 1)
+#define MAX_SOURCE_SLOTS_SL (MAX_GOP_SIZE + 1)
+
+#define MV_ROW_STRIDE (ALIGN_64(sizeof(struct img_mv_settings) * MAX_BFRAMES))
+
+/*
+ * MTX -> host message FIFO
+ */
+#define LOG2_WB_FIFO_SIZE (5)
+
+#define WB_FIFO_SIZE (1 << (LOG2_WB_FIFO_SIZE))
+
+#define SHIFT_WB_PRODUCER (0)
+#define MASK_WB_PRODUCER (((1 << LOG2_WB_FIFO_SIZE) - 1) << SHIFT_WB_PRODUCER)
+
+#define SHIFT_WB_CONSUMER (0)
+#define MASK_WB_CONSUMER (((1 << LOG2_WB_FIFO_SIZE) - 1) << SHIFT_WB_CONSUMER)
+
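+/*
+ * The producer/consumer values behave as a power-of-two ring: with
+ * LOG2_WB_FIFO_SIZE = 5 the FIFO holds 32 writeback slots, and an index
+ * would typically advance as
+ *
+ *    idx = (idx + 1) & ((1 << LOG2_WB_FIFO_SIZE) - 1);
+ */
+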
+/*
+ * Number of buffers per encode task (default: 2 - double buffering)
+ */
+#define CODED_BUFFERING_CNT 2 //default to double-buffering
+
+/*
+ * Calculates the ideal minimum coded buffers for a frame-level encode
+ */
+#define CALC_OPTIMAL_CODED_PACKAGES_FRAME_ENCODE(numcores, isinterlaced) \
+ ((((isinterlaced) ? 2 : 1) * (numcores)) * CODED_BUFFERING_CNT)
+
+/*
+ * Calculates the ideal minimum coded buffers for a slice-level encode
+ */
+#define CALC_OPTIMAL_CODED_PACKAGES_SLICE_ENCODE(slicesperpic) \
+ ((slicesperpic) * CODED_BUFFERING_CNT)
+
+/*
+ * Calculates the ideal minimum coded buffers for an encode
+ */
+#define CALC_OPTIMAL_CODED_PACKAGES_ENCODE(bis_slice_level, slicesperpic, numcores, isinterlaced) \
+ (bis_slice_level ? CALC_OPTIMAL_CODED_PACKAGES_SLICE_ENCODE(slicesperpic) \
+ : CALC_OPTIMAL_CODED_PACKAGES_FRAME_ENCODE(numcores, isinterlaced))
+
+/*
+ * Calculates the actual number of coded buffers that can be used for an encode
+ */
+#define CALC_NUM_CODED_PACKAGES_ENCODE(bis_slice_level, slicesperpic, numcores, isinterlaced) \
+ (CALC_OPTIMAL_CODED_PACKAGES_ENCODE(bis_slice_level, slicesperpic, numcores, isinterlaced))
+
+/*
+ * Maximum number of coded packages
+ */
+#define MAX_CODED_PACKAGES CALC_NUM_CODED_PACKAGES_ENCODE(0, 0, TOPAZHP_MAX_NUM_PIPES, 1)
+
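+/*
+ * Worked example: a frame-level encode (bis_slice_level = 0) on 2 pipes
+ * with interlaced content needs (2 * 2) * CODED_BUFFERING_CNT = 8 coded
+ * packages; MAX_CODED_PACKAGES above is the same formula evaluated for
+ * TOPAZHP_MAX_NUM_PIPES.
+ */
+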
+/*
+ * DMA configuration parameters
+ */
+#define MTX_DMA_BURSTSIZE_BYTES 32
+
+/*
+ * types that should be in DMAC header file
+ */
+enum dmac_acc_del {
+ DMAC_ACC_DEL_0 = 0x0, //!< Access delay zero clock cycles
+ DMAC_ACC_DEL_256 = 0x1, //!< Access delay 256 clock cycles
+ DMAC_ACC_DEL_512 = 0x2, //!< Access delay 512 clock cycles
+ DMAC_ACC_DEL_768 = 0x3, //!< Access delay 768 clock cycles
+ DMAC_ACC_DEL_1024 = 0x4, //!< Access delay 1024 clock cycles
+ DMAC_ACC_DEL_1280 = 0x5, //!< Access delay 1280 clock cycles
+ DMAC_ACC_DEL_1536 = 0x6, //!< Access delay 1536 clock cycles
+ DMAC_ACC_DEL_1792 = 0x7, //!< Access delay 1792 clock cycles
+ DMAC_ACC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum dmac_bswap {
+ DMAC_BSWAP_NO_SWAP = 0x0, //!< No byte swapping will be performed.
+ DMAC_BSWAP_REVERSE = 0x1, //!< Byte order will be reversed.
+ DMAC_BSWAP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum dmac_burst {
+ DMAC_BURST_0 = 0x0, //!< burst size of 0
+ DMAC_BURST_1 = 0x1, //!< burst size of 1
+ DMAC_BURST_2 = 0x2, //!< burst size of 2
+ DMAC_BURST_3 = 0x3, //!< burst size of 3
+ DMAC_BURST_4 = 0x4, //!< burst size of 4
+ DMAC_BURST_5 = 0x5, //!< burst size of 5
+ DMAC_BURST_6 = 0x6, //!< burst size of 6
+ DMAC_BURST_7 = 0x7, //!< burst size of 7
+ DMAC_BURST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP) |\
+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW) |\
+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR) |\
+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI) |\
+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
+
+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
+ ((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL) |\
+ (((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR) |\
+ (((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
+
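+/*
+ * A hedged usage sketch (the SHIFT_/MASK_IMG_SOC_* field constants come
+ * from the SoC DMA register headers; "dir" and "words" are supplied by
+ * the caller):
+ *
+ *    count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+ *                             dir, 0, words);
+ *    periph = DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, 0, DMAC_BURST_2);
+ */
+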
+enum dmac_pw {
+ DMAC_PWIDTH_32_BIT = 0x0, //!< Peripheral width 32-bit.
+ DMAC_PWIDTH_16_BIT = 0x1, //!< Peripheral width 16-bit.
+ DMAC_PWIDTH_8_BIT = 0x2, //!< Peripheral width 8-bit.
+ DMAC_PWIDTH_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing Command IDs. Some commands require data to be DMA'd in
+ * from the Host, with the base address of the data specified in the Command
+ * Data Address word of the command. The data required is specific to each
+ * command type.
+ */
+enum mtx_cmd_id {
+ // Common Commands
+ MTX_CMDID_NULL, //!< (no data)\n Null command does nothing\n
+ MTX_CMDID_SHUTDOWN, //!< (no data)\n shutdown the MTX\n
+
+ // Video Commands
+ /* !< (extra data: #MTX_HEADER_PARAMS) Command for Sequence, Picture and Slice headers */
+ MTX_CMDID_DO_HEADER,
+ /* !< (data: low latency encode activation, HBI usage) Encode frame data */
+ MTX_CMDID_ENCODE_FRAME,
+ MTX_CMDID_START_FRAME, //!< (no data)\n Prepare to encode frame\n
+ MTX_CMDID_ENCODE_SLICE, //!< (no data)\n Encode slice data\n
+ MTX_CMDID_END_FRAME, //!< (no data)\n Complete frame encoding\n
+ /* !< (data: pipe number, extra data: #IMG_MTX_VIDEO_CONTEXT)\n Set MTX Video Context */
+ MTX_CMDID_SETVIDEO,
+ /* !< (data: pipe number, extra data: #IMG_MTX_VIDEO_CONTEXT)
+ * Get MTX Video Context
+ */
+ MTX_CMDID_GETVIDEO,
+ /* !< (data: new pipe allocations for the context)
+ * Change pipe allocation for a Video Context
+ */
+ MTX_CMDID_DO_CHANGE_PIPEWORK,
+#if SECURE_IO_PORTS
+ MTX_CMDID_SECUREIO, //!< (data: )\n Change IO security\n
+#endif
+ /* !< (data: subtype and parameters, extra data: #IMG_PICMGMT_CUSTOM_QUANT_DATA
+ * (optional))\n Change encoding parameters
+ */
+ MTX_CMDID_PICMGMT,
+ /* !< (data: QP and bitrate)\n Change encoding parameters */
+ MTX_CMDID_RC_UPDATE,
+ /* !< (extra data: #IMG_SOURCE_BUFFER_PARAMS)
+ * Transfer source buffer from host
+ */
+ MTX_CMDID_PROVIDE_SOURCE_BUFFER,
+ /* !< (data: buffer parameters, extra data: reference buffer)
+ * Transfer reference buffer from host
+ */
+ MTX_CMDID_PROVIDE_REF_BUFFER,
+ /* !< (data: slot and size, extra data: coded package)\n Transfer coded package from host
+ *(coded package contains addresses of header and coded output buffers/1st linked list node)
+ */
+ MTX_CMDID_PROVIDE_CODEDPACKAGE_BUFFER,
+ MTX_CMDID_ABORT, //!< (no data)\n Stop encoding and release all buffers\n
+
+ // JPEG commands
+ MTX_CMDID_SETQUANT, //!< (extra data: #JPEG_MTX_QUANT_TABLE)\n
+ MTX_CMDID_SETUP_INTERFACE, //!< (extra data: #JPEG WRITEBACK POINTERS)\n
+ MTX_CMDID_ISSUEBUFF, //!< (extra data: #MTX_ISSUE_BUFFERS)\n
+ MTX_CMDID_SETUP, //!< (extra data: #JPEG_MTX_DMA_SETUP)\n\n
+ /* !< (extra data: #IMG_VXE_SCALER_SETUP)\nChange source
+ * pixel format after context creation
+ */
+ MTX_CMDID_UPDATE_SOURCE_FORMAT,
+ /* !< (extra data: #IMG_VXE_CSC_SETUP)\nChange Colour Space Conversion setup dynamically */
+ MTX_CMDID_UPDATE_CSC,
+
+ MTX_CMDID_ENDMARKER, //!< end marker for enum
+ MTX_CMDID_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Priority for the command.
+ * Each Command ID will only work with the correct priority.
+ */
+#define MTX_CMDID_PRIORITY 0x80
+
+/*
+ * Indicates whether or not to issue an interrupt when the firmware sends the
+ * command's writeback message.
+ */
+#define MTX_CMDID_WB_INTERRUPT 0x8000
+
+/*
+ * Enum describing response IDs
+ */
+enum mtx_message_id {
+ MTX_MESSAGE_ACK,
+ MTX_MESSAGE_CODED,
+ MTX_MESSAGE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Mask and shift values for command word
+ */
+#define SHIFT_MTX_MSG_CMD_ID (0)
+#define MASK_MTX_MSG_CMD_ID (0x7f << SHIFT_MTX_MSG_CMD_ID)
+#define SHIFT_MTX_MSG_PRIORITY (7)
+#define MASK_MTX_MSG_PRIORITY (0x1 << SHIFT_MTX_MSG_PRIORITY)
+#define SHIFT_MTX_MSG_CORE (8)
+#define MASK_MTX_MSG_CORE (0x7f << SHIFT_MTX_MSG_CORE)
+#define SHIFT_MTX_MSG_COUNT (16)
+#define MASK_MTX_MSG_COUNT (0xffffU << SHIFT_MTX_MSG_COUNT)
+#define SHIFT_MTX_MSG_MESSAGE_ID (16)
+#define MASK_MTX_MSG_MESSAGE_ID (0xff << SHIFT_MTX_MSG_MESSAGE_ID)
+
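+/*
+ * A command word is assembled by shifting each field into place, e.g.
+ * (illustrative values; F_ENCODE() is from defs.h):
+ *
+ *    cmd = F_ENCODE(MTX_CMDID_ENCODE_FRAME, MTX_MSG_CMD_ID) |
+ *          F_ENCODE(1, MTX_MSG_PRIORITY) |
+ *          F_ENCODE(core, MTX_MSG_CORE);
+ */
+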
+/*
+ * Mask and shift values for data word
+ */
+#define SHIFT_MTX_MSG_ENCODE_CODED_INTERRUPT (0)
+#define MASK_MTX_MSG_ENCODE_CODED_INTERRUPT \
+ (0xff << SHIFT_MTX_MSG_ENCODE_CODED_INTERRUPT)
+#define SHIFT_MTX_MSG_ENCODE_USE_LINE_COUNTER (20)
+#define MASK_MTX_MSG_ENCODE_USE_LINE_COUNTER \
+ (0x1 << SHIFT_MTX_MSG_ENCODE_USE_LINE_COUNTER)
+
+#define SHIFT_MTX_MSG_PICMGMT_SUBTYPE (0)
+#define MASK_MTX_MSG_PICMGMT_SUBTYPE (0xff << SHIFT_MTX_MSG_PICMGMT_SUBTYPE)
+#define SHIFT_MTX_MSG_PICMGMT_DATA (8)
+#define MASK_MTX_MSG_PICMGMT_DATA (0xffffffU << SHIFT_MTX_MSG_PICMGMT_DATA)
+#define SHIFT_MTX_MSG_PICMGMT_STRIDE_Y (0)
+#define MASK_MTX_MSG_PICMGMT_STRIDE_Y (0x3ff << SHIFT_MTX_MSG_PICMGMT_STRIDE_Y)
+#define SHIFT_MTX_MSG_PICMGMT_STRIDE_UV (10)
+#define MASK_MTX_MSG_PICMGMT_STRIDE_UV (0x3ff << SHIFT_MTX_MSG_PICMGMT_STRIDE_UV)
+
+/*Values for updating static Qp values when Rate Control is disabled*/
+#define SHIFT_MTX_MSG_NUM_CODED_BUFFERS_PER_HEADER (5)
+#define MASK_MTX_MSG_NUM_CODED_BUFFERS_PER_HEADER \
+ (0xf << SHIFT_MTX_MSG_NUM_CODED_BUFFERS_PER_HEADER)
+
+#define SHIFT_MTX_MSG_PROVIDE_CODEDPACKAGE_BUFFER_SLOT (0)
+#define MASK_MTX_MSG_PROVIDE_CODEDPACKAGE_BUFFER_SLOT \
+ (0x0f << SHIFT_MTX_MSG_PROVIDE_CODEDPACKAGE_BUFFER_SLOT)
+#define SHIFT_MTX_MSG_PROVIDE_CODED_BUFFER_SIZE (4)
+#define MASK_MTX_MSG_PROVIDE_CODED_BUFFER_SIZE \
+ (0x3fffff << SHIFT_MTX_MSG_PROVIDE_CODED_BUFFER_SIZE)
+
+/*
+ * Enum describing partially coded header element types
+ */
+enum header_element_type {
+ ELEMENT_STARTCODE_RAWDATA = 0, //!< Raw data that includes a start code
+ /*!< Raw data that includes a start code in the middle of the header */
+ ELEMENT_STARTCODE_MIDHDR,
+ ELEMENT_RAWDATA, //!< Raw data
+ ELEMENT_QP, //!< Insert the H264 Picture Header QP parameter
+ ELEMENT_SQP, //!< Insert the H264 Slice Header QP parameter
+ /* !< Insert the H263/MPEG4 Frame Q_scale parameter (vob_quant field) */
+ ELEMENT_FRAMEQSCALE,
+ /* !< Insert the H263/MPEG4 Slice Q_scale parameter (quant_scale field) */
+ ELEMENT_SLICEQSCALE,
+ ELEMENT_INSERTBYTEALIGN_H264, //!< Insert the byte alignment bits for H264
+ ELEMENT_INSERTBYTEALIGN_MPG4, //!< Insert the byte alignment bits for MPEG4
+ ELEMENT_INSERTBYTEALIGN_MPG2, //!< Insert the byte alignment bits for MPEG2
+ ELEMENT_VBV_MPG2,
+ ELEMENT_TEMPORAL_REF_MPG2,
+ ELEMENT_CURRMBNR, //!< Insert the current macroblock number for a slice.
+
+ ELEMENT_FRAME_NUM, //!< Insert frame_num field (used as ID for ref. pictures in H264)
+ /* !< Insert Temporal Reference field (used as ID for ref. pictures in H263) */
+ ELEMENT_TEMPORAL_REFERENCE,
+ ELEMENT_EXTENDED_TR, //!< Insert Extended Temporal Reference field
+ /* !< Insert idr_pic_id field (used to distinguish consecutive IDR frames) */
+ ELEMENT_IDR_PIC_ID,
+ /* !< Insert pic_order_cnt_lsb field (used for display ordering in H264) */
+ ELEMENT_PIC_ORDER_CNT,
+ /* !< Insert gob_frame_id field (used for display ordering in H263) */
+ ELEMENT_GOB_FRAME_ID,
+ /* !< Insert vop_time_increment field (used for display ordering in MPEG4) */
+ ELEMENT_VOP_TIME_INCREMENT,
+ /* !< Insert modulo_time_base used in MPEG4 (depends on vop_time_increment_resolution) */
+ ELEMENT_MODULO_TIME_BASE,
+
+ ELEMENT_BOTTOM_FIELD, //!< Insert bottom_field flag
+ ELEMENT_SLICE_NUM, //!< Insert slice num (used for GOB headers in H263)
+ ELEMENT_MPEG2_SLICE_VERTICAL_POS, //!< Insert slice vertical pos (MPEG2 slice header)
+ /* !< Insert 1 bit flag indicating if slice is Intra or not (MPEG2 slice header) */
+ ELEMENT_MPEG2_IS_INTRA_SLICE,
+ /* !< Insert 2 bit field indicating if the current header is for a frame picture (11),
+ * top field (01) or bottom field (10) - (MPEG2 picture header)
+ */
+ ELEMENT_MPEG2_PICTURE_STRUCTURE,
+ /* !< Insert flag indicating whether or not this picture is a reference */
+ ELEMENT_REFERENCE,
+ ELEMENT_ADAPTIVE, //!< Insert reference picture marking
+ ELEMENT_DIRECT_SPATIAL_MV_FLAG, //!< Insert spatial direct mode flag
+ ELEMENT_NUM_REF_IDX_ACTIVE, //!< Insert number of active references
+ ELEMENT_REORDER_L0, //!< Insert reference list 0 reordering
+ ELEMENT_REORDER_L1, //!< Insert reference list 1 reordering
+ ELEMENT_TEMPORAL_ID, //!< Insert temporal ID of the picture, used for MVC header
+ /*!< Insert flag indicating whether or not this picture is an anchor picture */
+ ELEMENT_ANCHOR_PIC_FLAG,
+
+ BPH_SEI_NAL_INITIAL_CPB_REMOVAL_DELAY, //!< Insert nal_initial_cpb_removal_delay
+ /* !< Insert nal_initial_cpb_removal_delay_offset */
+ BPH_SEI_NAL_INITIAL_CPB_REMOVAL_DELAY_OFFSET,
+ PTH_SEI_NAL_CPB_REMOVAL_DELAY, //!< Insert cpb_removal_delay
+ PTH_SEI_NAL_DPB_OUTPUT_DELAY, //!< Insert dpb_output_delay
+
+ ELEMENT_SLICEWEIGHTEDPREDICTIONSTRUCT, //!< Insert weighted prediction parameters
+ ELEMENT_CUSTOM_QUANT, //!< Insert custom quantization values
+ ELEMENT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Struct describing a partially coded header element
+ */
+struct mtx_header_element {
+ enum header_element_type element_type; //!< Element type
+ /* !< Number of bits of coded data to be inserted */
+ unsigned char size;
+ unsigned char bits; //!< Raw data to be inserted.
+};
+
+/*
+ * Struct describing partially coded header parameters
+ */
+struct mtx_header_params {
+ unsigned int elements; //!< Number of header elements
+ /*!< array of element data */
+ struct mtx_header_element element_stream[MAX_HEADERSIZEWORDS - 1];
+};
+
+/*
+ * Enum describing threshold values for skipped MB biasing
+ */
+enum th_skip_scale {
+ TH_SKIP_0 = 0, //!< Bias threshold for QP 0 to 12
+ TH_SKIP_12 = 1, //!< Bias threshold for QP 12 to 24
+ TH_SKIP_24 = 2, //!< Bias threshold for QP 24 and above
+ TH_SKIP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Struct describing rate control input parameters
+ */
+struct in_rc_params {
+ unsigned int mb_per_frm; //!< Number of MBs Per Frame
+ unsigned int mb_per_bu; //!< Number of MBs Per BU
+ unsigned short bu_per_frm; //!< Number of BUs Per Frame
+
+ unsigned short intra_period; //!< Intra frame frequency
+ unsigned short bframes; //!< B frame frequency
+
+ int bits_per_frm; //!< Bits Per Frame
+ int bits_per_bu; //!< Bits Per BU
+
+ int bit_rate; //!< Bit Rate (bps)
+ int buffer_size; //!< Size of Buffer in bits
+ int buffer_size_frames; //!< Size of Buffer in frames, to be used in VCM
+ int initial_level; //!< Initial Level of Buffer
+ int initial_delay; //!< Initial Delay of Buffer
+
+ unsigned short frm_skip_disable; //!< Disable Frame skipping
+
+ unsigned char se_init_qp_i; //!< Initial QP for sequence (I frames)
+ unsigned char se_init_qp_p; //!< Initial QP for sequence (P frames)
+ unsigned char se_init_qp_b; //!< Initial QP for sequence (B frames)
+
+ unsigned char min_qp; //!< Minimum QP value to use
+ unsigned char max_qp; //!< Maximum QP value to use
+
+ /* !< Scale Factor used to limit the range
+ * of arithmetic with high resolutions and bitrates
+ */
+ unsigned char scale_factor;
+ unsigned short mb_per_row; //!< Number of MBs Per Row
+
+ unsigned short disable_vcm_hardware; //!< Disable using vcm hardware in RC modes.
+
+ union {
+ struct {
+ /* !< Rate at which bits are sent from encoder
+ * to the output after each frame finished encoding
+ */
+ int transfer_rate;
+ /* !< Disable Scene Change detection */
+ unsigned short sc_detect_disable;
+ /* !< Flag indicating Hierarchical B Pic or Flat mode rate control */
+ unsigned short hierarchical_mode;
+ /* !< Constant used in rate control =
+ * (GopSize/(BufferSize-InitialLevel))*256
+ */
+ unsigned int rc_scale_factor;
+ /* !< Enable movement of slice boundary when Qp is high */
+ unsigned short enable_slice_bob;
+ /* !< Maximum number of rows the slice boundary can be moved */
+ unsigned char max_slice_bob;
+ /* !< Minimum Qp at which slice bobbing should take place */
+ unsigned char slice_bob_qp;
+ } h264;
+ struct {
+ unsigned char half_framerate; //!< Half Frame Rate (MP4 only)
+ unsigned char f_code; //!< F Code (MP4 only)
+ int bits_pergop; //!< Bits Per GOP (MP4 only)
+ unsigned short bu_skip_disable; //!< Disable BU skipping
+ int bits_per_mb; //!< Bits Per MB
+ unsigned short avg_qp_val; //!< Average QP in Current Picture
+ unsigned short initial_qp; //!< Initial Quantizer
+ } other;
+ } mode;
+};
+
+/*
+ * Enum describing MTX firmware version (codec and rate control)
+ */
+enum img_codec {
+ IMG_CODEC_NONE = 0, //!< There is no FW in MTX memory
+ IMG_CODEC_JPEG, //!< JPEG
+ IMG_CODEC_H264_NO_RC, //!< H264 with no rate control
+ IMG_CODEC_H264_VBR, //!< H264 variable bitrate
+ IMG_CODEC_H264_CBR, //!< H264 constant bitrate
+ IMG_CODEC_H264_VCM, //!< H264 video conference mode
+ IMG_CODEC_H263_NO_RC, //!< H263 with no rate control
+ IMG_CODEC_H263_VBR, //!< H263 variable bitrate
+ IMG_CODEC_H263_CBR, //!< H263 constant bitrate
+ IMG_CODEC_MPEG4_NO_RC, //!< MPEG4 with no rate control
+ IMG_CODEC_MPEG4_VBR, //!< MPEG4 variable bitrate
+ IMG_CODEC_MPEG4_CBR, //!< MPEG4 constant bitrate
+ IMG_CODEC_MPEG2_NO_RC, //!< MPEG2 with no rate control
+ IMG_CODEC_MPEG2_VBR, //!< MPEG2 variable bitrate
+ IMG_CODEC_MPEG2_CBR, //!< MPEG2 constant bitrate
+ IMG_CODEC_H264_ERC, //!< H264 example rate control
+ IMG_CODEC_H263_ERC, //!< H263 example rate control
+ IMG_CODEC_MPEG4_ERC, //!< MPEG4 example rate control
+ IMG_CODEC_MPEG2_ERC, //!< MPEG2 example rate control
+ IMG_CODEC_H264MVC_NO_RC, //!< MVC H264 with no rate control
+ IMG_CODEC_H264MVC_CBR, //!< MVC H264 constant bitrate
+ IMG_CODEC_H264MVC_VBR, //!< MVC H264 variable bitrate
+ IMG_CODEC_H264MVC_ERC, //!< MVC H264 example rate control
+ IMG_CODEC_H264_ALL_RC, //!< H264 with multiple rate control modes
+ IMG_CODEC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing encoding standard (codec)
+ */
+enum img_standard {
+ IMG_STANDARD_NONE = 0, //!< There is no FW in MTX memory
+ IMG_STANDARD_JPEG, //!< JPEG
+ IMG_STANDARD_H264, //!< H264
+ IMG_STANDARD_H263, //!< H263
+ IMG_STANDARD_MPEG4, //!< MPEG4
+ IMG_STANDARD_MPEG2, //!< MPEG2
+ IMG_STANDARD_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing image surface format types
+ */
+enum img_format {
+ IMG_CODEC_420_YUV = 100, //!< Planar Y U V
+ IMG_CODEC_420_YV12 = 44, //!< YV12 format Data
+ IMG_CODEC_420_IMC2 = 36, //!< IMC2 format Data
+ IMG_CODEC_420_PL8 = 47, //!< PL8 format YUV data
+ IMG_CODEC_420_PL12 = 101, //!< PL12 format YUV data
+ /* !< PL12 format packed into a single plane (not currently supported by JPEG) */
+ IMG_CODEC_420_PL12_PACKED = 25,
+ /* !< PL21 format packed into a single plane (not currently supported by JPEG) */
+ IMG_CODEC_420_PL21_PACKED = 26,
+ /* !< YUV format 4:2:2 data; start the incrementing auto enumeration
+ * values after the last ones we have used.
+ */
+ IMG_CODEC_422_YUV = 102,
+ IMG_CODEC_422_YV12, //!< YV12 format 4:2:2 data
+ IMG_CODEC_422_PL8, //!< PL8 format 4:2:2 data
+ IMG_CODEC_422_IMC2, //!< IMC2 format 4:2:2 data
+ IMG_CODEC_422_PL12, //!< PL12 format 4:2:2 data
+ IMG_CODEC_Y0UY1V_8888, //!< 4:2:2 YUYV data
+ IMG_CODEC_Y0VY1U_8888, //!< 4:2:2 YVYU data
+ IMG_CODEC_UY0VY1_8888, //!< 4:2:2 UYVY data
+ IMG_CODEC_VY0UY1_8888, //!< 4:2:2 VYUY data
+ IMG_CODEC_444_YUV, //!< YUV format 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_444_YV12, //!< YV12 format 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_444_PL8, //!< PL8 format 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_444_IMC2, //!< IMC2 format 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_444_PL12, //!< PL12 format 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_ABCX, //!< Interleaved 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_XBCA, //!< Interleaved 4:4:4 data (not currently supported by JPEG)
+ IMG_CODEC_ABC565, //!< Packed 4:4:4 data (not currently supported by JPEG)
+
+ IMG_CODEC_420_PL21, //!< PL21 format YUV data
+ IMG_CODEC_422_PL21, //!< 4:2:2 PL21 format YUV data
+ /* !< 4:4:4 PL21 format YUV data (not currently supported by JPEG) */
+ IMG_CODEC_444_PL21,
+
+ PVR_SURF_UNSPECIFIED, //!< End of the enum
+ IMG_CODEC_FORMAT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing presets for source image colour space conversion
+ */
+enum img_csc_preset {
+ IMG_CSC_NONE, //!< No colour space conversion
+ IMG_CSC_709_TO_601, //!< ITU BT.709 YUV to be converted to ITU BT.601 YUV
+ IMG_CSC_601_TO_709, //!< ITU BT.601 YUV to be converted to ITU BT.709 YUV
+ IMG_CSC_RGB_TO_601_ANALOG, //!< RGB to be converted to ITU BT.601 YUV
+ /* !< RGB to be converted to ITU BT.601 YCbCr for SDTV (reduced scale - 16-235) */
+ IMG_CSC_RGB_TO_601_DIGITAL,
+ /* !< RGB to be converted to ITU BT.601 YCbCr for HDTV (full range - 0-255) */
+ IMG_CSC_RGB_TO_601_DIGITAL_FS,
+ IMG_CSC_RGB_TO_709, //!< RGB to be converted to ITU BT.709 YUV
+ IMG_CSC_YIQ_TO_601, //!< YIQ to be converted to ITU BT.601 YUV
+ IMG_CSC_YIQ_TO_709, //!< YIQ to be converted to ITU BT.709 YUV
+ IMG_CSC_BRG_TO_601, //!< BRG to be converted to ITU BT.601 YUV (for XRGB format)
+ IMG_CSC_RBG_TO_601, //!< RBG to be converted to ITU BT.601 YUV (for XBGR format)
+ IMG_CSC_BGR_TO_601, //!< BGR to be converted to ITU BT.601 YUV (for BGRX format)
+ IMG_CSC_UYV_TO_YUV, //!< UYV to be converted to YUV (BT.601 or BT.709)
+ IMG_CSC_PRESETS, //!< End of the enum
+ IMG_CSC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * GOP structure information
+ */
+#define SHIFT_GOP_FRAMETYPE (0)
+#define MASK_GOP_FRAMETYPE (0x3 << SHIFT_GOP_FRAMETYPE)
+#define SHIFT_GOP_REFERENCE (2)
+#define MASK_GOP_REFERENCE (0x1 << SHIFT_GOP_REFERENCE)
+#define SHIFT_GOP_POS (3)
+#define MASK_GOP_POS (0x1f << SHIFT_GOP_POS)
+#define SHIFT_GOP_REF0 (0 + 8)
+#define MASK_GOP_REF0 (0xf << SHIFT_GOP_REF0)
+#define SHIFT_GOP_REF1 (4 + 8)
+#define MASK_GOP_REF1 (0xf << SHIFT_GOP_REF1)
+
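+/*
+ * E.g. the frame type and GOP position packed into a GOP word are
+ * recovered with F_EXTRACT(word, GOP_FRAMETYPE) and
+ * F_EXTRACT(word, GOP_POS) (F_EXTRACT() is from defs.h).
+ */
+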
+/*
+ * Frame types
+ */
+enum img_frame_type {
+ IMG_INTRA_IDR = 0,
+ IMG_INTRA_FRAME,
+ IMG_INTER_P,
+ IMG_INTER_B,
+ IMG_INTER_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Motion vector calculation register settings
+ */
+struct img_mv_settings {
+ unsigned int mv_calc_below;
+ unsigned int mv_calc_colocated;
+ unsigned int mv_calc_config;
+};
+
+/*
+ * Frame template types
+ */
+enum img_frame_template_type {
+ IMG_FRAME_IDR = 0,
+ IMG_FRAME_INTRA,
+ IMG_FRAME_INTER_P,
+ IMG_FRAME_INTER_B,
+ IMG_FRAME_INTER_P_IDR,
+ IMG_FRAME_UNDEFINED,
+ IMG_FRAME_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Rate control modes
+ */
+enum img_rcmode {
+ IMG_RCMODE_NONE = 0,
+ IMG_RCMODE_CBR,
+ IMG_RCMODE_VBR,
+ IMG_RCMODE_ERC, // Example Rate Control
+ IMG_RCMODE_VCM,
+ IMG_RCMODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Video Conferencing Mode (VCM) rate control method's sub modes
+ */
+enum img_rc_vcm_mode {
+ IMG_RC_VCM_MODE_DEFAULT = 0,
+ IMG_RC_VCM_MODE_CFS_NONIFRAMES,
+ IMG_RC_VCM_MODE_CFS_ALLFRAMES,
+ IMG_RC_VCM_MODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Weighted prediction values
+ */
+struct weighted_prediction_values {
+ unsigned char frame_type;
+ unsigned char weighted_pred_flag; // Corresponds to field in the pps
+ unsigned char weighted_bipred_idc;
+ unsigned int luma_log2_weight_denom;
+ unsigned int chroma_log2_weight_denom;
+ /* Y, Cb, Cr Support for 2 ref pictures on P, or 1 pic in each direction on B. */
+ unsigned char weight_flag[3][2];
+ int weight[3][2];
+ int offset[3][2];
+};
+
+enum weighted_bipred_idc {
+ WBI_NONE = 0x0,
+ WBI_EXPLICIT,
+ WBI_IMPLICIT,
+ WBI_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Registers required to configure input scaler
+ */
+struct img_vxe_scaler_setup {
+ unsigned int input_scaler_control;
+ unsigned int scaler_input_size_reg;
+ unsigned int scaler_crop_reg;
+ unsigned int scaler_pitch_reg;
+ unsigned int scaler_control;
+ unsigned int hor_scaler_coeff_regs[4];
+ unsigned int ver_scaler_coeff_regs[4];
+};
+
+/*
+ * Registers required to configure input Colour Space conversion
+ */
+struct img_vxe_csc_setup {
+ unsigned int csc_source_y[3];
+ unsigned int csc_output_clip[2];
+ unsigned int csc_source_cbcr[3];
+};
+
+/*
+ * SETVIDEO & GETVIDEO - Video encode context
+ */
+struct img_mtx_video_context {
+ /* Keep this at the top as it has alignment issues */
+ unsigned long long clock_div_bitrate;
+ unsigned int width_in_mbs; //!< target output width
+ unsigned int picture_height_in_mbs; //!< target output height
+ unsigned int tmp_reconstructed[MAX_PIC_NODES];
+ unsigned int reconstructed[MAX_PIC_NODES];
+ unsigned int colocated[MAX_PIC_NODES];
+ unsigned int mv[MAX_MV];
+ unsigned int inter_view_mv[2];
+ /* !< Send debug information from Register CRCs to Host with the coded buffer */
+ unsigned int debug_crcs;
+ unsigned int writeback_regions[WB_FIFO_SIZE]; //!< Data section
+ unsigned int initial_cpb_removal_delayoffset;
+ unsigned int max_buffer_mult_clock_div_bitrate;
+ unsigned int sei_buffering_period_template;
+ unsigned int sei_picture_timing_template;
+ unsigned short enable_mvc;
+ unsigned short mvc_view_idx;
+ unsigned int slice_params_templates[5];
+ unsigned int pichdr_templates[4];
+ unsigned int seq_header;
+ unsigned int subset_seq_header;
+ unsigned short no_sequence_headers;
+
+ /* !< Slice map of the source picture */
+ unsigned int slice_map[MAX_SOURCE_SLOTS_SL];
+ unsigned int flat_gop_struct; //!< Address of Flat MiniGop structure
+ unsigned char weighted_prediction_enabled;
+ unsigned char mtx_weighted_implicit_bi_pred;
+ unsigned int weighted_prediction_virt_addr[MAX_SOURCE_SLOTS_SL];
+ /* !< Address of hierarchical MiniGop structure */
+ unsigned int hierar_gop_struct;
+ /* Output Parameters of the First Pass */
+ unsigned int firstpass_out_param_addr[MAX_SOURCE_SLOTS_SL];
+ /* !< Selectable Output Best MV Parameters data of the First Pass */
+ unsigned int firstpass_out_best_multipass_param_addr[MAX_SOURCE_SLOTS_SL];
+ /* !< Input Parameters to the second pass */
+ unsigned int mb_ctrl_in_params_addr[MAX_SOURCE_SLOTS_SL];
+ /* !< Strides of source Y data and chroma data */
+ unsigned int pic_row_stride_bytes;
+ /* !< Picture level parameters (supplied by driver) */
+ unsigned int above_params[TOPAZHP_MAX_NUM_PIPES];
+ unsigned int idr_period;
+ unsigned int intra_loop_cnt;
+ unsigned int bframe_count;
+ unsigned char hierarchical;
+ /* !< Only used in MPEG2, 2 bit field (0 = 8 bit, 1 = 9 bit, 2 = 10 bit and 3 = 11 bit
+ * precision). Set to zero for other encode standards.
+ */
+ unsigned char mpeg2_intra_dc_precision;
+ unsigned char pic_on_level[MAX_REF_LEVELS];
+ unsigned int vop_time_resolution;
+ unsigned short kick_size; //!< Number of Macroblocks per kick
+ unsigned short kicks_per_bu; //!< Number of kicks per BU
+ unsigned short kicks_per_picture; //!< Number of kicks per picture
+ struct img_mv_settings mv_settings_idr;
+ struct img_mv_settings mv_settings_non_b[MAX_BFRAMES + 1];
+ unsigned int mv_settings_b_table;
+ unsigned int mv_settings_hierarchical;
+ enum img_format format; //!< Pixel format of the source surface
+ enum img_standard standard; //!< Encoder standard (H264 / H263 / MPEG4 / JPEG)
+ enum img_rcmode rc_mode; //!< RC flavour
+ enum img_rc_vcm_mode rc_vcm_mode; //!< RC VCM flavour
+ /* !< RC VCM maximum frame size percentage allowed to exceed in CFS */
+ unsigned int rc_cfs_max_margin_perc;
+ unsigned char first_pic;
+ unsigned char is_interlaced;
+ unsigned char top_field_first;
+ unsigned char arbitrary_so;
+ unsigned char output_reconstructed;
+ unsigned char disable_bit_stuffing;
+ unsigned char insert_hrd_params;
+ unsigned char max_slices_per_picture;
+ unsigned int f_code;
+ /* Contents Adaptive Rate Control parameters*/
+ unsigned int jmcomp_rc_reg0;
+ unsigned int jmcomp_rc_reg1;
+ /* !< Value to use for MVClip_Config register */
+ unsigned int mv_clip_config;
+ /* !< Value to use for Predictor combiner register */
+ unsigned int pred_comb_control;
+ /* !< Value to use for LRITC_Cache_Chunk_Config register */
+ unsigned int lritc_cache_chunk_config;
+ /* !< Value to use for IPEVectorClipping register */
+ unsigned int ipe_vector_clipping;
+ /* !< Value to use for H264CompControl register */
+ unsigned int h264_comp_control;
+ /* !< Value to use for H264CompIntraPredMode register */
+ unsigned int h264_comp_intra_pred_modes;
+ /* !< Value to use for IPCM_0 Config register */
+ unsigned int ipcm_0_config;
+ /* !< Value to use for IPCM_1 Config register */
+ unsigned int ipcm_1_config;
+ /* !< Value to use for SPEMvdClipRange register */
+ unsigned int spe_mvd_clip_range;
+ /* !< Value to use for MB_HOST_CONTROL register */
+ unsigned int mb_host_ctrl;
+ /* !< Value for the CR_DB_DISABLE_DEBLOCK_IDC register */
+ unsigned int deblock_ctrl;
+ /* !< Value for the CR_DB_DISABLE_DEBLOCK_IDC register */
+ unsigned int skip_coded_inter_intra;
+ unsigned int vlc_control;
+ /* !< Slice control register value. Configures the size of a slice */
+ unsigned int vlc_slice_control;
+ /* !< Slice control register value. Configures the size of a slice */
+ unsigned int vlc_slice_mb_control;
+ /* !< Chroma QP offset to use (when PPS id = 0)*/
+ unsigned short cqp_offset;
+ unsigned char coded_header_per_slice;
+ unsigned char initial_qp_i; //!< Initial QP I frames
+ unsigned char initial_qp_p; //!< Initial QP P frames
+ unsigned char initial_qp_b; //!< Initial QP B frames
+ unsigned int first_pic_flags;
+ unsigned int non_first_pic_flags;
+ unsigned char mc_adaptive_rounding_disable;
+#define AR_REG_SIZE 18
+#define AR_DELTA_SIZE 7
+ unsigned short mc_adaptive_rounding_offsets[AR_REG_SIZE][4];
+ short mc_adaptive_rounding_offsets_delta[AR_DELTA_SIZE][4];
+ /* !< Reconstructed address to allow host picture management */
+ unsigned int patched_recon_address;
+ /* !< Reference 0 address to allow host picture management */
+ unsigned int patched_ref0_address;
+ /* !< Reference 1 address to allow host picture management */
+ unsigned int patched_ref1_address;
+ unsigned int ltref_header[MAX_SOURCE_SLOTS_SL];
+ signed char slice_header_slot_num;
+ unsigned char recon_is_longterm;
+ unsigned char ref0_is_longterm;
+ unsigned char ref1_is_longterm;
+ unsigned char ref_spacing;
+ unsigned char fw_num_pipes;
+ unsigned char fw_first_pipe;
+ unsigned char fw_last_pipe;
+ unsigned char fw_pipes_to_use_flags;
+#if SECURE_IO_PORTS
+ unsigned int secure_io_control;
+#endif
+ struct img_vxe_scaler_setup scaler_setup;
+ struct img_vxe_csc_setup csc_setup;
+
+ struct in_rc_params in_params;
+};
+
+/*
+ * PICMGMT - Command sub-type
+ */
+enum img_picmgmt_type {
+ IMG_PICMGMT_REF_TYPE = 0,
+ IMG_PICMGMT_GOP_STRUCT,
+ IMG_PICMGMT_SKIP_FRAME,
+ IMG_PICMGMT_EOS,
+ IMG_PICMGMT_FLUSH,
+ IMG_PICMGMT_QUANT,
+ IMG_PICMGMT_STRIDE,
+ IMG_PICMGMT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * MTX -> host message structure
+ */
+struct img_writeback_msg {
+ unsigned int cmd_word;
+ union {
+ struct {
+ unsigned int data;
+ unsigned int extra_data;
+ unsigned int writeback_val;
+ };
+ unsigned int coded_package_consumed_idx;
+ };
+};
+
+/*
+ * PROVIDE_SOURCE_BUFFER - Details of the source picture buffer
+ */
+struct img_source_buffer_params {
+ /* !< Host context value. Keep at start for alignment. */
+ unsigned long long host_context;
+ unsigned int phys_addr_y_plane_field_0; //!< Source pic phys addr (Y plane, Field 0)
+ unsigned int phys_addr_u_plane_field_0; //!< Source pic phys addr (U plane, Field 0)
+ unsigned int phys_addr_v_plane_field_0; //!< Source pic phys addr (V plane, Field 0)
+ unsigned int phys_addr_y_plane_field_1; //!< Source pic phys addr (Y plane, Field 1)
+ unsigned int phys_addr_u_plane_field_1; //!< Source pic phys addr (U plane, Field 1)
+ unsigned int phys_addr_v_plane_field_1; //!< Source pic phys addr (V plane, Field 1)
+ /* !< Number of frames in the stream (incl. skipped) */
+ unsigned char display_order_num;
+ unsigned char slot_num; //!< Source slot number
+ unsigned char reserved1;
+ unsigned char reserved2;
+};
+
+/*
+ * Struct describing input parameters to encode a video slice
+ */
+struct slice_params {
+ unsigned int flags; //!< Flags for slice encode
+
+ /* Config registers. These are passed straight
+ * through from drivers to hardware.
+ */
+ unsigned int slice_config; //!< Value to use for Slice Config register
+ unsigned int ipe_control; //!< Value to use for IPEControl register
+ /* !<Value to use for Sequencer Config register */
+ unsigned int seq_config;
+
+ enum img_frame_template_type template_type; //!< Slice header template type
+ /* !< Template of corresponding slice header */
+ struct mtx_header_params slice_hdr_tmpl;
+};
+
+/*
+ * Structure describing coded header data returned by the firmware.
+ * The size of the structure should not be more than 64 bytes (needs to have 64 byte alignment)
+ * (i.e. CODED_BUFFER_HEADER_SIZE)
+ */
+struct coded_data_hdr {
+ unsigned long long host_ctx; //!< Host context value. Keep at top for alignment.
+ unsigned int bytes_written; //!< Bytes in this coded buffer excluding this header
+ unsigned int feedback; //!< Feedback word for this coded buffer
+ unsigned int extra_feedback; //!< Extra feedback word for this coded buffer
+
+ unsigned short i_mb_cnt; //!< Number of MBs coded as I-macroblocks in this slice
+ unsigned short p_mb_cnt; //!< Number of MBs coded as P-macroblocks in this slice
+
+ unsigned short b_mb_cnt; //!< Number of MBs coded as B-macroblocks in this slice
+ unsigned short skip_mb_cnt; //!< Number of MBs coded as skipped in this slice
+
+ unsigned short ipcm_mb_cnt; //!< Number of macroblocks coded as IPCM in this slice
+ unsigned char inter_sum_satd_hi; //!< High 8 bits for the inter sum satd
+ unsigned char intra_sum_satd_hi; //!< High 8 bits for the intra sum satd
+ /* !< Number of bits used for coding DC coefficients in this slice */
+ unsigned int dc_bits;
+ /* !< Number of bits used for coding all Motion vector data in this slice */
+ unsigned int mv_bits;
+ /* !< Number of bits used for coding all MB level symbols in this slice */
+ unsigned int symbols_bits;
+ /* !< Number of bits used for coding residual data in all MBs in this slice */
+ unsigned int residual_bits;
+
+ /* !< Sum of QPy/Qscale for all Inter-MBs in the slice */
+ unsigned int qpy_inter;
+ /* !< Sum of QPy/Qscale for all Intra-MBs in the slice */
+ unsigned int qpy_intra;
+ unsigned int inter_sum_satd; //!< Sum of SATD for all Inter-MBs in the slice
+ unsigned int intra_sum_satd; //!< Sum of SATD for all Intra-MBs in the slice
+};
+
+#define MAX_CODED_BUFFERS_PER_PACKAGE_FW 1
+#define MAX_CODED_BUFFERS_PER_PACKAGE 1
+
+// This structure is temporarily used during the 64 byte minimum DMA transfer from driver to FW
+struct coded_package_dma_info {
+ unsigned int coded_mem_addr[MAX_CODED_BUFFERS_PER_PACKAGE_FW];
+ /* 2 info words */
+ unsigned int coded_header_addr;
+ /* Combined field Host->MTX = IsLinkedList, list segment
+ * (CB memory) size, number of list segments per coded buffer
+ */
+ unsigned int coded_buffer_info;
+
+ // PAD TO 64 BYTES
+ unsigned int padding[16 - MAX_CODED_BUFFERS_PER_PACKAGE_FW - 2];
+};
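+
+/*
+ * Size check (illustrative): with MAX_CODED_BUFFERS_PER_PACKAGE_FW == 1 the
+ * layout is 1 + 2 + (16 - 1 - 2) = 16 words, i.e. exactly the 64 bytes the
+ * minimum DMA transfer requires. A hypothetical compile-time guard would be:
+ *
+ *	BUILD_BUG_ON(sizeof(struct coded_package_dma_info) != 64);
+ */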
+
+/*
+ * Contents of the coded data buffer header feedback word
+ */
+#define SHIFT_CODED_FIRST_BU (24)
+#define MASK_CODED_FIRST_BU (0xFFU << SHIFT_CODED_FIRST_BU)
+#define SHIFT_CODED_SLICE_NUM (16)
+#define MASK_CODED_SLICE_NUM (0xFF << SHIFT_CODED_SLICE_NUM)
+#define SHIFT_CODED_STORAGE_FRAME_NUM (14)
+#define MASK_CODED_STORAGE_FRAME_NUM (0x03 << SHIFT_CODED_STORAGE_FRAME_NUM)
+#define SHIFT_CODED_ENTIRE_FRAME (12)
+#define MASK_CODED_ENTIRE_FRAME (0x01 << SHIFT_CODED_ENTIRE_FRAME)
+#define SHIFT_CODED_IS_SKIPPED (11)
+#define MASK_CODED_IS_SKIPPED (0x01 << SHIFT_CODED_IS_SKIPPED)
+#define SHIFT_CODED_IS_CODED (10)
+#define MASK_CODED_IS_CODED (0x01 << SHIFT_CODED_IS_CODED)
+#define SHIFT_CODED_RECON_IDX (6)
+#define MASK_CODED_RECON_IDX (0x0F << SHIFT_CODED_RECON_IDX)
+#define SHIFT_CODED_SOURCE_SLOT (2)
+#define MASK_CODED_SOURCE_SLOT (0x0F << SHIFT_CODED_SOURCE_SLOT)
+#define SHIFT_CODED_FRAME_TYPE (0)
+#define MASK_CODED_FRAME_TYPE (0x03 << SHIFT_CODED_FRAME_TYPE)
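+
+/*
+ * Illustrative sketch: fields are recovered from the feedback word with the
+ * usual mask-and-shift pattern (names here are local to the example):
+ *
+ *	first_bu   = (feedback & MASK_CODED_FIRST_BU) >> SHIFT_CODED_FIRST_BU;
+ *	slice_num  = (feedback & MASK_CODED_SLICE_NUM) >> SHIFT_CODED_SLICE_NUM;
+ *	frame_type = (feedback & MASK_CODED_FRAME_TYPE) >> SHIFT_CODED_FRAME_TYPE;
+ */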
+
+/*
+ * Contents of the coded data buffer header extra feedback word
+ */
+#define SHIFT_CODED_SLICES_SO_FAR (24)
+#define MASK_CODED_SLICES_SO_FAR (0xFFU << SHIFT_CODED_SLICES_SO_FAR)
+
+#define SHIFT_CODED_SLICES_IN_BUFFER (16)
+#define MASK_CODED_SLICES_IN_BUFFER (0xFF << SHIFT_CODED_SLICES_IN_BUFFER)
+
+#define SHIFT_CODED_BUFFER_NUMBER_USED (2)
+#define MASK_CODED_BUFFER_NUMBER_USED (0xFF << SHIFT_CODED_BUFFER_NUMBER_USED)
+
+#define SHIFT_CODED_FIELD (1)
+#define MASK_CODED_FIELD (0x01 << SHIFT_CODED_FIELD)
+
+#define SHIFT_CODED_PATCHED_RECON (0)
+#define MASK_CODED_PATCHED_RECON (0x01 << SHIFT_CODED_PATCHED_RECON)
+
+#endif /* _TOPAZSCFWIF_H_ */
diff --git a/drivers/media/platform/vxe-vxd/encoder/fw_headers/vxe_common.h b/drivers/media/platform/vxe-vxd/encoder/fw_headers/vxe_common.h
new file mode 100644
index 000000000000..110818d010b4
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/fw_headers/vxe_common.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VXECOMMON_H_
+#define _VXECOMMON_H_
+
+#include "topazscfwif.h"
+#include "../common/vid_buf.h"
+
+/*
+ * Enum describing buffer lock status
+ */
+enum lock_status {
+ BUFFER_FREE = 1, //!< Buffer is not locked
+ HW_LOCK, //!< Buffer is locked by hardware
+ SW_LOCK, //!< Buffer is locked by software
+ NOTDEVICEMEMORY, //!< Buffer is not a device memory buffer
+ LOCK_ST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Struct describing a data buffer
+ */
+struct img_buffer {
+ struct vidio_ddbufinfo mem_info; //!< Pointer to the memory handle for the buffer
+ enum lock_status lock; //!< Lock status for the buffer
+ unsigned int size; //!< Size in bytes of the buffer
+ unsigned int bytes_written; //!< Number of bytes written into buffer
+};
+
+/*
+ * Struct describing a coded data buffer
+ */
+struct img_coded_buffer {
+ struct vidio_ddbufinfo mem_info; //!< Pointer to the memory handle for the buffer
+ enum lock_status lock; //!< Lock status for the buffer
+ unsigned int size; //!< Size in bytes of the buffer
+ unsigned int bytes_written; //!< Number of bytes written into buffer
+};
+
+struct coded_info {
+ struct img_buffer *code_package_fw_buffer;
+ struct coded_package_dma_info *coded_package_fw;
+};
+
+// This structure is used by the Drivers
+struct coded_package_host {
+ struct coded_info mtx_info;
+ /* Array of pointers to buffers */
+ struct img_coded_buffer *coded_buffer[MAX_CODED_BUFFERS_PER_PACKAGE];
+ struct img_buffer *header_buffer;
+ unsigned char num_coded_buffers;
+ unsigned char busy;
+};
+
+/*
+ * Struct describing surface component info
+ */
+struct img_surf_component_info {
+ unsigned int step;
+ unsigned int width;
+ unsigned int height;
+ unsigned int phys_width;
+ unsigned int phys_height;
+};
+
+/*
+ * Struct describing a frame
+ */
+struct img_frame {
+ struct img_buffer *y_plane_buffer; //!< pointer to the image buffer
+ struct img_buffer *u_plane_buffer; //!< pointer to the image buffer
+ struct img_buffer *v_plane_buffer; //!< pointer to the image buffer
+ unsigned int width_bytes; //!< stride of pBuffer
+ unsigned int height; //!< height of picture in pBuffer
+
+ unsigned int component_count; //!< number of colour components used
+ enum img_format format;
+ unsigned int component_offset[3];
+ unsigned int bottom_component_offset[3];
+ struct img_surf_component_info component_info[3];
+ int y_component_offset;
+ int u_component_offset;
+ int v_component_offset;
+ int field0_y_offset, field1_y_offset;
+ int field0_u_offset, field1_u_offset;
+ int field0_v_offset, field1_v_offset;
+ unsigned short src_y_stride_bytes, src_uv_stride_bytes;
+ unsigned char imported;
+};
+
+/*
+ * Struct describing an array of frames
+ */
+struct img_frame_array {
+ unsigned int array_size; //!< Number of frames in array
+ struct img_frame *frame; //!< Pointer to start of frame array
+};
+
+/*
+ * Struct describing list items
+ */
+struct list_item {
+ struct list_item *next; //!< Next item in the list
+ void *data; //!< pointer to list item data
+};
+
+/*
+ * Struct describing rate control params
+ */
+struct img_rc_params {
+ unsigned int bits_per_second; //!< Bit rate
+ /* !< Transfer rate of encoded data from encoder to the output */
+ unsigned int transfer_bits_per_second;
+ unsigned int initial_qp_i; //!< Initial QP I frames (only field used by JPEG)
+ unsigned int initial_qp_p; //!< Initial QP P frames (only field used by JPEG)
+ unsigned int initial_qp_b; //!< Initial QP B frames (only field used by JPEG)
+ unsigned int bu_size; //!< Basic unit size
+ unsigned int frame_rate;
+ unsigned int buffer_size;
+ unsigned int intra_freq;
+ short min_qp;
+ short max_qp;
+ unsigned char rc_enable;
+ int initial_level;
+ int initial_delay;
+ unsigned short bframes;
+ unsigned char hierarchical;
+
+ /* !< Enable movement of slice boundary when Qp is high */
+ unsigned char enable_slice_bob;
+ /* !< Maximum number of rows the slice boundary can be moved */
+ unsigned char max_slice_bob;
+ /* !< Minimum Qp at which slice bobbing should take place */
+ unsigned char slice_bob_qp;
+
+ signed char qcp_offset;
+ unsigned char sc_detect_disable;
+ unsigned int slice_byte_limit;
+ unsigned int slice_mb_limit;
+ enum img_rcmode rc_mode;
+ enum img_rc_vcm_mode rc_vcm_mode;
+ unsigned int rc_cfs_max_margin_perc;
+ unsigned char disable_frame_skipping;
+ unsigned char disable_vcm_hardware;
+};
+
+/*
+ * Bit fields for ui32MmuFlags
+ */
+#define MMU_USE_MMU_FLAG 0x00000001
+#define MMU_EXTENDED_ADDR_FLAG 0x00000004
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/header_gen.c b/drivers/media/platform/vxe-vxd/encoder/header_gen.c
new file mode 100644
index 000000000000..8a321689816e
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/header_gen.c
@@ -0,0 +1,1751 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Encoder coded header generation function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include "fw_headers/topazscfwif.h"
+#include "fw_headers/defs.h"
+#include "header_gen.h"
+#include "img_errors.h"
+#include "reg_headers/topazhp_core_regs.h"
+#include "topaz_api.h"
+
+#define ELEMENTS_EMPTY 9999
+#define MAXNUMBERELEMENTS 32
+#define _1080P_30FPS (((1920 * 1088) / 256) * 30)
+
+void insert_element_token(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ enum header_element_type token)
+{
+ unsigned char offset;
+ unsigned char *p;
+
+ if (mtx_header->elements != ELEMENTS_EMPTY) {
+ if (element_pointers[mtx_header->elements]->element_type ==
+ ELEMENT_STARTCODE_RAWDATA ||
+ element_pointers[mtx_header->elements]->element_type == ELEMENT_RAWDATA ||
+ element_pointers[mtx_header->elements]->element_type ==
+ ELEMENT_STARTCODE_MIDHDR) {
+ /*
+ * Add a new element aligned to a word boundary.
+ * Find the raw-bit size in bytes (rounded up to a word
+ * boundary): the number of raw bits (excluding the size of
+ * the bit count field) + the size of the bit count field.
+ */
+ offset = element_pointers[mtx_header->elements]->size + 8 + 31;
+ offset /= 32; /*Now contains rawbits size in words */
+ offset += 1; /*Now contains rawbits+element_type size in words */
+ /* Convert to number of bytes (total size of structure
+ * in bytes, aligned to word boundary).
+ */
+ offset *= 4;
+ } else {
+ offset = 4;
+ }
+
+ mtx_header->elements++;
+ p = (unsigned char *)element_pointers[mtx_header->elements - 1];
+ p += offset;
+ element_pointers[mtx_header->elements] = (struct mtx_header_element *)p;
+ } else {
+ mtx_header->elements = 0;
+ }
+
+ element_pointers[mtx_header->elements]->element_type = token;
+ element_pointers[mtx_header->elements]->size = 0;
+}
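+
+/*
+ * Worked example (illustrative): if the current raw-data element already
+ * holds 13 bits, the offset to the next word-aligned element is
+ * (13 + 8 + 31) / 32 = 1 word of raw bits, plus 1 word for the element_type
+ * field, times 4 = 8 bytes from the start of the current element.
+ */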
+
+unsigned int write_upto_8bits_to_elements(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ unsigned int write_bits, unsigned short bit_cnt)
+{
+ /* This is the core function to write bits/bytes to a header stream,
+ * it writes them directly to ELEMENT structures.
+ */
+ unsigned char *write_bytes;
+ unsigned char *size_bits;
+ union input_value {
+ unsigned int input16;
+ unsigned char input8[2];
+ } input_val;
+
+ unsigned char out_byte_index;
+ short shift;
+
+ if (bit_cnt == 0)
+ return 0;
+
+ /* First ensure that unused bits in write_bits are zeroed */
+ write_bits &= (0x00ff >> (8 - bit_cnt));
+ input_val.input16 = 0;
+ /*Pointer to the bit count field */
+ size_bits = &element_pointers[mtx_header->elements]->size;
+ /*Pointer to the space where header bits are to be written */
+ write_bytes = &element_pointers[mtx_header->elements]->bits;
+ out_byte_index = (size_bits[0] / 8);
+
+ if (!(size_bits[0] & 7)) {
+ if (size_bits[0] >= 120) {
+ /* Element has reached its maximum bit count, time to start a new one */
+ mtx_header->elements++; /* Increment element index */
+ /*Element pointer set to position of next element (120/8 = 15 bytes) */
+ element_pointers[mtx_header->elements] =
+ (struct mtx_header_element *)&write_bytes[15];
+ /*Write ELEMENT_TYPE */
+ element_pointers[mtx_header->elements]->element_type = ELEMENT_RAWDATA;
+ /* Set new element size (bits) to zero */
+ element_pointers[mtx_header->elements]->size = 0;
+ /* Begin writing to the new element */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, write_bits,
+ bit_cnt);
+ return (unsigned int)bit_cnt;
+ }
+ write_bytes[out_byte_index] = 0; /* Beginning a new byte, clear byte */
+ }
+
+ shift = (short)((8 - bit_cnt) - (size_bits[0] & 7));
+
+ if (shift >= 0) {
+ write_bits <<= shift;
+ write_bytes[out_byte_index] |= write_bits;
+ size_bits[0] = size_bits[0] + bit_cnt;
+ } else {
+ input_val.input8[1] = (unsigned char)write_bits + 256;
+ input_val.input16 >>= -shift;
+ write_bytes[out_byte_index] |= input_val.input8[1];
+
+ size_bits[0] = size_bits[0] + bit_cnt;
+ size_bits[0] = size_bits[0] - ((unsigned char)-shift);
+ input_val.input8[0] = input_val.input8[0] >> (8 + shift);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, input_val.input8[0],
+ (unsigned short)-shift);
+ }
+
+ return (unsigned int)bit_cnt;
+}
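+
+/*
+ * Worked example (illustrative): with 6 bits already in the current byte,
+ * writing 5 more bits gives shift = (8 - 5) - (6 & 7) = -3, so the negative
+ * branch stores the top 2 bits of the value in the current byte and recurses
+ * to put the remaining 3 bits at the start of the next byte.
+ */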
+
+unsigned int write_upto_32bits_to_elements(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ unsigned int write_bits, unsigned int bit_cnt)
+{
+ unsigned int bit_lp;
+ unsigned int end_byte;
+ unsigned char bytes[4];
+
+ for (bit_lp = 0; bit_lp < 4; bit_lp++) {
+ bytes[bit_lp] = (unsigned char)(write_bits & 255);
+ write_bits = write_bits >> 8;
+ }
+
+ end_byte = ((bit_cnt + 7) / 8);
+ if ((bit_cnt) % 8)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, bytes[end_byte - 1],
+ (unsigned char)((bit_cnt) % 8));
+ else
+ write_upto_8bits_to_elements(mtx_header, element_pointers, bytes[end_byte - 1], 8);
+
+ if (end_byte > 1)
+ for (bit_lp = end_byte - 1; bit_lp > 0; bit_lp--)
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ bytes[bit_lp - 1], 8);
+
+ return bit_cnt;
+}
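+
+/*
+ * Note (illustrative): the value is emitted most-significant chunk first; a
+ * 10-bit value is written as its top 2 bits followed by its low 8 bits, so
+ * the bitstream stays in network bit order however the bytes are split.
+ */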
+
+void h264_write_bits_startcode_prefix_element(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ unsigned int byte_size)
+{
+ /* GENERATES THE FIRST ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE */
+ unsigned int lp;
+ /*
+ * Byte aligned (bit 0)
+ * (3 bytes in a slice header when the slice is first in a
+ * picture without a sequence/picture header before it)
+ */
+ for (lp = 0; lp < byte_size - 1; lp++)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 8);
+ /* Byte aligned (bit 32 or 24) */
+}
+
+unsigned int generate_ue(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers, unsigned int val)
+{
+ unsigned int lp;
+ unsigned char zeros;
+ unsigned int chunk;
+ unsigned int bit_cnter = 0;
+
+ for (lp = 1, zeros = 0; (lp - 1) < val; lp = lp + lp, zeros++)
+ val = val - lp;
+
+ /*
+ * zeros = number of preceding zeros required
+ * val = value to append after the zeros and the 1 bit
+ * Write the preceding zeros
+ */
+ for (lp = (unsigned int)zeros; lp + 1 > 8; lp -= 8)
+ bit_cnter += write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+
+ /* Write zeros and 1 bit set */
+ bit_cnter +=
+ write_upto_8bits_to_elements(mtx_header, element_pointers, (unsigned char)1,
+ (unsigned char)(lp + 1));
+
+ /* Write Numeric part */
+ while (zeros > 8) {
+ zeros -= 8;
+ chunk = (val >> zeros);
+ bit_cnter += write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (unsigned char)chunk, 8);
+ val = val - (chunk << zeros);
+ }
+
+ bit_cnter += write_upto_8bits_to_elements(mtx_header,
+ element_pointers, (unsigned char)val, zeros);
+
+ return bit_cnter;
+}
+
+unsigned int generate_se(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers, int val)
+{
+ unsigned int bit_cnter;
+ unsigned int code_num;
+
+ bit_cnter = 0;
+
+ if (val > 0)
+ code_num = (unsigned int)(val + val - 1);
+ else
+ code_num = (unsigned int)(-val - val);
+
+ bit_cnter = generate_ue(mtx_header, element_pointers, code_num);
+
+ return bit_cnter;
+}
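+
+/*
+ * Worked example (illustrative): these are Exp-Golomb codes. For val = 5,
+ * generate_ue() leaves zeros = 2 and val = 2, emitting "00" + "1" + "10" =
+ * 00110, which matches ue(5) in the H.264 spec (code_num + 1 = 6 = 110b with
+ * two leading zeros). generate_se() first maps a signed value to a code_num,
+ * e.g. val = -2 becomes code_num = 4.
+ */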
+
+void h264_write_bits_scaling_lists(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_scaling_matrix_params *scaling_matrix,
+ unsigned char write_8x8)
+{
+ /* Used by H264_WriteBits_SequenceHeader and H264_WriteBits_PictureHeader */
+ unsigned int list, index;
+ int cur_scale, delta_scale;
+
+ if (!scaling_matrix) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_CUSTOM_QUANT);
+ return;
+ }
+
+ for (list = 0; list < 6; list++) {
+ /* seq_scaling_list_present_flag[ui32List] = 1 */
+ if (scaling_matrix->list_mask & (1 << list)) {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ cur_scale = 8;
+ for (index = 0; index < 16; index++) {
+ delta_scale =
+ ((int)scaling_matrix->scaling_lists4x4[list][index]) -
+ cur_scale;
+ cur_scale += delta_scale;
+ /* delta_scale */
+ generate_se(mtx_header, element_pointers, delta_scale);
+ }
+ } else {
+ /* seq_scaling_list_present_flag[ui32List] = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+ }
+
+ if (!write_8x8)
+ return;
+
+ for (; list < 8; list++) {
+ /* seq_scaling_list_present_flag[ui32List] = 1 */
+ if (scaling_matrix->list_mask & (1 << list)) {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ cur_scale = 8;
+ for (index = 0; index < 64; index++) {
+ delta_scale =
+ ((int)scaling_matrix->scaling_lists8x8[list - 6][index]) -
+ cur_scale;
+ cur_scale += delta_scale;
+ /* delta_scale */
+ generate_se(mtx_header, element_pointers, delta_scale);
+ }
+ } else {
+ /* seq_scaling_list_present_flag[ui32List] = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+ }
+}
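+
+/*
+ * Worked example (illustrative): for a flat 4x4 list of all 16s the first
+ * delta_scale is 16 - 8 = 8 (cur_scale starts at 8) and the remaining
+ * fifteen deltas are 0, so the list costs se(8) plus fifteen one-bit se(0)
+ * codes in the bitstream.
+ */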
+
+void h264_write_bits_vui_params(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_vui_params *vui_params)
+{
+ /* Builds VUI Params for the Sequence Header (only present in the 1st sequence of stream) */
+
+ if (vui_params->aspect_ratio_info_present_flag == 1) {
+ /* aspect_ratio_info_present_flag = 1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->aspect_ratio_info_present_flag, 1);
+ /* aspect_ratio_idc (8 bits) = vui_params->aspect_ratio_idc in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->aspect_ratio_idc, 8);
+
+ if (vui_params->aspect_ratio_idc == 255) {
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (vui_params->sar_width >> 8), 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->sar_width, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (vui_params->sar_height >> 8), 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->sar_height, 8);
+ }
+ } else {
+ /* aspect_ratio_info_present_flag = 0 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* overscan_info_present_flag (1 bit) = 0 in Topaz */
+ (0 << 3) |
+ /* video_signal_type_present_flag (1 bit) = 0 in Topaz */
+ (0 << 2) |
+ /* chroma_loc_info_present_flag (1 bit) = 0 in Topaz */
+ (0 << 1) |
+ /* timing_info_present_flag (1 bit) = 1 in Topaz */
+ (1),
+ 4);
+ /* num_units_in_tick (32 bits) = 1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 8);
+
+ /* time_scale (32 bits) = frame rate */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (unsigned char)vui_params->time_scale, 8);
+ /* fixed_frame_rate_flag (1 bit) = 1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* nal_hrd_parameters_present_flag (1 bit) = 1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* Definitions for nal_hrd_parameters() contained in VUI structure for Topaz
+ * cpb_cnt_minus1 ue(v) = 0 in Topaz = 1b
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* bit_rate_scale (4 bits) = 0 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 4);
+ /* cpb_size_scale (4 bits) = 2 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 2, 4);
+ /* bit_rate_value_minus1[0] ue(v) = (Bitrate/64)-1 [RANGE:0 to (2^32)-2] */
+ generate_ue(mtx_header, element_pointers, vui_params->bit_rate_value_minus1);
+ /* cpb_size_value_minus1[0] ue(v) = (CPB_Bits_Size/16)-
+ * 1 where CPB_Bits_Size = 1.5 * Bitrate [RANGE:0 to (2^32)-2]
+ */
+ generate_ue(mtx_header, element_pointers, vui_params->cbp_size_value_minus1);
+ /* cbr_flag[0] (1 bit) = 0 for VBR, 1 for CBR */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, vui_params->cbr, 1);
+ /*initial_cpb_removal_delay_length_minus1 (5 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->initial_cpb_removal_delay_length_minus1, 5);
+ /* cpb_removal_delay_length_minus1 (5 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->cpb_removal_delay_length_minus1, 5);
+ /* dpb_output_delay_length_minus1 (5 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ vui_params->dpb_output_delay_length_minus1, 5);
+ /* time_offset_length (5 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, vui_params->time_offset_length,
+ 5);
+
+ /* End of nal_hrd_parameters(); vcl_hrd_parameters_present_flag (1 bit) = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /* low_delay_hrd_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /* pic_struct_present_flag (1 bit) = 0 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* bitstream_restriction_flag (1 bit) = 1 in Topaz */
+ (1 << 1) |
+ /* motion_vectors_over_pic_boundaries_flag (1 bit) = 1
+ * in Topaz
+ */
+ (1 << 0),
+ 2);
+ /* max_bytes_per_pic_denom ue(v) = 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+ /* max_bits_per_mb_denom ue(v) = 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+ /* log2_max_mv_length_horizontal ue(v) = 9 (max horizontal vector is 128 integer samples) */
+ generate_ue(mtx_header, element_pointers, 9);
+ /* log2_max_mv_length_vertical ue(v) = 9 (max vertical vector is 103 integer samples) */
+ generate_ue(mtx_header, element_pointers, 9);
+ /* num_reorder_frames ue(v) = 0 */
+ generate_ue(mtx_header, element_pointers, vui_params->num_reorder_frames);
+ /* max_dec_frame_buffering ue(v) = 0 */
+ generate_ue(mtx_header, element_pointers, vui_params->max_dec_frame_buffering);
+}
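+
+/*
+ * Worked example (illustrative): following the comments above, a 2 Mbit/s
+ * CBR stream would signal bit_rate_value_minus1 = (2000000 / 64) - 1 = 31249
+ * and, with CPB_Bits_Size = 1.5 * 2000000 = 3000000 bits,
+ * cbp_size_value_minus1 = (3000000 / 16) - 1 = 187499.
+ */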
+
+void h264_write_bits_sequence_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_sequence_header_params *sh_params,
+ struct h264_crop_params *crop,
+ struct h264_scaling_matrix_params *scaling_matrix,
+ unsigned char aso)
+{
+ /* calculate some of the VUI parameters here */
+ if (sh_params->profile == SH_PROFILE_BP) {
+ /* for Baseline profile we never re-roder frames */
+ sh_params->vui_params.num_reorder_frames = 0;
+ sh_params->vui_params.max_dec_frame_buffering = sh_params->max_num_ref_frames;
+ } else {
+ /* in higher profiles we can do up to 3 level hierarchical B frames */
+ if (!sh_params->vui_params.num_reorder_frames)
+ sh_params->vui_params.num_reorder_frames = sh_params->max_num_ref_frames;
+ sh_params->vui_params.max_dec_frame_buffering =
+ max(sh_params->max_num_ref_frames,
+ sh_params->vui_params.num_reorder_frames);
+ }
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers, 4);
+
+ /* GENERATES THE FIRST ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE */
+ /*
+ * 4 Byte StartCodePrefix Pregenerated in: H264_WriteBits_StartCodePrefix_Element()
+ * Byte aligned (bit 32)
+ * forbidden_zero_bit=0
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, (0 << 7) |
+ (0x3 << 5) | /* nal_ref_idc = 11 (01 is also legal) */
+ (7), /* nal_unit_type=00111 */
+ 8);
+
+ /* Byte aligned (bit 40) */
+ switch (sh_params->profile) {
+ case SH_PROFILE_BP:
+ /* profile_idc = 8 bits = 66 for BP (PROFILE_IDC_BP) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 66, 8);
+
+ /* Byte aligned (bit 48) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* constraint_set0_flag = 1 for BP constraints */
+ (1 << 7) |
+ /* constraint_set1_flag = 1 for MP constraints */
+ ((aso ? 0 : 1) << 6) |
+ /* constraint_set2_flag = 1 for EP constraints */
+ (1 << 5) |
+ /* constraint_set3_flag = 1
+ * for level 1b, 0 for others
+ */
+ ((sh_params->level == SH_LEVEL_1B ? 1 : 0) << 4),
+ /* reserved_zero_4bits = 0 */
+ 8);
+ break;
+
+ case SH_PROFILE_MP:
+ /* profile_idc = 8 bits = 77 for MP (PROFILE_IDC_MP) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 77, 8);
+
+ /* Byte aligned (bit 48) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* constraint_set0_flag = 0 for no BP constraints */
+ (0 << 7) |
+ /* constraint_set1_flag = 1 for MP constraints */
+ (1 << 6) |
+ /* constraint_set2_flag = 1 for EP constraints */
+ (1 << 5) |
+ /* constraint_set3_flag = 1
+ * for level 1b, 0 for others
+ */
+ ((sh_params->level == SH_LEVEL_1B ? 1 : 0) << 4),
+ /* reserved_zero_4bits = 0 */
+ 8);
+ break;
+
+ case SH_PROFILE_HP:
+ /* profile_idc = 8 bits = 100 for HP (PROFILE_IDC_HP) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 100, 8);
+
+ /* Byte aligned (bit 48) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* constraint_set0_flag = 0 for no BP constraints */
+ (0 << 7) |
+ /* constraint_set1_flag = 0 for no MP constraints */
+ (0 << 6) |
+ /* constraint_set2_flag = 0 for no EP constraints */
+ (0 << 5) |
+ /* constraint_set3_flag = 0 */
+ (0 << 4),
+ /* reserved_zero_4bits = 0 */
+ 8);
+ break;
+
+ case SH_PROFILE_H444P:
+ /* profile_idc = 8 bits = 244 for H444P (PROFILE_IDC_H444P) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 244, 8);
+
+ /* Byte aligned (bit 48) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* constraint_set0_flag = 0 for no BP constraints */
+ (0 << 7) |
+ /* constraint_set1_flag = 0 for no MP constraints */
+ (0 << 6) |
+ /* constraint_set2_flag = 0 for no EP constraints */
+ (0 << 5) |
+ /* constraint_set3_flag = 0 */
+ (0 << 4),
+ /* reserved_zero_4bits = 0 */
+ 8);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Byte aligned (bit 56)
+ * level_idc must be set to 9 in the SPS when the level is 1b
+ * and the profile is High or High 4:4:4
+ */
+ if (sh_params->profile == SH_PROFILE_HP || sh_params->profile == SH_PROFILE_H444P)
+ /* level_idc (8 bits) = 9 for 1b, 10xlevel for others */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->level == SH_LEVEL_1B) ? 9 :
+ (unsigned char)sh_params->level, 8);
+
+ else
+ /* level_idc (8 bits) = 11 for 1b, 10xlevel for others */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->level == SH_LEVEL_1B) ? 11 :
+ (unsigned char)sh_params->level, 8);
+
+ generate_ue(mtx_header, element_pointers, 0); /* seq_parameter_set_id = 0 */
+
+ if (sh_params->profile == SH_PROFILE_HP || sh_params->profile == SH_PROFILE_H444P) {
+ generate_ue(mtx_header, element_pointers, 1); /* chroma_format_idc = 1 */
+ /* bit_depth_luma_minus8 = 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+ /* bit_depth_chroma_minus8 = 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* qpprime_y_zero_transform_bypass_flag = 1
+ * if lossless
+ */
+ sh_params->is_lossless ? 1 : 0, 1);
+
+ if (sh_params->use_default_scaling_list ||
+ sh_params->seq_scaling_matrix_present_flag) {
+ /* seq_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ if (!sh_params->use_default_scaling_list) {
+ h264_write_bits_scaling_lists(mtx_header, element_pointers,
+ scaling_matrix, TRUE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ } else {
+ /* seq_scaling_list_present_flag[i] = 0; 0 < i < 8 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ }
+ } else {
+ /* seq_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+ }
+
+ generate_ue(mtx_header, element_pointers, 1); /* log2_max_frame_num_minus4 = 1 */
+ generate_ue(mtx_header, element_pointers, 0); /* pic_order_cnt_type = 0 */
+ /* log2_max_pic_order_cnt_lsb_minus4 = 2 */
+ generate_ue(mtx_header, element_pointers, sh_params->log2_max_pic_order_cnt - 4);
+ /* num_ref_frames ue(v), typically 2 */
+ generate_ue(mtx_header, element_pointers, sh_params->max_num_ref_frames);
+
+ /* Byte aligned (bit 72) */
+ /* gaps_in_frame_num_value_allowed_flag (1 bit) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->gaps_in_frame_num_value), 1);
+
+ /*
+ * GENERATES THE SECOND, VARIABLE LENGTH, ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: xx
+ */
+ /* pic_width_in_mbs_minus1: ue(v) from 10 to 44 (176 to 720 pixels per row) */
+ generate_ue(mtx_header, element_pointers, sh_params->width_in_mbs_minus1);
+ /* pic_height_in_maps_units_minus1:
+ * ue(v) Value from 8 to 35 (144 to 576 pixels per column)
+ */
+ generate_ue(mtx_header, element_pointers, sh_params->height_in_maps_units_minus1);
+ /* We don't know the alignment at this point, so will have to use bit writing functions */
+ /* frame_mb_only_flag 1=frame encoding, 0=field encoding */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, sh_params->frame_mbs_only_flag,
+ 1);
+
+ if (!sh_params->frame_mbs_only_flag) /* in the case of interlaced encoding */
+ /* mb_adaptive_frame_field_flag = 0 in Topaz(field encoding at the sequence level) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+
+ /* direct_8x8_inference_flag=1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ if (crop->clip) {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ generate_ue(mtx_header, element_pointers, crop->left_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->right_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->top_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->bottom_crop_offset);
+ } else {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ /*
+ * GENERATES THE THIRD ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: xx
+ */
+ /* vui_parameters_present_flag (VUI only in 1st sequence of stream) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->vui_params_present),
+ 1);
+ if (sh_params->vui_params_present > 0)
+ h264_write_bits_vui_params(mtx_header, element_pointers, &sh_params->vui_params);
+
+ /* Finally we need to align to the next byte */
+ /* Tell MTX to insert the byte align field (we don't know
+ * final stream size for alignment at this point)
+ */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_INSERTBYTEALIGN_H264);
+}
+
+/*
+ * Prepare an H264 SPS in a form for the MTX to encode into a bitstream.
+ */
+void h264_prepare_sequence_header(struct mtx_header_params *mtx_header,
+ unsigned int pic_width_in_mbs,
+ unsigned int pic_height_in_mbs,
+ unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop,
+ struct h264_sequence_header_params *sh_params,
+ unsigned char aso)
+{
+ struct mtx_header_element *this_element;
+ struct mtx_header_element *element_pointers[MAXNUMBERELEMENTS];
+
+ /*
+ * Builds a sequence, picture and slice header from the given input
+ * parameters (start of a new frame). It is essential that we
+ * initialise our header structures before building.
+ */
+ mtx_header->elements = ELEMENTS_EMPTY;
+ this_element = (struct mtx_header_element *)mtx_header->element_stream;
+ element_pointers[0] = this_element;
+
+ h264_write_bits_sequence_header(mtx_header, element_pointers, sh_params, crop, NULL, aso);
+ /* Has been used as an index, so add 1 for a valid element count */
+ mtx_header->elements++;
+}
+
+void h264_write_bits_picture_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_picture_header_params *ph_params,
+ struct h264_scaling_matrix_params *scaling_matrix)
+{
+ /* Begin building the picture header element */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers, 4);
+
+ /* GENERATES THE FIRST (STATIC) ELEMENT OF THE H264_PICTURE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: 18
+ * 4 Byte StartCodePrefix Pregenerated in: H264_WriteBits_StartCodePrefix_Element()
+ * Byte aligned (bit 32)
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (0 << 7) | /* forbidden_zero_bit */
+ (1 << 5) | /* nal_ref_idc (2 bits) = 1 */
+ (8), /* nal_unit_type (5 bits) = 8 */
+ 8);
+
+ /* Byte aligned (bit 40) */
+ /* pic_parameter_set_id ue(v) */
+ generate_ue(mtx_header, element_pointers, ph_params->pic_parameter_set_id);
+ /* seq_parameter_set_id ue(v) */
+ generate_ue(mtx_header, element_pointers, ph_params->seq_parameter_set_id);
+
+ /* entropy_coding_mode_flag (1 bit) 0 for CAVLC */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (ph_params->entropy_coding_mode_flag << 4) |
+ (0 << 3) | /* pic_order_present_flag (1 bit) = 0 */
+ (1 << 2) | /* num_slice_groups_minus1 ue(v) = 0 in Topaz */
+ (1 << 1) | /* num_ref_idx_l0_active_minus1 ue(v) = 0 in Topaz*/
+ (1),/* num_ref_idx_l1_active_minus1 ue(v) = 0 in Topaz */
+ 5);
+
+ /* WEIGHTED PREDICTION */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ /* weighted_pred_flag (1 bit) */
+ (ph_params->weighted_pred_flag << 2) |
+ /* weighted_bipred_idc (2 bits) */
+ (ph_params->weighted_bipred_idc), 3);
+
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_QP);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ /*
+ * GENERATES THE SECOND ELEMENT OF THE H264_PICTURE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: 5
+ * The following field will be generated as a special case by MTX - so not here
+ * Generate_se(mtx_header, ph_params->pic_init_qp_minus26); pic_init_qp_minus26
+ * se(v) = -26 to 25 in Topaz
+ */
+ generate_se(mtx_header, element_pointers, 0); /* pic_init_qs_minus26 se(v) = 0 in Topaz */
+ /* chroma_qp_index_offset se(v) = 0 in Topaz */
+ generate_se(mtx_header, element_pointers, ph_params->chroma_qp_index_offset);
+ /* deblocking_filter_control_present_flag (1 bit) = 1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, (1 << 2) |
+ /* constrained_intra_pred_Flag (1 bit) = 0 in Topaz */
+ (ph_params->constrained_intra_pred_flag << 1) |
+ /* redundant_pic_cnt_present_flag (1 bit) = 0 in Topaz */
+ (0),
+ 3);
+
+ if (ph_params->transform_8x8_mode_flag ||
+ ph_params->second_chroma_qp_index_offset != ph_params->chroma_qp_index_offset ||
+ ph_params->pic_scaling_matrix_present_flag) {
+ /* 8x8 transform flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ ph_params->transform_8x8_mode_flag, 1);
+ if (ph_params->pic_scaling_matrix_present_flag) {
+ /* pic_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ if (!ph_params->use_default_scaling_list) {
+ h264_write_bits_scaling_lists(mtx_header, element_pointers,
+ scaling_matrix,
+ ph_params->transform_8x8_mode_flag);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ } else {
+ unsigned char scaling_list_size =
+ ph_params->transform_8x8_mode_flag ? 8 : 6;
+
+ /* pic_scaling_list_present_flag[i] = 0;
+ * 0 < i < 6 (+ 2 ( +4 for chroma444) for 8x8)
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0,
+ scaling_list_size);
+ }
+ } else {
+ /* pic_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+ /* second_chroma_qp_index_offset se(v) = 0 in Topaz */
+ generate_se(mtx_header, element_pointers, ph_params->second_chroma_qp_index_offset);
+ }
+ /* Tell MTX to insert the byte align field (we don't know final
+ * stream size for alignment at this point)
+ */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_INSERTBYTEALIGN_H264);
+}
+
+/*
+ * Prepare an H264 PPS in a form for the MTX to encode into a bitstream
+ */
+void h264_prepare_picture_header(struct mtx_header_params *mtx_header,
+ unsigned char cabac_enabled,
+ unsigned char transform_8x8,
+ unsigned char intra_constrained,
+ signed char cqp_offset,
+ unsigned char weighted_prediction,
+ unsigned char weighted_bi_pred,
+ unsigned char mvc_pps,
+ unsigned char scaling_matrix,
+ unsigned char scaling_lists)
+{
+ /*
+ * Builds a picture header from the given input parameters (start of a
+ * new frame). It is essential that we initialise our header structures
+ * before building.
+ */
+ struct h264_picture_header_params ph_params;
+ struct mtx_header_element *this_element;
+ struct mtx_header_element *element_pointers[MAXNUMBERELEMENTS];
+
+ mtx_header->elements = ELEMENTS_EMPTY;
+ this_element = (struct mtx_header_element *)mtx_header->element_stream;
+ element_pointers[0] = this_element;
+
+ ph_params.pic_parameter_set_id = mvc_pps ? MVC_PPS_ID : 0;
+ ph_params.seq_parameter_set_id = mvc_pps ? MVC_SPS_ID : 0;
+ ph_params.entropy_coding_mode_flag = cabac_enabled ? 1 : 0;
+ ph_params.weighted_pred_flag = weighted_prediction;
+ ph_params.weighted_bipred_idc = weighted_bi_pred;
+ ph_params.chroma_qp_index_offset = cqp_offset;
+ ph_params.constrained_intra_pred_flag = intra_constrained ? 1 : 0;
+ ph_params.transform_8x8_mode_flag = transform_8x8 ? 1 : 0;
+ ph_params.pic_scaling_matrix_present_flag = scaling_matrix ? 1 : 0;
+ ph_params.use_default_scaling_list = !scaling_lists;
+ ph_params.second_chroma_qp_index_offset = cqp_offset;
+
+ h264_write_bits_picture_header(mtx_header, element_pointers, &ph_params, NULL);
+ /* Has been used as an index, so add 1 for a valid element count */
+ mtx_header->elements++;
+}
+
+/* SEI_INSERTION */
+void h264_write_bits_aud_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers)
+{
+ /* It is essential that we insert the element before we try to fill it! */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ /* 00 00 00 01 start code prefix */
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers, 4);
+ /* AUD nal_unit_type = 09 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 9, 8);
+
+ /* primary_pic_type u(3) 0=I slice, 1=P or I slice, 2=P,B or I slice */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 2, 3);
+ /* rbsp_trailing_bits */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1 << 4, 5);
+
+ /* Write terminator */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0x80, 8);
+}
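+
+/*
+ * Illustrative byte trace: the calls above emit the start code 00 00 00 01,
+ * the NAL header byte 0x09, then primary_pic_type = 2 (010b) followed by the
+ * rbsp_trailing_bits (10000b), i.e. payload byte 0x50, and finally the 0x80
+ * terminator byte.
+ */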
+
+void h264_prepare_aud_header(struct mtx_header_params *mtx_header)
+{
+ /* It is essential that we initialise our header structures before building */
+ struct mtx_header_element *this_element;
+ struct mtx_header_element *element_pointers[MAXNUMBERELEMENTS];
+
+ mtx_header->elements = ELEMENTS_EMPTY;
+ this_element = (struct mtx_header_element *)mtx_header->element_stream;
+ element_pointers[0] = this_element;
+
+ h264_write_bits_aud_header(mtx_header, element_pointers);
+ /* Has been used as an index, so add 1 for a valid element count */
+ mtx_header->elements++;
+}
+
+static void insert_prefix_nal_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_slice_header_params *slh_params,
+ unsigned char cabac_enabled)
+{
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ /*Can be 3 or 4 bytes - always 4 bytes in our implementations */
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers,
+ slh_params->startcode_prefix_size_bytes);
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* forbidden_zero_bit */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REFERENCE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* nal unit type */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 14, 5);
+ /* SVC extension flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /* non_idr_flag */
+ if (slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ else
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* priority_id (6 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 6);
+ /* view_id (10 bits) */
+ write_upto_32bits_to_elements(mtx_header, element_pointers, 0, 10);
+ /* temporal_id flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_TEMPORAL_ID);
+ /* anchor_pic_flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_ANCHOR_PIC_FLAG);
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* inter_view_flag */
+ if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ else
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* reserved one bit */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+}
+
+/* helper function to start new raw data block */
+static unsigned char start_next_rawdata_element = FALSE;
+static void check_start_rawdata_element(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers)
+{
+ if (start_next_rawdata_element) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ start_next_rawdata_element = FALSE;
+ }
+}
+
+void h264_write_bits_extension_slice_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_slice_header_params *slh_params,
+ unsigned char cabac_enabled,
+ unsigned char is_idr)
+{
+ start_next_rawdata_element = FALSE;
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ /*Can be 3 or 4 bytes - always 4 bytes in our implementations */
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers,
+ slh_params->startcode_prefix_size_bytes);
+
+ /* GENERATES THE FIRST ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: 8
+ *
+ * StartCodePrefix Pregenerated in: Build_H264_4Byte_StartCodePrefix_Element()
+ * (4 or 3 bytes; 3 bytes when the slice is first in a picture without a
+ * sequence/picture header before it). Byte aligned (bit 32 or 24).
+ * NOTE: Slice_Type and Frame_Type are always the same, hence slice_frame_type
+ */
+ /* forbidden_zero_bit */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REFERENCE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* nal_unit_type for coded_slice_extension */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 20, 5);
+ /* SVC extension flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+
+ /* non_idr_flag */
+ if (slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ else if ((slh_params->slice_frame_type == SLHP_P_SLICEFRAME_TYPE) && is_idr)
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ else
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* priority_id (6 bits) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 6);
+ /* view_id = hardcoded to 1 for dependent view */
+ write_upto_32bits_to_elements(mtx_header, element_pointers, 1, 10);
+ /* temporal_id flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_TEMPORAL_ID);
+ /* anchor_pic_flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_ANCHOR_PIC_FLAG);
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* inter_view_flag is always FALSE for dependent frames */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /* reserved one bit */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ /* slice header */
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_CURRMBNR);
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ /* GENERATES THE SECOND ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE */
+
+ generate_ue(mtx_header, element_pointers,
+ (unsigned int)((slh_params->slice_frame_type ==
+ SLHP_IDR_SLICEFRAME_TYPE) ? SLHP_I_SLICEFRAME_TYPE :
+ slh_params->slice_frame_type));
+ /*slice_type ue(v): 0 for P-slice, 1 for B-slice, 2 for I-slice */
+
+ /* pic_parameter_set_id = 1 for dependent view */
+ generate_ue(mtx_header, element_pointers, 1);
+
+ /* Insert token to tell MTX to insert frame_num */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_FRAME_NUM);
+ start_next_rawdata_element = TRUE;
+
+ if (slh_params->pic_interlace ||
+ slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE) {
+ /* interlaced encoding */
+ if (slh_params->pic_interlace) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* field_pic_flag = 1 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* Insert token to tell MTX to insert BOTTOM_FIELD flag if required */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_BOTTOM_FIELD);
+ start_next_rawdata_element = TRUE;
+ }
+ }
+
+ if (slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE || (is_idr)) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* idr_pic_id ue(v) = 0 (1b) in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ }
+ /* Insert token to tell MTX to insert pic_order_cnt_lsb */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_PIC_ORDER_CNT);
+ start_next_rawdata_element = TRUE;
+
+ if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE)
+ /* Insert token to tell MTX to insert direct_spatial_mv_pred_flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_DIRECT_SPATIAL_MV_FLAG);
+
+ if (slh_params->slice_frame_type == SLHP_P_SLICEFRAME_TYPE) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_NUM_REF_IDX_ACTIVE);
+ start_next_rawdata_element = TRUE;
+ } else if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* num_ref_idx_active_override_flag (1 bit) = 0 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ /* reference picture list modification */
+ if (slh_params->slice_frame_type != SLHP_I_SLICEFRAME_TYPE &&
+ slh_params->slice_frame_type != SLHP_IDR_SLICEFRAME_TYPE) {
+ /* Insert token to tell MTX to insert BOTTOM_FIELD flag if required */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REORDER_L0);
+ start_next_rawdata_element = TRUE;
+ }
+
+ if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* ref_pic_list_ordering_flag_l1 (1 bit) = 0, no reference
+ * picture ordering in Topaz
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ if (slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE || (is_idr)) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* no_output_of_prior_pics_flag (1 bit) = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ /* long_term_reference_flag (1 bit) = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ } else {
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_ADAPTIVE);
+ start_next_rawdata_element = TRUE;
+ }
+
+ if (cabac_enabled && (slh_params->slice_frame_type == SLHP_P_SLICEFRAME_TYPE ||
+ slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE)) {
+ check_start_rawdata_element(mtx_header, element_pointers);
+ /* hard code cabac_init_idc value of 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+ }
+
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_SQP);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ /* GENERATES ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: 11
+ */
+	/* disable_deblocking_filter_idc ue(v) */
+ generate_ue(mtx_header, element_pointers, slh_params->disable_deblocking_filter_idc);
+ if (slh_params->disable_deblocking_filter_idc != 1) {
+ /*slice_alpha_c0_offset_div2 se(v) = 0 (1b) in Topaz */
+ generate_se(mtx_header, element_pointers, slh_params->deb_alpha_offset_div2);
+ /*slice_beta_offset_div2 se(v) = 0 (1b) in Topaz */
+ generate_se(mtx_header, element_pointers, slh_params->deb_beta_offset_div2);
+ }
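+
+	/*
+	 * Note: generate_se() presumably implements standard Exp-Golomb se(v)
+	 * coding: a signed value v maps to ue(2v - 1) for v > 0 and ue(-2v)
+	 * for v <= 0, so 0 -> '1', 1 -> '010', -1 -> '011', 2 -> '00100'.
+	 */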
+ /*
+ * num_slice_groups_minus1 ==0 in Topaz, so no slice_group_change_cycle field here
+ * no byte alignment at end of slice headers
+ */
+}
+
+void h264_write_bits_slice_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_slice_header_params *slh_params,
+ unsigned char cabac_enabled, unsigned char is_idr)
+{
+ start_next_rawdata_element = FALSE;
+ if (slh_params->mvc_view_idx == (unsigned short)(NON_MVC_VIEW)) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ } else if (slh_params->mvc_view_idx == MVC_BASE_VIEW_IDX) {
+ insert_prefix_nal_header(mtx_header, element_pointers, slh_params, cabac_enabled);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_MIDHDR);
+ } else {
+		/* Dependent view: write the MVC extension slice header instead */
+ h264_write_bits_extension_slice_header(mtx_header, element_pointers,
+ slh_params, cabac_enabled, is_idr);
+ return;
+ }
+
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers,
+ /*Can be 3 or 4 bytes - always 4
+ * bytes in our implementations
+ */
+ slh_params->startcode_prefix_size_bytes);
+
+	/* GENERATES THE FIRST ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE
+	 * ELEMENT BITCOUNT: 8
+	 *
+	 * StartCodePrefix pregenerated in: Build_H264_4Byte_StartCodePrefix_Element()
+	 * (4 or 3 bytes; 3 bytes when the slice is the first in a picture without a
+	 * sequence/picture header before it). Byte aligned (bit 32 or 24).
+	 * NOTE: Slice_Type and Frame_Type are always the same, hence slice_frame_type
+	 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1); /* forbidden_zero_bit */
+
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REFERENCE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+				     /* nal_unit_type (5 bits) = 5 for IDR slices, 1 otherwise */
+ ((slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE ?
+ 5 : 1)), 5);
+
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_CURRMBNR);
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ /* GENERATES THE SECOND ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE */
+
+ generate_ue(mtx_header, element_pointers,
+ (unsigned int)((slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE) ?
+ /*slice_type ue(v): 0 for P-slice, 1 for B-slice, 2 for I-slice */
+ SLHP_I_SLICEFRAME_TYPE : slh_params->slice_frame_type));
+
+ if (slh_params->mvc_view_idx != (unsigned short)(NON_MVC_VIEW))
+		/* pic_parameter_set_id = mvc_view_idx */
+ generate_ue(mtx_header, element_pointers, slh_params->mvc_view_idx);
+ else
+ generate_ue(mtx_header, element_pointers, 0); /* pic_parameter_set_id = 0 */
+ /* Insert token to tell MTX to insert frame_num */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_FRAME_NUM);
+
+ if (slh_params->pic_interlace ||
+ slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* interlaced encoding */
+ if (slh_params->pic_interlace) {
+ /* field_pic_flag = 1 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ /* Insert token to tell MTX to insert BOTTOM_FIELD flag if required */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_BOTTOM_FIELD);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ }
+
+ if (slh_params->slice_frame_type == SLHP_IDR_SLICEFRAME_TYPE)
+ /* idr_pic_id ue(v) */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_IDR_PIC_ID);
+ }
+ /* Insert token to tell MTX to insert pic_order_cnt_lsb */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_PIC_ORDER_CNT);
+
+ if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE)
+ /* Insert token to tell MTX to insert direct_spatial_mv_pred_flag */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_DIRECT_SPATIAL_MV_FLAG);
+
+ if (slh_params->slice_frame_type == SLHP_P_SLICEFRAME_TYPE) {
+ /* Insert token to tell MTX to insert override for number of active references */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_NUM_REF_IDX_ACTIVE);
+ } else if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE) {
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ /* num_ref_idx_active_override_flag (1 bit) = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ if (slh_params->slice_frame_type != SLHP_I_SLICEFRAME_TYPE &&
+ slh_params->slice_frame_type != SLHP_IDR_SLICEFRAME_TYPE) {
+ /* Insert token to tell MTX to insert reference list 0 reordering */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REORDER_L0);
+
+ if (slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE)
+ /* Insert token to tell MTX to insert reference list 1 reordering */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_REORDER_L1);
+ }
+
+ /* WEIGHTED PREDICTION */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_SLICEWEIGHTEDPREDICTIONSTRUCT);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ if (slh_params->reference_picture && slh_params->is_longterm_ref) {
+		/* adaptive_ref_pic_marking_mode_flag (1 bit) = 1 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ /* Clear any existing long-term reference */
+ /* memory_management_control_operation */
+ generate_ue(mtx_header, element_pointers, 5);
+
+ /* Allow a single long-term reference */
+ /* memory_management_control_operation */
+ generate_ue(mtx_header, element_pointers, 4);
+ /* max_long_term_frame_idx_plus1 */
+ generate_ue(mtx_header, element_pointers, 1);
+
+ /* Set current picture as the long-term reference */
+ /* memory_management_control_operation */
+ generate_ue(mtx_header, element_pointers, 6);
+ /* long_term_frame_idx */
+ generate_ue(mtx_header, element_pointers, 0);
+
+ /* End */
+ /* memory_management_control_operation */
+ generate_ue(mtx_header, element_pointers, 0);
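+
+		/*
+		 * The sequence above follows the H.264 dec_ref_pic_marking()
+		 * syntax: MMCO 5 marks all references unused, MMCO 4 sets the
+		 * max long-term frame index, MMCO 6 marks the current picture
+		 * as a long-term reference and MMCO 0 ends the operations.
+		 */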
+ } else {
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_ADAPTIVE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ }
+
+ if (cabac_enabled && (slh_params->slice_frame_type == SLHP_P_SLICEFRAME_TYPE ||
+ slh_params->slice_frame_type == SLHP_B_SLICEFRAME_TYPE))
+ /* hard code cabac_init_idc value of 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+
+ /*MTX fills this value in */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_SQP);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+
+ /* GENERATES ELEMENT OF THE H264_SLICE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: 11
+ */
+	/* disable_deblocking_filter_idc ue(v) */
+ generate_ue(mtx_header, element_pointers, slh_params->disable_deblocking_filter_idc);
+ if (slh_params->disable_deblocking_filter_idc != 1) {
+ /*slice_alpha_c0_offset_div2 se(v) = 0 (1b) in Topaz */
+ generate_se(mtx_header, element_pointers, slh_params->deb_alpha_offset_div2);
+ /*slice_beta_offset_div2 se(v) = 0 (1b) in Topaz */
+ generate_se(mtx_header, element_pointers, slh_params->deb_beta_offset_div2);
+ }
+
+ /*
+ * num_slice_groups_minus1 ==0 in Topaz, so no slice_group_change_cycle field here
+ * no byte alignment at end of slice headers
+ */
+}
+
+/*
+ * Prepare an H264 slice header in a form for the MTX to encode into a
+ * bitstream.
+ */
+void h264_prepare_slice_header(struct mtx_header_params *mtx_header,
+ unsigned char intra_slice, unsigned char inter_b_slice,
+ unsigned char disable_deblocking_filter_idc,
+ unsigned int first_mb_address, unsigned int mb_skip_run,
+ unsigned char cabac_enabled, unsigned char is_interlaced,
+ unsigned char is_idr, unsigned short mvc_view_idx,
+ unsigned char is_longterm_ref)
+{
+ struct h264_slice_header_params slh_params;
+ struct mtx_header_element *this_element;
+ struct mtx_header_element *element_pointers[MAXNUMBERELEMENTS];
+
+ slh_params.startcode_prefix_size_bytes = 4;
+	/* This should also work for IDR-P frames, which will be marked as
+	 * SLHP_P_SLICEFRAME_TYPE
+ */
+ slh_params.slice_frame_type = intra_slice ? (is_idr ? SLHP_IDR_SLICEFRAME_TYPE :
+ SLHP_I_SLICEFRAME_TYPE) :
+ (inter_b_slice ? SLHP_B_SLICEFRAME_TYPE :
+ SLHP_P_SLICEFRAME_TYPE);
+
+ slh_params.first_mb_address = first_mb_address;
+ slh_params.disable_deblocking_filter_idc = (unsigned char)disable_deblocking_filter_idc;
+ slh_params.pic_interlace = is_interlaced;
+ slh_params.deb_alpha_offset_div2 = 0;
+ slh_params.deb_beta_offset_div2 = 0;
+ /* setup the new flags used for B frame as reference */
+ slh_params.reference_picture = inter_b_slice ? 0 : 1;
+ slh_params.mvc_view_idx = mvc_view_idx;
+ slh_params.is_longterm_ref = is_longterm_ref;
+ slh_params.log2_max_pic_order_cnt = 2;
+ slh_params.longterm_ref_num = 0;
+ slh_params.ref_is_longterm_ref[0] = 0;
+ slh_params.ref_longterm_ref_num[0] = 0;
+ slh_params.ref_is_longterm_ref[1] = 0;
+ slh_params.ref_longterm_ref_num[1] = 0;
+ /*
+	 * Builds a single slice header from the given parameters (mid frame).
+	 * It is essential that we initialise our header structures before building.
+ */
+ mtx_header->elements = ELEMENTS_EMPTY;
+ this_element = (struct mtx_header_element *)mtx_header->element_stream;
+ element_pointers[0] = this_element;
+
+ h264_write_bits_slice_header(mtx_header, element_pointers, &slh_params, cabac_enabled,
+ is_idr);
+ /*Has been used as an index, so need to add 1 for a valid element count */
+ mtx_header->elements++;
+}
+
+/*
+ * PrepareEncodeSliceParams
+ */
+unsigned int prepare_encode_slice_params(void *enc_ctx, struct slice_params *slice_params,
+ unsigned char is_intra, unsigned short current_row,
+ unsigned char deblock_idc, unsigned short slice_height,
+ unsigned char is_bpicture, unsigned char field_mode,
+ int fine_y_search_size)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ unsigned int frame_store_format;
+ unsigned char swap_chromas;
+ unsigned int mbs_per_kick, kicks_per_slice;
+ unsigned int ipe_control;
+ enum img_ipe_minblock_size blk_sz;
+ struct img_mtx_video_context *mtx_enc_context = NULL;
+ unsigned char restrict_4x4_search_size;
+ unsigned int lritc_boundary;
+
+ if (!enc_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ enc = (struct img_enc_context *)enc_ctx;
+ video = enc->video;
+
+ if (video->mtx_enc_ctx_mem.cpu_virt)
+		mtx_enc_context = (struct img_mtx_video_context *)(video->mtx_enc_ctx_mem.cpu_virt);
+
+ /* We want multiple ones of these so we can submit multiple
+	 * slices without having to wait for the next one to complete
+ */
+ slice_params->flags = 0;
+ ipe_control = video->ipe_control;
+
+ /* extract block size */
+ blk_sz = (enum img_ipe_minblock_size)F_EXTRACT(ipe_control, TOPAZHP_CR_IPE_BLOCKSIZE);
+
+ /* mask-out the block size bits from ipe_control */
+ ipe_control &= ~(F_MASK(TOPAZHP_CR_IPE_BLOCKSIZE));
+
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ if (blk_sz > 2)
+ blk_sz = (enum img_ipe_minblock_size)2;
+ if (is_bpicture && blk_sz > 1)
+ blk_sz = (enum img_ipe_minblock_size)1;
+
+ if (video->mbps >= _1080P_30FPS)
+ ipe_control |= F_ENCODE(fine_y_search_size, TOPAZHP_CR_IPE_LRITC_BOUNDARY) |
+ F_ENCODE(fine_y_search_size, TOPAZHP_CR_IPE_Y_FINE_SEARCH);
+ else
+ ipe_control |= F_ENCODE(fine_y_search_size + 1,
+ TOPAZHP_CR_IPE_LRITC_BOUNDARY) |
+ F_ENCODE(fine_y_search_size, TOPAZHP_CR_IPE_Y_FINE_SEARCH);
+
+ if (video->limit_num_vectors)
+ ipe_control |= F_ENCODE(1, TOPAZHP_CR_IPE_MV_NUMBER_RESTRICTION);
+ break;
+ default:
+ break;
+ }
+
+ if (video->mbps >= _1080P_30FPS)
+ restrict_4x4_search_size = 1;
+ else
+ restrict_4x4_search_size = 0;
+
+ ipe_control |= F_ENCODE(blk_sz, TOPAZHP_CR_IPE_BLOCKSIZE);
+
+	lritc_boundary = (blk_sz != BLK_SZ_16x16) ?
+			 (fine_y_search_size + (restrict_4x4_search_size ? 0 : 1)) : 1;
+	if (lritc_boundary > 3)
+		IMG_DBG_ASSERT(0);
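+
+	/*
+	 * Example: with an 8x8 block size, fine_y_search_size = 2 and 4x4
+	 * search enabled (restrict_4x4_search_size = 0), lritc_boundary is
+	 * 2 + 1 = 3, the maximum permitted value.
+	 */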
+
+ /* Minimum sub block size to calculate motion vectors for. 0=16x16, 1=8x8, 2=4x4 */
+ ipe_control = F_INSERT(ipe_control, blk_sz, TOPAZHP_CR_IPE_BLOCKSIZE);
+ ipe_control = F_INSERT(ipe_control, fine_y_search_size, TOPAZHP_CR_IPE_Y_FINE_SEARCH);
+ ipe_control = F_INSERT(ipe_control, video->limit_num_vectors,
+ TOPAZHP_CR_IPE_MV_NUMBER_RESTRICTION);
+
+ /* 8x8 search */
+ ipe_control = F_INSERT(ipe_control, lritc_boundary, TOPAZHP_CR_IPE_LRITC_BOUNDARY);
+ ipe_control = F_INSERT(ipe_control, restrict_4x4_search_size ? 0 : 1,
+ TOPAZHP_CR_IPE_4X4_SEARCH);
+
+ ipe_control = F_INSERT(ipe_control, video->high_latency, TOPAZHP_CR_IPE_HIGH_LATENCY);
+
+ slice_params->ipe_control = ipe_control;
+
+ if (!is_intra) {
+ if (is_bpicture)
+ slice_params->flags |= ISINTERB_FLAGS;
+ else
+ slice_params->flags |= ISINTERP_FLAGS;
+ }
+
+ if (video->multi_reference_p && !(is_intra || is_bpicture))
+ slice_params->flags |= ISMULTIREF_FLAGS;
+
+ if (video->spatial_direct && is_bpicture)
+ slice_params->flags |= SPATIALDIRECT_FLAGS;
+
+ if (is_intra) {
+ slice_params->slice_config = F_ENCODE(TOPAZHP_CR_SLICE_TYPE_I_SLICE,
+ TOPAZHP_CR_SLICE_TYPE);
+ } else {
+ if (is_bpicture)
+ slice_params->slice_config = F_ENCODE(TOPAZHP_CR_SLICE_TYPE_B_SLICE,
+ TOPAZHP_CR_SLICE_TYPE);
+ else /* p frame */
+ slice_params->slice_config = F_ENCODE(TOPAZHP_CR_SLICE_TYPE_P_SLICE,
+ TOPAZHP_CR_SLICE_TYPE);
+ }
+
+ mbs_per_kick = video->kick_size;
+
+ /*
+	 * We need to figure out the number of kicks and MBs per kick to use.
+	 * On H.264 we will use an MBs-per-kick of the basic unit; on other RC
+	 * variants we will use an MBs-per-kick of the width.
+ */
+ kicks_per_slice = ((slice_height / 16) * (video->width / 16)) / mbs_per_kick;
+
+ IMG_DBG_ASSERT((kicks_per_slice * mbs_per_kick) ==
+ ((slice_height / 16) * (video->width / 16)));
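+
+	/*
+	 * Worked example (hypothetical numbers): a 720x480 frame split into
+	 * two slices gives slice_height = 240, i.e. 15 MB rows of 45 MBs;
+	 * with a kick_size of 5 MBs that is (15 * 45) / 5 = 135 kicks per
+	 * slice, and the assert above checks the division was exact.
+	 */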
+
+ /*
+	 * These need sensible values; they don't look to be implemented yet
+	 * and may change per stream
+ */
+ if (video->format == IMG_CODEC_UY0VY1_8888 || video->format == IMG_CODEC_VY0UY1_8888)
+ frame_store_format = 3;
+ else if ((video->format == IMG_CODEC_Y0UY1V_8888) ||
+ (video->format == IMG_CODEC_Y0VY1U_8888))
+ frame_store_format = 2;
+ else if (video->format == IMG_CODEC_420_PL12 || video->format == IMG_CODEC_422_PL12 ||
+ video->format == IMG_CODEC_420_PL12_PACKED ||
+ video->format == IMG_CODEC_420_PL21_PACKED ||
+ video->format == IMG_CODEC_420_PL21 || video->format == IMG_CODEC_422_PL21)
+ frame_store_format = 1;
+ else
+ frame_store_format = 0;
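+
+	/*
+	 * The frame_store_format values above appear to map to: 3 = packed
+	 * 4:2:2 with leading chroma (UYVY/VYUY), 2 = packed 4:2:2 with
+	 * leading luma (YUYV/YVYU), 1 = semi-planar (PL12/PL21), 0 = planar.
+	 */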
+
+ if (video->format == IMG_CODEC_VY0UY1_8888 || video->format == IMG_CODEC_Y0VY1U_8888 ||
+ ((video->format == IMG_CODEC_420_PL21 ||
+ video->format == IMG_CODEC_420_PL21_PACKED) && mtx_enc_context &&
+ mtx_enc_context->scaler_setup.scaler_control == 0))
+ swap_chromas = 1;
+ else
+ swap_chromas = 0;
+
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ /* H264 */
+ slice_params->seq_config = F_ENCODE(0, TOPAZHP_CR_TEMPORAL_PIC0_BELOW_IN_VALID) |
+ F_ENCODE(0, TOPAZHP_CR_TEMPORAL_PIC1_BELOW_IN_VALID) |
+ F_ENCODE(0, TOPAZHP_CR_ABOVE_OUT_OF_SLICE_VALID) |
+ F_ENCODE(1, TOPAZHP_CR_WRITE_TEMPORAL_PIC0_BELOW_VALID) |
+ F_ENCODE(0, TOPAZHP_CR_REF_PIC0_VALID) |
+ F_ENCODE(0, TOPAZHP_CR_REF_PIC1_VALID) |
+ F_ENCODE(!is_bpicture, TOPAZHP_CR_REF_PIC1_EQUAL_PIC0) |
+ F_ENCODE(field_mode ? 1 : 0, TOPAZHP_CR_FIELD_MODE) |
+ F_ENCODE(swap_chromas, TOPAZHP_CR_FRAME_STORE_CHROMA_SWAP) |
+ F_ENCODE(frame_store_format, TOPAZHP_CR_FRAME_STORE_FORMAT) |
+ F_ENCODE(TOPAZHP_CR_ENCODER_STANDARD_H264, TOPAZHP_CR_ENCODER_STANDARD) |
+ F_ENCODE(deblock_idc == 1 ? 0 : 1, TOPAZHP_CR_DEBLOCK_ENABLE);
+
+ if (video->rc_params.bframes) {
+ slice_params->seq_config |=
+ F_ENCODE(1, TOPAZHP_CR_WRITE_TEMPORAL_COL_VALID);
+ if ((slice_params->flags & ISINTERB_FLAGS) == ISINTERB_FLAGS)
+ slice_params->seq_config |= F_ENCODE(1,
+ TOPAZHP_CR_TEMPORAL_COL_IN_VALID);
+ }
+ if (!is_bpicture)
+ slice_params->seq_config |=
+ F_ENCODE(1, TOPAZHP_CR_WRITE_TEMPORAL_COL_VALID);
+ break;
+
+ default:
+ break;
+ }
+
+ if (is_bpicture) {
+ slice_params->seq_config |= F_ENCODE(0, TOPAZHP_CR_TEMPORAL_PIC1_BELOW_IN_VALID) |
+ F_ENCODE(0, TOPAZHP_CR_WRITE_TEMPORAL_PIC1_BELOW_VALID) |
+ F_ENCODE(1, TOPAZHP_CR_REF_PIC1_VALID) |
+ F_ENCODE(1, TOPAZHP_CR_TEMPORAL_COL_IN_VALID);
+ }
+
+ if (video->enable_sel_stats_flags & ESF_FIRST_STAGE_STATS)
+ slice_params->seq_config |= F_ENCODE(1, TOPAZHP_CR_WRITE_MB_FIRST_STAGE_VALID);
+
+ if (video->enable_sel_stats_flags & ESF_MP_BEST_MB_DECISION_STATS ||
+ video->enable_sel_stats_flags & ESF_MP_BEST_MOTION_VECTOR_STATS) {
+ slice_params->seq_config |= F_ENCODE(1, TOPAZHP_CR_BEST_MULTIPASS_OUT_VALID);
+
+ if (!(video->enable_sel_stats_flags & ESF_MP_BEST_MOTION_VECTOR_STATS))
+ /* 64 Byte Best Multipass Motion Vector output disabled by default */
+ slice_params->seq_config |= F_ENCODE(1, TOPAZHP_CR_BEST_MVS_OUT_DISABLE);
+ }
+
+ if (video->enable_inp_ctrl)
+ slice_params->seq_config |= F_ENCODE(1, TOPAZHP_CR_MB_CONTROL_IN_VALID);
+
+ return 0;
+}
+
+/*
+ * Generates the slice params template
+ */
+void generate_slice_params_template(struct img_enc_context *enc,
+ struct vidio_ddbufinfo *mem_info,
+ enum img_frame_template_type slice_type,
+ unsigned char is_interlaced, int fine_y_search_size)
+{
+ unsigned char is_intra = ((slice_type == IMG_FRAME_IDR) || (slice_type == IMG_FRAME_INTRA));
+ unsigned char is_bframe = (slice_type == IMG_FRAME_INTER_B);
+ unsigned char is_idr = ((slice_type == IMG_FRAME_IDR) ||
+ (slice_type == IMG_FRAME_INTER_P_IDR));
+ struct img_video_context *video = enc->video;
+ unsigned short mvc_view_idx = (unsigned short)(NON_MVC_VIEW);
+ /* Initialize Slice Params */
+ struct slice_params *slice_params_dest;
+ unsigned int slice_height = video->picture_height / video->slices_per_picture;
+
+ slice_height &= ~15;
+
+ slice_params_dest = (struct slice_params *)(mem_info->cpu_virt);
+
+ mvc_view_idx = video->mvc_view_idx;
+
+ prepare_encode_slice_params(enc, slice_params_dest, is_intra,
+ 0, video->deblock_idc, slice_height, is_bframe,
+ is_interlaced, fine_y_search_size);
+
+ slice_params_dest->template_type = slice_type;
+
+ /* Prepare Slice Header Template */
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ h264_prepare_slice_header(&slice_params_dest->slice_hdr_tmpl, is_intra,
+ is_bframe, video->deblock_idc, 0, 0, video->cabac_enabled,
+ is_interlaced, is_idr, mvc_view_idx, FALSE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void h264_write_bits_mvc_sequence_header(struct mtx_header_params *mtx_header,
+ struct mtx_header_element **element_pointers,
+ struct h264_sequence_header_params *sh_params,
+ struct h264_crop_params *crop,
+ struct h264_scaling_matrix_params *scaling_matrix)
+{
+ int view_idx = 0;
+ int num_views = MAX_MVC_VIEWS;
+
+ insert_element_token(mtx_header, element_pointers, ELEMENT_STARTCODE_RAWDATA);
+ h264_write_bits_startcode_prefix_element(mtx_header, element_pointers, 4);
+
+ /*
+ * 4 Byte StartCodePrefix Pregenerated in: H264_WriteBits_StartCodePrefix_Element()
+ * Byte aligned (bit 32)
+ */
+ /* forbidden_zero_bit=0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, (0 << 7) |
+				     (0x3 << 5) | /* nal_ref_idc = 3 (11b) */
+				     (15), /* nal_unit_type = 15 (subset SPS) */
+ 8);
+
+ /*
+ * Byte aligned (bit 40)
+	 * profile_idc (8 bits) = 118 (Multiview High) for the MVC subset SPS
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 118, 8);
+
+ /* Byte aligned (bit 48) */
+	/* constraint_set0_flag = 1 for MP + BP constraints */
+	write_upto_8bits_to_elements(mtx_header, element_pointers, (0 << 7) |
+				     (0 << 6) | /* constraint_set1_flag = 1 for MP + BP constraints */
+				     (0 << 5) | /* constraint_set2_flag = always 0 in BP/MP */
+				     (0 << 4), /* constraint_set3_flag = 1 for level 1b, 0 for others */
+ /* reserved_zero_4bits = 0 */
+ 8);
+
+ /*
+ * Byte aligned (bit 56)
+ * level_idc should be set to 9 in the sps in case of level is Level 1B and the profile
+ * is Multiview High or Stereo High profiles
+ */
+ /* level_idc (8 bits) = 9 for 1b, 10xlevel for others */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->level == SH_LEVEL_1B) ? 9 :
+ (unsigned char)sh_params->level, 8);
+
+	/* seq_parameter_set_id = MVC_SPS_ID for the subset SPS */
+ generate_ue(mtx_header, element_pointers, MVC_SPS_ID);
+ generate_ue(mtx_header, element_pointers, 1); /* chroma_format_idc = 1 */
+ generate_ue(mtx_header, element_pointers, 0); /* bit_depth_luma_minus8 = 0 */
+ generate_ue(mtx_header, element_pointers, 0); /* bit_depth_chroma_minus8 = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, sh_params->is_lossless ? 1 : 0,
+				     1); /* qpprime_y_zero_transform_bypass_flag */
+
+ if (sh_params->use_default_scaling_list || sh_params->seq_scaling_matrix_present_flag) {
+ /* seq_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ if (!sh_params->use_default_scaling_list) {
+ h264_write_bits_scaling_lists(mtx_header, element_pointers, scaling_matrix,
+ TRUE);
+ insert_element_token(mtx_header, element_pointers, ELEMENT_RAWDATA);
+ } else {
+ /* seq_scaling_list_present_flag[i] = 0; 0 < i < 8 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 8);
+ }
+ } else {
+ /* seq_scaling_matrix_present_flag */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ generate_ue(mtx_header, element_pointers, 1); /* log2_max_frame_num_minus4 = 1 */
+ generate_ue(mtx_header, element_pointers, 0); /* pic_order_cnt_type = 0 */
+	/* log2_max_pic_order_cnt_lsb_minus4 = 2 */
+ generate_ue(mtx_header, element_pointers, 2);
+
+	/* max_num_ref_frames ue(v), typically 2 */
+	generate_ue(mtx_header, element_pointers, sh_params->max_num_ref_frames);
+	/* Byte aligned (bit 72) */
+	/* gaps_in_frame_num_value_allowed_flag (1 bit) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ sh_params->gaps_in_frame_num_value, 1);
+
+ /*
+ * GENERATES THE SECOND, VARIABLE LENGTH, ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: xx
+ */
+	/* pic_width_in_mbs_minus1: ue(v) from 10 to 44 (176 to 720 pixels per row) */
+	generate_ue(mtx_header, element_pointers, sh_params->width_in_mbs_minus1);
+	/* pic_height_in_map_units_minus1: ue(v) from 8 to 35 (144 to 576 pixels per column) */
+ generate_ue(mtx_header, element_pointers, sh_params->height_in_maps_units_minus1);
+ /* We don't know the alignment at this point, so will have to use bit writing functions */
+ /* frame_mb_only_flag 1=frame encoding, 0=field encoding */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, sh_params->frame_mbs_only_flag,
+ 1);
+
+ if (!sh_params->frame_mbs_only_flag) /* in the case of interlaced encoding */
+ /* mb_adaptive_frame_field_flag = 0 in Topaz(field encoding at the sequence level) */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+
+ /* direct_8x8_inference_flag=1 in Topaz */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+
+ if (crop->clip) {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1);
+ generate_ue(mtx_header, element_pointers, crop->left_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->right_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->top_crop_offset);
+ generate_ue(mtx_header, element_pointers, crop->bottom_crop_offset);
+
+ } else {
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 1);
+ }
+
+ /*
+ * GENERATES THE THIRD ELEMENT OF THE H264_SEQUENCE_HEADER() STRUCTURE
+ * ELEMENT BITCOUNT: xx
+ * vui_parameters_present_flag (VUI only in 1st sequence of stream)
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->vui_params_present), 1);
+ if (sh_params->vui_params_present > 0)
+ h264_write_bits_vui_params(mtx_header, element_pointers, &sh_params->vui_params);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 1, 1); /*bit_equal_to_one */
+
+ /* sequence parameter set MVC extension */
+ generate_ue(mtx_header, element_pointers, (num_views - 1)); /*num_views_minus1 */
+ for (view_idx = 0; view_idx < num_views; view_idx++)
+ generate_ue(mtx_header, element_pointers, view_idx);
+
+ /* anchor references */
+ for (view_idx = 1; view_idx < num_views; view_idx++) {
+ /* num_anchor_refs_l0 = 1; view-1 refers to view-0 */
+ generate_ue(mtx_header, element_pointers, 1);
+ generate_ue(mtx_header, element_pointers, 0); /* anchor_ref_l0 = 0 */
+ generate_ue(mtx_header, element_pointers, 0); /* num_anchor_refs_l1 = 0 */
+ }
+
+ /* non-anchor references */
+ for (view_idx = 1; view_idx < num_views; view_idx++) {
+		generate_ue(mtx_header, element_pointers, 1); /* num_non_anchor_refs_l0 = 1 */
+		generate_ue(mtx_header, element_pointers, 0); /* non_anchor_ref_l0 = 0 */
+ generate_ue(mtx_header, element_pointers, 0); /* num_non_anchor_refs_l1 = 0 */
+ }
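+
+	/*
+	 * E.g. with MAX_MVC_VIEWS == 2 the loops above emit view_id[0] = 0,
+	 * view_id[1] = 1 and make view 1 reference view 0 in list 0 for both
+	 * anchor and non-anchor pictures, with no list-1 inter-view refs.
+	 */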
+
+ generate_ue(mtx_header, element_pointers, 0);/* num_level_values_signaled_minus1 = 0 */
+
+ /* level_idc should be set to 9 in the sps in case of level is
+ * Level 1B and the profile is Multiview High or Stereo High profiles
+ */
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ (sh_params->level == SH_LEVEL_1B) ? 9 :
+ (unsigned char)sh_params->level,
+ 8);/* level_idc (8 bits) = 9 for 1b, 10xlevel for others */
+ generate_ue(mtx_header, element_pointers, 0);/* num_applicable_ops_minus1 = 0 */
+ /* applicable_ops_temporal_id = 0 */
+ write_upto_8bits_to_elements(mtx_header, element_pointers, 0, 3);
+ /* applicable_op_num_target_views_minus1 = 0 */
+ generate_ue(mtx_header, element_pointers, 0);
+ generate_ue(mtx_header, element_pointers, 0); /* applicable_op_target_view_id = 0 */
+ generate_ue(mtx_header, element_pointers, 0); /* applicable_op_num_views_minus1 = 0 */
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ 0, /* mvc_vui_parameters_present_flag =0 */
+ 1);
+
+ write_upto_8bits_to_elements(mtx_header, element_pointers,
+ 0, /* additional_extension2_flag =0 */
+ 1);
+
+ /* Finally we need to align to the next byte */
+ /* Tell MTX to insert the byte align field
+ * (we don't know final stream size for alignment at this point)
+ */
+ insert_element_token(mtx_header, element_pointers, ELEMENT_INSERTBYTEALIGN_H264);
+}
+
+/*
+ * Prepare an H264 MVC subset SPS in a form for the MTX to encode into a bitstream.
+ */
+void h264_prepare_mvc_sequence_header(struct mtx_header_params *mtx_header,
+ unsigned int pic_width_in_mbs,
+ unsigned int pic_height_in_mbs,
+ unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop,
+ struct h264_sequence_header_params *sh_params)
+{
+ struct mtx_header_element *this_element;
+ struct mtx_header_element *element_pointers[MAXNUMBERELEMENTS];
+
+ /*
+	 * Builds a sequence, picture and slice header from the given input
+	 * parameters (start of a new frame). It is essential that we initialise
+	 * our header structures before building.
+ */
+ mtx_header->elements = ELEMENTS_EMPTY;
+ this_element = (struct mtx_header_element *)mtx_header->element_stream;
+ element_pointers[0] = this_element;
+
+ h264_write_bits_mvc_sequence_header(mtx_header, element_pointers, sh_params, crop, NULL);
+ /* Has been used as an index, so need to add 1 for a valid element count */
+ mtx_header->elements++;
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/header_gen.h b/drivers/media/platform/vxe-vxd/encoder/header_gen.h
new file mode 100644
index 000000000000..a1d172714ba4
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/header_gen.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * encoder header generation interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include "fw_headers/topazscfwif.h"
+#include <linux/types.h>
+#include "topaz_api.h"
+#include "vid_buf.h"
+
+/*
+ * enum describing slice/frame type (H264)
+ */
+enum slhp_sliceframe_type {
+ SLHP_P_SLICEFRAME_TYPE,
+ SLHP_B_SLICEFRAME_TYPE,
+ SLHP_I_SLICEFRAME_TYPE,
+ SLHP_SP_SLICEFRAME_TYPE,
+ SLHP_SI_SLICEFRAME_TYPE,
+ SLHP_IDR_SLICEFRAME_TYPE,
+ SLHP_SLICE_FRAME_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct describing scaling lists (H264)
+ */
+struct h264_scaling_matrix_params {
+ unsigned char scaling_lists4x4[6][16];
+ unsigned char scaling_lists8x8[2][64];
+ unsigned int list_mask;
+};
+
+/*
+ * struct describing picture parameter set (H264)
+ */
+struct h264_picture_header_params {
+ unsigned char pic_parameter_set_id;
+ unsigned char seq_parameter_set_id;
+ unsigned char entropy_coding_mode_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char weighted_bipred_idc;
+ signed char chroma_qp_index_offset;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char transform_8x8_mode_flag;
+ unsigned char pic_scaling_matrix_present_flag;
+ unsigned char use_default_scaling_list;
+ signed char second_chroma_qp_index_offset;
+};
+
+/*
+ * struct describing slice header (H264)
+ */
+struct h264_slice_header_params {
+ unsigned char startcode_prefix_size_bytes;
+ enum slhp_sliceframe_type slice_frame_type;
+ unsigned int first_mb_address;
+ unsigned char log2_max_pic_order_cnt;
+ unsigned char disable_deblocking_filter_idc;
+ unsigned char pic_interlace;
+ unsigned char reference_picture;
+ signed char deb_alpha_offset_div2;
+ signed char deb_beta_offset_div2;
+ unsigned short mvc_view_idx;
+ unsigned char is_longterm_ref;
+ unsigned char longterm_ref_num;
+ /* Long term reference info for reference frames */
+ unsigned char ref_is_longterm_ref[2];
+ unsigned char ref_longterm_ref_num[2];
+};
+
+void generate_slice_params_template(struct img_enc_context *enc,
+ struct vidio_ddbufinfo *mem_info,
+ enum img_frame_template_type slice_type,
+ unsigned char is_interlaced, int fine_y_search_size);
+
+void h264_prepare_sequence_header(struct mtx_header_params *mtx_header,
+ unsigned int pic_width_in_mbs,
+ unsigned int pic_height_in_mbs, unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop,
+ struct h264_sequence_header_params *sh_params,
+ unsigned char aso);
+
+void h264_prepare_mvc_sequence_header(struct mtx_header_params *mtx_header,
+ unsigned int pic_width_in_mbs, unsigned int pic_height_in_mbs,
+ unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop,
+ struct h264_sequence_header_params *sh_params);
+
+void h264_prepare_aud_header(struct mtx_header_params *mtx_header);
+
+void h264_prepare_picture_header(struct mtx_header_params *mtx_header,
+ unsigned char cabac_enabled,
+ unsigned char transform_8x8,
+ unsigned char intra_constrained,
+ signed char cqp_offset,
+ unsigned char weighted_prediction,
+ unsigned char weighted_bi_pred,
+ unsigned char mvc_pps,
+ unsigned char scaling_matrix,
+ unsigned char scaling_lists);
diff --git a/drivers/media/platform/vxe-vxd/encoder/mtx_fwif.c b/drivers/media/platform/vxe-vxd/encoder/mtx_fwif.c
new file mode 100644
index 000000000000..2f10b902c093
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/mtx_fwif.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MTX Firmware Interface
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+
+#include "fw_headers/mtx_fwif.h"
+#include "fw_headers/defs.h"
+#include "fw_binaries/include_all_fw_variants.h"
+#include "img_errors.h"
+#include "reg_headers/mtx_regs.h"
+/* still used for DMAC regs */
+#include "reg_headers/img_soc_dmac_regs.h"
+#include "target_config.h"
+#include "topaz_device.h"
+#include "topazmmu.h"
+#include "vxe_public_regdefs.h"
+
+extern struct mem_space topaz_mem_space[];
+
+/*
+ * Static Function Decl
+ */
+static void mtx_get_mtx_ctrl_from_dash(struct img_fw_context *fw_ctx);
+
+static unsigned int mtx_read_core_reg(struct img_fw_context *fw_ctx,
+ const unsigned int reg);
+
+static void mtx_write_core_reg(struct img_fw_context *fw_ctx,
+ const unsigned int reg,
+ const unsigned int val);
+
+static int mtx_select_fw_build(struct img_fw_context *fw_ctx, enum img_codec codec);
+
+static void mtx_reg_if_upload(struct img_fw_context *fw_ctx,
+ const unsigned int data_mem, unsigned int addr,
+ const unsigned int words, const unsigned int *const data);
+
+/*
+ * Polling Configuration for TAL
+ */
+#define TAL_REG_RD_WR_TRIES 1000 /* => try 1000 times before giving up */
+
+/*
+ * defines that should come from auto generated headers
+ */
+#define MTX_DMA_MEMORY_BASE (0x82880000)
+#define PC_START_ADDRESS (0x80900000)
+
+#define MTX_CORE_CODE_MEM (0x10)
+#define MTX_CORE_DATA_MEM (0x18)
+
+#define MTX_PC (0x05)
+
+/*
+ * Get control of the MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Return None
+ */
+static void mtx_get_mtx_ctrl_from_dash(struct img_fw_context *fw_ctx)
+{
+ unsigned int reg = 0;
+
+ IMG_DBG_ASSERT(!fw_ctx->drv_has_mtx_ctrl);
+
+ /* Request the bus from the Dash...*/
+ reg = F_ENCODE(1, TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE) |
+ F_ENCODE(0x2, TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN);
+ VXE_WR_REG32(fw_ctx->topaz_multicore_reg_addr, TOPAZHP_TOP_CR_MTX_DEBUG_MSTR, reg);
+
+ do {
+ reg = VXE_RD_REG32(fw_ctx->topaz_multicore_reg_addr, TOPAZHP_TOP_CR_MTX_DEBUG_MSTR);
+
+ } while ((reg & 0x18) != 0);
+
+ /* Save the access control register...*/
+	fw_ctx->access_control = VXE_RD_REG32(fw_ctx->mtx_reg_mem_space_addr,
+					      MTX_CR_MTX_RAM_ACCESS_CONTROL);
+
+ fw_ctx->drv_has_mtx_ctrl = TRUE;
+}
+
+/*
+ * Release control of the MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Return None
+ */
+static void mtx_release_mtx_ctrl_from_dash(struct img_fw_context *fw_ctx)
+{
+ unsigned int reg = 0;
+
+ IMG_DBG_ASSERT(fw_ctx->drv_has_mtx_ctrl);
+
+ /* Restore the access control register...*/
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_RAM_ACCESS_CONTROL,
+ fw_ctx->access_control);
+
+ /* Release the bus...*/
+ reg = F_ENCODE(1, TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE);
+ VXE_WR_REG32(fw_ctx->topaz_multicore_reg_addr, TOPAZHP_TOP_CR_MTX_DEBUG_MSTR, reg);
+
+ fw_ctx->drv_has_mtx_ctrl = FALSE;
+}
+
+/*
+ * Read an MTX register.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input reg : Offset of register to read
+ * @Return unsigned int : Register value
+ */
+static unsigned int mtx_read_core_reg(struct img_fw_context *fw_ctx, const unsigned int reg)
+{
+ unsigned int ret = 0;
+
+ mtx_get_mtx_ctrl_from_dash(fw_ctx);
+
+ /* Issue read request */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+ MASK_MTX_MTX_RNW | (reg & ~MASK_MTX_MTX_DREADY));
+
+ /* Wait for done */
+ VXE_POLL_REG32_ISEQ(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+ MASK_MTX_MTX_DREADY,
+ MASK_MTX_MTX_DREADY,
+ TAL_REG_RD_WR_TRIES);
+
+ /* Read */
+ ret = VXE_RD_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_REGISTER_READ_WRITE_DATA);
+
+ mtx_release_mtx_ctrl_from_dash(fw_ctx);
+
+ return ret;
+}
+
+/*
+ * Write an MTX register.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input reg : Offset of register to write
+ * @Input val : Value to write to register
+ */
+static void mtx_write_core_reg(struct img_fw_context *fw_ctx,
+ const unsigned int reg, const unsigned int val)
+{
+ mtx_get_mtx_ctrl_from_dash(fw_ctx);
+
+ /* Put data in MTX_RW_DATA */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_REGISTER_READ_WRITE_DATA, val);
+
+ /* DREADY is set to 0 and request a write*/
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+ (reg & ~MASK_MTX_MTX_DREADY));
+
+ /* Wait for DREADY to become set*/
+ VXE_POLL_REG32_ISEQ(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+ MASK_MTX_MTX_DREADY,
+ MASK_MTX_MTX_DREADY,
+ TAL_REG_RD_WR_TRIES);
+
+ mtx_release_mtx_ctrl_from_dash(fw_ctx);
+}
+
+/* ****** Utility macros for `mtx_select_fw_build` ************** */
+
+#if FW_BIN_FORMAT_VERSION != 2
+# error Unsupported firmware format version
+#endif
+
+/*
+ * Assign a firmware binary to an MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input codec : Firmware version to use
+ */
+static int mtx_select_fw_build(struct img_fw_context *fw_ctx, enum img_codec codec)
+{
+ unsigned char *fmt, *rc_mode;
+ unsigned int target_fw_pipes = 0;
+ unsigned int codec_mask = 0;
+ unsigned int cur_hw_config;
+ unsigned char force_specific_pipe_cnt = FALSE;
+
+# define HW_CONFIG_ALL_FEATURES 0
+# define HW_CONFIG_8CONTEXT 1
+
+#define CORE_REV_CONFIG_1_MIN 0x00030906
+#define CORE_REV_CONFIG_1_MAX 0x0003090a
+
+# define CODEC_MASK_JPEG 0x0001
+# define CODEC_MASK_MPEG2 0x0002
+# define CODEC_MASK_MPEG4 0x0004
+# define CODEC_MASK_H263 0x0008
+# define CODEC_MASK_H264 0x0010
+# define CODEC_MASK_H264MVC 0x0020
+# define CODEC_MASK_VP8 0x0040
+# define CODEC_MASK_H265 0x0080
+# define CODEC_MASK_FAKE 0x007F
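+/*
+ * Note: CODEC_MASK_FAKE (0x007F) is the bitwise OR of all the codec masks
+ * above except CODEC_MASK_H265
+ */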
+
+#define _MVC_CODEC_CASE(RC) { case IMG_CODEC_H264MVC_ ## RC: fmt = "H264MVC"; rc_mode = #RC; \
+ force_specific_pipe_cnt = TRUE; codec_mask = CODEC_MASK_H264MVC; break; }
+
+ switch (codec) {
+ case IMG_CODEC_H264_NO_RC:
+ case IMG_CODEC_H264_VBR:
+ case IMG_CODEC_H264_CBR:
+
+ case IMG_CODEC_H264_VCM:
+ fmt = "H264";
+ rc_mode = "ALL";
+ force_specific_pipe_cnt = TRUE;
+ codec_mask = CODEC_MASK_H264;
+ break;
+ case IMG_CODEC_H263_NO_RC:
+ case IMG_CODEC_H263_VBR:
+ case IMG_CODEC_H263_CBR:
+ fmt = "LEGACY_VIDEO";
+ rc_mode = "ALL";
+ codec_mask = CODEC_MASK_H263;
+ break;
+ case IMG_CODEC_MPEG2_NO_RC:
+ case IMG_CODEC_MPEG2_VBR:
+ case IMG_CODEC_MPEG2_CBR:
+ fmt = "LEGACY_VIDEO";
+ rc_mode = "ALL";
+ codec_mask = CODEC_MASK_MPEG2;
+ break;
+ case IMG_CODEC_MPEG4_NO_RC:
+ case IMG_CODEC_MPEG4_VBR:
+ case IMG_CODEC_MPEG4_CBR:
+ fmt = "LEGACY_VIDEO";
+ rc_mode = "ALL";
+ codec_mask = CODEC_MASK_MPEG4;
+ break;
+ _MVC_CODEC_CASE(NO_RC);
+ _MVC_CODEC_CASE(VBR);
+ _MVC_CODEC_CASE(CBR);
+ _MVC_CODEC_CASE(ERC);
+ case IMG_CODEC_JPEG:
+ fmt = "JPEG";
+ rc_mode = "NO_RC";
+ codec_mask = CODEC_MASK_JPEG;
+ break;
+ default:
+ pr_err("Failed to locate firmware for codec %d\n", codec);
+ return IMG_ERROR_UNDEFINED;
+ }
+#undef _MVC_CODEC_CASE
+
+ /* rc mode name fix */
+ if (strcmp(rc_mode, "NO_RC") == 0)
+ rc_mode = "NONE";
+
+ {
+ /*
+		 * Pick the firmware type (done implicitly by determining the
+		 * number of pipes the given firmware is expected to have)
+ */
+ const unsigned int core_id = fw_ctx->core_rev;
+#define IS_REV(name) ((core_id >= MIN_ ## name ## _REV) && \
+ (core_id <= MAX_ ## name ## _REV))
+
+ if (core_id >= CORE_REV_CONFIG_1_MIN && core_id <= CORE_REV_CONFIG_1_MAX) {
+ /*
+ * For now, it is assumed that this revision ID means 8
+ * context 2 pipe variant
+ */
+ cur_hw_config = HW_CONFIG_8CONTEXT;
+ target_fw_pipes = 2;
+ } else {
+ cur_hw_config = HW_CONFIG_ALL_FEATURES;
+ if (fw_ctx->hw_num_pipes < 3 && force_specific_pipe_cnt)
+ target_fw_pipes = 2;
+ else
+ target_fw_pipes = 4;
+ }
+#undef IS_REV
+ }
+
+ {
+ /* Search for matching firmwares */
+
+ unsigned int fmts_included = 0;
+ unsigned int ii;
+ unsigned char preferred_fw_located = FALSE;
+ unsigned int req_size = 0;
+ struct IMG_COMPILED_FW_BIN_RECORD *selected, *iter;
+
+ selected = NULL;
+
+ for (ii = 0; ii < all_fw_binaries_cnt; ii++) {
+ iter = all_fw_binaries[ii];
+ /*
+			 * With HW_3_6 we want to allow 3 pipes if required;
+			 * this is mainly for test purposes
+ */
+ if ((strcmp("JPEG_H264", iter->fmt) == 0) && target_fw_pipes != 3) {
+ preferred_fw_located = TRUE;
+ req_size = (4 * iter->data_size + (iter->data_origin -
+ MTX_DMA_MEMORY_BASE));
+ break;
+ }
+ }
+
+ if (preferred_fw_located && req_size <= fw_ctx->mtx_ram_size &&
+ cur_hw_config == iter->hw_config && iter->pipes >= target_fw_pipes &&
+ (codec_mask == CODEC_MASK_JPEG || codec_mask == CODEC_MASK_H264) &&
+ ((iter->formats_mask & codec_mask) != 0)) {
+ selected = iter;
+ } else {
+ for (ii = 0; ii < all_fw_binaries_cnt; ii++) {
+ iter = all_fw_binaries[ii];
+ /* The hardware config modes need to match */
+ if (cur_hw_config != iter->hw_config) {
+ pr_err("cur_hw_config %x iter->hw_config %x mismatch\n",
+ cur_hw_config, iter->hw_config);
+ continue;
+ }
+
+ fmts_included = iter->formats_mask;
+
+ if (((fmts_included & codec_mask) != 0) &&
+ (codec_mask == CODEC_MASK_JPEG ||
+ /* no need to match RC for JPEG */
+ strcmp(rc_mode, iter->rc_mode) == 0)) {
+ /*
+ * This firmware matches by format/mode
+ * combination, now to check if it fits
+ * better than current best
+ */
+ if (!selected && iter->pipes >= target_fw_pipes) {
+ /*
+						 * Select this firmware either
+						 * if it is the first matching
+						 * one we've encountered or if
+						 * it better matches the
+						 * desired number of pipes.
+ */
+ selected = iter;
+ }
+
+ if (iter->pipes == target_fw_pipes) {
+ /* Found ideal firmware version */
+ selected = iter;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!selected) {
+ pr_err("Failed to locate firmware for format '%s' and RC mode '%s'.\n",
+ fmt, rc_mode);
+ return IMG_ERROR_UNDEFINED;
+ }
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("Using firmware: %s with %i pipes, hwconfig=%i (text size = %i, data size = %i) for requested codec: %s RC mode %s\n",
+ selected->fmt, selected->pipes,
+ selected->hw_config, selected->text_size,
+ selected->data_size, fmt, rc_mode);
+#endif
+
+ /* Export selected firmware to the fw context */
+ fw_ctx->mtx_topaz_fw_text_size = selected->text_size;
+ fw_ctx->mtx_topaz_fw_data_size = selected->data_size;
+ fw_ctx->mtx_topaz_fw_text = selected->text;
+ fw_ctx->mtx_topaz_fw_data = selected->data;
+ fw_ctx->mtx_topaz_fw_data_origin = selected->data_origin;
+ fw_ctx->num_pipes = selected->pipes;
+ fw_ctx->int_defines.length = selected->int_define_cnt;
+ fw_ctx->int_defines.names = selected->int_define_names;
+ fw_ctx->int_defines.values = selected->int_defines;
+ fw_ctx->supported_codecs = selected->formats_mask;
+ fw_ctx->num_contexts = mtx_get_fw_config_int(fw_ctx, "TOPAZHP_MAX_NUM_STREAMS");
+ }
+ return IMG_SUCCESS;
+}
+
+/*
+ * Upload MTX text and data sections via register interface
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input data_mem : RAM ID for text/data section
+ * @Input address : Address to upload data to
+ * @Input words : Number of words of data to upload
+ * @Input data : Pointer to data to upload
+ */
+static void mtx_reg_if_upload(struct img_fw_context *fw_ctx, const unsigned int data_mem,
+ unsigned int address, const unsigned int words,
+ const unsigned int *const data)
+{
+ unsigned int loop;
+ unsigned int ctrl;
+ unsigned int ram_id;
+ unsigned int addr;
+ unsigned int curr_bank = ~0;
+ unsigned int uploaded = 0;
+
+ mtx_get_mtx_ctrl_from_dash(fw_ctx);
+
+ VXE_POLL_REG32_ISEQ(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_RAM_ACCESS_STATUS,
+ MASK_MTX_MTX_MTX_MCM_STAT,
+ MASK_MTX_MTX_MTX_MCM_STAT,
+ TAL_REG_RD_WR_TRIES);
+
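+	/*
+	 * The access-control register is programmed with auto-increment
+	 * (MTX_MTX_MCMAI = 1), so it only needs rewriting below when the
+	 * address crosses into a different RAM bank.
+	 */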
+ for (loop = 0; loop < words; loop++) {
+ ram_id = data_mem + (address / fw_ctx->mtx_bank_size);
+ if (ram_id != curr_bank) {
+ addr = address >> 2;
+ ctrl = 0;
+ ctrl = F_ENCODE(ram_id, MTX_MTX_MCMID) |
+ F_ENCODE(addr, MTX_MTX_MCM_ADDR) |
+ F_ENCODE(1, MTX_MTX_MCMAI);
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_RAM_ACCESS_CONTROL, ctrl);
+ curr_bank = ram_id;
+ }
+ address += 4;
+
+ if (uploaded > (1024 * 24)) /* should this be RAM bank size?? */
+ break;
+ uploaded += 4;
+
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER,
+ data[loop]);
+
+ VXE_POLL_REG32_ISEQ(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_RAM_ACCESS_STATUS,
+ MASK_MTX_MTX_MTX_MCM_STAT,
+ MASK_MTX_MTX_MTX_MCM_STAT,
+ TAL_REG_RD_WR_TRIES);
+ }
+
+ mtx_release_mtx_ctrl_from_dash(fw_ctx);
+}
+
+/*
+ * Transfer memory between the Host and MTX via DMA.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input channel : DMAC channel to use (0 for TopazSC)
+ * @Input host_mem_transfer : buffer info for the host memory
+ * @Input host_mem_offset : offset into the host memory
+ * @Input mtx_addr : Address on MTX
+ * @Input words : size of transfer in 32-bit words (PW units)
+ * @Input rnw : Read not Write (FALSE to write to the MTX)
+ */
+void mtx_dmac_transfer(struct img_fw_context *fw_ctx, unsigned int channel,
+ struct vidio_ddbufinfo *host_mem_transfer,
+ unsigned int host_mem_offset, unsigned int mtx_addr,
+ unsigned int words, unsigned char rnw)
+{
+ unsigned int irq_stat;
+ unsigned int count_reg;
+ void *dmac_reg_addr;
+ void *reg_addr;
+ unsigned int config_reg;
+ unsigned int mmu_status = 0;
+
+ unsigned int dmac_burst_size = DMAC_BURST_2; /* 2 * 128 bits = 32 bytes */
+ unsigned int mtx_burst_size = 4; /* 4 * 2 * 32 bits = 32 bytes */
+
+ /* check the burst sizes */
+ IMG_DBG_ASSERT(dmac_burst_size * 16 == MTX_DMA_BURSTSIZE_BYTES);
+ IMG_DBG_ASSERT(mtx_burst_size * 8 == MTX_DMA_BURSTSIZE_BYTES);
+
+ /* check transfer size matches burst width */
+ IMG_DBG_ASSERT(0 == (words & ((MTX_DMA_BURSTSIZE_BYTES >> 2) - 1)));
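+
+	/*
+	 * The burst-size asserts above hold when MTX_DMA_BURSTSIZE_BYTES is
+	 * 32: the DMAC moves 2 x 128-bit beats and the MTX 4 x 64-bit beats
+	 * per burst, and words must then be a multiple of 8 (32 bytes / 4).
+	 */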
+
+ /* check DMA channel */
+ IMG_DBG_ASSERT(channel < DMAC_MAX_CHANNELS);
+
+ /* check that no transfer is currently in progress */
+ dmac_reg_addr = (void *)topaz_mem_space[REG_DMAC].cpu_addr;
+ count_reg = VXE_RD_REG32(dmac_reg_addr, IMG_SOC_DMAC_COUNT(channel));
+ IMG_DBG_ASSERT(0 == (count_reg &
+ (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));
+
+ /* check we don't already have a page fault condition */
+ reg_addr = (void *)topaz_mem_space[REG_TOPAZHP_MULTICORE].cpu_addr;
+ mmu_status = VXE_RD_REG32(reg_addr, TOPAZHP_TOP_CR_MMU_STATUS);
+
+ IMG_DBG_ASSERT(mmu_status == 0);
+
+ if (mmu_status || (count_reg & (MASK_IMG_SOC_EN |
+ MASK_IMG_SOC_LIST_EN))) {
+ /* DMA engine not idle or pre-existing page fault condition */
+ pr_err("DMA engine not idle or pre-existing page fault condition!\n");
+ fw_ctx->initialized = FALSE;
+ return;
+ }
+
+ /* clear status of any previous interrupts */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+
+ /* and that no interrupts are outstanding */
+ irq_stat = VXE_RD_REG32(dmac_reg_addr, IMG_SOC_DMAC_IRQ_STAT(channel));
+ IMG_DBG_ASSERT(irq_stat == 0);
+
+ /* Write MTX DMAC registers (for current MTX) */
+ /* MTX Address */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_SYSC_CDMAA,
+ mtx_addr);
+
+ /* MTX DMAC Config */
+ config_reg = F_ENCODE(mtx_burst_size, MTX_BURSTSIZE) |
+ F_ENCODE((rnw ? 1 : 0), MTX_RNW) |
+ F_ENCODE(1, MTX_ENABLE) |
+ F_ENCODE(words, MTX_LENGTH);
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_SYSC_CDMAC,
+ config_reg);
+
+ /* Write System DMAC registers */
+ /* per hold - allow HW to sort itself out */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_PER_HOLD(channel), 16);
+
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_SETUP(channel),
+ host_mem_transfer->dev_virt + host_mem_offset);
+
+ /* count reg */
+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+ rnw, DMAC_PWIDTH_32_BIT, words);
+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN; /* generate an interrupt at end of transfer */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_COUNT(channel), count_reg);
+
+ /* don't inc address, set burst size */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_PERIPH(channel),
+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, FALSE, dmac_burst_size));
+
+ /* Target correct MTX DMAC port */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_PERIPHERAL_ADDR(channel),
+ MTX_CR_MTX_SYSC_CDMAT + REG_START_TOPAZ_MTX_HOST);
+
+ /*
+ * Finally, rewrite the count register with the enable bit set to kick
+ * off the transfer
+ */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_COUNT(channel),
+ (count_reg | MASK_IMG_SOC_EN));
+
+ /* Wait for it to finish */
+ VXE_POLL_REG32_ISEQ(dmac_reg_addr, IMG_SOC_DMAC_IRQ_STAT(channel),
+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+ TAL_REG_RD_WR_TRIES);
+ count_reg = VXE_RD_REG32(dmac_reg_addr, IMG_SOC_DMAC_COUNT(channel));
+ mmu_status = VXE_RD_REG32(reg_addr, TOPAZHP_TOP_CR_MMU_STATUS);
+ if (mmu_status || (count_reg &
+ (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
+ pr_err("DMA has failed or page faulted\n");
+ /* DMA has failed or page faulted */
+ fw_ctx->initialized = FALSE;
+ }
+
+ /* Clear the interrupt */
+ VXE_WR_REG32(dmac_reg_addr, IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+}
+
+/*
+ * Sets target MTX for DMA and register writes (current core only)
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+void mtx_set_target(struct img_fw_context *fw_ctx)
+{
+ unsigned int reg = 0;
+
+ reg = F_ENCODE(0, TOPAZHP_TOP_CR_WRITES_CORE_ALL);
+ VXE_WR_REG32(fw_ctx->topaz_multicore_reg_addr,
+ TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, reg);
+}
+
+/*
+ * Upload text and data sections via DMA
+ * @Input dev_ctx : Pointer to the device context
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+static void mtx_uploadfw(void *dev_ctx, struct img_fw_context *fw_ctx)
+{
+ struct topaz_dev_ctx *ctx = (struct topaz_dev_ctx *)dev_ctx;
+ struct vidio_ddbufinfo text, data;
+ void *add_lin_text, *add_lin_data;
+ unsigned int text_size = fw_ctx->mtx_topaz_fw_text_size;
+ unsigned int data_size = fw_ctx->mtx_topaz_fw_data_size;
+
+ if (topaz_mmu_alloc(ctx->topaz_mmu_ctx.mmu_context_handle,
+ ctx->vxe_arg, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ text_size * 4 + MTX_DMA_BURSTSIZE_BYTES, 64, &text)) {
+ pr_err("mmu_alloc for text failed!\n");
+ fw_ctx->initialized = FALSE;
+ return;
+ }
+ if (topaz_mmu_alloc(ctx->topaz_mmu_ctx.mmu_context_handle,
+ ctx->vxe_arg, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ data_size * 4 + MTX_DMA_BURSTSIZE_BYTES, 64, &data)) {
+ pr_err("mmu_alloc for data failed!\n");
+ topaz_mmu_free(ctx->vxe_arg, &text);
+		fw_ctx->initialized = FALSE;
+		return;
+	}
+
+ add_lin_text = text.cpu_virt;
+ memcpy((void *)add_lin_text, fw_ctx->mtx_topaz_fw_text, text_size * 4);
+ add_lin_data = data.cpu_virt;
+ memcpy((void *)add_lin_data, fw_ctx->mtx_topaz_fw_data, data_size * 4);
+
+ topaz_update_device_mem(ctx->vxe_arg, &text);
+ topaz_update_device_mem(ctx->vxe_arg, &data);
+
+ /* adjust transfer sizes of text and data sections to match burst size */
+ text_size =
+ ((text_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1)) & ~(MTX_DMA_BURSTSIZE_BYTES - 1)) /
+ 4;
+ data_size =
+ ((data_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1)) & ~(MTX_DMA_BURSTSIZE_BYTES - 1)) /
+ 4;
+
+ /* ensure that data section (+stack) will not wrap in memory */
+ IMG_DBG_ASSERT(fw_ctx->mtx_ram_size >=
+ (fw_ctx->mtx_topaz_fw_data_origin + (data_size * 4) - MTX_DMA_MEMORY_BASE));
+ if (fw_ctx->mtx_ram_size <
+ (fw_ctx->mtx_topaz_fw_data_origin + (data_size * 4) - MTX_DMA_MEMORY_BASE))
+ fw_ctx->initialized = FALSE;
+
+ /* data section is already prepared/cached */
+ /* Transfer the text section */
+ if (fw_ctx->initialized) {
+ mtx_dmac_transfer(fw_ctx, 0, &text, 0, MTX_DMA_MEMORY_BASE,
+ text_size, FALSE);
+ }
+ /* Transfer the data section */
+ if (fw_ctx->initialized) {
+ mtx_dmac_transfer(fw_ctx, 0, &data, 0,
+ fw_ctx->mtx_topaz_fw_data_origin, data_size,
+ FALSE);
+ }
+
+ topaz_mmu_free(ctx->vxe_arg, &text);
+ topaz_mmu_free(ctx->vxe_arg, &data);
+
+ /* Flush the MMU table cache used during code download */
+ topaz_core_mmu_flush_cache();
+#ifdef DEBUG_ENCODER_DRIVER
+ if (fw_ctx->initialized)
+ pr_info("%s complete!\n", __func__);
+#endif
+}
+
+/*
+ * Load text and data sections onto an MTX.
+ * @Input dev_ctx : Pointer to the device context
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input load_method : Method to use for loading code
+ */
+void mtx_load(void *dev_ctx, struct img_fw_context *fw_ctx,
+ enum mtx_load_method load_method)
+{
+ struct topaz_dev_ctx *ctx = (struct topaz_dev_ctx *)dev_ctx;
+ unsigned int reg;
+ unsigned short i;
+
+ IMG_DBG_ASSERT(fw_ctx->initialized);
+ if (!fw_ctx->initialized)
+ return;
+
+ fw_ctx->load_method = load_method;
+
+ /* set target to current or all MTXs */
+ mtx_set_target(fw_ctx);
+
+ /* MTX Reset */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_SOFT_RESET,
+ MASK_MTX_MTX_RESET);
+ ndelay(300);
+
+ switch (load_method) {
+ case MTX_LOADMETHOD_REGIF:
+ /* Code Upload */
+ mtx_reg_if_upload(fw_ctx, MTX_CORE_CODE_MEM, 0,
+ fw_ctx->mtx_topaz_fw_text_size,
+ fw_ctx->mtx_topaz_fw_text);
+
+ /* Data Upload */
+ mtx_reg_if_upload(fw_ctx, MTX_CORE_DATA_MEM,
+ fw_ctx->mtx_topaz_fw_data_origin - MTX_DMA_MEMORY_BASE,
+ fw_ctx->mtx_topaz_fw_data_size,
+ fw_ctx->mtx_topaz_fw_data);
+ break;
+
+ case MTX_LOADMETHOD_DMA:
+ mtx_uploadfw(ctx, fw_ctx);
+ break;
+
+ case MTX_LOADMETHOD_NONE:
+ break;
+
+ default:
+ IMG_DBG_ASSERT(FALSE);
+ }
+
+ /* if we have had any failures up to this point then return now */
+ if (!fw_ctx->initialized)
+ return;
+
+ if (load_method != MTX_LOADMETHOD_NONE) {
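+		/*
+		 * Zero core registers 5-7 of each data bank; the register
+		 * specifier is presumably encoded as (bank unit | index << 4),
+		 * with unit 0x1 the D0 bank and 0x2 the D1 bank.
+		 */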
+ for (i = 5; i < 8; i++)
+ mtx_write_core_reg(fw_ctx, 0x1 | (i << 4), 0);
+
+ /* Restore 8 Registers of D1 Bank */
+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
+ for (i = 5; i < 8; i++)
+ mtx_write_core_reg(fw_ctx, 0x2 | (i << 4), 0);
+
+ /* Set Starting PC address */
+ mtx_write_core_reg(fw_ctx, MTX_PC, PC_START_ADDRESS);
+
+ /* Verify Starting PC */
+ reg = mtx_read_core_reg(fw_ctx, MTX_PC);
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("PC_START_ADDRESS = 0x%08X\n", reg);
+#endif
+ IMG_DBG_ASSERT(reg == PC_START_ADDRESS);
+ }
+}
+
+/*
+ * Deinitialise the given MTX context structure
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+void mtx_deinitialize(struct img_fw_context *fw_ctx)
+{
+ struct topaz_dev_ctx *ctx = (struct topaz_dev_ctx *)fw_ctx->dev_ctx;
+ unsigned int i;
+
+ if (!fw_ctx->initialized)
+		pr_warn("Warning: detected multiple de-initialisations\n");
+
+ for (i = 0; i < TOPAZHP_MAX_POSSIBLE_STREAMS; i++) {
+ if (fw_ctx->mtx_context_data_copy[i])
+ topaz_mmu_free(ctx->vxe_arg, fw_ctx->mtx_context_data_copy[i]);
+ fw_ctx->mtx_context_data_copy[i] = NULL;
+ }
+
+ kfree(fw_ctx->mtx_reg_copy);
+ fw_ctx->mtx_reg_copy = NULL;
+ fw_ctx->initialized = FALSE;
+}
+
+/*
+ * Populate the given MTX context structure
+ * @Input codec : version of codec specific firmware to associate with this MTX
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+int mtx_populate_fw_ctx(enum img_codec codec, struct img_fw_context *fw_ctx)
+{
+ unsigned int pipe_cnt;
+ unsigned int size;
+ unsigned int i;
+
+ if (fw_ctx->initialized || fw_ctx->populated)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ /* initialise Context structure */
+ fw_ctx->mtx_reg_mem_space_addr = (void *)topaz_mem_space[REG_MTX].cpu_addr;
+ fw_ctx->topaz_multicore_reg_addr = (void *)topaz_mem_space[REG_TOPAZHP_MULTICORE].cpu_addr;
+
+ fw_ctx->core_rev = VXE_RD_REG32(fw_ctx->topaz_multicore_reg_addr,
+ TOPAZHP_TOP_CR_TOPAZHP_CORE_REV);
+ fw_ctx->core_rev &= (MASK_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV |
+ MASK_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV |
+ MASK_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV);
+ fw_ctx->core_des1 = VXE_RD_REG32(fw_ctx->topaz_multicore_reg_addr,
+ TOPAZHP_TOP_CR_TOPAZHP_CORE_DES1);
+
+ /* Number of hw pipes */
+ pipe_cnt = VXE_RD_REG32(fw_ctx->topaz_multicore_reg_addr, TOPAZHP_TOP_CR_MULTICORE_HW_CFG);
+ pipe_cnt = (pipe_cnt & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED);
+ fw_ctx->hw_num_pipes = pipe_cnt;
+
+ IMG_DBG_ASSERT(fw_ctx->hw_num_pipes > 0 && fw_ctx->hw_num_pipes <= TOPAZHP_MAX_NUM_PIPES);
+
+ if (fw_ctx->hw_num_pipes <= 0 || fw_ctx->hw_num_pipes > TOPAZHP_MAX_NUM_PIPES)
+ return IMG_ERROR_INVALID_ID;
+
+ for (i = 0; i < fw_ctx->hw_num_pipes; i++)
+ fw_ctx->topaz_reg_mem_space_addr[i] =
+ (void *)topaz_mem_space[REG_TOPAZHP_CORE_0 + (4 * i)].cpu_addr;
+
+ fw_ctx->mtx_debug_val = VXE_RD_REG32(fw_ctx->topaz_multicore_reg_addr,
+ TOPAZHP_TOP_CR_MTX_DEBUG_MSTR);
+
+ /* last bank size */
+ size = 0x1 <<
+ (F_EXTRACT(fw_ctx->mtx_debug_val, TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE) + 2);
+ /* all other banks */
+ fw_ctx->mtx_bank_size = 0x1 <<
+ (F_EXTRACT(fw_ctx->mtx_debug_val, TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE) + 2);
+ /* total RAM size */
+ fw_ctx->mtx_ram_size = size +
+ (fw_ctx->mtx_bank_size *
+ (F_EXTRACT(fw_ctx->mtx_debug_val, TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS) - 1));
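+
+ /*
+ * Worked example (hypothetical field values): if the debug register
+ * reported LAST_RAM_BANK_SIZE = 8, RAM_BANK_SIZE = 10 and RAM_BANKS = 4,
+ * the last bank would be 1 << (8 + 2) = 1024, every other bank
+ * 1 << (10 + 2) = 4096, and the total RAM size
+ * 1024 + 4096 * (4 - 1) = 13312.
+ */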
+
+ fw_ctx->drv_has_mtx_ctrl = FALSE;
+ fw_ctx->access_control = 0;
+
+ fw_ctx->active_ctx_mask = 0;
+
+ if (mtx_select_fw_build(fw_ctx, codec) != IMG_SUCCESS) {
+ fw_ctx->populated = FALSE;
+ fw_ctx->initialized = FALSE;
+ return IMG_ERROR_UNDEFINED;
+ }
+
+ if (fw_ctx->mtx_topaz_fw_data_size != 0) {
+ /* check FW fits in memory */
+ /* could also add stack size estimate */
+ size = 4 * fw_ctx->mtx_topaz_fw_data_size;
+ size += (fw_ctx->mtx_topaz_fw_data_origin - MTX_DMA_MEMORY_BASE);
+ if (size > fw_ctx->mtx_ram_size) {
+ IMG_DBG_ASSERT(fw_ctx->mtx_ram_size > size);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ fw_ctx->populated = TRUE;
+ return IMG_SUCCESS;
+}
+
+void mtx_initialize(void *dev_ctx, struct img_fw_context *fw_ctx)
+{
+ struct topaz_dev_ctx *ctx = (struct topaz_dev_ctx *)dev_ctx;
+ unsigned int i = 0;
+
+ if (fw_ctx->initialized)
+ return;
+
+ if (fw_ctx->mtx_topaz_fw_data_size != 0) {
+ fw_ctx->mtx_reg_copy = kmalloc((53 * 4), GFP_KERNEL);
+ if (!fw_ctx->mtx_reg_copy)
+ return;
+ for (i = 0; i < TOPAZHP_MAX_POSSIBLE_STREAMS; i++) {
+ fw_ctx->mtx_context_data_copy[i] = kmalloc
+ (sizeof(*fw_ctx->mtx_context_data_copy[i]),
+ GFP_KERNEL);
+ if (!fw_ctx->mtx_context_data_copy[i])
+ goto alloc_failed;
+
+ if (topaz_mmu_alloc(ctx->topaz_mmu_ctx.mmu_context_handle,
+ ctx->vxe_arg, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MTX_CONTEXT_SIZE, 64,
+ fw_ctx->mtx_context_data_copy[i])) {
+ pr_err("mmu_alloc for data copy failed!\n");
+ kfree(fw_ctx->mtx_context_data_copy[i]);
+ fw_ctx->mtx_context_data_copy[i] = NULL;
+ goto alloc_failed;
+ }
+ }
+
+ fw_ctx->dev_ctx = dev_ctx;
+ fw_ctx->initialized = TRUE;
+ }
+
+ return;
+
+alloc_failed:
+ while (i > 0) {
+ topaz_mmu_free(ctx->vxe_arg, fw_ctx->mtx_context_data_copy[i - 1]);
+ kfree(fw_ctx->mtx_context_data_copy[i - 1]);
+ fw_ctx->mtx_context_data_copy[i - 1] = NULL;
+ i--;
+ }
+ /* Do not leak the register copy allocated above */
+ kfree(fw_ctx->mtx_reg_copy);
+ fw_ctx->mtx_reg_copy = NULL;
+}
+
+int mtx_get_fw_config_int(struct img_fw_context const * const fw_ctx,
+ unsigned char const * const name)
+{
+ const unsigned long max_len = 1024;
+ unsigned int ii;
+
+ if (fw_ctx->mtx_topaz_fw_data_size == 0) {
+ IMG_DBG_ASSERT("FW context structure is not initialised!" == NULL);
+ return -1;
+ }
+
+ for (ii = 0; ii < fw_ctx->int_defines.length; ii++) {
+ if (strncmp(fw_ctx->int_defines.names[ii], name, max_len) == 0)
+ return fw_ctx->int_defines.values[ii];
+ }
+
+ return -1;
+}
+
+/*
+ * Start an MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+void mtx_start(struct img_fw_context *fw_ctx)
+{
+ IMG_DBG_ASSERT(fw_ctx->initialized);
+ if (!fw_ctx->initialized)
+ return;
+
+ /* target only the current MTX */
+ mtx_set_target(fw_ctx);
+
+ /* Turn on the thread */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_ENABLE,
+ MASK_MTX_MTX_ENABLE);
+}
+
+/*
+ * Stop an MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */
+void mtx_stop(struct img_fw_context *fw_ctx)
+{
+ IMG_DBG_ASSERT(fw_ctx->initialized);
+
+ /* target only the current MTX */
+ mtx_set_target(fw_ctx);
+
+ /*
+ * Turn off the thread by writing one to the MTX_TOFF field of the MTX_ENABLE
+ * register.
+ */
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_ENABLE,
+ MASK_MTX_MTX_TOFF);
+}
+
+/*
+ * Kick an MTX.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ * @Input kick_count : The number of kicks to register
+ */
+void mtx_kick(struct img_fw_context *fw_ctx, unsigned int kick_count)
+{
+ IMG_DBG_ASSERT(fw_ctx->initialized);
+ if (!fw_ctx->initialized)
+ return;
+
+ /* target only the current MTX */
+ mtx_set_target(fw_ctx);
+
+ VXE_WR_REG32(fw_ctx->mtx_reg_mem_space_addr, MTX_CR_MTX_KICK,
+ kick_count);
+}
+
+/*
+ * Wait for MTX to halt
+ * @Input fw_ctx : Pointer to the MTX context
+ */
+void mtx_wait_for_completion(struct img_fw_context *fw_ctx)
+{
+ IMG_DBG_ASSERT(fw_ctx->initialized);
+
+ if (fw_ctx->load_method != MTX_LOADMETHOD_NONE) {
+ /* target only the current MTX */
+ mtx_set_target(fw_ctx);
+
+ /* Wait for the Completion */
+ VXE_POLL_REG32_ISEQ(fw_ctx->mtx_reg_mem_space_addr,
+ MTX_CR_MTX_ENABLE, MASK_MTX_MTX_TOFF,
+ (MASK_MTX_MTX_TOFF | MASK_MTX_MTX_ENABLE),
+ TAL_REG_RD_WR_TRIES);
+ }
+}
+
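+/*
+ * Poll the firmware idle status scratch register until the firmware
+ * reports idle; returns the poll result.
+ * @Input fw_ctx : Pointer to the context of the target MTX
+ */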
+unsigned int poll_hw_inactive(struct img_fw_context *fw_ctx)
+{
+ return VXE_POLL_REG32_ISEQ(fw_ctx->topaz_multicore_reg_addr,
+ MTX_SCRATCHREG_IDLE,
+ F_ENCODE(FW_IDLE_STATUS_IDLE, FW_IDLE_REG_STATUS),
+ MASK_FW_IDLE_REG_STATUS,
+ TAL_REG_RD_WR_TRIES);
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/img_soc_dmac_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/img_soc_dmac_regs.h
new file mode 100644
index 000000000000..1a7ee5b101c5
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/img_soc_dmac_regs.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_img_soc_dmac_regs_h
+#define _REGCONV_H_img_soc_dmac_regs_h
+
+/* Register DMAC_COUNT */
+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
+#define MASK_IMG_SOC_BSWAP 0x40000000
+#define SHIFT_IMG_SOC_BSWAP 30
+#define SHIFT_IMG_SOC_PW 27
+#define MASK_IMG_SOC_PW 0x18000000
+#define MASK_IMG_SOC_DIR 0x04000000
+#define SHIFT_IMG_SOC_DIR 26
+
+/* Register DMAC_COUNT */
+#define MASK_IMG_SOC_EN 0x00010000
+#define MASK_IMG_SOC_LIST_EN 0x00040000
+
+/* Register DMAC_COUNT */
+#define MASK_IMG_SOC_PI 0x03000000
+#define SHIFT_IMG_SOC_PI 24
+#define MASK_IMG_SOC_CNT 0x0000FFFF
+#define SHIFT_IMG_SOC_CNT 0
+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
+
+/* Register DMAC_IRQ_STAT */
+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN 17
+
+/* Register DMAC_PER_HOLD */
+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
+
+/* Register DMAC_SETUP */
+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
+
+/* Register DMAC_PERIPH */
+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
+#define MASK_IMG_SOC_ACC_DEL 0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL 29
+#define MASK_IMG_SOC_INCR 0x08000000
+#define SHIFT_IMG_SOC_INCR 27
+#define MASK_IMG_SOC_BURST 0x07000000
+#define SHIFT_IMG_SOC_BURST 24
+
+/* Register DMAC_PERIPHERAL_ADDR */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/mtx_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/mtx_regs.h
new file mode 100644
index 000000000000..27b9445a09e8
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/mtx_regs.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_mtx_regs_h
+#define _REGCONV_H_mtx_regs_h
+
+/* Register CR_MTX_ENABLE */
+#define MTX_CR_MTX_ENABLE 0x0000
+#define MASK_MTX_MTX_ENABLE 0x00000001
+#define MASK_MTX_MTX_TOFF 0x00000002
+
+/* Register CR_MTX_KICK */
+#define MTX_CR_MTX_KICK 0x0080
+
+/* Register CR_MTX_REGISTER_READ_WRITE_DATA */
+#define MTX_CR_MTX_REGISTER_READ_WRITE_DATA 0x00F8
+
+/* Register CR_MTX_REGISTER_READ_WRITE_REQUEST */
+#define MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST 0x00FC
+#define MASK_MTX_MTX_RNW 0x00010000
+#define MASK_MTX_MTX_DREADY 0x80000000
+
+/* Register CR_MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
+
+/* Register CR_MTX_RAM_ACCESS_CONTROL */
+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
+#define MASK_MTX_MTX_MCMR 0x00000001
+#define MASK_MTX_MTX_MCMAI 0x00000002
+#define SHIFT_MTX_MTX_MCMAI 1
+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR 2
+#define MASK_MTX_MTX_MCMID 0x0FF00000
+#define SHIFT_MTX_MTX_MCMID 20
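+
+/*
+ * Access sketch (illustrative, not taken from the driver): a RAM access
+ * would typically be set up by encoding the address and the memory ID into
+ * this register, with MCMAI selecting address auto-increment and MCMR
+ * selecting read (set) or write (clear), e.g.
+ *
+ *   ctrl = ((addr << SHIFT_MTX_MTX_MCM_ADDR) & MASK_MTX_MTX_MCM_ADDR) |
+ *          ((id << SHIFT_MTX_MTX_MCMID) & MASK_MTX_MTX_MCMID) |
+ *          MASK_MTX_MTX_MCMAI | MASK_MTX_MTX_MCMR;
+ */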
+
+/* Register CR_MTX_RAM_ACCESS_STATUS */
+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
+
+/* Register CR_MTX_SOFT_RESET */
+#define MTX_CR_MTX_SOFT_RESET 0x0200
+#define MASK_MTX_MTX_RESET 0x00000001
+
+/* Register CR_MTX_SYSC_CDMAC */
+#define MTX_CR_MTX_SYSC_CDMAC 0x0340
+#define MASK_MTX_LENGTH 0x0000FFFF
+#define SHIFT_MTX_LENGTH 0
+#define MASK_MTX_ENABLE 0x00010000
+#define SHIFT_MTX_ENABLE 16
+#define MASK_MTX_RNW 0x00020000
+#define SHIFT_MTX_RNW 17
+#define MASK_MTX_BURSTSIZE 0x07000000
+#define SHIFT_MTX_BURSTSIZE 24
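+
+/*
+ * Usage sketch (illustrative only): a DMA setup word for, say, a 256-word
+ * write burst of size 4 would be composed field by field, leaving
+ * MASK_MTX_RNW clear for a write:
+ *
+ *   val = ((256 << SHIFT_MTX_LENGTH) & MASK_MTX_LENGTH) |
+ *         ((1 << SHIFT_MTX_ENABLE) & MASK_MTX_ENABLE) |
+ *         ((4 << SHIFT_MTX_BURSTSIZE) & MASK_MTX_BURSTSIZE);
+ */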
+
+/* Register CR_MTX_SYSC_CDMAA */
+#define MTX_CR_MTX_SYSC_CDMAA 0x0344
+
+/* Register CR_MTX_SYSC_CDMAT */
+#define MTX_CR_MTX_SYSC_CDMAT 0x0350
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_coreext_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_coreext_regs.h
new file mode 100644
index 000000000000..d2e8dba801e7
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_coreext_regs.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_topazhp_coreext_regs_h
+#define _REGCONV_H_topazhp_coreext_regs_h
+
+/* Register CR_SCALER_INPUT_SIZE */
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_WIDTH_MIN1 0x00000FFF
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_WIDTH_MIN1 0
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_HEIGHT_MIN1 0x0FFF0000
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_HEIGHT_MIN1 16
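+
+/*
+ * The fields hold dimension-minus-one values: a 1920x1080 input, for
+ * example, would be programmed as (1920 - 1) in bits [11:0] and
+ * (1080 - 1) in bits [27:16].
+ */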
+
+/* Register CR_SCALER_PITCH */
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_HOR_PITCH 0x00007FFF
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_HOR_PITCH 0
+#define MASK_TOPAZHP_EXT_CR_SCALER_HOR_BILINEAR_FILTER 0x00008000
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_HOR_BILINEAR_FILTER 15
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_VER_PITCH 0x7FFF0000
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_VER_PITCH 16
+#define MASK_TOPAZHP_EXT_CR_SCALER_VER_BILINEAR_FILTER 0x80000000
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_VER_BILINEAR_FILTER 31
+
+/* Register CR_SCALER_CROP */
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_CROP_VER 0x000000FF
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_CROP_VER 0
+#define MASK_TOPAZHP_EXT_CR_SCALER_INPUT_CROP_HOR 0x0000FF00
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_INPUT_CROP_HOR 8
+
+/* Register CR_SCALER_CONTROL */
+#define MASK_TOPAZHP_EXT_CR_SCALER_ENABLE 0x00000001
+#define SHIFT_TOPAZHP_EXT_CR_SCALER_ENABLE 0
+#define MASK_TOPAZHP_EXT_CR_ENABLE_COLOUR_SPACE_CONVERSION 0x00000002
+#define SHIFT_TOPAZHP_EXT_CR_ENABLE_COLOUR_SPACE_CONVERSION 1
+#define MASK_TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT 0x007F0000
+#define SHIFT_TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT 16
+
+/* 4:4:4, Any 3 colour space components plus reserved byte (e.g.
+ * RGB), 8-bit components, packed 32-bit per pixel in a single plane, 8 LSBits not used
+ */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL4XBCA8 0x0000007E
+
+/* 4:4:4, Any 3 colour space components plus reserved byte (e.g.
+ * RGB), 8-bit components, packed 32-bit per pixel in a single plane, 8 MSBits not used
+ */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL4ABCX8 0x0000007C
+
+/* RGB with 5 bits for R, 6 bits for G and 5 bits for B */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL3RGB565 0x00000070
+
+/* 4:4:4, Y in 1 plane, CrCb interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL12YCRCB8 0x0000006A
+
+/* 4:4:4, Y in 1 plane, CbCr interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL12YCBCR8 0x00000068
+
+/* 4:4:4, Y Cb Cr in 3 separate planes, 8-bit components
+ * (could also be ABC, but colour space conversion is not supported by the
+ * input scaler)
+ */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL111YCBCR8 0x00000060
+
+/* 4:2:2, CrYCbY interleaved in a single plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3CRYCBY8 0x00000056
+
+/* 4:2:2, CbYCrY interleaved in a single plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3CBYCRY8 0x00000054
+
+/* 4:2:2, YCrYCb interleaved in a single plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3YCRYCB8 0x00000052
+
+/* 4:2:2, YCbYCr interleaved in a single plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3YCBYCR8 0x00000050
+
+/* 4:2:2, Y in 1 plane, CrCb interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL12YCRCB8 0x0000004A
+
+/* 4:2:2, Y in 1 plane, CbCr interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL12YCBCR8 0x00000048
+
+/* 4:2:2, Y Cb Cr in 3 separate planes, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL111YCBCR8 0x00000040
+
+/* 4:2:0, Y in 1 plane, CrCb interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL12YCRCB8 0x0000002A
+
+/* 4:2:0, Y in 1 plane, CbCr interleaved in 2nd plane, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL12YCBCR8 0x00000028
+
+/* 4:2:0, Y Cb Cr in 3 separate planes, 8-bit components */
+#define TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL111YCBCR8 0x00000020
+
+/* Register CR_CSC_SOURCE_MOD_Y_0 */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_00 0x00000003
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_00 0
+
+/* Subtract 1/2 maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_00_MINUS_1_2 0x00000003
+
+/* Subtract 1/16th maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_00_MINUS_1_16 0x00000002
+
+/* Source pixel component is unsigned */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_00_UNSIGNED 0x00000000
+
+/* Register CR_CSC_SOURCE_MOD_Y_1 */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_01 0x00000003
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_01 0
+
+/* Subtract 1/2 maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_01_MINUS_1_2 0x00000003
+
+/* Subtract 1/16th maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_01_MINUS_1_16 0x00000002
+
+/* Source pixel component is unsigned */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_01_UNSIGNED 0x00000000
+
+/* Register CR_CSC_SOURCE_CB_CR_1 */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB_01 0x00000FFF
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB_01 0
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CR_01 16
+
+/* Register CR_CSC_SOURCE_MOD_Y_2 */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_02 0x00000003
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_MOD_02 0
+
+/* Subtract 1/2 maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_02_MINUS_1_2 0x00000003
+
+/* Subtract 1/16th maximum value from unsigned pixel component */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_02_MINUS_1_16 0x00000002
+
+/* Source pixel component is unsigned */
+#define TOPAZHP_EXT_CR_CSC_SOURCE_MOD_02_UNSIGNED 0x00000000
+
+/* Register CR_CSC_SOURCE_CB_CR_2 */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB_02 0x00000FFF
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CR_02 16
+
+/* Register CR_CSC_OUTPUT_COEFF_0 */
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP_00 0
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP_00 16
+#define MASK_TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_00 0x30000000
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_00 28
+
+/* Add 1/16th maximum value prior to applying unsigned clamping */
+#define TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_00_ADD_1_16 0x00000002
+
+/* Register CR_CSC_OUTPUT_COEFF_1 */
+#define MASK_TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP_01 0x000003FF
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP_01 0
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP_01 16
+#define MASK_TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_01 0x30000000
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_01 28
+
+/* Add 1/2 maximum value prior to applying unsigned clamping */
+#define TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_01_ADD_1_2 0x00000003
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_Y 0x0FFF0000
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_Y 16
+
+/* Register CR_CSC_SOURCE_CB_CR */
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB 0x00000FFF
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB 0
+#define MASK_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CR 0x0FFF0000
+#define SHIFT_TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CR 16
+
+/* Register CR_CSC_OUTPUT_COEFF */
+#define MASK_TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP 0x000003FF
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP 0
+#define MASK_TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP 0x03FF0000
+#define SHIFT_TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP 16
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_db_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_db_regs.h
new file mode 100644
index 000000000000..29c974e86b92
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_db_regs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_topaz_db_regs_h
+#define _REGCONV_H_topaz_db_regs_h
+
+/* Register CR_DB_DISABLE_DEBLOCK_IDC */
+#define MASK_TOPAZ_DB_CR_DISABLE_DEBLOCK_IDC 0x00000003
+#define SHIFT_TOPAZ_DB_CR_DISABLE_DEBLOCK_IDC 0
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_vlc_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_vlc_regs.h
new file mode 100644
index 000000000000..0a3d6c588a13
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topaz_vlc_regs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_topaz_vlc_regs_h
+#define _REGCONV_H_topaz_vlc_regs_h
+
+/* Register CR_VLC_CONTROL */
+#define MASK_TOPAZ_VLC_CR_CODEC 0x00000003
+#define SHIFT_TOPAZ_VLC_CR_CODEC 0
+#define MASK_TOPAZ_VLC_CR_CABAC_ENABLE 0x00000100
+#define SHIFT_TOPAZ_VLC_CR_CABAC_ENABLE 8
+#define MASK_TOPAZ_VLC_CR_VLC_FIELD_CODED 0x00000200
+#define SHIFT_TOPAZ_VLC_CR_VLC_FIELD_CODED 9
+#define MASK_TOPAZ_VLC_CR_VLC_8X8_TRANSFORM 0x00000400
+#define SHIFT_TOPAZ_VLC_CR_VLC_8X8_TRANSFORM 10
+#define MASK_TOPAZ_VLC_CR_VLC_CONSTRAINED_INTRA 0x00000800
+#define SHIFT_TOPAZ_VLC_CR_VLC_CONSTRAINED_INTRA 11
+#define MASK_TOPAZ_VLC_CR_CODEC_EXTEND 0x10000000
+#define SHIFT_TOPAZ_VLC_CR_CODEC_EXTEND 28
+
+/* Register CR_VLC_IPCM_0 */
+#define MASK_TOPAZ_VLC_CR_CABAC_DB_MARGIN 0x03FF0000
+#define SHIFT_TOPAZ_VLC_CR_CABAC_DB_MARGIN 16
+#define MASK_TOPAZ_VLC_CR_CABAC_BIN_FLEX 0x00001FFF
+#define SHIFT_TOPAZ_VLC_CR_CABAC_BIN_FLEX 0
+#define MASK_TOPAZ_VLC_CR_IPCM_THRESHOLD 0x00000FFF
+#define SHIFT_TOPAZ_VLC_CR_IPCM_THRESHOLD 0
+#define MASK_TOPAZ_VLC_CR_CABAC_BIN_LIMIT 0x1FFF0000
+#define SHIFT_TOPAZ_VLC_CR_CABAC_BIN_LIMIT 16
+#define MASK_TOPAZ_VLC_CR_SLICE_SIZE_LIMIT 0x00FFFFFF
+#define SHIFT_TOPAZ_VLC_CR_SLICE_SIZE_LIMIT 0
+#define MASK_TOPAZ_VLC_CR_SLICE_MBS_LIMIT 0x00003FFF
+#define SHIFT_TOPAZ_VLC_CR_SLICE_MBS_LIMIT 0
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_core_regs.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_core_regs.h
new file mode 100644
index 000000000000..b355c94d4e72
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_core_regs.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_topazhp_core_regs_h
+#define _REGCONV_H_topazhp_core_regs_h
+
+/* Register CR_LRITC_CACHE_CHUNK_CONFIG */
+#define MASK_TOPAZHP_CR_CACHE_CHUNKS_PRIORITY 0x000000FF
+#define SHIFT_TOPAZHP_CR_CACHE_CHUNKS_PRIORITY 0
+#define MASK_TOPAZHP_CR_CACHE_CHUNKS_MAX 0x0000FF00
+#define SHIFT_TOPAZHP_CR_CACHE_CHUNKS_MAX 8
+#define MASK_TOPAZHP_CR_CACHE_CHUNKS_PER_MB 0x00FF0000
+#define SHIFT_TOPAZHP_CR_CACHE_CHUNKS_PER_MB 16
+
+/* Register CR_SEQ_CUR_PIC_ROW_STRIDE */
+#define MASK_TOPAZHP_CR_CUR_PIC_LUMA_STRIDE 0x0000FFC0
+#define SHIFT_TOPAZHP_CR_CUR_PIC_LUMA_STRIDE 6
+#define MASK_TOPAZHP_CR_CUR_PIC_CHROMA_STRIDE 0xFFC00000
+#define SHIFT_TOPAZHP_CR_CUR_PIC_CHROMA_STRIDE 22
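+
+/*
+ * Note (illustrative reading of the bit layout): the stride fields start at
+ * bit 6, so strides are implicitly 64-byte aligned; a luma stride of
+ * 1920 bytes (0x780), for example, fits the field unchanged since its low
+ * six bits are zero.
+ */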
+
+/* Register CR_SEQUENCER_CONFIG */
+#define MASK_TOPAZHP_CR_ENCODER_STANDARD 0x00000007
+#define SHIFT_TOPAZHP_CR_ENCODER_STANDARD 0
+#define TOPAZHP_CR_ENCODER_STANDARD_H264 0x00000002 /* H264 encode */
+#define MASK_TOPAZHP_CR_FRAME_STORE_FORMAT 0x00000030
+#define SHIFT_TOPAZHP_CR_FRAME_STORE_FORMAT 4
+
+/* 4:2:0 frame, with Luma, Cb and Cr all in separate planes (if the frame
+ * store actually contains 4:2:2 chroma, the chroma stride can be doubled
+ * so that it is read as 4:2:0)
+ */
+#define MASK_TOPAZHP_CR_FRAME_STORE_CHROMA_SWAP 0x00000040
+#define SHIFT_TOPAZHP_CR_FRAME_STORE_CHROMA_SWAP 6
+#define MASK_TOPAZHP_CR_FIELD_MODE 0x00000080
+#define SHIFT_TOPAZHP_CR_FIELD_MODE 7
+#define MASK_TOPAZHP_CR_REF_PIC0_VALID 0x00000100
+#define SHIFT_TOPAZHP_CR_REF_PIC0_VALID 8
+#define MASK_TOPAZHP_CR_REF_PIC1_VALID 0x00000200
+#define SHIFT_TOPAZHP_CR_REF_PIC1_VALID 9
+#define MASK_TOPAZHP_CR_REF_PIC1_EQUAL_PIC0 0x00000400
+#define SHIFT_TOPAZHP_CR_REF_PIC1_EQUAL_PIC0 10
+#define MASK_TOPAZHP_CR_ABOVE_OUT_OF_SLICE_VALID 0x00000800
+#define SHIFT_TOPAZHP_CR_ABOVE_OUT_OF_SLICE_VALID 11
+#define MASK_TOPAZHP_CR_TEMPORAL_COL_IN_VALID 0x00001000
+#define SHIFT_TOPAZHP_CR_TEMPORAL_COL_IN_VALID 12
+#define MASK_TOPAZHP_CR_TEMPORAL_PIC0_BELOW_IN_VALID 0x00002000
+#define SHIFT_TOPAZHP_CR_TEMPORAL_PIC0_BELOW_IN_VALID 13
+#define MASK_TOPAZHP_CR_TEMPORAL_PIC1_BELOW_IN_VALID 0x00004000
+#define SHIFT_TOPAZHP_CR_TEMPORAL_PIC1_BELOW_IN_VALID 14
+#define MASK_TOPAZHP_CR_DEBLOCK_ENABLE 0x00008000
+#define SHIFT_TOPAZHP_CR_DEBLOCK_ENABLE 15
+#define MASK_TOPAZHP_CR_WRITE_TEMPORAL_COL_VALID 0x00010000
+#define SHIFT_TOPAZHP_CR_WRITE_TEMPORAL_COL_VALID 16
+#define MASK_TOPAZHP_CR_WRITE_TEMPORAL_PIC0_BELOW_VALID 0x00020000
+#define SHIFT_TOPAZHP_CR_WRITE_TEMPORAL_PIC0_BELOW_VALID 17
+#define MASK_TOPAZHP_CR_WRITE_TEMPORAL_PIC1_BELOW_VALID 0x00040000
+#define SHIFT_TOPAZHP_CR_WRITE_TEMPORAL_PIC1_BELOW_VALID 18
+#define MASK_TOPAZHP_CR_WRITE_MB_FIRST_STAGE_VALID 0x00200000
+#define SHIFT_TOPAZHP_CR_WRITE_MB_FIRST_STAGE_VALID 21
+#define MASK_TOPAZHP_CR_MB_CONTROL_IN_VALID 0x00800000
+#define SHIFT_TOPAZHP_CR_MB_CONTROL_IN_VALID 23
+#define MASK_TOPAZHP_CR_BEST_MULTIPASS_OUT_VALID 0x10000000
+#define SHIFT_TOPAZHP_CR_BEST_MULTIPASS_OUT_VALID 28
+#define MASK_TOPAZHP_CR_BEST_MVS_OUT_DISABLE 0x40000000
+#define SHIFT_TOPAZHP_CR_BEST_MVS_OUT_DISABLE 30
+#define MASK_TOPAZHP_CR_SLICE_TYPE 0x00030000
+#define SHIFT_TOPAZHP_CR_SLICE_TYPE 16
+#define TOPAZHP_CR_SLICE_TYPE_B_SLICE 0x00000002 /* B-slice */
+#define TOPAZHP_CR_SLICE_TYPE_P_SLICE 0x00000001 /* P-slice */
+#define TOPAZHP_CR_SLICE_TYPE_I_SLICE 0x00000000 /* I-slice */
+#define MASK_TOPAZHP_CR_MVCALC_RESTRICT_PICTURE 0x00010000
+#define SHIFT_TOPAZHP_CR_MVCALC_RESTRICT_PICTURE 16
+
+/* Register CR_MVCALC_CONFIG */
+#define MASK_TOPAZHP_CR_MVCALC_GRID_MB_X_STEP 0x0000000F
+#define SHIFT_TOPAZHP_CR_MVCALC_GRID_MB_X_STEP 0
+#define MASK_TOPAZHP_CR_MVCALC_GRID_MB_Y_STEP 0x00000F00
+#define SHIFT_TOPAZHP_CR_MVCALC_GRID_MB_Y_STEP 8
+#define MASK_TOPAZHP_CR_MVCALC_GRID_SUB_STEP 0x000F0000
+#define SHIFT_TOPAZHP_CR_MVCALC_GRID_SUB_STEP 16
+#define MASK_TOPAZHP_CR_MVCALC_GRID_DISABLE 0x00800000
+#define SHIFT_TOPAZHP_CR_MVCALC_GRID_DISABLE 23
+#define MASK_TOPAZHP_CR_MVCALC_IPE0_JITTER_FACTOR 0x03000000
+#define SHIFT_TOPAZHP_CR_MVCALC_IPE0_JITTER_FACTOR 24
+#define MASK_TOPAZHP_CR_MVCALC_IPE1_JITTER_FACTOR 0x0C000000
+#define SHIFT_TOPAZHP_CR_MVCALC_IPE1_JITTER_FACTOR 26
+#define MASK_TOPAZHP_CR_MVCALC_JITTER_POINTER_RST 0x10000000
+#define MASK_TOPAZHP_CR_MVCALC_NO_PSEUDO_DUPLICATES 0x20000000
+#define SHIFT_TOPAZHP_CR_MVCALC_NO_PSEUDO_DUPLICATES 29
+#define MASK_TOPAZHP_CR_MVCALC_DUP_VEC_MARGIN 0xC0000000
+#define SHIFT_TOPAZHP_CR_MVCALC_DUP_VEC_MARGIN 30
+
+/* Register CR_MVCALC_COLOCATED */
+#define MASK_TOPAZHP_CR_COL_DIST_SCALE_FACT 0x000007FF
+#define SHIFT_TOPAZHP_CR_COL_DIST_SCALE_FACT 0
+
+/* Register CR_MVCALC_BELOW */
+#define MASK_TOPAZHP_CR_PIC0_DIST_SCALE_FACTOR 0x000007FF
+#define SHIFT_TOPAZHP_CR_PIC0_DIST_SCALE_FACTOR 0
+#define MASK_TOPAZHP_CR_PIC1_DIST_SCALE_FACTOR 0x07FF0000
+#define SHIFT_TOPAZHP_CR_PIC1_DIST_SCALE_FACTOR 16
+
+/* Register CR_PREFETCH_QP */
+#define MASK_TOPAZHP_CR_SKIPPED_CODED_SCALE_IDX 0x00007000
+#define SHIFT_TOPAZHP_CR_SKIPPED_CODED_SCALE_IDX 12
+#define MASK_TOPAZHP_CR_INTER_INTRA_SCALE_IDX 0x00000700
+#define SHIFT_TOPAZHP_CR_INTER_INTRA_SCALE_IDX 8
+
+/* Register CR_MB_HOST_CONTROL */
+#define MASK_TOPAZHP_CR_MB_HOST_QP 0x00000001
+#define SHIFT_TOPAZHP_CR_MB_HOST_QP 0
+#define MASK_TOPAZHP_CR_MB_HOST_SKIPPED_CODED_SCALE 0x00000002
+#define SHIFT_TOPAZHP_CR_MB_HOST_SKIPPED_CODED_SCALE 1
+#define MASK_TOPAZHP_CR_MB_HOST_INTER_INTRA_SCALE 0x00000004
+#define SHIFT_TOPAZHP_CR_MB_HOST_INTER_INTRA_SCALE 2
+#define MASK_TOPAZHP_CR_H264COMP_8X8_TRANSFORM 0x00000001
+#define SHIFT_TOPAZHP_CR_H264COMP_8X8_TRANSFORM 0
+#define MASK_TOPAZHP_CR_H264COMP_CONSTRAINED_INTRA 0x00000002
+#define SHIFT_TOPAZHP_CR_H264COMP_CONSTRAINED_INTRA 1
+#define MASK_TOPAZHP_CR_H264COMP_8X8_CAVLC 0x00000004
+#define SHIFT_TOPAZHP_CR_H264COMP_8X8_CAVLC 2
+#define MASK_TOPAZHP_CR_H264COMP_DEFAULT_SCALING_LIST 0x00000008
+#define SHIFT_TOPAZHP_CR_H264COMP_DEFAULT_SCALING_LIST 3
+#define MASK_TOPAZHP_CR_H264COMP_ADAPT_ROUND_ENABLE 0x00000010
+#define SHIFT_TOPAZHP_CR_H264COMP_ADAPT_ROUND_ENABLE 4
+#define MASK_TOPAZHP_CR_H264COMP_VIDEO_CONF_ENABLE 0x00000020
+#define SHIFT_TOPAZHP_CR_H264COMP_VIDEO_CONF_ENABLE 5
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTER_LUMA_ENABLE 0x00000080
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTER_LUMA_ENABLE 7
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CR_ENABLE 0x00000100
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CR_ENABLE 8
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CB_ENABLE 0x00000200
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CB_ENABLE 9
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_LUMA_ENABLE 0x00000400
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_LUMA_ENABLE 10
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTRA_LUMA_ENABLE 0x00000800
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTRA_LUMA_ENABLE 11
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CR_ENABLE 0x00001000
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CR_ENABLE 12
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CB_ENABLE 0x00002000
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CB_ENABLE 13
+#define MASK_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_LUMA_ENABLE 0x00004000
+#define SHIFT_TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_LUMA_ENABLE 14
+#define MASK_TOPAZHP_CR_H264COMP_LOSSLESS 0x00010000
+#define SHIFT_TOPAZHP_CR_H264COMP_LOSSLESS 16
+#define MASK_TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER 0x00020000
+#define SHIFT_TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER 17
+
+/* The Intra8x8 Pre-filter is performed in Lossless Mode. H.264 standard lossless. */
+#define TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER_FILTER 0x00000001
+
+/* The Intra8x8 Pre-filter is bypassed in Lossless Mode. x264 compatibility mode for lossless. */
+#define TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER_BYPASS 0x00000000
+
+/* Register CR_IPE_CONTROL */
+#define MASK_TOPAZHP_CR_IPE_BLOCKSIZE 0x00000003
+#define SHIFT_TOPAZHP_CR_IPE_BLOCKSIZE 0
+#define MASK_TOPAZHP_CR_IPE_16X8_ENABLE 0x00000004
+#define SHIFT_TOPAZHP_CR_IPE_16X8_ENABLE 2
+#define MASK_TOPAZHP_CR_IPE_8X16_ENABLE 0x00000008
+#define SHIFT_TOPAZHP_CR_IPE_8X16_ENABLE 3
+#define MASK_TOPAZHP_CR_IPE_Y_FINE_SEARCH 0x00000030
+#define SHIFT_TOPAZHP_CR_IPE_Y_FINE_SEARCH 4
+#define MASK_TOPAZHP_CR_IPE_4X4_SEARCH 0x00000040
+#define SHIFT_TOPAZHP_CR_IPE_4X4_SEARCH 6
+#define MASK_TOPAZHP_CR_IPE_LRITC_BOUNDARY 0x00000300
+#define SHIFT_TOPAZHP_CR_IPE_LRITC_BOUNDARY 8
+#define MASK_TOPAZHP_CR_IPE_HIGH_LATENCY 0x00001000
+#define SHIFT_TOPAZHP_CR_IPE_HIGH_LATENCY 12
+#define MASK_TOPAZHP_CR_IPE_MV_NUMBER_RESTRICTION 0x00004000
+#define SHIFT_TOPAZHP_CR_IPE_MV_NUMBER_RESTRICTION 14
+
+/* Register CR_IPE_VECTOR_CLIPPING */
+#define MASK_TOPAZHP_CR_IPE_VECTOR_CLIPPING_X 0x000000FF
+#define SHIFT_TOPAZHP_CR_IPE_VECTOR_CLIPPING_X 0
+#define MASK_TOPAZHP_CR_IPE_VECTOR_CLIPPING_Y 0x0000FF00
+#define SHIFT_TOPAZHP_CR_IPE_VECTOR_CLIPPING_Y 8
+#define MASK_TOPAZHP_CR_IPE_VECTOR_CLIPPING_ENABLED 0x00010000
+#define SHIFT_TOPAZHP_CR_IPE_VECTOR_CLIPPING_ENABLED 16
+
+/* Register CR_JMCOMP_CARC_CONTROL_0 */
+#define MASK_TOPAZHP_CR_CARC_NEG_SCALE 0x3F000000
+#define SHIFT_TOPAZHP_CR_CARC_NEG_SCALE 24
+#define MASK_TOPAZHP_CR_CARC_NEG_RANGE 0x001F0000
+#define SHIFT_TOPAZHP_CR_CARC_NEG_RANGE 16
+#define MASK_TOPAZHP_CR_CARC_POS_SCALE 0x00003F00
+#define SHIFT_TOPAZHP_CR_CARC_POS_SCALE 8
+#define MASK_TOPAZHP_CR_CARC_POS_RANGE 0x0000001F
+#define SHIFT_TOPAZHP_CR_CARC_POS_RANGE 0
+
+/* Register CR_JMCOMP_CARC_CONTROL_1 */
+#define MASK_TOPAZHP_CR_CARC_SHIFT 0x03000000
+#define SHIFT_TOPAZHP_CR_CARC_SHIFT 24
+#define MASK_TOPAZHP_CR_CARC_CUTOFF 0x00F00000
+#define SHIFT_TOPAZHP_CR_CARC_CUTOFF 20
+#define MASK_TOPAZHP_CR_CARC_THRESHOLD 0x0007FF00
+#define SHIFT_TOPAZHP_CR_CARC_THRESHOLD 8
+#define MASK_TOPAZHP_CR_SPE_MVD_CLIP_ENABLE 0x80000000
+#define SHIFT_TOPAZHP_CR_SPE_MVD_CLIP_ENABLE 31
+
+/* Register CR_PRED_COMB_CONTROL */
+#define MASK_TOPAZHP_CR_INTRA4X4_DISABLE 0x00000001
+#define SHIFT_TOPAZHP_CR_INTRA4X4_DISABLE 0
+#define MASK_TOPAZHP_CR_INTRA8X8_DISABLE 0x00000002
+#define SHIFT_TOPAZHP_CR_INTRA8X8_DISABLE 1
+#define MASK_TOPAZHP_CR_INTRA16X16_DISABLE 0x00000004
+#define SHIFT_TOPAZHP_CR_INTRA16X16_DISABLE 2
+#define MASK_TOPAZHP_CR_INTER8X8_DISABLE 0x00000010
+#define SHIFT_TOPAZHP_CR_INTER8X8_DISABLE 4
+#define MASK_TOPAZHP_CR_B_PIC0_DISABLE 0x00000100
+#define SHIFT_TOPAZHP_CR_B_PIC0_DISABLE 8
+#define MASK_TOPAZHP_CR_B_PIC1_DISABLE 0x00000200
+#define SHIFT_TOPAZHP_CR_B_PIC1_DISABLE 9
+#define MASK_TOPAZHP_CR_INTER_INTRA_SCALE_ENABLE 0x00001000
+#define SHIFT_TOPAZHP_CR_INTER_INTRA_SCALE_ENABLE 12
+#define MASK_TOPAZHP_CR_CUMULATIVE_BIASES_ENABLE 0x00000800
+#define SHIFT_TOPAZHP_CR_CUMULATIVE_BIASES_ENABLE 11
+#define MASK_TOPAZHP_CR_SKIPPED_CODED_SCALE_ENABLE 0x00002000
+#define SHIFT_TOPAZHP_CR_SKIPPED_CODED_SCALE_ENABLE 13
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_multicore_regs_old.h b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_multicore_regs_old.h
new file mode 100644
index 000000000000..d5f8b32605c5
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/reg_headers/topazhp_multicore_regs_old.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * firmware header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _REGCONV_H_topazhp_multicore_regs_old_h
+#define _REGCONV_H_topazhp_multicore_regs_old_h
+
+/* Register CR_LAMBDA_DC_TABLE */
+#define MASK_TOPAZHP_CR_TEMPORAL_BLEND 0x001F0000
+#define SHIFT_TOPAZHP_CR_TEMPORAL_BLEND 16
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/target.h b/drivers/media/platform/vxe-vxd/encoder/target.h
new file mode 100644
index 000000000000..8a6244fd465c
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/target.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * target interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#if !defined(__TARGET_H__)
+#define __TARGET_H__
+
+#include <linux/types.h>
+
+#define TARGET_NO_IRQ (999) /* Interrupt number when no interrupt exists */
+
+/*
+ * The memory space types
+ */
+enum mem_space_type {
+ MEMSPACE_REGISTER, /* Memory space is mapped to device registers */
+ MEMSPACE_MEMORY, /* Memory space is mapped to device memory */
+ MEMSPACE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains all information about a device register
+ */
+struct mem_space_reg {
+ unsigned long long addr; /* Base address of device registers */
+ unsigned int size; /* Size of device register block */
+ unsigned int intr_num; /* The interrupt number */
+};
+
+/*
+ * This structure contains all information about a device memory region
+ */
+struct mem_space_mem {
+ unsigned long long addr; /* Base address of memory region */
+ unsigned long long size; /* Size of memory region */
+ unsigned long long guard_band; /* Memory guard band */
+};
+
+/*
+ * This structure contains all information about the device memory space
+ */
+struct mem_space {
+ unsigned char *name; /* Memory space name */
+ enum mem_space_type type; /* Memory space type */
+ union {
+ struct mem_space_reg reg; /* Device register info */
+ struct mem_space_mem mem; /* Device memory region info */
+ };
+
+ unsigned long cpu_addr; /* Cpu KM address for the mem space */
+};
+
+struct target_config {
+ unsigned int num_mem_spaces;
+ struct mem_space *mem_spaces;
+};
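+
+/*
+ * Example (hypothetical values, for illustration only): a register-type
+ * memory space entry could be described as
+ *
+ *   struct mem_space mtx_regs = {
+ *       .name = (unsigned char *)"REG_MTX",
+ *       .type = MEMSPACE_REGISTER,
+ *       .reg = { .addr = 0x4800000, .size = 0x1000,
+ *                .intr_num = TARGET_NO_IRQ },
+ *   };
+ */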
+
+#endif /* __TARGET_H__ */
diff --git a/drivers/media/platform/vxe-vxd/encoder/target_config.h b/drivers/media/platform/vxe-vxd/encoder/target_config.h
new file mode 100644
index 000000000000..97785f9a8e0d
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/target_config.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device specific memory configuration
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __TARGET_CONFIG_H__
+#define __TARGET_CONFIG_H__
+
+#include "target.h"
+
+/* Order MUST match with topaz_mem_space definition */
+enum topaz_mem_space_idx {
+ REG_TOPAZHP_MULTICORE = 0,
+ REG_DMAC,
+ REG_COMMS,
+ REG_MTX,
+ REG_MMU,
+ REG_TOPAZHP_TEST,
+ REG_MTX_RAM,
+ REG_TOPAZHP_CORE_0,
+ REG_TOPAZHP_VLC_CORE_0,
+ REG_TOPAZHP_DEBLOCKER_CORE_0,
+ REG_TOPAZHP_COREEXT_0,
+ REG_TOPAZHP_CORE_1,
+ REG_TOPAZHP_VLC_CORE_1,
+ REG_TOPAZHP_DEBLOCKER_CORE_1,
+ REG_TOPAZHP_COREEXT_1,
+ REG_TOPAZHP_CORE_2,
+ REG_TOPAZHP_VLC_CORE_2,
+ REG_TOPAZHP_DEBLOCKER_CORE_2,
+ REG_TOPAZHP_COREEXT_2,
+ REG_TOPAZHP_CORE_3,
+ REG_TOPAZHP_VLC_CORE_3,
+ REG_TOPAZHP_DEBLOCKER_CORE_3,
+ REG_TOPAZHP_COREEXT_3,
+ FW,
+ SYSMEM,
+ MEMSYSMEM,
+ MEM,
+ FB,
+ MEMDMAC_00,
+ MEMDMAC_01,
+ MEMDMAC_02,
+ MEM_SPACE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_api.c b/drivers/media/platform/vxe-vxd/encoder/topaz_api.c
new file mode 100644
index 000000000000..66e073478cb0
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_api.c
@@ -0,0 +1,3887 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Encoder Core API function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "fw_headers/coreflags.h"
+#include "fw_headers/topazscfwif.h"
+#include "header_gen.h"
+#include "img_errors.h"
+#include "img_mem_man.h"
+#include "lst.h"
+#include "reg_headers/topaz_coreext_regs.h"
+#include "reg_headers/topazhp_core_regs.h"
+#include "reg_headers/topaz_vlc_regs.h"
+#include "reg_headers/topaz_db_regs.h"
+#include "topaz_color_formats.h"
+#include "topaz_device.h"
+#include "topaz_api.h"
+#include "topaz_api_utils.h"
+#include "topazmmu.h"
+#include "vxe_public_regdefs.h"
+#include "img_errors.h"
+
+#define TOPAZ_TIMEOUT_RETRIES (5000000)
+#define TOPAZ_TIMEOUT_WAIT_FOR_SPACE (500)
+
+#define COMM_WB_DATA_BUF_SIZE (64)
+
+/*
+ * All contexts should be able to send as many commands as possible before
+ * waiting for a response. There must be enough command memory buffers for
+ * all applicable commands, that is:
+ * - to fill all source slots
+ * - to supply custom quant data
+ */
+#define TOPAZ_CMD_DATA_BUF_NUM ((MAX_SOURCE_SLOTS_SL + 1) * TOPAZHP_MAX_POSSIBLE_STREAMS)
+#define TOPAZ_CMD_DATA_BUF_SIZE (64)
+#define COMM_CMD_DATA_BUF_SLOT_NONE 0xFF
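+
+/*
+ * Illustrative sizing (hypothetical constant values): with, say,
+ * MAX_SOURCE_SLOTS_SL = 4 and TOPAZHP_MAX_POSSIBLE_STREAMS = 2, the pool
+ * would hold (4 + 1) * 2 = 10 command buffers of 64 bytes each: one per
+ * source slot plus one for custom quant data, per stream.
+ */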
+
+struct topaz_core_context *global_topaz_core_context;
+
+static unsigned char global_cmd_data_busy[TOPAZ_CMD_DATA_BUF_NUM];
+struct vidio_ddbufinfo global_cmd_data_dev_addr; /* Data section */
+struct vidio_ddbufinfo global_cmd_data_info[TOPAZ_CMD_DATA_BUF_NUM]; /* Data section */
+static unsigned char global_pipe_usage[TOPAZHP_MAX_NUM_PIPES] = { 0 };
+
+struct vidio_ddbufinfo *global_wb_data_info;
+static unsigned char is_topaz_core_initialized;
+
+/*
+ * Get a buffer reference
+ */
+static int topaz_get_buffer(struct topaz_stream_context *str_ctx,
+ struct img_buffer *buffer, void **lin_address,
+ unsigned char update_host_memory)
+{
+ if (buffer->lock == NOTDEVICEMEMORY) {
+ *lin_address = buffer->mem_info.cpu_virt;
+ return IMG_SUCCESS;
+ }
+
+ if (buffer->lock == SW_LOCK)
+ return IMG_ERROR_SURFACE_LOCKED;
+
+ if (update_host_memory)
+ topaz_update_host_mem(str_ctx->vxe_ctx, &buffer->mem_info);
+
+ *lin_address = buffer->mem_info.cpu_virt;
+ buffer->lock = SW_LOCK;
+
+ return IMG_SUCCESS;
+}
+
+static int topaz_release_buffer(struct topaz_stream_context *str_ctx,
+ struct img_buffer *buffer, unsigned char update_device_memory)
+{
+ if (buffer->lock == NOTDEVICEMEMORY)
+ return IMG_SUCCESS;
+
+ if (buffer->lock == HW_LOCK)
+ return IMG_ERROR_SURFACE_LOCKED;
+
+ buffer->lock = BUFFER_FREE;
+
+ if (update_device_memory)
+ topaz_update_device_mem(str_ctx->vxe_ctx, &buffer->mem_info);
+
+ return IMG_SUCCESS;
+}
+
+static int topaz_get_cmd_data_buffer(struct vidio_ddbufinfo **mem_info)
+{
+ int index = 0;
+ int res = IMG_SUCCESS;
+
+ mutex_lock_nested(global_topaz_core_context->mutex, SUBCLASS_TOPAZ_API);
+
+ do {
+ if (!global_cmd_data_busy[index])
+ break;
+ index++;
+ } while (index < ARRAY_SIZE(global_cmd_data_info));
+
+ if (index == ARRAY_SIZE(global_cmd_data_info)) {
+ res = IMG_ERROR_UNEXPECTED_STATE;
+ } else {
+ global_cmd_data_busy[index] = TRUE;
+ *mem_info = &global_cmd_data_info[index];
+ }
+
+ mutex_unlock((struct mutex *)global_topaz_core_context->mutex);
+
+ return res;
+}
+
+static int topaz_release_cmd_data_buffer(struct vidio_ddbufinfo *mem_info)
+{
+ int index = 0;
+ int res = IMG_ERROR_UNEXPECTED_STATE;
+
+ mutex_lock_nested(global_topaz_core_context->mutex, SUBCLASS_TOPAZ_API);
+
+ do {
+ if (mem_info == &global_cmd_data_info[index]) {
+ global_cmd_data_busy[index] = FALSE;
+ res = IMG_SUCCESS;
+ break;
+ }
+ index++;
+ } while (index < ARRAY_SIZE(global_cmd_data_info));
+
+ mutex_unlock((struct mutex *)global_topaz_core_context->mutex);
+
+ return res;
+}
+
+/*
+ * Retrieve the coded buffer contents for a package and lock its header buffer
+ */
+static int get_coded_buffer(struct topaz_stream_context *str_ctx, void **lin_address,
+ unsigned char update_host_memory, unsigned char coded_package_idx)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ unsigned char coded_buffer_idx;
+ unsigned char found = FALSE;
+ unsigned int *address;
+ struct coded_data_hdr *coded_datahdr = NULL;
+ unsigned int offset_buffer_header = 0, offset_coded_buffer = 0;
+ /* Tells if all the slices have been retrieved */
+ unsigned char all_slice_retrieved = FALSE;
+ /* Tells if we have reached the last coded buffer used or not */
+ unsigned char slice_break = FALSE;
+ /* Tells if we are at the beginning of a slice or not */
+ unsigned char new_coded_header = TRUE;
+ /* Number of bytes written, as reported by the current coded header */
+ unsigned int total_byte_written = 0;
+ unsigned int coded_slices_so_far = 0;
+ unsigned int coded_slices_in_buffer = 0;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->coded_package[coded_package_idx]->header_buffer->lock == SW_LOCK)
+ return IMG_ERROR_UNDEFINED;
+
+ /* Retrieve the FW Package memory. Get linear address */
+ video->coded_package[coded_package_idx]->mtx_info.coded_package_fw =
+ (struct coded_package_dma_info *)(&video->coded_package[coded_package_idx]->mtx_info
+ .code_package_fw_buffer->mem_info);
+
+ if (update_host_memory) {
+ /* Go through all the coded buffers */
+ for (coded_buffer_idx = 0; coded_buffer_idx < MAX_CODED_BUFFERS_PER_PACKAGE;
+ coded_buffer_idx++) {
+ /* Reset the Offset */
+ offset_coded_buffer = 0;
+ do {
+ if (new_coded_header) { /* beginning of a slice */
+ slice_break = FALSE;
+ /* Get the coded header information */
+ *lin_address = video->coded_package
+ [coded_package_idx]->header_buffer->mem_info.cpu_virt;
+ address = *lin_address;
+ /* Getting the nth buffer header */
+ coded_datahdr = (struct coded_data_hdr *)(address +
+ (offset_buffer_header / 4));
+ total_byte_written = coded_datahdr->bytes_written;
+ coded_slices_so_far =
+ F_DECODE(coded_datahdr->extra_feedback,
+ CODED_SLICES_SO_FAR);
+ coded_slices_in_buffer =
+ F_DECODE(coded_datahdr->extra_feedback,
+ CODED_SLICES_IN_BUFFER);
+
+ /* Increment the offset in the coded header information
+ * buffer in order to point on the next header
+ */
+ offset_buffer_header += CODED_BUFFER_INFO_SECTION_SIZE;
+ }
+
+ if (!new_coded_header) {
+ /* Retrieve the last coded data */
+ offset_coded_buffer = ALIGN_16(offset_coded_buffer +
+ total_byte_written);
+ slice_break = TRUE;
+ /* On next loop we will be at the start of a new slice */
+ new_coded_header = TRUE;
+ } else {
+ /*
+ * New slice: read all the bytes written for this slice, then
+ * advance past what we read to the next 16-byte-aligned address
+ */
+ offset_coded_buffer =
+ ALIGN_16(offset_coded_buffer +
+ coded_datahdr->bytes_written);
+ if (F_DECODE(coded_datahdr->extra_feedback,
+ CODED_SLICES_SO_FAR) ==
+ F_DECODE(coded_datahdr->extra_feedback,
+ CODED_SLICES_IN_BUFFER)) {
+ /* We now have all the slices for this coded buffer,
+ * we should not try to read further.
+ */
+ all_slice_retrieved = TRUE;
+ break;
+ }
+ }
+ } while (coded_slices_so_far != coded_slices_in_buffer);
+
+ if (all_slice_retrieved || slice_break) {
+ /* If we are NOT in the middle of a slice */
+ found = TRUE;
+ /* We lock this last buffer */
+ video->coded_package[coded_package_idx]->coded_buffer
+ [coded_buffer_idx]->lock = SW_LOCK;
+ /* This function will do nothing if -debugCRCs (1 or 2) has not
+ * been specified on the command line
+ */
+ break;
+ }
+ }
+
+ if (!found)
+ topaz_update_host_mem(str_ctx->vxe_ctx, &video->coded_package
+ [coded_package_idx]->header_buffer->mem_info);
+ }
+
+ /* Address of the first header if all buffers finish in the middle of a
+ * slice or if !update_host_memory; address of the last read header
+ * otherwise
+ */
+ *lin_address = video->coded_package[coded_package_idx]->header_buffer->mem_info.cpu_virt;
+ /* Lock-it */
+ video->coded_package[coded_package_idx]->header_buffer->lock = SW_LOCK;
+
+ return IMG_SUCCESS;
+}
+
+static void combine_feedback(struct topaz_stream_context *str_ctx,
+ unsigned char active_coded_package_idx, unsigned int *feedback,
+ unsigned int *extra_feedback, unsigned int *bytes_coded)
+{
+ struct img_enc_context *enc = str_ctx->enc_ctx;
+ struct coded_data_hdr *coded_datahdr;
+ unsigned int offset = 0;
+ unsigned int min_bu = 0xFFFFFFFF;
+ unsigned int coded_bytes = 0;
+ unsigned int bu;
+ unsigned int coded_slices_so_far;
+ unsigned int coded_slices_in_buffer;
+
+ do {
+ /* we should be able to rely on the linear pointer here
+ * as the coded data header should have been updated.
+ */
+ coded_datahdr = (struct coded_data_hdr *)((unsigned long)(enc->video->coded_package
+ [active_coded_package_idx]->header_buffer->mem_info.cpu_virt) +
+ offset);
+
+ IMG_DBG_ASSERT(coded_datahdr);
+ if (!coded_datahdr)
+ return;
+
+ bu = F_DECODE(coded_datahdr->feedback, CODED_FIRST_BU);
+ coded_slices_so_far = F_DECODE(coded_datahdr->extra_feedback, CODED_SLICES_SO_FAR);
+ coded_slices_in_buffer = F_DECODE(coded_datahdr->extra_feedback,
+ CODED_SLICES_IN_BUFFER);
+
+ if (bu < min_bu)
+ min_bu = bu;
+
+ coded_bytes += coded_datahdr->bytes_written;
+ offset += CODED_BUFFER_INFO_SECTION_SIZE;
+ } while (coded_slices_so_far != coded_slices_in_buffer);
+
+ *bytes_coded = coded_bytes;
+ *feedback = F_INSERT(coded_datahdr->feedback, min_bu, CODED_FIRST_BU);
+ *extra_feedback = coded_datahdr->extra_feedback;
+}
+
+/*
+ * Move around the reconstructed data and maintain the POC-ordered list used
+ * for frame reordering: a frame is queued until all frames with lower POCs
+ * have been seen, then the whole consecutive run is flushed (e.g. if frames
+ * complete in POC order 0, 2, 1, the node for POC 2 is held back and flushed
+ * together with POC 1).
+ */
+static void process_reconstructed(struct topaz_stream_context *str_ctx, unsigned char is_coded,
+ enum img_frame_type frame_type, struct list_item **recon_list)
+{
+ struct img_video_context *video = str_ctx->enc_ctx->video;
+ unsigned char *tmp_buffer;
+ unsigned short width, height;
+ struct list_item *new_item;
+ struct img_recon_node *new_node;
+ struct list_item *current_item;
+
+ *recon_list = NULL;
+
+ if (!video->output_reconstructed)
+ return;
+
+ /* Create new reconstructed node */
+ new_item = kzalloc(sizeof(*new_item), GFP_KERNEL);
+ if (!new_item)
+ return;
+
+ new_item->data = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_item->data) {
+ kfree(new_item);
+ new_item = NULL;
+ return;
+ }
+
+ new_node = (struct img_recon_node *)new_item->data;
+
+ if (is_coded) {
+ topaz_update_host_mem(str_ctx->vxe_ctx, video->recon_buffer);
+ tmp_buffer = (unsigned char *)video->recon_buffer->cpu_virt;
+ width = ALIGN_64(video->width);
+ height = ALIGN_64(video->frame_height);
+
+ new_node->buffer = kzalloc(width * height * 3 / 2, GFP_KERNEL);
+ if (!new_node->buffer) {
+ kfree(new_item->data);
+ kfree(new_item);
+ new_item = NULL;
+ new_node = NULL;
+ return;
+ }
+ memcpy(new_node->buffer, tmp_buffer, width * height * 3 / 2);
+
+ } else {
+ new_node->buffer = NULL;
+ }
+ new_node->poc = video->recon_poc;
+
+ /* Add new node to the queue */
+ if (!video->ref_frame) {
+ /* First element */
+ new_item->next = NULL;
+ video->ref_frame = new_item;
+ } else if (new_node->poc == 0) {
+ /* First element after aborted sequence */
+ current_item = video->ref_frame;
+
+ while (current_item->next)
+ current_item = current_item->next;
+
+ /* Insert at end */
+ new_item->next = NULL;
+ current_item->next = new_item;
+ } else {
+ struct img_recon_node *head_node = (struct img_recon_node *)video->ref_frame->data;
+
+ if (head_node->poc > new_node->poc) {
+ /* Insert at start */
+ new_item->next = video->ref_frame;
+ video->ref_frame = new_item;
+ } else {
+ struct img_recon_node *next_node = NULL;
+
+ current_item = video->ref_frame;
+ while (current_item->next) {
+ next_node = (struct img_recon_node *)current_item->next->data;
+
+ if (next_node->poc > new_node->poc) {
+ /* Insert between current and next */
+ new_item->next = current_item->next;
+ current_item->next = new_item;
+ break;
+ }
+ current_item = current_item->next;
+ }
+
+ if (!current_item->next) {
+ /* Insert at end */
+ new_item->next = NULL;
+ current_item->next = new_item;
+ }
+ }
+ }
+
+ if (video->next_recon == 0) {
+ video->next_recon++;
+ /* Flush all frames */
+ *recon_list = video->ref_frame;
+ video->ref_frame = NULL;
+ } else if (new_node->poc == video->next_recon) {
+ struct list_item *flush_tail = video->ref_frame;
+ struct img_recon_node *next_node;
+
+ video->next_recon++;
+
+ /* Find all flushable frames */
+ while (flush_tail->next) {
+ next_node = (struct img_recon_node *)flush_tail->next->data;
+
+ /* Flushing sequence ends when POCs no longer match */
+ if (next_node->poc != video->next_recon)
+ break;
+
+ video->next_recon++;
+
+ flush_tail = flush_tail->next;
+ }
+
+ /* Flush consecutive sequence */
+ *recon_list = video->ref_frame;
+
+ /* Set new head */
+ video->ref_frame = flush_tail->next;
+
+ /* Separate sequences */
+ flush_tail->next = NULL;
+ }
+}
+
+int topaz_process_message(struct topaz_stream_context *str_ctx, struct mtx_tohost_msg tohost_msg)
+{
+ struct driver_tohost_msg *driver_msg;
+ struct list_item *current_el = NULL;
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ struct list_item *message_list = NULL;
+ unsigned int index = 0;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ /* add a new element */
+ current_el = kzalloc(sizeof(*current_el), GFP_KERNEL);
+ if (!current_el)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ current_el->data = kzalloc(sizeof(*driver_msg), GFP_KERNEL);
+ if (!current_el->data) {
+ kfree(current_el);
+ current_el = NULL;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* adding to head */
+ current_el->next = message_list;
+ message_list = current_el;
+
+ driver_msg = (struct driver_tohost_msg *)current_el->data;
+ driver_msg->cmd_id = tohost_msg.cmd_id;
+ driver_msg->data = tohost_msg.data;
+ driver_msg->command_data_buf = tohost_msg.command_data_buf;
+
+ switch (tohost_msg.cmd_id) {
+ case MTX_MESSAGE_ACK:
+ driver_msg->input_cmd_id = (enum mtx_cmd_id)F_DECODE(tohost_msg.input_cmd_word,
+ MTX_MSG_CMD_ID);
+ break;
+
+ case MTX_MESSAGE_CODED:
+ {
+ struct coded_data_hdr *coded_datahdr = NULL;
+ unsigned int feedback, extra_feedback;
+ unsigned char active_coded_package_idx;
+ struct img_feedback_element *feedback_struct;
+
+ active_coded_package_idx = tohost_msg.coded_pkg_idx;
+
+ get_coded_buffer(str_ctx, (void **)&coded_datahdr, TRUE,
+ active_coded_package_idx);
+
+ feedback = coded_datahdr->feedback;
+ extra_feedback = coded_datahdr->extra_feedback;
+
+ /* detect the FrameNum of the coded buffer */
+ feedback_struct = (struct img_feedback_element *)&driver_msg->feedback;
+
+ combine_feedback(str_ctx, active_coded_package_idx, &feedback, &extra_feedback,
+ &feedback_struct->bytes_coded);
+
+ feedback_struct->coded_buffer_count = F_DECODE(extra_feedback,
+ CODED_BUFFER_NUMBER_USED);
+
+ /* Give the header buffer to the feedback structure */
+ feedback_struct->coded_package = video->coded_package[active_coded_package_idx];
+ feedback_struct->active_coded_package_idx = active_coded_package_idx;
+ /* update this frame, using the info from the coded buffer */
+ feedback_struct->coded_package->coded_buffer[feedback_struct->coded_slot_num] =
+ video->coded_package[active_coded_package_idx]->coded_buffer[feedback_struct
+ ->coded_slot_num];
+
+ feedback_struct->first_bu = F_DECODE(feedback, CODED_FIRST_BU);
+ feedback_struct->storage_frame_num = F_DECODE(feedback, CODED_STORAGE_FRAME_NUM);
+ feedback_struct->entire_frame = F_DECODE(feedback, CODED_ENTIRE_FRAME);
+ feedback_struct->is_skipped = F_DECODE(feedback, CODED_IS_SKIPPED);
+ feedback_struct->is_coded = F_DECODE(feedback, CODED_IS_CODED);
+ feedback_struct->recon_idx = F_DECODE(feedback, CODED_RECON_IDX);
+ feedback_struct->source_slot = F_DECODE(feedback, CODED_SOURCE_SLOT);
+ feedback_struct->frame_type = (enum img_frame_type)F_DECODE
+ (feedback, CODED_FRAME_TYPE);
+ feedback_struct->slice_num = F_DECODE(feedback, CODED_SLICE_NUM);
+ feedback_struct->poc = video->source_slot_poc[feedback_struct->source_slot];
+
+ feedback_struct->slices_in_buffer = F_DECODE(extra_feedback,
+ CODED_SLICES_IN_BUFFER);
+ feedback_struct->field = F_DECODE(extra_feedback, CODED_FIELD);
+ feedback_struct->patched_recon = F_DECODE(extra_feedback,
+ CODED_PATCHED_RECON);
+ feedback_struct->bytes_coded = coded_datahdr->bytes_written;
+ feedback_struct->host_ctx = coded_datahdr->host_ctx;
+
+ if (video->highest_storage_number != feedback_struct->storage_frame_num &&
+ video->standard != IMG_STANDARD_H263) {
+ if (feedback_struct->storage_frame_num ==
+ ((video->highest_storage_number + 1) & 0x03)) {
+ /* it is piece of the next frame */
+ video->highest_storage_number = feedback_struct->storage_frame_num;
+ /* retrieve next WB */
+ video->encode_pic_processing--;
+ video->extra_wb_retrieved++;
+ } else if (feedback_struct->storage_frame_num ==
+ ((video->highest_storage_number + 2) & 0x03)) {
+ /* it is piece of the next frame */
+ video->highest_storage_number = feedback_struct->storage_frame_num;
+
+ video->encode_pic_processing -= 2;
+ video->extra_wb_retrieved += 2;
+ }
+ }
+
+ while (index < feedback_struct->coded_buffer_count) {
+ if (video->coded_package
+ [active_coded_package_idx]->coded_buffer[index]->lock == SW_LOCK)
+ /* Unlock coded buffers used*/
+ topaz_release_buffer(str_ctx,
+ (struct img_buffer *)(video->coded_package
+ [active_coded_package_idx]->coded_buffer[index]),
+ FALSE);
+ index++;
+ }
+
+ /* Unlock header buffer */
+ topaz_release_buffer(str_ctx, video->coded_package
+ [feedback_struct->active_coded_package_idx]->header_buffer,
+ FALSE);
+
+ /* Release the coded slot */
+ video->coded_package[feedback_struct->active_coded_package_idx]->busy = FALSE;
+
+ feedback_struct->src_frame = video->source_slot_buff[feedback_struct->source_slot];
+
+ /* Detect the slice number based on the Slice Map and the first BU in a slice */
+ if (feedback_struct->bytes_coded) {
+ struct img_buffer *output_slice_map;
+ unsigned char *src_buffer = NULL;
+ unsigned char slices_per_picture;
+ unsigned short first_bu_in_slice;
+ unsigned char slice_number;
+ unsigned char index;
+ unsigned char slice_size_in_bu[MAX_SLICESPERPIC];
+
+ /* position the start of the slice map */
+ output_slice_map = &video->slice_map[feedback_struct->source_slot];
+
+ topaz_get_buffer(str_ctx, output_slice_map, (void **)&src_buffer, FALSE);
+
+ /* retrieve slices per field */
+ slices_per_picture = *src_buffer;
+ src_buffer++;
+
+ /* retrieve first BU in slices and Slice sizes in BUs */
+ first_bu_in_slice = 0;
+
+ for (index = 0; index < slices_per_picture; index++) {
+ slice_number = src_buffer[index * 2];
+ slice_size_in_bu[slice_number] = src_buffer[index * 2 + 1];
+
+ first_bu_in_slice += slice_size_in_bu[slice_number];
+ }
+ topaz_release_buffer(str_ctx, output_slice_map, FALSE);
+
+ feedback_struct->slices_per_picture = slices_per_picture;
+ }
+
+ if (feedback_struct->entire_frame) {
+ /* we encoded the entire frame */
+ video->frames_encoded++;
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("FRAMES_CODED[%d]\n", video->frames_encoded);
+#endif
+
+ if (feedback_struct->coded_package->coded_buffer[0])
+ /* Send callback for coded_buffer ready */
+ global_topaz_core_context->vxe_str_processed_cb(str_ctx->vxe_ctx,
+ VXE_CB_CODED_BUFF_READY,
+ (void *)(feedback_struct->coded_package->coded_buffer[0]),
+ feedback_struct->bytes_coded, video->frames_encoded);
+
+ if (!str_ctx->vxe_ctx->eos) {
+ if (feedback_struct->src_frame) {
+ /* Send callback for src ready */
+ global_topaz_core_context->vxe_str_processed_cb(
+ str_ctx->vxe_ctx,
+ VXE_CB_SRC_FRAME_RELEASE,
+ (void *)(feedback_struct->src_frame),
+ 0, 0);
+ }
+ }
+ if (video->flush_at_frame > 0 &&
+ video->frames_encoded >= video->flush_at_frame)
+ feedback_struct->last_frame_encoded = TRUE;
+
+ if (feedback_struct->patched_recon && video->patched_recon_buffer) {
+ video->recon_buffer = video->patched_recon_buffer;
+ video->patched_recon_buffer = NULL;
+ } else {
+ video->recon_buffer =
+ &video->recon_pictures[feedback_struct->recon_idx];
+ }
+ video->recon_poc = feedback_struct->poc;
+
+ video->frame_type = feedback_struct->frame_type;
+
+ process_reconstructed(str_ctx, feedback_struct->is_coded, video->frame_type,
+ &feedback_struct->recon_list);
+
+ /* If there are more frames to be encoded, release the source slot */
+ if (video->frame_count == 0 ||
+ video->encode_requested < video->frame_count)
+ video->source_slot_buff[feedback_struct->source_slot] = NULL;
+
+ if (!video->extra_wb_retrieved) {
+ video->encode_pic_processing--;
+ video->highest_storage_number =
+ (video->highest_storage_number + 1) & 0x03;
+ } else {
+ video->extra_wb_retrieved--;
+ }
+ } else {
+ if (feedback_struct->coded_package->coded_buffer[0])
+ /* Send callback for coded_buffer ready */
+ global_topaz_core_context->vxe_str_processed_cb(str_ctx->vxe_ctx,
+ VXE_CB_CODED_BUFF_READY,
+ (void *)(feedback_struct->coded_package->coded_buffer[0]),
+ feedback_struct->bytes_coded, video->frames_encoded);
+ }
+
+ if (feedback_struct->entire_frame &&
+ (video->enable_sel_stats_flags & ESF_FIRST_STAGE_STATS))
+ feedback_struct->motion_search_statistics_buf =
+ &video->firstpass_out_param_buf[feedback_struct->source_slot];
+ else
+ feedback_struct->motion_search_statistics_buf = NULL;
+
+ if (video->frame_count > 0 && video->frames_encoded >= video->frame_count)
+ feedback_struct->last_frame_encoded = TRUE;
+
+ if (feedback_struct->entire_frame &&
+ (video->enable_sel_stats_flags & ESF_MP_BEST_MB_DECISION_STATS ||
+ video->enable_sel_stats_flags & ESF_MP_BEST_MOTION_VECTOR_STATS))
+ feedback_struct->best_multipass_statistics_buf =
+ &video->firstpass_out_best_multipass_param_buf
+ [feedback_struct->source_slot];
+ else
+ feedback_struct->best_multipass_statistics_buf = NULL;
+ break;
+ }
+ default:
+ break;
+ }
+
+ kfree(current_el->data);
+ kfree(current_el);
+
+ return IMG_SUCCESS;
+}
+
+void handle_encoder_firmware_response(struct img_writeback_msg *wb_msg, void *priv)
+{
+ struct topaz_stream_context *str_ctx;
+ struct mtx_tohost_msg tohost_msg;
+ int index;
+ unsigned int cmd_buf_slot = COMM_CMD_DATA_BUF_SLOT_NONE;
+ unsigned int *cmdbuf_devaddr;
+
+ str_ctx = (struct topaz_stream_context *)priv;
+
+ if (!str_ctx)
+ return;
+
+ memset(&tohost_msg, 0, sizeof(tohost_msg));
+ tohost_msg.cmd_id = (enum mtx_message_id)F_DECODE(wb_msg->cmd_word, MTX_MSG_MESSAGE_ID);
+
+ switch (tohost_msg.cmd_id) {
+ case MTX_MESSAGE_ACK:
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("MTX_MESSAGE_ACK received\n");
+#endif
+
+ tohost_msg.wb_val = wb_msg->writeback_val;
+ tohost_msg.input_cmd_word = wb_msg->cmd_word;
+ tohost_msg.data = wb_msg->data;
+ break;
+ case MTX_MESSAGE_CODED:
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("MTX_MESSAGE_CODED Received\n");
+#endif
+ tohost_msg.input_cmd_word = wb_msg->cmd_word;
+ tohost_msg.coded_pkg_idx = wb_msg->coded_package_consumed_idx;
+ break;
+ default:
+ break;
+ }
+
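+ /*
+ * The writeback carries the device address of the command data buffer
+ * the firmware consumed; scan the global slot table for a matching
+ * address so the slot can be released.
+ */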
+ cmdbuf_devaddr = global_cmd_data_dev_addr.cpu_virt;
+
+ for (index = 0; index < TOPAZ_CMD_DATA_BUF_NUM; index++) {
+ if (*cmdbuf_devaddr == wb_msg->extra_data) {
+ /* Input cmd buffer found */
+ cmd_buf_slot = index;
+ break;
+ }
+ cmdbuf_devaddr++;
+ }
+
+ if (cmd_buf_slot != COMM_CMD_DATA_BUF_SLOT_NONE) {
+ tohost_msg.command_data_buf = &global_cmd_data_info[cmd_buf_slot];
+ topaz_release_cmd_data_buffer(tohost_msg.command_data_buf);
+ } else {
+ tohost_msg.command_data_buf = NULL;
+ }
+
+ mutex_lock_nested(str_ctx->vxe_ctx->mutex, SUBCLASS_VXE_V4L2);
+ topaz_process_message(str_ctx, tohost_msg);
+ mutex_unlock(str_ctx->vxe_ctx->mutex);
+}
+
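+/*
+ * Patch the device-virtual address of @src (plus @src_offset) into the
+ * host mapping of @dest at @dest_offset, making the buffer reachable
+ * from the firmware's address space.
+ */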
+static inline void populate_firmware_message(struct vidio_ddbufinfo *dest, unsigned int dest_offset,
+ struct vidio_ddbufinfo *src, unsigned int src_offset)
+{
+ *(unsigned int *)((unsigned long)dest->cpu_virt + dest_offset) =
+ src->dev_virt + src_offset;
+}
+
+/*
+ * init_topaz_core
+ */
+int init_topaz_core(void *device_handle, unsigned int *num_pipes,
+ unsigned int mmu_flags, void *callback)
+{
+ unsigned int index;
+
+ if (is_topaz_core_initialized)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ is_topaz_core_initialized = TRUE;
+
+ global_topaz_core_context = kzalloc(sizeof(*global_topaz_core_context), GFP_KERNEL);
+ if (!global_topaz_core_context) {
+ is_topaz_core_initialized = FALSE;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Initialise device context. */
+ global_topaz_core_context->dev_handle = (struct topaz_dev_ctx *)device_handle;
+ global_topaz_core_context->vxe_str_processed_cb = (vxe_cb)callback;
+
+ lst_init(&global_topaz_core_context->topaz_stream_list);
+
+ *num_pipes = topazdd_get_num_pipes(device_handle);
+
+ /* allocate memory for HighCmd FIFO data section */
+ if (topaz_mmu_alloc(global_topaz_core_context->dev_handle->topaz_mmu_ctx.mmu_context_handle,
+ global_topaz_core_context->dev_handle->vxe_arg, MMU_GENERAL_HEAP_ID,
+ 1, (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ 4 * TOPAZ_CMD_DATA_BUF_NUM, 64, &global_cmd_data_dev_addr)) {
+ IMG_DBG_ASSERT("Global command data info buff alloc failed\n" != NULL);
+ kfree(global_topaz_core_context);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (index = 0; index < ARRAY_SIZE(global_cmd_data_info); index++) {
+ if (topaz_mmu_alloc
+ (global_topaz_core_context->dev_handle->topaz_mmu_ctx.mmu_context_handle,
+ global_topaz_core_context->dev_handle->vxe_arg, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ TOPAZ_CMD_DATA_BUF_SIZE, 64, &global_cmd_data_info[index])) {
+ IMG_DBG_ASSERT("Global command data info buff alloc failed\n" != NULL);
+ topaz_mmu_free(global_topaz_core_context->dev_handle->vxe_arg,
+ &global_cmd_data_dev_addr);
+ kfree(global_topaz_core_context);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ populate_firmware_message(&global_cmd_data_dev_addr, 4 * index,
+ &global_cmd_data_info[index], 0);
+ global_cmd_data_busy[index] = FALSE;
+ }
+
+ /* Lock protecting the critical sections in TopazAPI */
+ global_topaz_core_context->mutex = kzalloc(sizeof(*global_topaz_core_context->mutex),
+ GFP_KERNEL);
+ if (!global_topaz_core_context->mutex) {
+ for (index = 0; index < ARRAY_SIZE(global_cmd_data_info); index++)
+ topaz_mmu_free(global_topaz_core_context->dev_handle->vxe_arg,
+ &global_cmd_data_info[index]);
+ topaz_mmu_free(global_topaz_core_context->dev_handle->vxe_arg,
+ &global_cmd_data_dev_addr);
+ kfree(global_topaz_core_context);
+ is_topaz_core_initialized = FALSE;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ mutex_init(global_topaz_core_context->mutex);
+ return IMG_SUCCESS;
+}
+
+/*
+ * deinit_topaz_core
+ */
+int deinit_topaz_core(void)
+{
+ unsigned int index;
+
+ mutex_destroy(global_topaz_core_context->mutex);
+ kfree(global_topaz_core_context->mutex);
+ global_topaz_core_context->mutex = NULL;
+
+ if (topaz_mmu_free(global_topaz_core_context->dev_handle->vxe_arg,
+ &global_cmd_data_dev_addr))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ for (index = 0; index < ARRAY_SIZE(global_cmd_data_info); index++)
+ if (topaz_mmu_free(global_topaz_core_context->dev_handle->vxe_arg,
+ &global_cmd_data_info[index]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ kfree(global_topaz_core_context);
+ global_topaz_core_context = NULL;
+ is_topaz_core_initialized = FALSE;
+
+ return IMG_SUCCESS;
+}
+
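+/*
+ * Pack a single GOP-structure entry: reference flag, position in display
+ * order, the hierarchy levels of the two references and the frame type.
+ * *level returns the level this frame lands on, i.e. one more than the
+ * deeper of its two references.
+ */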
+static unsigned short create_gop_frame(unsigned char *level, unsigned char reference,
+ unsigned char pos, unsigned char ref0_level,
+ unsigned char ref1_level, enum img_frame_type frame_type)
+{
+ *level = max(ref0_level, ref1_level) + 1;
+
+ return F_ENCODE(reference, GOP_REFERENCE) |
+ F_ENCODE(pos, GOP_POS) |
+ F_ENCODE(ref0_level, GOP_REF0) |
+ F_ENCODE(ref1_level, GOP_REF1) |
+ F_ENCODE(frame_type, GOP_FRAMETYPE);
+}
+
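+/*
+ * Recursively bisect the interval (ref0, ref1): emit the middle frame as
+ * a B-frame at the next hierarchy level, then recurse into both halves
+ * until only adjacent frames remain.
+ */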
+static void gop_split(unsigned short **gop_structure, signed char ref0,
+ signed char ref1, unsigned char ref0_level,
+ unsigned char ref1_level, unsigned char pic_on_level[])
+{
+ unsigned char distance = ref1 - ref0;
+ unsigned char position = ref0 + (distance >> 1);
+ unsigned char level;
+
+ if (distance == 1)
+ return;
+
+ /* mark middle as this level */
+ (*gop_structure)++;
+ **gop_structure = create_gop_frame(&level, distance >= 3, position, ref0_level, ref1_level,
+ IMG_INTER_B);
+ pic_on_level[level]++;
+
+ if (distance >= 4)
+ gop_split(gop_structure, ref0, position, ref0_level, level, pic_on_level);
+
+ if (distance >= 3)
+ gop_split(gop_structure, position, ref1, level, ref1_level, pic_on_level);
+}
+
+static void mini_gop_generate_hierarchical(unsigned short gop_structure[],
+ unsigned int bframe_count,
+ unsigned int ref_spacing,
+ unsigned char pic_on_level[])
+{
+ unsigned char level;
+
+ gop_structure[0] = create_gop_frame(&level, TRUE, bframe_count, ref_spacing, 0,
+ IMG_INTER_P);
+ pic_on_level[level]++;
+
+ gop_split(&gop_structure, -1, bframe_count, ref_spacing, ref_spacing + 1, pic_on_level);
+}
+
+static void mini_gop_generate_flat(unsigned short gop_structure[],
+ unsigned int bframe_count,
+ unsigned int ref_spacing,
+ unsigned char pic_on_level[])
+{
+ /* B B B B P */
+ unsigned char encode_order_pos;
+ unsigned char level;
+
+ gop_structure[0] = create_gop_frame(&level, TRUE, MAX_BFRAMES, ref_spacing, 0,
+ IMG_INTER_P);
+ pic_on_level[level]++;
+
+ for (encode_order_pos = 1; encode_order_pos < MAX_GOP_SIZE; encode_order_pos++) {
+ gop_structure[encode_order_pos] = create_gop_frame(&level,
+ FALSE, encode_order_pos - 1,
+ ref_spacing, ref_spacing + 1,
+ IMG_INTER_B);
+ pic_on_level[level] = bframe_count;
+ }
+}
+
+/*
+ * Create the MTX-side encoder context
+ */
+static int topaz_video_create_mtx_context(struct topaz_stream_context *str_ctx,
+ struct img_video_params *video_params)
+{
+ struct img_video_context *video;
+ struct img_enc_context *enc;
+ int index, i, j;
+ void *mtx_enc_context_mem;
+ struct img_mtx_video_context *mtx_enc_context;
+ unsigned char flag;
+ unsigned int max_cores;
+ unsigned int bit_limit;
+ unsigned int vert_mv_limit;
+ unsigned int packed_strides;
+ unsigned short *gop_structure;
+
+ max_cores = topazdd_get_num_pipes(global_topaz_core_context->dev_handle);
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ mtx_enc_context = (struct img_mtx_video_context *)(video->mtx_enc_ctx_mem.cpu_virt);
+
+ /* clear the context region */
+ memset(mtx_enc_context, 0x00, MTX_CONTEXT_SIZE);
+
+ mtx_enc_context_mem = (void *)(&enc->video->mtx_enc_ctx_mem);
+
+ mtx_enc_context->initial_qp_i = video->rc_params.initial_qp_i;
+ mtx_enc_context->initial_qp_p = video->rc_params.initial_qp_p;
+ mtx_enc_context->initial_qp_b = video->rc_params.initial_qp_b;
+
+ mtx_enc_context->cqp_offset = (video->rc_params.qcp_offset & 0x1f) |
+ ((video->rc_params.qcp_offset & 0x1f) << 8);
+ mtx_enc_context->standard = video->standard;
+ mtx_enc_context->width_in_mbs = video->width >> 4;
+ mtx_enc_context->picture_height_in_mbs = video->picture_height >> 4;
+
+ mtx_enc_context->kick_size = video->kick_size;
+ mtx_enc_context->kicks_per_bu = video->kicks_per_bu;
+ mtx_enc_context->kicks_per_picture = (mtx_enc_context->width_in_mbs *
+ mtx_enc_context->picture_height_in_mbs) / video->kick_size;
+
+ mtx_enc_context->output_reconstructed = video->output_reconstructed;
+
+ mtx_enc_context->vop_time_resolution = video->vop_time_resolution;
+
+ mtx_enc_context->max_slices_per_picture = video->slices_per_picture;
+
+ mtx_enc_context->is_interlaced = video->is_interlaced;
+ mtx_enc_context->top_field_first = video->top_field_first;
+ mtx_enc_context->arbitrary_so = video->arbitrary_so;
+
+ mtx_enc_context->idr_period = video->idr_period;
+ mtx_enc_context->bframe_count = video->rc_params.bframes;
+ mtx_enc_context->hierarchical = (unsigned char)video->rc_params.hierarchical;
+ mtx_enc_context->intra_loop_cnt = video->intra_cnt;
+ mtx_enc_context->ref_spacing = video_params->ref_spacing;
+
+ mtx_enc_context->debug_crcs = video_params->debug_crcs;
+
+ mtx_enc_context->fw_num_pipes = enc->pipes_to_use;
+ mtx_enc_context->fw_first_pipe = enc->base_pipe;
+ mtx_enc_context->fw_last_pipe = enc->base_pipe + enc->pipes_to_use - 1;
+ mtx_enc_context->fw_pipes_to_use_flags = 0;
+
+ flag = 0x1 << mtx_enc_context->fw_first_pipe;
+ /* Pipes used MUST be contiguous from the BasePipe offset */
+ for (index = 0; index < mtx_enc_context->fw_num_pipes; index++, flag <<= 1)
+ mtx_enc_context->fw_pipes_to_use_flags |= flag;
+
+ mtx_enc_context->format = video_params->format;
+
+ /* copy scaler values to context in case we need them later */
+ video->enable_scaler = video_params->enable_scaler;
+ video->crop_left = video_params->crop_left;
+ video->crop_right = video_params->crop_right;
+ video->crop_top = video_params->crop_top;
+ video->crop_bottom = video_params->crop_bottom;
+ video->source_width = video_params->source_width;
+ video->source_frame_height = video_params->source_frame_height;
+ video->intra_pred_modes = video_params->intra_pred_modes;
+
+ topaz_setup_input_format(video, &mtx_enc_context->scaler_setup);
+ topaz_setup_input_csc(video, &mtx_enc_context->scaler_setup, &mtx_enc_context->csc_setup,
+ video_params->csc_preset);
+
+ mtx_enc_context->enable_mvc = video->enable_mvc;
+ mtx_enc_context->mvc_view_idx = video->mvc_view_idx;
+
+ if (video->standard == IMG_STANDARD_H264)
+ mtx_enc_context->no_sequence_headers = video->no_sequence_headers;
+
+ mtx_enc_context->coded_header_per_slice = video->coded_header_per_slice;
+
+ packed_strides = topaz_get_packed_buffer_strides
+ (video->buffer_stride_bytes, video->format, video_params->enable_scaler,
+ video_params->is_interlaced, video_params->is_interleaved);
+
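+ /*
+ * Repack the Y/UV strides from the picmgmt message fields into the
+ * current-picture stride register layout.
+ */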
+ mtx_enc_context->pic_row_stride_bytes =
+ F_ENCODE(F_DECODE(packed_strides, MTX_MSG_PICMGMT_STRIDE_Y),
+ TOPAZHP_CR_CUR_PIC_LUMA_STRIDE) |
+ F_ENCODE(F_DECODE(packed_strides, MTX_MSG_PICMGMT_STRIDE_UV),
+ TOPAZHP_CR_CUR_PIC_CHROMA_STRIDE);
+
+ mtx_enc_context->rc_mode = video->rc_params.rc_mode;
+ if (mtx_enc_context->rc_mode == IMG_RCMODE_VCM) {
+ mtx_enc_context->rc_vcm_mode = video->rc_params.rc_vcm_mode;
+ mtx_enc_context->rc_cfs_max_margin_perc = video->rc_params.rc_cfs_max_margin_perc;
+ }
+
+ mtx_enc_context->disable_bit_stuffing = (unsigned char)video_params->disable_bit_stuffing;
+
+ mtx_enc_context->first_pic = TRUE;
+
+ /*Content Adaptive Rate Control Parameters*/
+ if (video_params->carc) {
+ mtx_enc_context->jmcomp_rc_reg0 =
+ F_ENCODE(video_params->carc_pos_range, TOPAZHP_CR_CARC_POS_RANGE) |
+ F_ENCODE(video_params->carc_pos_scale, TOPAZHP_CR_CARC_POS_SCALE) |
+ F_ENCODE(video_params->carc_neg_range, TOPAZHP_CR_CARC_NEG_RANGE) |
+ F_ENCODE(video_params->carc_neg_scale, TOPAZHP_CR_CARC_NEG_SCALE);
+
+ mtx_enc_context->jmcomp_rc_reg1 =
+ F_ENCODE(video_params->carc_threshold, TOPAZHP_CR_CARC_THRESHOLD) |
+ F_ENCODE(video_params->carc_cutoff, TOPAZHP_CR_CARC_CUTOFF) |
+ F_ENCODE(video_params->carc_shift, TOPAZHP_CR_CARC_SHIFT);
+ } else {
+ mtx_enc_context->jmcomp_rc_reg0 = 0;
+ mtx_enc_context->jmcomp_rc_reg1 = 0;
+ }
+
+ mtx_enc_context->mv_clip_config =
+ F_ENCODE(video_params->no_offscreen_mv, TOPAZHP_CR_MVCALC_RESTRICT_PICTURE);
+
+ mtx_enc_context->lritc_cache_chunk_config = 0;
+
+ mtx_enc_context->ipcm_0_config =
+ F_ENCODE(enc->video->cabac_bin_flex, TOPAZ_VLC_CR_CABAC_BIN_FLEX) |
+ F_ENCODE(DEFAULT_CABAC_DB_MARGIN, TOPAZ_VLC_CR_CABAC_DB_MARGIN);
+
+ bit_limit = 3100;
+
+ mtx_enc_context->ipcm_1_config = F_ENCODE(bit_limit, TOPAZ_VLC_CR_IPCM_THRESHOLD) |
+ F_ENCODE(enc->video->cabac_bin_limit, TOPAZ_VLC_CR_CABAC_BIN_LIMIT);
+
+ /* leave alone until high profile and constrained modes are defined. */
+ mtx_enc_context->h264_comp_control = F_ENCODE((video->cabac_enabled ? 0 : 1),
+ TOPAZHP_CR_H264COMP_8X8_CAVLC);
+ mtx_enc_context->h264_comp_control |=
+ F_ENCODE(video_params->use_default_scaling_list ? 1 : 0,
+ TOPAZHP_CR_H264COMP_DEFAULT_SCALING_LIST);
+ mtx_enc_context->h264_comp_control |= F_ENCODE(video->h264_8x8_transform ? 1 : 0,
+ TOPAZHP_CR_H264COMP_8X8_TRANSFORM);
+ mtx_enc_context->h264_comp_control |= F_ENCODE(video->h264_intra_constrained ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CONSTRAINED_INTRA);
+
+ mtx_enc_context->mc_adaptive_rounding_disable = video_params->vp_adaptive_rounding_disable;
+ mtx_enc_context->h264_comp_control |=
+ F_ENCODE(mtx_enc_context->mc_adaptive_rounding_disable ? 0 : 1,
+ TOPAZHP_CR_H264COMP_ADAPT_ROUND_ENABLE);
+
+ if (!mtx_enc_context->mc_adaptive_rounding_disable)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < AR_REG_SIZE; j++)
+ mtx_enc_context->mc_adaptive_rounding_offsets[j][i] =
+ video_params->vp_adaptive_rounding_offsets[j][i];
+
+ if (video->standard == IMG_STANDARD_H264)
+ mtx_enc_context->h264_comp_control |=
+ F_ENCODE(USE_VCM_HW_SUPPORT, TOPAZHP_CR_H264COMP_VIDEO_CONF_ENABLE);
+
+ mtx_enc_context->h264_comp_control |=
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x01 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_LUMA_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x02 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CB_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x04 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTRA_CR_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x08 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_LUMA_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x10 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CB_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x20 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_4X4_INTER_CR_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x40 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTRA_LUMA_ENABLE) |
+ F_ENCODE(video_params->use_custom_scaling_lists & 0x80 ? 1 : 0,
+ TOPAZHP_CR_H264COMP_CUSTOM_QUANT_8X8_INTER_LUMA_ENABLE);
+
+ mtx_enc_context->h264_comp_control |=
+ F_ENCODE(video_params->enable_lossless ? 1 : 0, TOPAZHP_CR_H264COMP_LOSSLESS) |
+ F_ENCODE(video_params->lossless_8x8_prefilter ?
+ TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER_BYPASS :
+ TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER_FILTER,
+ TOPAZHP_CR_H264COMP_LOSSLESS_8X8_PREFILTER);
+
+ mtx_enc_context->h264_comp_intra_pred_modes = 0x3ffff; /* leave at default for now */
+
+ if (video->intra_pred_modes != 0)
+ mtx_enc_context->h264_comp_intra_pred_modes = video->intra_pred_modes;
+
+ mtx_enc_context->pred_comb_control = video->pred_comb_control;
+
+ mtx_enc_context->skip_coded_inter_intra =
+ F_ENCODE(video->inter_intra_index, TOPAZHP_CR_INTER_INTRA_SCALE_IDX) |
+ F_ENCODE(video->coded_skipped_index, TOPAZHP_CR_SKIPPED_CODED_SCALE_IDX);
+
+ if (video->enable_inp_ctrl) {
+ mtx_enc_context->mb_host_ctrl =
+ F_ENCODE(video->enable_host_qp, TOPAZHP_CR_MB_HOST_QP) |
+ F_ENCODE(video->enable_host_bias, TOPAZHP_CR_MB_HOST_SKIPPED_CODED_SCALE) |
+ F_ENCODE(video->enable_host_bias, TOPAZHP_CR_MB_HOST_INTER_INTRA_SCALE);
+ mtx_enc_context->pred_comb_control |= F_ENCODE(1,
+ TOPAZHP_CR_INTER_INTRA_SCALE_ENABLE)
+ | F_ENCODE(1, TOPAZHP_CR_SKIPPED_CODED_SCALE_ENABLE);
+ }
+
+ if (video_params->enable_cumulative_biases)
+ mtx_enc_context->pred_comb_control |=
+ F_ENCODE(1, TOPAZHP_CR_CUMULATIVE_BIASES_ENABLE);
+
+ mtx_enc_context->pred_comb_control |=
+ F_ENCODE((((video->inter_intra_index == 3) && (video->coded_skipped_index == 3))
+ ? 0 : 1), TOPAZHP_CR_INTER_INTRA_SCALE_ENABLE) |
+ F_ENCODE((video->coded_skipped_index == 3 ? 0 : 1),
+ TOPAZHP_CR_SKIPPED_CODED_SCALE_ENABLE);
+
+ mtx_enc_context->deblock_ctrl =
+ F_ENCODE(video->deblock_idc, TOPAZ_DB_CR_DISABLE_DEBLOCK_IDC);
+
+ /* Set up VLC Control Register */
+ mtx_enc_context->vlc_control = 0;
+
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ /* 1 for H.264; note this is inconsistent with the sequencer value */
+ mtx_enc_context->vlc_control |= F_ENCODE(1, TOPAZ_VLC_CR_CODEC);
+ mtx_enc_context->vlc_control |= F_ENCODE(0, TOPAZ_VLC_CR_CODEC_EXTEND);
+ break;
+
+ default:
+ break;
+ }
+
+ if (video->cabac_enabled)
+ /* enable CABAC entropy coding */
+ mtx_enc_context->vlc_control |= F_ENCODE(1, TOPAZ_VLC_CR_CABAC_ENABLE);
+
+ mtx_enc_context->vlc_control |= F_ENCODE(video->is_interlaced ? 1 : 0,
+ TOPAZ_VLC_CR_VLC_FIELD_CODED);
+ mtx_enc_context->vlc_control |= F_ENCODE(video->h264_8x8_transform ? 1 : 0,
+ TOPAZ_VLC_CR_VLC_8X8_TRANSFORM);
+ mtx_enc_context->vlc_control |= F_ENCODE(video->h264_intra_constrained ? 1 : 0,
+ TOPAZ_VLC_CR_VLC_CONSTRAINED_INTRA);
+
+ mtx_enc_context->vlc_slice_control = F_ENCODE(video->rc_params.slice_byte_limit,
+ TOPAZ_VLC_CR_SLICE_SIZE_LIMIT);
+ mtx_enc_context->vlc_slice_mb_control = F_ENCODE(video->rc_params.slice_mb_limit,
+ TOPAZ_VLC_CR_SLICE_MBS_LIMIT);
+
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ vert_mv_limit = 255; /* default to no clipping */
+ if (video->vert_mv_limit)
+ vert_mv_limit = enc->video->vert_mv_limit;
+
+ /* as topaz can only cope with at most 255 (in the register field) */
+ vert_mv_limit = min(255U, vert_mv_limit);
+ mtx_enc_context->ipe_vector_clipping =
+ F_ENCODE(1, TOPAZHP_CR_IPE_VECTOR_CLIPPING_ENABLED) |
+ F_ENCODE(255, TOPAZHP_CR_IPE_VECTOR_CLIPPING_X) |
+ F_ENCODE(vert_mv_limit, TOPAZHP_CR_IPE_VECTOR_CLIPPING_Y);
+
+ mtx_enc_context->spe_mvd_clip_range = F_ENCODE(0, TOPAZHP_CR_SPE_MVD_CLIP_ENABLE);
+ break;
+ default:
+ break;
+ }
+
+ /* Update MV Scaling settings: IDR */
+ memcpy(&mtx_enc_context->mv_settings_idr, &video->mv_settings_idr,
+ sizeof(struct img_mv_settings));
+
+ /* NonB (I or P) */
+ for (i = 0; i <= MAX_BFRAMES; i++)
+ memcpy(&mtx_enc_context->mv_settings_non_b[i], &video->mv_settings_non_b[i],
+ sizeof(struct img_mv_settings));
+
+ /* WEIGHTED PREDICTION */
+ mtx_enc_context->weighted_prediction_enabled = video_params->weighted_prediction;
+ mtx_enc_context->mtx_weighted_implicit_bi_pred = video_params->vp_weighted_implicit_bi_pred;
+
+ /* SEI_INSERTION */
+ mtx_enc_context->insert_hrd_params = video_params->insert_hrd_params;
+ if (mtx_enc_context->insert_hrd_params && !enc->video->rc_params.bits_per_second)
+ /* HRD parameters are meaningless without a bitrate */
+ mtx_enc_context->insert_hrd_params = FALSE;
+
+ if (mtx_enc_context->insert_hrd_params) {
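+ /*
+ * HRD timing uses the 90 kHz time scale; store 90000 / bitrate
+ * as a 32.32 fixed-point value (scaled by 2^32) so precision is
+ * not lost at low bitrates.
+ */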
+ mtx_enc_context->clock_div_bitrate = (90000 * 0x100000000LL);
+ mtx_enc_context->clock_div_bitrate /= enc->video->rc_params.bits_per_second;
+ mtx_enc_context->max_buffer_mult_clock_div_bitrate =
+ (unsigned int)(((unsigned long long)(video->rc_params.buffer_size) *
+ 90000ULL) /
+ (unsigned long long)enc->video->rc_params.bits_per_second);
+ }
+
+ memcpy(&mtx_enc_context->in_params, &video->pic_params.in_params,
+ sizeof(struct in_rc_params));
+
+ mtx_enc_context->lritc_cache_chunk_config =
+ F_ENCODE(enc->video->chunks_per_mb,
+ TOPAZHP_CR_CACHE_CHUNKS_PER_MB)
+ | F_ENCODE(enc->video->max_chunks, TOPAZHP_CR_CACHE_CHUNKS_MAX)
+ | F_ENCODE(enc->video->max_chunks - enc->video->priority_chunks,
+ TOPAZHP_CR_CACHE_CHUNKS_PRIORITY);
+
+ mtx_enc_context->first_pic_flags = video->first_pic_flags;
+ mtx_enc_context->non_first_pic_flags = video->non_first_pic_flags;
+
+ mtx_enc_context->slice_header_slot_num = -1;
+
+ memset(mtx_enc_context->pic_on_level, 0, sizeof(mtx_enc_context->pic_on_level));
+
+ gop_structure = (unsigned short *)(video->flat_gop_struct.cpu_virt);
+
+ mini_gop_generate_flat(gop_structure, mtx_enc_context->bframe_count,
+ mtx_enc_context->ref_spacing, mtx_enc_context->pic_on_level);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->flat_gop_struct);
+
+ if (video->rc_params.hierarchical) {
+ memset(mtx_enc_context->pic_on_level, 0, sizeof(mtx_enc_context->pic_on_level));
+ gop_structure = (unsigned short *)(video->hierar_gop_struct.cpu_virt);
+
+ mini_gop_generate_hierarchical(gop_structure, mtx_enc_context->bframe_count,
+ mtx_enc_context->ref_spacing,
+ mtx_enc_context->pic_on_level);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->hierar_gop_struct);
+ }
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->mtx_enc_ctx_mem);
+
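+ /*
+ * Link each per-stream device buffer into the MTX context: every
+ * populate_firmware_message() call below patches the buffer's device
+ * address into the context at the offset of the matching field,
+ * computed from the host mapping of the context structure.
+ */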
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->mv_settings_b_table -
+ (unsigned char *)mtx_enc_context),
+ &video->mv_settings_btable, 0);
+
+ if (video->rc_params.hierarchical)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->mv_settings_hierarchical -
+ (unsigned char *)mtx_enc_context),
+ &video->mv_settings_hierarchical, 0);
+
+ for (i = 0; i < video->pic_nodes; i++) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->reconstructed[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->recon_pictures[i], 0);
+
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->colocated[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->colocated[i], 0);
+ }
+
+ for (i = 0; i < WB_FIFO_SIZE; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->writeback_regions[i] -
+ (unsigned char *)mtx_enc_context),
+ &global_wb_data_info[i], 0);
+
+ for (i = 0; i < video->mv_stores; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->mv[i] - (unsigned char *)mtx_enc_context),
+ &video->mv[i], 0);
+
+ if (video->enable_mvc) {
+ for (i = 0; i < 2; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->inter_view_mv[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->inter_view_mv[i], 0);
+ }
+
+ for (i = 0; i < (int)max_cores; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->above_params[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->above_params[i], 0);
+
+ /* SEI insertion */
+ if (video_params->insert_hrd_params) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->sei_buffering_period_template -
+ (unsigned char *)mtx_enc_context),
+ &video->sei_buffering_period_header_mem, 0);
+
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->sei_picture_timing_template -
+ (unsigned char *)mtx_enc_context),
+ &video->sei_picture_timing_header_mem, 0);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(video->slice_params_template_mem); i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->slice_params_templates[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->slice_params_template_mem[i], 0);
+
+ for (i = 0; i < video->slots_in_use; i++) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->slice_map[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->slice_map[i].mem_info, 0);
+
+ /* WEIGHTED PREDICTION */
+ if (video_params->weighted_prediction ||
+ video_params->vp_weighted_implicit_bi_pred == WBI_EXPLICIT) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->weighted_prediction_virt_addr[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->weighted_prediction_mem[i], 0);
+ }
+ }
+
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->flat_gop_struct -
+ (unsigned char *)mtx_enc_context), &video->flat_gop_struct, 0);
+
+ for (i = 0; i < video->slots_in_use; i++) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->ltref_header[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->ltref_header[i], 0);
+ }
+
+ if (mtx_enc_context->hierarchical)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->hierar_gop_struct -
+ (unsigned char *)mtx_enc_context),
+ &video->hierar_gop_struct, 0);
+
+ for (i = 0; i < ARRAY_SIZE(video->pichdr_template_mem); i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->pichdr_templates[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->pichdr_template_mem[i], 0);
+
+ if (video->standard == IMG_STANDARD_H264) {
+ populate_firmware_message(mtx_enc_context_mem, (unsigned int)((unsigned char *)
+ &mtx_enc_context->seq_header - (unsigned char *)mtx_enc_context),
+ &video->seq_header_mem, 0);
+
+ if (video->enable_mvc)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->subset_seq_header -
+ (unsigned char *)mtx_enc_context),
+ &video->subset_seq_header_mem, 0);
+ }
+
+ /* Store the first-stage feedback memory address for each slot in the context */
+ if (video->enable_sel_stats_flags & ESF_FIRST_STAGE_STATS) {
+ for (i = 0; i < video->slots_in_use; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->firstpass_out_param_addr[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->firstpass_out_param_buf[i].mem_info, 0);
+ }
+
+ /* Store the best-multipass feedback memory address for each slot in the context */
+ if (video->enable_sel_stats_flags & ESF_MP_BEST_MB_DECISION_STATS ||
+ video->enable_sel_stats_flags & ESF_MP_BEST_MOTION_VECTOR_STATS) {
+ for (i = 0; i < video->slots_in_use; i++) {
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)
+ &mtx_enc_context->firstpass_out_best_multipass_param_addr[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->firstpass_out_best_multipass_param_buf[i].mem_info, 0);
+ }
+ }
+
+ /* Store the MB-input control parameter memory for each slot in the context */
+ if (video->enable_inp_ctrl) {
+ for (i = 0; i < video->slots_in_use; i++)
+ populate_firmware_message
+ (mtx_enc_context_mem,
+ (unsigned int)((unsigned char *)&mtx_enc_context->mb_ctrl_in_params_addr[i] -
+ (unsigned char *)mtx_enc_context),
+ &video->mb_ctrl_in_params_buf[i].mem_info, 0);
+ }
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->mtx_enc_ctx_mem);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Prepares the header templates for the encode for H.264
+ */
+static int h264_prepare_templates(struct topaz_stream_context *str_ctx,
+ struct img_rc_params *rc_params,
+ int fine_y_search_size)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video_ctx;
+ struct pic_params *pic_params;
+
+ enc = str_ctx->enc_ctx;
+ video_ctx = enc->video;
+
+ prepare_mv_estimates(enc);
+
+ pic_params = &enc->video->pic_params;
+
+ pic_params->flags = 0;
+
+ if (rc_params->rc_enable) {
+ pic_params->flags |= ISRC_FLAGS;
+ setup_rc_data(enc->video, pic_params, rc_params);
+ } else {
+ pic_params->in_params.se_init_qp_i = rc_params->initial_qp_i;
+ pic_params->in_params.mb_per_row = (enc->video->width >> 4);
+ pic_params->in_params.mb_per_bu = rc_params->bu_size;
+ pic_params->in_params.mb_per_frm = ((unsigned int)(enc->video->width >> 4)) *
+ (enc->video->frame_height >> 4);
+ pic_params->in_params.bu_per_frm = (pic_params->in_params.mb_per_frm) /
+ rc_params->bu_size;
+ }
+
+ /* Prepare Slice header templates */
+ generate_slice_params_template(enc, &enc->video->slice_params_template_mem[IMG_FRAME_IDR],
+ IMG_FRAME_IDR, enc->video->is_interlaced,
+ fine_y_search_size);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->slice_params_template_mem
+ [IMG_FRAME_IDR]);
+
+ generate_slice_params_template(enc, &enc->video->slice_params_template_mem[IMG_FRAME_INTRA],
+ IMG_FRAME_INTRA, enc->video->is_interlaced,
+ fine_y_search_size);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTRA]);
+
+ generate_slice_params_template(enc,
+ &enc->video->slice_params_template_mem[IMG_FRAME_INTER_P],
+ IMG_FRAME_INTER_P, enc->video->is_interlaced,
+ fine_y_search_size);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTER_P]);
+
+ generate_slice_params_template(enc, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTER_B],
+ IMG_FRAME_INTER_B, enc->video->is_interlaced,
+ fine_y_search_size);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTER_B]);
+
+ if (video_ctx->enable_mvc) {
+ generate_slice_params_template(enc, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTER_P_IDR],
+ IMG_FRAME_INTER_P_IDR, enc->video->is_interlaced, fine_y_search_size);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->slice_params_template_mem
+ [IMG_FRAME_INTER_P_IDR]);
+ }
+
+ /* Prepare Pic Params Templates */
+ adjust_pic_flags(enc, rc_params, TRUE, &video_ctx->first_pic_flags);
+ adjust_pic_flags(enc, rc_params, FALSE, &video_ctx->non_first_pic_flags);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Prepares the header templates for the encode.
+ */
+static int topaz_video_prepare_templates(struct topaz_stream_context *str_ctx,
+ unsigned char search_range,
+ int fine_y_search_size)
+{
+ struct img_enc_context *enc = str_ctx->enc_ctx;
+ struct img_video_context *video = enc->video;
+ int err_value = IMG_ERROR_UNEXPECTED_STATE;
+
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ err_value = h264_prepare_templates(str_ctx, &video->rc_params, fine_y_search_size);
+ break;
+ default:
+ break;
+ }
+
+ return err_value;
+}
+
+/*
+ * Prepare the sequence header for h.264
+ */
+int topaz_h264_prepare_sequence_header(void *topaz_str_ctx, unsigned int mb_width,
+ unsigned int mb_height,
+ unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop_params,
+ struct h264_sequence_header_params *sh_params,
+ unsigned char mvc_sps)
+{
+ struct mtx_header_params *seq_header;
+ struct img_enc_context *enc;
+ struct topaz_stream_context *str_ctx;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+
+ /* Ensure parameters are consistent with context */
+ if (!enc->video->custom_scaling)
+ sh_params->seq_scaling_matrix_present_flag = FALSE;
+
+ /* Get a pointer to the memory the header will be written to */
+ seq_header = (struct mtx_header_params *)(enc->video->seq_header_mem.cpu_virt);
+ h264_prepare_sequence_header(seq_header, mb_width, mb_height, vui_params_present,
+ params, crop_params, sh_params, enc->video->arbitrary_so);
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->seq_header_mem);
+
+ if (mvc_sps) {
+ /* prepare subset sequence parameter header */
+ struct mtx_header_params *subset_seq_header;
+
+ subset_seq_header =
+ (struct mtx_header_params *)(enc->video->subset_seq_header_mem.cpu_virt);
+ h264_prepare_mvc_sequence_header(subset_seq_header, mb_width, mb_height,
+ vui_params_present, params, crop_params,
+ sh_params);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->subset_seq_header_mem);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Prepare the picture header for h.264
+ */
+int topaz_h264_prepare_picture_header(void *topaz_str_ctx, signed char cqp_offset)
+{
+ struct mtx_header_params *pic_header;
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+ unsigned char dep_view_pps = FALSE;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+
+ /* Get a pointer to the memory the header will be written to */
+ pic_header = (struct mtx_header_params *)(enc->video->pichdr_template_mem[0].cpu_virt);
+
+ if (enc->video->enable_mvc && enc->video->mvc_view_idx != 0 &&
+ (enc->video->mvc_view_idx != (unsigned short)(NON_MVC_VIEW)))
+ dep_view_pps = TRUE;
+
+ h264_prepare_picture_header(pic_header, enc->video->cabac_enabled,
+ enc->video->h264_8x8_transform,
+ enc->video->h264_intra_constrained,
+ cqp_offset, enc->video->weighted_prediction,
+ enc->video->weighted_bi_pred,
+ dep_view_pps, enc->video->pps_scaling,
+ enc->video->pps_scaling && enc->video->custom_scaling);
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->pichdr_template_mem[0]);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Prepare the AUD header for H264
+ */
+int topaz_h264_prepare_aud_header(void *str_context)
+{
+ struct mtx_header_params *aud_header;
+ struct img_enc_context *enc;
+ struct topaz_stream_context *str_ctx;
+
+ str_ctx = (struct topaz_stream_context *)str_context;
+ if (!str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ enc = str_ctx->enc_ctx;
+
+ /* Get a pointer to the memory the header will be written to */
+ aud_header = (struct mtx_header_params *)(enc->video->aud_header_mem.cpu_virt);
+
+ h264_prepare_aud_header(aud_header);
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &enc->video->aud_header_mem);
+
+ return IMG_SUCCESS;
+}
+
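+/*
+ * Worst-case coded buffer size estimate: for H.264, allow 400 bytes per
+ * macroblock; for other standards scale with the pixel count, with extra
+ * headroom when the initial QP is very low (<= 5).
+ */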
+static unsigned int topaz_get_max_coded_data_size(enum img_standard standard, unsigned short width,
+ unsigned short height, unsigned int initial_qp_i)
+{
+ unsigned int worst_qp_size;
+
+ if (standard == IMG_STANDARD_H264) {
+ /* allocate based on worst case qp size */
+ worst_qp_size = 400;
+ return ((unsigned int)(width / 16) * (unsigned int)(height / 16) * worst_qp_size);
+ }
+
+ if (initial_qp_i <= 5)
+ return ((unsigned int)width * (unsigned int)height * 1600) / (16 * 16);
+
+ return ((unsigned int)width * (unsigned int)height * 900) / (16 * 16);
+}
+
+static int topaz_get_context_coded_buffer_size(struct img_enc_context *enc,
+ struct img_rc_params *rc_params,
+ unsigned int *coded_buffer_size)
+{
+ struct img_video_context *video;
+
+ video = enc->video;
+
+ *coded_buffer_size = topaz_get_max_coded_data_size(video->standard, video->width,
+ video->picture_height,
+ rc_params->initial_qp_i);
+
+ if (!video->disable_bit_stuffing && rc_params->rc_mode == IMG_RCMODE_CBR)
+ *coded_buffer_size = max(*coded_buffer_size,
+ ((rc_params->bits_per_second + rc_params->frame_rate / 2) /
+ rc_params->frame_rate) * 2);
+
+ if (video->coded_header_per_slice)
+ *coded_buffer_size += CODED_BUFFER_INFO_SECTION_SIZE * video->slices_per_picture;
+ else
+ *coded_buffer_size += CODED_BUFFER_INFO_SECTION_SIZE;
+ /* Ensure coded buffer sizes are always aligned to 1024 */
+ *coded_buffer_size = ALIGN_1024(*coded_buffer_size);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Description: Allocate a coded package
+ */
+static int topaz_allocate_coded_package(struct topaz_stream_context *str_ctx,
+ unsigned int coded_buffersize_bytes,
+ struct coded_package_host **package)
+{
+ struct coded_package_host *this_package;
+ struct img_video_context *video = str_ctx->enc_ctx->video;
+
+ *package = kzalloc(sizeof(**package), GFP_KERNEL);
+
+ this_package = *package;
+
+ if (!this_package)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ this_package->busy = 0;
+
+ this_package->num_coded_buffers = 1;
+
+ /* Allocate FW Buffer IMG_BUFFER memory */
+ this_package->mtx_info.code_package_fw_buffer =
+ kzalloc(sizeof(struct img_buffer), GFP_KERNEL);
+
+ if (!this_package->mtx_info.code_package_fw_buffer)
+ goto error_handling;
+
+ /* Allocate header IMG_BUFFER memory */
+ this_package->header_buffer = kzalloc(sizeof(*this_package->header_buffer), GFP_KERNEL);
+
+ if (!this_package->header_buffer)
+ goto error_handling;
+
+ /* Allocate the FW Package (this will provide addresses
+ * of header and the coded buffer array)
+ */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ sizeof(struct coded_package_dma_info), 64,
+ &this_package->mtx_info.code_package_fw_buffer->mem_info))
+ goto error_handling;
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE),
+ (video->coded_header_per_slice ? video->slices_per_picture : 1) *
+ CODED_BUFFER_INFO_SECTION_SIZE,
+ 64, &this_package->header_buffer->mem_info))
+ goto error_handling;
+
+ this_package->header_buffer->size =
+ (video->coded_header_per_slice ? video->slices_per_picture : 1) *
+ CODED_BUFFER_INFO_SECTION_SIZE;
+
+ return IMG_SUCCESS;
+
+error_handling:
+ if (this_package->mtx_info.code_package_fw_buffer) {
+ if (this_package->mtx_info.code_package_fw_buffer->mem_info.dev_virt)
+ topaz_mmu_stream_free
+ (str_ctx->mmu_ctx,
+ &this_package->mtx_info.code_package_fw_buffer->mem_info);
+
+ kfree(this_package->mtx_info.code_package_fw_buffer);
+ this_package->mtx_info.code_package_fw_buffer = NULL;
+ }
+
+ kfree(this_package->header_buffer);
+ this_package->header_buffer = NULL;
+
+ /* free the package itself last: this_package aliases *package */
+ kfree(*package);
+ *package = NULL;
+
+ return IMG_ERROR_OUT_OF_MEMORY;
+}
+
+/*
+ * Create the Video Encoder context
+ */
+static int topaz_video_create_context(struct topaz_stream_context *str_ctx,
+ struct img_video_params *video_params,
+ struct img_rc_params *rc_params)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ unsigned int alloc_size;
+ int index, i;
+ unsigned short picture_height;
+ unsigned int coded_buffer_size;
+ unsigned short width_in_mbs;
+ unsigned short frame_height_in_mbs;
+ unsigned char pipes_to_use;
+ unsigned int max_cores;
+ unsigned int min_slice_height;
+ unsigned int factor = 1;
+ unsigned int kick_size, kicks_per_bu;
+ int ret;
+
+ max_cores = topazdd_get_num_pipes(str_ctx->core_ctx->dev_handle);
+
+ enc = str_ctx->enc_ctx;
+
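+ /*
+ * Round the field height up to whole 16-pixel macroblock rows; for
+ * interlaced sources each field is rounded separately and the frame
+ * height in MBs is doubled back afterwards.
+ */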
+ picture_height =
+ ((video_params->frame_height >> (video_params->is_interlaced ? 1 : 0)) + 15) & ~15;
+ width_in_mbs = (video_params->width + 15) >> 4;
+ frame_height_in_mbs = ((picture_height + 15) >> 4) <<
+ (video_params->is_interlaced ? 1 : 0);
+
+ if (topaz_get_encoder_caps(video_params->standard, video_params->width, picture_height,
+ &enc->caps) != IMG_SUCCESS) {
+ pr_err("\nERROR: Unable to encode the size %dx%d with current hardware version\n\n",
+ video_params->width, picture_height);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ /* scaler input W/H limit is 4K */
+ if (video_params->source_width > 4096) {
+ pr_err("\nERROR: Source Width is bigger than the maximum supported Source Width(4096)\n");
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if (video_params->source_frame_height > 4096) {
+ pr_err("\nERROR: Source Height is bigger than the maximum supported Source Height(4096)\n");
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if (video_params->width > enc->caps.max_width) {
+ pr_err("\n ERROR: Width too big for given core revision 0x%x. Maximum width is %d.\n",
+ enc->caps.core_revision, enc->caps.max_width);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if (picture_height > enc->caps.max_height) {
+ pr_err("\n ERROR: Height too big for given core revision 0x%x. Maximum height is %d.\n",
+ enc->caps.core_revision, enc->caps.max_height);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if (video_params->width < enc->caps.min_width) {
+ pr_err("\n ERROR: Width too small for given core revision 0x%x. Minimum width is %d.\n",
+ enc->caps.core_revision, enc->caps.min_width);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if (video_params->standard == IMG_STANDARD_H264) {
+ if (video_params->slices_per_picture < enc->caps.min_slices) {
+ pr_err("WARNING: Minimum slices supported for this resolution is %d. Increasing slices per frame to %d\n",
+ enc->caps.min_slices, enc->caps.min_slices);
+ video_params->slices_per_picture = (unsigned char)enc->caps.min_slices;
+ }
+ factor = min(enc->pipes_to_use, video_params->slices_per_picture);
+ }
+
+ if (video_params->standard == IMG_STANDARD_H264)
+ pipes_to_use = min(enc->pipes_to_use, video_params->slices_per_picture);
+ else
+ pipes_to_use = 1;
+
+ if (picture_height < (enc->caps.min_height * factor)) {
+ pr_err("\n ERROR: Height too small for given core revision 0x%x. Minimum height is %d.\n",
+ enc->caps.core_revision, enc->caps.min_height * factor);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ if ((unsigned int)((width_in_mbs) * (picture_height >> 4)) > enc->caps.max_mb_num) {
+ pr_err("\n ERROR: Number of macroblocks too high. It should not be bigger than %d.\n",
+ enc->caps.max_mb_num);
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ calculate_kick_and_bu_size(width_in_mbs, picture_height / 16, video_params->is_interlaced,
+ enc->caps.max_bu_per_frame, &kick_size, &kicks_per_bu,
+ &min_slice_height);
+
+ if (enc->caps.min_slice_height > min_slice_height)
+ min_slice_height = enc->caps.min_slice_height;
+
+ if ((unsigned int)(video_params->slices_per_picture * min_slice_height) >
+ (unsigned int)(picture_height / 16)) {
+ /* we have too many slices for this resolution */
+ pr_err("\n ERROR: Too many slices for this resolution.\n");
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+
+ video = kzalloc(sizeof(*video), GFP_KERNEL);
+ if (!video)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ enc->video = video;
+
+ memcpy(&video->rc_params, rc_params, sizeof(*rc_params));
+
+ /* Setup BU size for rate control */
+ video->rc_params.bu_size = kick_size * kicks_per_bu;
+ rc_params->bu_size = video->rc_params.bu_size;
+
+ video->kick_size = kick_size;
+ video->kicks_per_bu = kicks_per_bu;
+
+ video->debug_crcs = video_params->debug_crcs;
+
+ /* stream level params */
+ video->standard = video_params->standard;
+ video->format = video_params->format;
+ video->csc_preset = video_params->csc_preset;
+ video->width = width_in_mbs << 4;
+ video->frame_height = frame_height_in_mbs << 4;
+ video->unrounded_width = video_params->width;
+ video->unrounded_frame_height = video_params->frame_height;
+
+ video->picture_height = picture_height;
+ video->is_interlaced = video_params->is_interlaced;
+ video->is_interleaved = video_params->is_interleaved;
+ video->top_field_first = !(video_params->bottom_field_first);
+ video->encode_requested = 0;
+ video->limit_num_vectors = video_params->limit_num_vectors;
+ video->disable_bit_stuffing = video_params->disable_bit_stuffing;
+ video->vert_mv_limit = video_params->vert_mv_limit;
+ /* Cabac Parameters */
+ video->cabac_enabled = video_params->cabac_enabled;
+ video->cabac_bin_limit = video_params->cabac_bin_limit;
+ video->cabac_bin_flex = video_params->cabac_bin_flex;
+
+ video->frame_count = 0;
+ video->flush_at_frame = 0;
+ video->flushed_at_frame = 0;
+ video->encoder_idle = TRUE;
+ video->high_latency = video_params->high_latency;
+ video->slices_per_picture = (unsigned char)video_params->slices_per_picture;
+ video->deblock_idc = video_params->deblock_idc;
+ video->output_reconstructed = video_params->output_reconstructed;
+ video->arbitrary_so = video_params->arbitrary_so;
+ video->f_code = video_params->f_code;
+
+ /* Default f_code is 4 */
+ if (!video->f_code)
+ video->f_code = 4;
+
+ video->vop_time_resolution = video_params->vop_time_resolution;
+ video->frames_encoded = 0;
+ video->idr_period = video_params->idr_period;
+
+ video->intra_cnt = video_params->intra_cnt;
+ video->multi_reference_p = video_params->multi_reference_p;
+ video->spatial_direct = video_params->spatial_direct;
+ video->enable_sel_stats_flags = video_params->enable_sel_stats_flags;
+ video->enable_inp_ctrl = video_params->enable_inp_ctrl;
+ video->enable_host_bias = video_params->enable_host_bias;
+ video->enable_host_qp = video_params->enable_host_qp;
+ /* Line counter */
+ video->line_counter = video_params->line_counter_enabled;
+
+ video->enable_air = video_params->enable_air;
+ video->num_air_mbs = video_params->num_air_mbs;
+ video->air_threshold = video_params->air_threshold;
+ video->air_skip_cnt = video_params->air_skip_cnt;
+
+ video->extra_wb_retrieved = 0;
+ video->highest_storage_number = 0;
+
+ video->buffer_stride_bytes = calculate_stride(video_params->format,
+ video_params->buffer_stride_bytes,
+ video_params->source_width);
+ video->buffer_height = ((video_params->buffer_height ? video_params->buffer_height :
+ video_params->source_frame_height));
+
+ if (!video_params->disable_bh_rounding)
+ video->buffer_height =
+ (((video->buffer_height >> (video_params->is_interlaced ? 1 : 0)) + 15) &
+ ~15) << (video_params->is_interlaced ? 1 : 0);
+
+ video_params->buffer_stride_bytes = video->buffer_stride_bytes;
+ video_params->buffer_height = video->buffer_height;
+
+ video->next_recon = 0;
+
+ video->enable_mvc = video_params->enable_mvc;
+ video->mvc_view_idx = video_params->mvc_view_idx;
+
+ enc->pipes_to_use = pipes_to_use;
+
+ enc->requested_pipes_to_use = pipes_to_use;
+ video->slots_in_use = rc_params->bframes + 2;
+ enc->video->slots_required = enc->video->slots_in_use;
+
+ video->h264_8x8_transform = video_params->h264_8x8;
+ video->h264_intra_constrained = video_params->constrained_intra;
+ video->custom_scaling = (video_params->use_custom_scaling_lists != 0);
+ video->pps_scaling =
+ (video_params->pps_scaling &&
+ (video_params->use_default_scaling_list || video->custom_scaling));
+
+ video->encode_pic_processing = 0;
+ video->next_slice = 0;
+ video->ref_frame = NULL;
+
+ /* create topaz device context */
+ ret = topazdd_create_stream_context(global_topaz_core_context->dev_handle,
+ str_ctx->enc_ctx->codec,
+ handle_encoder_firmware_response, str_ctx,
+ &str_ctx->enc_ctx->video->dd_str_ctx,
+ &global_wb_data_info);
+
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ ret = topazdd_setup_stream_ctx
+ (str_ctx->enc_ctx->video->dd_str_ctx, video->frame_height,
+ video->width, (unsigned char *)&video->dd_ctx_num, &video->dd_ctx_num);
+
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ /* Create MMU stream context */
+ ret = topaz_mmu_stream_create(&global_topaz_core_context->dev_handle->topaz_mmu_ctx,
+ 0x1 /*stream_id*/, str_ctx->vxe_ctx, &str_ctx->mmu_ctx);
+ if (ret)
+ return ret;
+
+ /* WEIGHTED PREDICTION */
+ if (video_params->weighted_prediction ||
+ video_params->vp_weighted_implicit_bi_pred == WBI_EXPLICIT) {
+ video->weighted_prediction = TRUE;
+
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ sizeof(struct weighted_prediction_values), 64,
+ &video->weighted_prediction_mem[i]))
+ IMG_DBG_ASSERT("Allocation failed (A)" == NULL);
+ }
+ } else {
+ video->weighted_prediction = FALSE;
+ }
+
+ video->weighted_bi_pred = video_params->vp_weighted_implicit_bi_pred;
+
+ video->coded_skipped_index = video_params->coded_skipped_index;
+ video->inter_intra_index = video_params->inter_intra_index;
+
+ /*
+ * Patch the video parameters if the user has specified a profile;
+ * first calculate the number of macroblocks per second.
+ */
+ video->mbps = width_in_mbs * frame_height_in_mbs * video->rc_params.frame_rate;
+
+ patch_hw_profile(video_params, video);
+
+ enc->auto_expand_pipes = video_params->auto_expand_pipes;
+
+ /* As ui32Vp8RefStructMode is not in use, the worst case has to be
+ * assumed, hence 5 pic nodes
+ */
+ video->pic_nodes = (rc_params->hierarchical ? MAX_REF_B_LEVELS : 0) +
+ video_params->ref_spacing + 4;
+ video->mv_stores = (video->pic_nodes * 2);
+
+ /* We're using a common MACRO here so we can guarantee the same calculation
+ * when managing buffers either from host or within drivers
+ */
+ video->coded_package_max_num = CALC_NUM_CODED_PACKAGES_ENCODE
+ (video_params->slice_level,
+ video_params->slices_per_picture, pipes_to_use,
+ video->is_interlaced);
+
+ alloc_size = MVEA_ABOVE_PARAM_REGION_SIZE * (ALIGN_64(width_in_mbs));
+
+ for (index = 0; index < (int)max_cores; index++) {
+ if (str_ctx->vxe_ctx->above_mb_params_sgt[index].sgl) {
+ video->above_params[index].buf_size = alloc_size;
+
+ topaz_mmu_stream_map_ext_sg
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID,
+ &str_ctx->vxe_ctx->above_mb_params_sgt[index],
+ video->above_params[index].buf_size,
+ 64, (enum sys_emem_attrib)0, video->above_params[index].cpu_virt,
+ &video->above_params[index],
+ &video->above_params[index].buff_id);
+ } else {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(alloc_size), 64, &video->above_params[index]))
+ IMG_DBG_ASSERT("Allocation failed (C)" == NULL);
+ }
+ }
+
+ alloc_size = MVEA_MV_PARAM_REGION_SIZE * ALIGN_4(width_in_mbs) * frame_height_in_mbs;
+
+ for (index = 0; index < video->pic_nodes; index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->colocated[index]))
+ IMG_DBG_ASSERT("Allocation failed (D)" == NULL);
+ }
+
+ alloc_size = (ALIGN_64(video->width)) * (ALIGN_64(video->frame_height)) * 3 / 2;
+
+ for (index = 0; index < video->pic_nodes; index++) {
+ void *data;
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 256, &video->recon_pictures[index]))
+ IMG_DBG_ASSERT("Allocation failed (E)" == NULL);
+
+ data = video->recon_pictures[index].cpu_virt;
+ memset(data, 0, alloc_size);
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->recon_pictures[index]);
+ }
+
+ video->patched_recon_buffer = NULL;
+
+ alloc_size = MVEA_MV_PARAM_REGION_SIZE * ALIGN_4(width_in_mbs) * frame_height_in_mbs;
+ for (i = 0; i < video->mv_stores; i++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->mv[i]))
+ IMG_DBG_ASSERT("Allocation failed (F)" == NULL);
+ topaz_update_device_mem(str_ctx->vxe_ctx, &video->mv[i]);
+ }
+
+ if (video->enable_mvc) {
+ for (i = 0; i < 2; i++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->inter_view_mv[i]))
+ IMG_DBG_ASSERT("Allocation failed (G)" == NULL);
+ }
+ }
+
+ /* memory for encoder context */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(MTX_CONTEXT_SIZE), 64, &video->mtx_enc_ctx_mem))
+ IMG_DBG_ASSERT("Allocation failed (H)" == NULL);
+
+ video->no_sequence_headers = video_params->no_sequence_headers;
+ video->auto_encode = video_params->auto_encode;
+ video->slice_level = video_params->slice_level;
+ video->coded_header_per_slice = video_params->coded_header_per_slice;
+
+ /* partially coded headers supplied to HW */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MAX_HEADERSIZEBYTES, 64, &video->seq_header_mem))
+ IMG_DBG_ASSERT("Allocation failed (I)\n" == NULL);
+
+ /* partially coded subset sequence parameter headers supplied to HW */
+ if (video->enable_mvc) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MAX_HEADERSIZEBYTES, 64, &video->subset_seq_header_mem))
+ IMG_DBG_ASSERT("Allocation failed (J)" == NULL);
+ }
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MAX_BFRAMES * MV_ROW_STRIDE, 64, &video->mv_settings_btable))
+ IMG_DBG_ASSERT("Allocation failed (K)" == NULL);
+
+ if (video->rc_params.hierarchical) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MAX_BFRAMES * sizeof(struct img_mv_settings), 64,
+ &video->mv_settings_hierarchical))
+ IMG_DBG_ASSERT("Allocation failed (L)" == NULL);
+ } else {
+ video->mv_settings_hierarchical.cpu_virt = NULL;
+ }
+
+ video->insert_hrd_params = video_params->insert_hrd_params;
+ if (video_params->insert_hrd_params) {
+ alloc_size = 64;
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->aud_header_mem))
+ IMG_DBG_ASSERT("Allocation failed (M)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->sei_buffering_period_header_mem))
+ IMG_DBG_ASSERT("Allocation failed (N)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ alloc_size, 64, &video->sei_picture_timing_header_mem))
+ IMG_DBG_ASSERT("Allocation failed (O)" == NULL);
+ }
+
+ for (index = 0; index < ARRAY_SIZE(video->pichdr_template_mem); index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ MAX_HEADERSIZEBYTES, 64, &video->pichdr_template_mem[index]))
+ IMG_DBG_ASSERT("Allocation failed (P)" == NULL);
+ }
+
+ for (index = 0; index < ARRAY_SIZE(video->slice_params_template_mem); index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(sizeof(struct slice_params)), 64,
+ &video->slice_params_template_mem[index]))
+ IMG_DBG_ASSERT("Allocation failed (Q)" == NULL);
+ }
+
+ for (index = 0; index < video->slots_in_use; index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(sizeof(struct mtx_header_params)), 64,
+ &video->ltref_header[index]))
+ IMG_DBG_ASSERT("Allocation failed (R)" == NULL);
+ }
+
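+	/* Small scratch buffer used to recover the device addresses of the
+	 * source planes (see topaz_send_source_frame())
+	 */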
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(4), 64, &video->src_phys_addr))
+ IMG_DBG_ASSERT("Allocation failed (S)" == NULL);
+
+ for (index = 0; index < video->slots_in_use; index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ (1 + MAX_SLICESPERPIC * 2 + 15) & ~15, 64,
+ &video->slice_map[index].mem_info) != IMG_SUCCESS)
+ IMG_DBG_ASSERT("Allocation failed (T)" == NULL);
+ }
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(sizeof(unsigned short) * MAX_GOP_SIZE), 64, &video->flat_gop_struct))
+ IMG_DBG_ASSERT("Allocation failed (U)" == NULL);
+
+ if (video->rc_params.hierarchical) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(sizeof(unsigned short) * MAX_GOP_SIZE), 64,
+ &video->hierar_gop_struct))
+ IMG_DBG_ASSERT("Allocation failed (V)" == NULL);
+ }
+
+ if (video->custom_scaling) {
+ for (index = 0; index < 2; index++) {
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ ALIGN_64(QUANT_LISTS_SIZE), 64, &video->custom_quant[index]))
+ IMG_DBG_ASSERT("Allocation failed (W)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ 192, 64, &video->custom_quant_regs4x4_sp[index]))
+ IMG_DBG_ASSERT("Allocation failed (X)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ 128 * CUSTOM_QUANT_PARAMSIZE_8x8, 64,
+ &video->custom_quant_regs8x8_sp[index]))
+ IMG_DBG_ASSERT("Allocation failed (Y)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ 128, 64, &video->custom_quant_regs4x4_q[index]))
+ IMG_DBG_ASSERT("Allocation failed (Z)" == NULL);
+
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ 64 * CUSTOM_QUANT_PARAMSIZE_8x8, 64,
+ &video->custom_quant_regs8x8_q[index]))
+ IMG_DBG_ASSERT("Allocation failed (0)" == NULL);
+ }
+ video->custom_quant_slot = 0;
+ }
+
+	/* Allocate device memory for storing feedback information for all slots in use */
+ if (video->enable_sel_stats_flags & ESF_FIRST_STAGE_STATS) {
+ for (index = 0; index < video->slots_in_use; index++) {
+ unsigned int row_size =
+ ALIGN_64(width_in_mbs * sizeof(struct img_first_stage_mb_params));
+
+ /* Allocate memory padding size of each row to be multiple of 64-bytes */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ frame_height_in_mbs * row_size, 64,
+ &video->firstpass_out_param_buf[index].mem_info))
+ IMG_DBG_ASSERT("Allocation failed (1)" == NULL);
+
+ video->firstpass_out_param_buf[index].lock = BUFFER_FREE;
+ video->firstpass_out_param_buf[index].bytes_written = 0;
+ video->firstpass_out_param_buf[index].size =
+ frame_height_in_mbs * row_size;
+ }
+ } else {
+ /* Set buffer pointers to NULL */
+ for (index = 0; index < video->slots_in_use; index++) {
+ video->firstpass_out_param_buf[index].mem_info.cpu_virt = NULL;
+ video->firstpass_out_param_buf[index].lock = BUFFER_FREE;
+ video->firstpass_out_param_buf[index].bytes_written = 0;
+ video->firstpass_out_param_buf[index].size = 0;
+ }
+ }
+
+	/* Allocate device memory for storing feedback information for all slots in use */
+ if (video->enable_sel_stats_flags & ESF_MP_BEST_MB_DECISION_STATS ||
+ video->enable_sel_stats_flags & ESF_MP_BEST_MOTION_VECTOR_STATS) {
+ for (index = 0; index < video->slots_in_use; index++) {
+			/* From TRM (4.5.2) */
+			unsigned int best_multipass_size = frame_height_in_mbs *
+				(((5 * width_in_mbs) + 3) >> 2) * 64;
+
+ /* Allocate memory padding size of each row to be multiple of 64-bytes */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE),
+ best_multipass_size, 64,
+ &video->firstpass_out_best_multipass_param_buf[index].mem_info))
+ IMG_DBG_ASSERT("Allocation failed (2)" == NULL);
+
+ video->firstpass_out_best_multipass_param_buf[index].lock =
+ BUFFER_FREE;
+ video->firstpass_out_best_multipass_param_buf[index].bytes_written = 0;
+ video->firstpass_out_best_multipass_param_buf[index].size =
+ best_multipass_size;
+ }
+ } else {
+ /* Set buffer pointers to NULL */
+ for (index = 0; index < video->slots_in_use; index++) {
+ video->firstpass_out_best_multipass_param_buf[index].mem_info.cpu_virt =
+ NULL;
+ video->firstpass_out_best_multipass_param_buf[index].lock =
+ BUFFER_FREE;
+ video->firstpass_out_best_multipass_param_buf[index].bytes_written = 0;
+ video->firstpass_out_best_multipass_param_buf[index].size = 0;
+ }
+ }
+
+ if (video->enable_inp_ctrl) {
+ for (index = 0; index < video->slots_in_use; index++) {
+ alloc_size = frame_height_in_mbs * width_in_mbs * 2;
+
+ /*
+ * Allocate memory for worst case slice structure
+ * i.e. assume number-of-slices == number-of-rows
+ */
+ if (topaz_mmu_stream_alloc
+ (str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED |
+ SYS_MEMATTRIB_WRITECOMBINE), alloc_size + 64, 64,
+ &video->mb_ctrl_in_params_buf[index].mem_info))
+ IMG_DBG_ASSERT("Allocation failed (3)" == NULL);
+
+ video->mb_ctrl_in_params_buf[index].lock = BUFFER_FREE;
+ video->mb_ctrl_in_params_buf[index].bytes_written = 0;
+ video->mb_ctrl_in_params_buf[index].size = alloc_size;
+ }
+ } else {
+ for (index = 0; index < video->slots_in_use; index++) {
+ video->mb_ctrl_in_params_buf[index].mem_info.cpu_virt = NULL;
+ video->mb_ctrl_in_params_buf[index].lock = BUFFER_FREE;
+ video->mb_ctrl_in_params_buf[index].bytes_written = 0;
+ video->mb_ctrl_in_params_buf[index].size = 0;
+ }
+ }
+
+ for (index = 0; index < video->slots_in_use; index++)
+ video->source_slot_buff[index] = NULL;
+
+ /* Allocate coded package */
+ topaz_get_context_coded_buffer_size(enc, rc_params, &coded_buffer_size);
+
+ video->coded_buffer_max_size = coded_buffer_size;
+
+ for (i = 0; i < video->coded_package_max_num; i++) {
+ if (topaz_allocate_coded_package(str_ctx, coded_buffer_size,
+ &video->coded_package[i]) != IMG_SUCCESS)
+			IMG_DBG_ASSERT("Coded package allocation failed" == NULL);
+ }
+
+ video->encode_sent = 0;
+
+ topaz_video_prepare_templates(str_ctx, video_params->f_code,
+ video_params->fine_y_search_size);
+
+ enc->video->max_chunks = video_params->max_chunks;
+ enc->video->chunks_per_mb = video_params->chunks_per_mb;
+ enc->video->priority_chunks = video_params->priority_chunks;
+
+ return topaz_video_create_mtx_context(str_ctx, video_params);
+}
+
+unsigned char topaz_validate_params(struct img_video_params *video_params,
+ struct img_rc_params *rc_params)
+{
+ unsigned char modified = FALSE;
+ unsigned int required_core_des1 = 0;
+ unsigned int core_des1 = topazdd_get_core_des1();
+
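+	/*
+	 * For each requested feature, check the core design register: when
+	 * the capability bit is absent the feature is silently downgraded
+	 * and "modified" is flagged, otherwise the matching bit is
+	 * accumulated in required_core_des1.
+	 */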
+ if (video_params) {
+ /* Validate video params */
+ if (video_params->standard == IMG_STANDARD_H264) {
+ if (video_params->is_interlaced) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED)) == 0) {
+ video_params->is_interlaced = FALSE;
+
+ if (!video_params->is_interleaved) {
+ /* Non-interleaved source.
+ * Encode field pictures as frames.
+ */
+ video_params->frame_height >>= 1;
+ video_params->buffer_height >>= 1;
+ video_params->source_frame_height >>= 1;
+ } else {
+ /* Interleaved source. Unite fields into single picture. */
+ video_params->is_interleaved = FALSE;
+ }
+
+ video_params->bottom_field_first = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED);
+ }
+ }
+
+ if (video_params->h264_8x8) {
+ if ((core_des1 &
+ F_ENCODE(1, TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED)) ==
+ 0) {
+ video_params->h264_8x8 = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED);
+ }
+ }
+
+ if (video_params->cabac_enabled) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED)) == 0) {
+ video_params->cabac_enabled = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED);
+ }
+ }
+
+ if (!video_params->enc_features.disable_bframes) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED)) == 0) {
+					video_params->enc_features.disable_bframes = TRUE;
+ modified = TRUE;
+ }
+ }
+
+ if (video_params->enable_sel_stats_flags) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED)) == 0) {
+ video_params->enable_sel_stats_flags = 0;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED);
+ }
+ }
+
+ if (video_params->use_default_scaling_list) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED)) ==
+ 0) {
+ video_params->use_default_scaling_list = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED);
+ }
+ }
+
+ if (video_params->use_custom_scaling_lists) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED)) == 0) {
+ video_params->use_custom_scaling_lists = 0;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED);
+ }
+ }
+
+ if ((video_params->weighted_prediction ||
+ video_params->vp_weighted_implicit_bi_pred)) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED)) ==
+ 0) {
+ video_params->weighted_prediction = FALSE;
+ video_params->vp_weighted_implicit_bi_pred = 0;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED);
+ }
+ }
+
+ if (video_params->multi_reference_p || video_params->enable_mvc) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED)) ==
+ 0) {
+ video_params->multi_reference_p = FALSE;
+ video_params->enable_mvc = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED);
+ }
+ }
+
+ if (video_params->spatial_direct) {
+ if ((core_des1 &
+ F_ENCODE
+ (1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED)) ==
+ 0) {
+ video_params->spatial_direct = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED);
+ }
+ }
+
+ if (video_params->enable_lossless) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED)) == 0) {
+ video_params->enable_lossless = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED);
+ }
+ }
+ }
+
+ if (video_params->enable_scaler) {
+ if ((core_des1 &
+ F_ENCODE(1, TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED)) == 0) {
+ video_params->enable_scaler = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED);
+ }
+ }
+
+ if (rc_params) {
+ /* Validate RC params */
+ if (video_params->standard == IMG_STANDARD_H264) {
+ if (rc_params->bframes) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED)) ==
+ 0) {
+ rc_params->bframes = 0;
+ rc_params->hierarchical = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED);
+ }
+ }
+
+ if (rc_params->hierarchical && rc_params->bframes > 1) {
+ if ((core_des1 &
+ F_ENCODE
+ (1, TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED))
+ == 0) {
+ rc_params->hierarchical = FALSE;
+ modified = TRUE;
+ } else {
+ required_core_des1 |= F_ENCODE(1,
+ TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED);
+ }
+ }
+ }
+ }
+ }
+
+ return modified;
+}
+
+/*
+ * Create an encoder context
+ */
+int topaz_stream_create(void *vxe_ctx, struct img_video_params *video_params,
+ unsigned char base_pipe, unsigned char pipes_to_use,
+ struct img_rc_params *rc_params, void **topaz_str_context)
+{
+ struct img_enc_context *enc;
+ struct topaz_stream_context *str_ctx;
+
+ if (!is_topaz_core_initialized)
+ return IMG_ERROR_NOT_INITIALISED;
+
+ str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+ if (!str_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+ if (!enc) {
+ kfree(str_ctx);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ *topaz_str_context = str_ctx;
+ str_ctx->enc_ctx = enc;
+ str_ctx->core_ctx = global_topaz_core_context;
+ str_ctx->vxe_ctx = (struct vxe_enc_ctx *)vxe_ctx;
+
+ enc->core_rev = topazdd_get_core_rev();
+ enc->sync_first_pass = true;
+
+ enc->requested_base_pipe = base_pipe;
+ enc->base_pipe = base_pipe;
+ enc->requested_pipes_to_use = pipes_to_use;
+ enc->pipes_to_use = pipes_to_use;
+
+ topaz_validate_params(video_params, rc_params);
+
+ switch (video_params->standard) {
+ case IMG_STANDARD_H264:
+ if (video_params->enable_mvc) {
+ switch (rc_params->rc_mode) {
+ case IMG_RCMODE_NONE:
+ enc->codec = IMG_CODEC_H264MVC_NO_RC;
+ break;
+ case IMG_RCMODE_CBR:
+ enc->codec = IMG_CODEC_H264MVC_CBR;
+ break;
+ case IMG_RCMODE_VBR:
+ enc->codec = IMG_CODEC_H264MVC_VBR;
+ break;
+ case IMG_RCMODE_ERC:
+ enc->codec = IMG_CODEC_H264MVC_ERC;
+ break;
+ case IMG_RCMODE_VCM:
+ IMG_DBG_ASSERT("VCM mode is not supported for MVC" == NULL);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rc_params->rc_mode) {
+ case IMG_RCMODE_NONE:
+ enc->codec = IMG_CODEC_H264_NO_RC;
+ break;
+ case IMG_RCMODE_CBR:
+ enc->codec = IMG_CODEC_H264_CBR;
+ break;
+ case IMG_RCMODE_VBR:
+ enc->codec = IMG_CODEC_H264_VBR;
+ break;
+ case IMG_RCMODE_VCM:
+ enc->codec = IMG_CODEC_H264_VCM;
+ break;
+ case IMG_RCMODE_ERC:
+ enc->codec = IMG_CODEC_H264_ERC;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ IMG_DBG_ASSERT("Only H264 encode is supported" == NULL);
+ }
+
+ /* initialise video context structure */
+	return topaz_video_create_context(str_ctx, video_params, rc_params);
+}
+
+/*
+ * Sends a command to the specified core.
+ * The function returns a writeback value. This is a unique value that will be
+ * written back by the target core after it completes its command.
+ */
+unsigned int topaz_insert_command(struct img_enc_context *enc_ctx,
+ enum mtx_cmd_id cmd_id, unsigned int data)
+{
+ unsigned int writeback_val;
+
+ if (enc_ctx->debug_settings &&
+ enc_ctx->debug_settings->serialized_communication_mode ==
+ VXE_SERIALIZED_MODE_SERIAL)
+ /* in serial mode do not use the priority bit */
+ cmd_id &= ~MTX_CMDID_PRIORITY;
+
+ topazdd_send_msg(enc_ctx->video->dd_str_ctx, cmd_id, data, NULL, &writeback_val);
+
+ return writeback_val;
+}
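+
+/*
+ * Usage sketch (illustrative only; it mirrors topaz_encode_frame() below):
+ *
+ *	wb = topaz_insert_command(enc, (enum mtx_cmd_id)
+ *				  (MTX_CMDID_ENCODE_FRAME | MTX_CMDID_WB_INTERRUPT),
+ *				  encode_cmd_data);
+ *
+ * The returned writeback word can then be waited on to detect completion
+ * of this particular command.
+ */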
+
+/*
+ * Sends a command to the specified core.
+ */
+unsigned int topaz_insert_command_with_sync(struct img_enc_context *enc_ctx,
+ enum mtx_cmd_id cmd_id, unsigned int data)
+{
+ int ret;
+
+ if (enc_ctx->debug_settings &&
+ enc_ctx->debug_settings->serialized_communication_mode ==
+ VXE_SERIALIZED_MODE_SERIAL)
+ /* in serial mode do not use the priority bit */
+ cmd_id &= ~MTX_CMDID_PRIORITY;
+
+ ret = topazdd_send_msg_with_sync(enc_ctx->video->dd_str_ctx, cmd_id, data, NULL);
+
+ return ret;
+}
+
+/*
+ * Sends a command to the specified core.
+ * The data specified in command_data will be read via DMA by the MTX,
+ * so this memory must remain in scope for the duration of the execution
+ * of the command.
+ * The function returns a writeback value. This is a unique value that will be
+ * written back by the target core after it completes its command.
+ */
+unsigned int topaz_insert_mem_command(struct img_enc_context *enc_ctx,
+ enum mtx_cmd_id cmd_id,
+ unsigned int data,
+ struct vidio_ddbufinfo *command_data)
+{
+ unsigned int writeback_val;
+
+ /* Priority bit is not supported for MEM commands */
+ cmd_id &= ~MTX_CMDID_PRIORITY;
+
+ topazdd_send_msg(enc_ctx->video->dd_str_ctx, cmd_id, data, command_data, &writeback_val);
+
+ return writeback_val;
+}
+
+/*
+ * Sends a command to the specified core.
+ * The data specified in command_data will be read via DMA by the MTX,
+ * so this memory must remain in scope for the duration of the execution
+ * of the command.
+ */
+unsigned int topaz_insert_mem_command_with_sync(struct img_enc_context *enc_ctx,
+ enum mtx_cmd_id cmd_id,
+ unsigned int data,
+ struct vidio_ddbufinfo *command_data)
+{
+ int ret;
+
+ /* Priority bit is not supported for MEM commands */
+ cmd_id &= ~MTX_CMDID_PRIORITY;
+
+ ret = topazdd_send_msg_with_sync(enc_ctx->video->dd_str_ctx, cmd_id,
+ data, command_data);
+ return ret;
+}
+
+/*
+ * Send the Access Unit Delimiter to the stream
+ */
+static int topaz_send_aud_header(struct img_enc_context *enc)
+{
+ if (enc->video->aborted)
+ return IMG_ERROR_UNDEFINED;
+
+ /* must use unique writeback word */
+	topaz_insert_mem_command(enc, MTX_CMDID_DO_HEADER, 0,
+				 &enc->video->aud_header_mem);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Transmit the picture headers to MTX
+ */
+static int topaz_send_picture_headers(struct img_enc_context *enc)
+{
+	/* send sequence headers only for IDR (I-frames) and only once at the beginning */
+ struct img_video_context *video = enc->video;
+
+ /* SEI_INSERTION */
+ if (video->insert_hrd_params) {
+		/*
+		 * Access unit delimiter. In the MVC case both views form a
+		 * single access unit, so the delimiter is inserted by view 0
+		 * only.
+		 */
+		if (!video->enable_mvc || video->mvc_view_idx == 0)
+			topaz_send_aud_header(enc);
+ }
+
+ if (video->insert_seq_header && !video->no_sequence_headers) {
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+			IMG_DBG_ASSERT("SPS and PPS will be sent by firmware." != NULL);
+ break;
+ default:
+ IMG_DBG_ASSERT("only H264 encode is supported." == NULL);
+ break;
+ }
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Encode a frame
+ */
+int topaz_encode_frame(void *topaz_str_ctx)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ struct topaz_stream_context *str_ctx;
+	/* If the line counter is enabled, one extra bit is set in the command
+	 * data to tell the firmware to use the line counter feature
+	 */
+ unsigned int encode_cmd_data;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ video->insert_seq_header = (video->encode_sent == 0);
+
+ topaz_send_picture_headers(enc);
+
+ encode_cmd_data = F_ENCODE(1, MTX_MSG_ENCODE_CODED_INTERRUPT);
+
+ if (video->line_counter)
+ /* Set bit 20 to 1 to inform FW that we are using the line counter feature */
+ encode_cmd_data |= F_ENCODE(1, MTX_MSG_ENCODE_USE_LINE_COUNTER);
+
+ topaz_insert_command(enc, (enum mtx_cmd_id)
+ (MTX_CMDID_ENCODE_FRAME | MTX_CMDID_WB_INTERRUPT),
+ encode_cmd_data);
+
+ video->encode_pic_processing++;
+ video->encode_sent++;
+
+ return IMG_SUCCESS;
+}
+
+int topaz_get_pipe_usage(unsigned char pipe, unsigned char *ctx_id)
+{
+ IMG_DBG_ASSERT(pipe < TOPAZHP_MAX_NUM_PIPES);
+
+ if (pipe >= TOPAZHP_MAX_NUM_PIPES)
+ return 0;
+
+ return global_pipe_usage[pipe];
+}
+
+void topaz_set_pipe_usage(unsigned char pipe, unsigned char val)
+{
+ IMG_DBG_ASSERT(pipe < TOPAZHP_MAX_NUM_PIPES);
+
+ if (pipe < TOPAZHP_MAX_NUM_PIPES)
+ global_pipe_usage[pipe] = val;
+}
+
+/*
+ * Set the mtx context to the one implicit in the encoder context
+ */
+static int topaz_video_setup_mtx_context(struct img_enc_context *enc)
+{
+ struct img_video_context *video_context;
+ unsigned char index;
+
+ video_context = enc->video;
+
+ for (index = 0; index < enc->pipes_to_use; index++)
+ topaz_set_pipe_usage(enc->base_pipe + index, enc->ctx_num);
+
+ if (topaz_insert_mem_command_with_sync(enc, (enum mtx_cmd_id)
+ (MTX_CMDID_SETVIDEO | MTX_CMDID_WB_INTERRUPT),
+ enc->base_pipe, &video_context->mtx_enc_ctx_mem)) {
+ pr_err("topaz mtx context setup command failed\n");
+ return IMG_ERROR_UNDEFINED;
+ }
+
+ video_context->aborted = FALSE;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Load the encoder and MTX context
+ */
+int topaz_load_context(void *topaz_str_ctx)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+
+ enc->video->vid_ctx_num = 0;
+
+ enc->ctx_num++;
+
+ return topaz_video_setup_mtx_context(enc);
+}
+
+/*
+ * Store the encoder and MTX context
+ */
+int topaz_store_context(void *topaz_str_ctx)
+{
+ struct img_enc_context *enc;
+ struct topaz_stream_context *str_ctx;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+
+ /* Update Globals */
+ if (enc->codec != IMG_CODEC_NONE && enc->codec != IMG_CODEC_JPEG) {
+ struct img_video_context *video_context;
+
+ video_context = enc->video;
+
+		if (topaz_insert_mem_command_with_sync(enc, (enum mtx_cmd_id)
+						       (MTX_CMDID_GETVIDEO |
+							MTX_CMDID_WB_INTERRUPT),
+						       enc->base_pipe, &video_context->mtx_enc_ctx_mem)) {
+ pr_err("MTX message for GETVIDEO failed\n");
+ return IMG_ERROR_UNDEFINED;
+ }
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Flush video stream
+ */
+int topaz_flush_stream(void *topaz_str_ctx, unsigned int frame_cnt)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+ int index;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+
+ if (enc->video->aborted)
+ return IMG_ERROR_UNDEFINED;
+
+ /* flush the internal queues */
+ /* Check source slots */
+ for (index = 0; index < enc->video->slots_in_use; index++) {
+ if (enc->video->source_slot_buff[index]) {
+ /* Found a valid src_frame, so signal callback for the same. */
+ global_topaz_core_context->vxe_str_processed_cb(str_ctx->vxe_ctx,
+ VXE_CB_SRC_FRAME_RELEASE,
+ (void *)(enc->video->source_slot_buff[index]),
+ 0, 0);
+ enc->video->source_slot_buff[index] = NULL;
+ }
+ }
+
+ /* Check coded package slots */
+ for (index = 0; index < enc->video->coded_package_max_num; index++) {
+ if (enc->video->coded_package[index]->busy) {
+ /* Found a valid coded package, so, signal callback for the same */
+ global_topaz_core_context->vxe_str_processed_cb(str_ctx->vxe_ctx,
+ VXE_CB_CODED_BUFF_READY,
+ (void *)(enc->video->coded_package[index]->coded_buffer[0]),
+ 0, 0);
+ enc->video->coded_package[index]->busy = FALSE;
+ }
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Destroy the Video Encoder context
+ */
+static int topaz_video_destroy_context(struct topaz_stream_context *str_ctx)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ int i;
+ unsigned int max_cores;
+
+ max_cores = topazdd_get_num_pipes(str_ctx->core_ctx->dev_handle);
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ for (i = 0; i < enc->pipes_to_use; i++)
+ if (topaz_get_pipe_usage(enc->base_pipe + i, NULL) == enc->ctx_num)
+ topaz_set_pipe_usage(enc->base_pipe + i, 0);
+
+ if (video->standard == IMG_STANDARD_H264 && video->weighted_prediction) {
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->weighted_prediction_mem[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+ }
+
+ for (i = 0; i < video->coded_package_max_num; i++) {
+ if (topaz_mmu_stream_free
+ (str_ctx->mmu_ctx,
+ &video->coded_package[i]->mtx_info.code_package_fw_buffer->mem_info))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ kfree(video->coded_package[i]->mtx_info.code_package_fw_buffer);
+ video->coded_package[i]->mtx_info.code_package_fw_buffer = NULL;
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->coded_package[i]->header_buffer->mem_info))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ kfree(video->coded_package[i]->header_buffer);
+ video->coded_package[i]->header_buffer = NULL;
+
+ kfree(video->coded_package[i]);
+ video->coded_package[i] = NULL;
+ }
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->flat_gop_struct))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (video->rc_params.hierarchical)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->hierar_gop_struct))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->slice_map[i].mem_info))
+ IMG_DBG_ASSERT("slice map free failed" == NULL);
+ }
+
+ for (i = 0; i < (int)max_cores; i++) {
+ if (str_ctx->vxe_ctx->above_mb_params_sgt[i].sgl) {
+ topaz_mmu_stream_free_sg(str_ctx->mmu_ctx, &video->above_params[i]);
+ } else {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->above_params[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+ }
+
+ for (i = 0; i < video->pic_nodes; i++)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->colocated[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ for (i = 0; i < video->pic_nodes; i++)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->recon_pictures[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ for (i = 0; i < video->mv_stores; i++)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->mv[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (video->enable_mvc) {
+ for (i = 0; i < 2; i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->inter_view_mv[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+ }
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->mtx_enc_ctx_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->mv_settings_btable))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (video->mv_settings_hierarchical.cpu_virt)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->mv_settings_hierarchical))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ /* partially coded headers supplied to HW */
+ /* SEI_INSERTION */
+ if (video->insert_hrd_params) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->aud_header_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->sei_buffering_period_header_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->sei_picture_timing_header_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->seq_header_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ /* FREE subset sequence parameter header */
+ if (video->enable_mvc)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->subset_seq_header_mem))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ for (i = 0; i < ARRAY_SIZE(video->pichdr_template_mem); i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->pichdr_template_mem[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(video->slice_params_template_mem); i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->slice_params_template_mem[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->src_phys_addr))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ /* de-allocate memory corresponding to the output parameters */
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (video->firstpass_out_param_buf[i].mem_info.cpu_virt)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->firstpass_out_param_buf[i].mem_info))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (video->mb_ctrl_in_params_buf[i].mem_info.cpu_virt)
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->mb_ctrl_in_params_buf[i].mem_info))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ /* de-allocate memory corresponding to the selectable best MV parameters */
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (video->firstpass_out_best_multipass_param_buf[i].mem_info.cpu_virt)
+ if (topaz_mmu_stream_free
+ (str_ctx->mmu_ctx,
+ &video->firstpass_out_best_multipass_param_buf[i].mem_info))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ for (i = 0; i < video->slots_in_use; i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->ltref_header[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+
+ if (video->custom_scaling) {
+ for (i = 0; i < 2; i++) {
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx, &video->custom_quant[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->custom_quant_regs4x4_sp[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->custom_quant_regs8x8_sp[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->custom_quant_regs4x4_q[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+
+ if (topaz_mmu_stream_free(str_ctx->mmu_ctx,
+ &video->custom_quant_regs8x8_q[i]))
+ IMG_DBG_ASSERT("Free failed" == NULL);
+ }
+ }
+
+ topazdd_destroy_stream_ctx(video->dd_str_ctx);
+
+ topaz_mmu_stream_destroy(&global_topaz_core_context->dev_handle->topaz_mmu_ctx,
+ str_ctx->mmu_ctx);
+
+ /* free the video encoder structure itself */
+ kfree(video);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Destroy an Encoder Context
+ */
+int topaz_stream_destroy(void *str_context)
+{
+ struct img_enc_context *enc;
+ struct topaz_stream_context *str_ctx;
+ int ret;
+
+ str_ctx = (struct topaz_stream_context *)str_context;
+ if (!str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ enc = str_ctx->enc_ctx;
+
+ ret = topaz_video_destroy_context(str_ctx);
+
+ kfree(enc->debug_settings);
+ enc->debug_settings = NULL;
+
+ kfree(enc);
+ kfree(str_context);
+
+ return ret;
+}
+
+/*
+ * Get the capabilities of the encoder for the given codec
+ */
+int topaz_get_encoder_caps(enum img_standard standard,
+ unsigned short width, unsigned short height,
+ struct img_enc_caps *caps)
+{
+ unsigned int width_in_mbs, height_in_mbs, kick_size, kicks_per_bu, min_slice_height, mbs;
+
+ /* get the actual number of cores */
+ caps->num_cores = topazdd_get_num_pipes(global_topaz_core_context->dev_handle);
+
+ if (caps->num_cores < 3)
+ caps->max_bu_per_frame = TOPAZHP_MAX_BU_SUPPORT_HD;
+ else
+ caps->max_bu_per_frame = TOPAZHP_MAX_BU_SUPPORT_4K;
+
+ caps->core_features = topazdd_get_core_des1();
+ caps->core_revision = topazdd_get_core_rev();
+
+ width_in_mbs = (width + 15) / 16;
+ height_in_mbs = (height + 15) / 16;
+
+ switch (standard) {
+ case IMG_STANDARD_H264:
+ /* Assume progressive video for now as we don't know either way */
+ calculate_kick_and_bu_size(width_in_mbs, height_in_mbs, FALSE,
+ caps->max_bu_per_frame, &kick_size, &kicks_per_bu,
+ &min_slice_height);
+ caps->max_slices = height_in_mbs / min_slice_height;
+
+		/*
+		 * The hardware limit is 32K-2 = 32766 MBs per slice; we
+		 * restrict this further to 16K (16384) MBs per slice, hence
+		 * two slices above 16384 MBs and three above 32768.
+		 */
+ caps->min_slices = 1;
+ mbs = width_in_mbs * height_in_mbs;
+ if (mbs >= 32768)
+ caps->min_slices = 3;
+ else if (mbs >= 16384)
+ caps->min_slices = 2;
+
+		/* use at least two slices if the height is 4000 pixels (250 MBs) or more */
+ if (height_in_mbs >= 250 && caps->min_slices == 1)
+ caps->min_slices = 2;
+
+ caps->recommended_slices = min(caps->num_cores, caps->max_slices);
+ caps->min_slice_height = min_slice_height;
+
+ caps->max_height = 2048;
+ caps->max_width = 2048;
+ caps->min_height = 48;
+ caps->min_width = 144;
+ caps->max_mb_num = (2048 * 2048) >> 8;
+ break;
+ default:
+ IMG_DBG_ASSERT("Only H264 encoder is supported" == NULL);
+ }
+
+ if (caps->recommended_slices < caps->min_slices)
+ caps->recommended_slices = caps->min_slices;
+ if (caps->recommended_slices > caps->max_slices)
+ caps->recommended_slices = caps->max_slices;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Supply a source frame to the encode process
+ */
+int topaz_send_source_frame(void *topaz_str_ctx, struct img_frame *src_frame,
+ unsigned int frame_num, unsigned long long ctx)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_source_buffer_params *buffer_params;
+
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ unsigned char slot_number;
+ void *data;
+ unsigned int y_plane_base = 0;
+ unsigned int u_plane_base = 0;
+ unsigned int v_plane_base = 0;
+ struct vidio_ddbufinfo *cmd_data_mem_info = NULL;
+ unsigned char *slice_map_addr = NULL;
+ unsigned char index;
+ unsigned char round;
+ unsigned char slice_number;
+ unsigned char first_bu_in_slice;
+ unsigned char size_in_bus;
+ unsigned int slice_height;
+ unsigned char halfway_slice;
+ unsigned int halfway_bu;
+ unsigned char slices_per_picture;
+ unsigned int picture_height_remaining;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+	/* if the source frame is NULL, this is just the next portion of slices */
+ if (!src_frame)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ slot_number = video->source_slot_reserved;
+
+ /* mark the appropriate slot as filled */
+ video->source_slot_buff[slot_number] = src_frame;
+ video->source_slot_poc[slot_number] = frame_num;
+
+ topaz_get_cmd_data_buffer(&cmd_data_mem_info);
+
+ if (!cmd_data_mem_info)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ data = cmd_data_mem_info->cpu_virt;
+ buffer_params = (struct img_source_buffer_params *)data;
+
+ /* Prepare data */
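+	/*
+	 * populate_firmware_message() patches the device address of each
+	 * plane buffer into the src_phys_addr scratch area, which is then
+	 * read back through its CPU mapping; a missing U/V plane falls back
+	 * to the previous plane's base address.
+	 */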
+ if (src_frame->y_plane_buffer) {
+ populate_firmware_message(&video->src_phys_addr, 0,
+ &src_frame->y_plane_buffer->mem_info, 0);
+
+ data = video->src_phys_addr.cpu_virt;
+ y_plane_base = *((unsigned int *)data);
+ }
+
+ if (src_frame->u_plane_buffer) {
+ populate_firmware_message(&video->src_phys_addr, 0,
+ &src_frame->u_plane_buffer->mem_info, 0);
+
+ data = video->src_phys_addr.cpu_virt;
+ u_plane_base = *((unsigned int *)data);
+ } else {
+ u_plane_base = y_plane_base;
+ }
+
+ if (src_frame->v_plane_buffer) {
+ populate_firmware_message(&video->src_phys_addr, 0,
+ &src_frame->v_plane_buffer->mem_info, 0);
+
+ data = video->src_phys_addr.cpu_virt;
+ v_plane_base = *((unsigned int *)data);
+ } else {
+ v_plane_base = u_plane_base;
+ }
+
+ buffer_params->slot_num = slot_number;
+ buffer_params->display_order_num = (unsigned char)(frame_num & 0xFF);
+ buffer_params->host_context = ctx;
+
+ buffer_params->phys_addr_y_plane_field_0 = y_plane_base + src_frame->y_component_offset +
+ src_frame->field0_y_offset;
+ buffer_params->phys_addr_u_plane_field_0 = u_plane_base + src_frame->u_component_offset +
+ src_frame->field0_u_offset;
+ buffer_params->phys_addr_v_plane_field_0 = v_plane_base + src_frame->v_component_offset +
+ src_frame->field0_v_offset;
+
+ buffer_params->phys_addr_y_plane_field_1 = y_plane_base + src_frame->y_component_offset +
+ src_frame->field1_y_offset;
+ buffer_params->phys_addr_u_plane_field_1 = u_plane_base + src_frame->u_component_offset +
+ src_frame->field1_u_offset;
+ buffer_params->phys_addr_v_plane_field_1 = v_plane_base + src_frame->v_component_offset +
+ src_frame->field1_v_offset;
+
+ topaz_update_device_mem(str_ctx->vxe_ctx, cmd_data_mem_info);
+
+ topaz_get_buffer(str_ctx, &video->slice_map[slot_number], (void **)&slice_map_addr,
+ FALSE);
+
+ /* Fill standard Slice Map (non arbitrary) */
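+	/*
+	 * Slice map layout, one byte per entry:
+	 *   [num slices][slice 0 index][slice 0 size in BUs]
+	 *               [slice 1 index][slice 1 size in BUs] ...
+	 * The last slice covers the remaining picture height and its BU
+	 * count is rounded up.
+	 */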
+ halfway_bu = 0;
+ first_bu_in_slice = 0;
+ slice_number = 0;
+ slices_per_picture = video->slices_per_picture;
+ picture_height_remaining = video->picture_height;
+ halfway_slice = slices_per_picture / 2;
+ *slice_map_addr = slices_per_picture;
+ slice_map_addr++;
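+	/* mask to round slice heights down to a multiple of the minimum
+	 * slice height (16 * min_slice_height pixels)
+	 */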
+ round = 16 * enc->caps.min_slice_height - 1;
+
+ for (index = 0; index < slices_per_picture - 1; index++) {
+ if (index == halfway_slice)
+ halfway_bu = first_bu_in_slice;
+
+ slice_height = (picture_height_remaining / (video->slices_per_picture - index)) &
+ ~round;
+ picture_height_remaining -= slice_height;
+ size_in_bus = ((slice_height / 16) * (video->width / 16)) /
+ video->rc_params.bu_size;
+
+ /* slice number */
+ *slice_map_addr = slice_number;
+ slice_map_addr++;
+
+ /* SizeInKicks BU */
+ *slice_map_addr = size_in_bus;
+ slice_map_addr++;
+
+ slice_number++;
+
+ first_bu_in_slice += (unsigned int)size_in_bus;
+ }
+
+ slice_height = picture_height_remaining;
+ if (index == halfway_slice)
+ halfway_bu = first_bu_in_slice;
+
+ /* round up for case where the last BU is smaller */
+ size_in_bus = ((slice_height / 16) * (video->width / 16) + video->rc_params.bu_size - 1) /
+ video->rc_params.bu_size;
+
+ /* slice number */
+ *slice_map_addr = slice_number;
+ slice_map_addr++;
+
+ /* last BU */
+ *slice_map_addr = size_in_bus;
+ slice_map_addr++;
+
+ topaz_release_buffer(str_ctx, &video->slice_map[slot_number], TRUE);
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("\n\nAPI - IMG_V_SendSourceFrame - Sending a source slot %i to FW\n\n",
+ slot_number);
+#endif
+
+ /* Send command */
+ topaz_insert_mem_command(enc, MTX_CMDID_PROVIDE_SOURCE_BUFFER, 0, cmd_data_mem_info);
+
+ video->encode_requested++;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Supply a header buffer and an optional number of coded data buffers as part of a package
+ */
+int topaz_send_coded_package(void *topaz_str_ctx, struct img_coded_buffer *coded_buffer)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ struct topaz_stream_context *str_ctx;
+ unsigned char coded_buffer_idx;
+ unsigned int *address = NULL;
+ struct coded_package_dma_info *this_coded_header_node;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+ if (!coded_buffer)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ video->coded_package[video->coded_package_slot_reserved]->coded_buffer[0] = coded_buffer;
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("\n\nEncode Context [%i] sending coded package [%i]\n", enc->ctx_num,
+ video->coded_package_slot_reserved);
+#endif
+
+ /* Get the FW buffer */
+ topaz_get_buffer
+ (str_ctx,
+ video->coded_package[video->coded_package_slot_reserved]->mtx_info.code_package_fw_buffer,
+ (void **)&address, FALSE);
+
+ this_coded_header_node =
+ video->coded_package[video->coded_package_slot_reserved]->mtx_info.coded_package_fw =
+ (struct coded_package_dma_info *)address;
+
+ this_coded_header_node->coded_buffer_info =
+ F_ENCODE
+ (video->coded_package[video->coded_package_slot_reserved]->num_coded_buffers,
+ MTX_MSG_NUM_CODED_BUFFERS_PER_HEADER);
+
+	/* Patch the header buffer device address into the FW package (host to MTX) */
+ populate_firmware_message(&(video->coded_package
+ [video->coded_package_slot_reserved]->mtx_info.code_package_fw_buffer->mem_info),
+ (unsigned char *)&this_coded_header_node->coded_header_addr -
+ (unsigned char *)this_coded_header_node,
+ (struct vidio_ddbufinfo *)
+ (&(video->coded_package[video->coded_package_slot_reserved]->header_buffer->mem_info
+ )), 0);
+
+ /* Normal mode - An array of consecutive memory addresses */
+ for (coded_buffer_idx = 0; coded_buffer_idx <
+ video->coded_package[video->coded_package_slot_reserved]->num_coded_buffers;
+ coded_buffer_idx++) {
+ if (video->coded_package[video->coded_package_slot_reserved]->coded_buffer
+ [coded_buffer_idx]) {
+ /* Write coded buffer memory address into the structure (host to MTX) */
+ populate_firmware_message(&(video->coded_package
+ [video->coded_package_slot_reserved]->mtx_info.code_package_fw_buffer->mem_info),
+ (unsigned char *)&this_coded_header_node->coded_mem_addr[coded_buffer_idx] -
+ (unsigned char *)this_coded_header_node, (struct vidio_ddbufinfo *)
+ (&(video->coded_package
+ [video->coded_package_slot_reserved]->coded_buffer[coded_buffer_idx]->mem_info)),
+ 0);
+ } else {
+ this_coded_header_node->coded_mem_addr[coded_buffer_idx] = 0;
+ break;
+ }
+ }
+
+ /* Release the FW buffer */
+ topaz_release_buffer(str_ctx, video->coded_package
+ [video->coded_package_slot_reserved]->mtx_info.code_package_fw_buffer, TRUE);
+
+ /* Send header buffers to the MTX */
+ topaz_insert_mem_command(enc, (enum mtx_cmd_id)(MTX_CMDID_PROVIDE_CODEDPACKAGE_BUFFER |
+ MTX_CMDID_WB_INTERRUPT),
+ F_ENCODE(video->coded_package[video->coded_package_slot_reserved]->coded_buffer[0]->size >>
+ 10, MTX_MSG_PROVIDE_CODED_BUFFER_SIZE) |
+ F_ENCODE(video->coded_package_slot_reserved, MTX_MSG_PROVIDE_CODEDPACKAGE_BUFFER_SLOT),
+ &(video->coded_package
+ [video->coded_package_slot_reserved]->mtx_info.code_package_fw_buffer->mem_info));
+
+ return IMG_SUCCESS;
+}
+
+unsigned int topaz_get_coded_buffer_max_size(void *topaz_str_ctx, enum img_standard standard,
+ unsigned short width, unsigned short height,
+ struct img_rc_params *rc_params)
+{
+ /* TODO: Determine if we want to make this api str_ctx dependent
+ * struct topaz_stream_context *str_ctx;
+ * if (!topaz_str_ctx)
+ * return IMG_ERROR_INVALID_CONTEXT;
+ */
+ /* Worst-case coded buffer size: All MBs maximum size,
+ * and a coded buffer header for each row
+ */
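+	/*
+	 * For example, a 1920x1080 stream adds (1080 >> 4) = 67 per-row info
+	 * sections on top of the worst-case macroblock payload.
+	 */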
+ return topaz_get_max_coded_data_size(standard, width, height, rc_params->initial_qp_i) +
+ ((height >> 4) * CODED_BUFFER_INFO_SECTION_SIZE);
+}
+
+unsigned int topaz_get_coded_package_max_num(void *topaz_str_ctx, enum img_standard standard,
+ unsigned short width, unsigned short height,
+ struct img_rc_params *rc_params)
+{
+ struct topaz_stream_context *str_ctx;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ return str_ctx->enc_ctx->video->coded_package_max_num;
+}
+
+/*
+ * Get a source slot to fill
+ */
+int topaz_reserve_source_slot(void *topaz_str_ctx, unsigned char *src_slot_num)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ struct topaz_stream_context *str_ctx;
+ signed char index;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ for (index = 0; index < video->slots_in_use; index++) {
+ if (!video->source_slot_buff[index]) {
+			/* Found an empty slot; mark it as reserved */
+ video->source_slot_reserved = index;
+ *src_slot_num = index;
+ return IMG_SUCCESS;
+ }
+ }
+
+ return IMG_ERROR_UNEXPECTED_STATE;
+}
+
+/*
+ * Get a coded slot to fill
+ */
+int topaz_reserve_coded_package_slot(void *topaz_str_ctx)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ struct topaz_stream_context *str_ctx;
+ signed char index;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return IMG_ERROR_UNEXPECTED_STATE;
+
+ for (index = 0; index < video->coded_package_max_num; index++) {
+ if (!video->coded_package[index]->busy) {
+			/* Found an empty slot; mark it as reserved */
+ video->coded_package_slot_reserved = index;
+ video->coded_package[index]->busy = TRUE;
+ return IMG_SUCCESS;
+ }
+ }
+
+ return IMG_ERROR_UNEXPECTED_STATE;
+}
+
+/*
+ * Returns number of empty source slots
+ */
+signed char topaz_query_empty_source_slots(void *topaz_str_ctx)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+
+ unsigned char slot_number;
+ unsigned char empty_source_slots = 0;
+
+ if (!topaz_str_ctx) {
+		pr_err("ERROR: Invalid context handle provided to IMG_V_QueryEmptySourceSlots\n");
+ return IMG_ERROR_INVALID_CONTEXT;
+ }
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
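+	/* -2 distinguishes an aborted stream from a full queue (0 free slots) */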
+ if (video->aborted)
+ return -2;
+
+ for (slot_number = 0; slot_number < video->slots_in_use; slot_number++) {
+ if (!video->source_slot_buff[slot_number])
+ empty_source_slots++;
+ }
+
+ return empty_source_slots;
+}
+
+/*
+ * Returns number of empty coded buffer slots
+ */
+signed char topaz_query_empty_coded_slots(void *topaz_str_ctx)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+
+ unsigned char slot_number;
+ unsigned char empty_coded_slots = 0;
+
+ if (!topaz_str_ctx) {
+		pr_err("ERROR: Invalid context handle provided to IMG_V_QueryEmptyCodedSlots\n");
+ return IMG_ERROR_INVALID_CONTEXT;
+ }
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+ video = enc->video;
+
+ if (video->aborted)
+ return -2;
+
+ for (slot_number = 0; slot_number < video->coded_package_max_num; slot_number++) {
+ if (!video->coded_package[slot_number]->busy)
+ empty_coded_slots++;
+ }
+
+ return empty_coded_slots;
+}
+
+/*
+ * topaz_stream_map_buf_sg
+ */
+int topaz_stream_map_buf_sg(void *topaz_str_ctx, enum venc_buf_type buf_type,
+ struct vidio_ddbufinfo *buf_info, void *sgt)
+{
+ int ret;
+ struct topaz_stream_context *str_ctx;
+
+	/*
+	 * The stream context must not be NULL; proceeding with a NULL
+	 * context would break the code, so fail with
+	 * IMG_ERROR_INVALID_CONTEXT instead.
+	 */
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ IMG_DBG_ASSERT(buf_type < VENC_BUFTYPE_MAX);
+ IMG_DBG_ASSERT(buf_info);
+ IMG_DBG_ASSERT(sgt);
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ /* Map heap from VENC to MMU. Currently only one heap is used for all buffer types */
+ switch (buf_type) {
+ case VENC_BUFTYPE_BITSTREAM:
+ case VENC_BUFTYPE_PICTURE:
+ /* TODO: add logic to cache these buffers into str context list */
+ break;
+
+ default:
+ IMG_DBG_ASSERT(FALSE);
+ }
+
+ /* Map this buffer into the MMU. */
+ ret = topaz_mmu_stream_map_ext_sg(str_ctx->mmu_ctx, MMU_GENERAL_HEAP_ID, sgt,
+ buf_info->buf_size, 64,
+ (enum sys_emem_attrib)0, buf_info->cpu_virt, buf_info,
+ &buf_info->buff_id);
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * core_stream_unmap_buf_sg
+ */
+int topaz_stream_unmap_buf_sg(void *topaz_str_ctx, struct vidio_ddbufinfo *buf_info)
+{
+ int ret;
+ struct topaz_stream_context *str_ctx;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+
+ /* Unmap this buffer from the MMU. */
+ ret = topaz_mmu_stream_free_sg(str_ctx->mmu_ctx, buf_info);
+
+ IMG_DBG_ASSERT(ret == IMG_SUCCESS);
+ if (ret != IMG_SUCCESS)
+ return ret;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * End Of Video stream
+ */
+int topaz_end_of_stream(void *topaz_str_ctx, unsigned int frame_cnt)
+{
+ struct topaz_stream_context *str_ctx;
+ struct img_enc_context *enc;
+
+ if (!topaz_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ str_ctx = (struct topaz_stream_context *)topaz_str_ctx;
+ enc = str_ctx->enc_ctx;
+
+ if (enc->video->aborted)
+ return IMG_ERROR_UNDEFINED;
+
+ enc->video->frame_count = frame_cnt;
+
+ if (frame_cnt - enc->video->flushed_at_frame < enc->video->slots_in_use)
+ enc->video->slots_required = frame_cnt - enc->video->flushed_at_frame;
+
+ /* Send PicMgmt Command */
+ topaz_insert_command(enc, (enum mtx_cmd_id)(MTX_CMDID_PICMGMT | MTX_CMDID_PRIORITY),
+ F_ENCODE(IMG_PICMGMT_EOS, MTX_MSG_PICMGMT_SUBTYPE) |
+ F_ENCODE(frame_cnt, MTX_MSG_PICMGMT_DATA));
+
+ return IMG_SUCCESS;
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_api.h b/drivers/media/platform/vxe-vxd/encoder/topaz_api.h
new file mode 100644
index 000000000000..d952b26e4119
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_api.h
@@ -0,0 +1,1047 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Encoder core interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __TOPAZ_API_H__
+#define __TOPAZ_API_H__
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "fw_headers/topazscfwif.h"
+#include "fw_headers/vxe_common.h"
+#include "vid_buf.h"
+#include "lst.h"
+
+#define MAX_MVC_VIEWS 2
+#define MVC_BASE_VIEW_IDX 0
+#define NON_MVC_VIEW (~0x0)
+
+#define MVC_SPS_ID 1
+#define MVC_PPS_ID 1
+
+#define NUM_SLICE_TYPES 5
+#define MAX_PLANES 3
+
+/*
+ * This type defines the buffer type categories.
+ * @brief Buffer Types
+ */
+enum venc_buf_type {
+ VENC_BUFTYPE_BITSTREAM = 0,
+ VENC_BUFTYPE_PICTURE,
+ VENC_BUFTYPE_ALL,
+ VENC_BUFTYPE_MAX,
+ VENC_BUFTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * VXE callback type definitions
+ */
+enum vxe_cb_type {
+ VXE_CB_CODED_BUFF_READY,
+ VXE_CB_SRC_FRAME_RELEASE,
+ VXE_CB_STR_END,
+ VXE_CB_ERROR_FATAL,
+ VXE_CB_FORCE32BITS = 0x7FFFFFFFU
+};
+
+typedef void (*vxe_cb)(void *ctx, enum vxe_cb_type type, void *buf_ref, unsigned int size,
+ unsigned int coded_frm_cnt);
+
+/*
+ * Enum specifying video encode profile
+ */
+enum img_video_enc_profile {
+ ENC_PROFILE_DEFAULT = 0,
+ ENC_PROFILE_LOWCOMPLEXITY,
+ ENC_PROFILE_HIGHCOMPLEXITY,
+ ENC_PROFILE_REDUCEDMODE,
+ ENC_PROFILE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing smallest blocksize used during motion search
+ */
+enum img_ipe_minblock_size {
+ BLK_SZ_16x16 = 0,
+ BLK_SZ_8x8 = 1,
+ BLK_SZ_4x4 = 2,
+ BLK_SZ_DEFAULT = 3,
+ BLK_SZ_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Struct specifying flags to enable/disable encode features.
+ * All boolean flags are FALSE by default
+ */
+struct img_encode_features {
+ unsigned short disable_intra4x4;
+ unsigned short disable_intra8x8;
+ unsigned short disable_intra16x16;
+ unsigned short disable_inter8x8;
+ unsigned short restrict_inter4x4;
+ unsigned short disable_bpic_ref1;
+ unsigned short disable_bpic_ref0;
+ unsigned short enable_8x16_mv_detect;
+ unsigned short enable_16x8_mv_detect;
+ unsigned short disable_bframes;
+ enum img_ipe_minblock_size min_blk_sz;
+ unsigned short restricted_intra_pred;
+};
+
+/*
+ *
+ * Struct describing Macro-block params generated by first stage.
+ * Refer T.R.M. for details
+ */
+struct img_first_stage_mb_params {
+ unsigned short ipe0_sad;
+ unsigned short ipe1_sad;
+ unsigned char ipe0_blks;
+ unsigned char ipe1_blks;
+ unsigned char carc_cmplx_val;
+ unsigned char reserved;
+};
+
+/*
+ * Size of Inter/Intra & Coded/Skipped tables
+ */
+#define TOPAZHP_SCALE_TBL_SZ (8)
+#define DEFAULT_CABAC_DB_MARGIN (0x190)
+
+/*
+ * Struct describing params for video encoding
+ *@enable_sel_stats_flags: Flags to enable selective first-pass statistics gathering by the
+ *hardware. Bit 1 - First Stage Motion Search Data, Bit 2 - Best
+ * Multipass MB Decision Data, Bit 3 - Best Multipass Motion Vectors.
+ * (First stage Table 2 motion vectors are always switched on)
+ *@enable_inp_ctrl: Enable Macro-block input control
+ *@enable_air: Enable Adaptive Intra Refresh
+ *@num_air_mbs: n = Max number of AIR MBs per frame, 0 = _ALL_ MBs over threshold will be marked
+ * as AIR Intras, -1 = Auto 10%
+ *@air_threshold: n = SAD Threshold above which a MB is a AIR MB candidate, -1 = Auto adjusting
+ * threshold
+ *@air_skip_cnt: n = Number of MBs to skip in AIR Table between frames, -1 = Random
+ * (0 - NumAIRMbs) skip between frames in AIR table
+ *@disable_bit_stuffing: Disabling bitstuffing to maintain bitrate
+ *@mpeg2_intra_dc_precision: Only used in MPEG2, 2 bit field (0 = 8 bit, 1 = 9 bit, 2 = 10 bit
+ * and 3 = 11 bit precision). Set to zero for other encode standards.
+ *@enable_mvc: True if MVC is enabled. False by default
+ *@mvc_view_idx: MVC view index
+ *@disable_bh_rounding: True if we wish to disable the buffer height rounding to 16 pixels
+ * (enables contiguous YU memory for non-aligned image heights)
+ *@auto_expand_pipes: Automatically expand a context pipe allocations when new pipes become
+ * available
+ *@line_counter_enabled: Drives the activation of low-latency encoding
+ */
+struct img_video_params {
+ enum img_standard standard;
+ enum img_format format;
+ enum img_csc_preset csc_preset;
+ unsigned char slices_per_picture;
+ unsigned short width;
+ unsigned short frame_height;
+ unsigned char is_interlaced;
+ unsigned char is_interleaved;
+ unsigned char constrained_intra;
+ unsigned char h264_8x8;
+ unsigned char bottom_field_first;
+ unsigned char arbitrary_so;
+ unsigned char cabac_enabled;
+ unsigned int cabac_bin_limit;
+ unsigned int cabac_bin_flex;
+ unsigned char deblock_idc;
+ unsigned char output_reconstructed;
+ unsigned int f_code;
+ int fine_y_search_size;
+ unsigned char no_offscreen_mv;
+ unsigned int idr_period;
+ unsigned int intra_cnt;
+ unsigned int vop_time_resolution;
+ struct img_encode_features enc_features;
+ unsigned char enable_sel_stats_flags;
+ unsigned char enable_inp_ctrl;
+ unsigned char enable_air;
+ int num_air_mbs;
+ int air_threshold;
+ short air_skip_cnt;
+ unsigned char enable_cumulative_biases;
+ unsigned char enable_host_bias;
+ unsigned char enable_host_qp;
+ unsigned char use_default_scaling_list;
+ short use_custom_scaling_lists;
+ unsigned short pps_scaling;
+ unsigned int vert_mv_limit;
+ unsigned int intra_pred_modes;
+ unsigned short limit_num_vectors;
+ unsigned short disable_bit_stuffing;
+ unsigned char coded_skipped_index;
+ unsigned char inter_intra_index;
+ unsigned char mpeg2_intra_dc_precision;
+
+ /* Contents Adaptive Rate Control parameters */
+ unsigned short carc;
+ int carc_baseline;
+ unsigned int carc_threshold;
+ unsigned int carc_cutoff;
+ unsigned int carc_neg_range;
+ unsigned int carc_neg_scale;
+ unsigned int carc_pos_range;
+ unsigned int carc_pos_scale;
+ unsigned int carc_shift;
+
+ /* Weighted prediction */
+ unsigned char weighted_prediction;
+ unsigned char vp_weighted_implicit_bi_pred;
+
+ /* SEI insertion */
+ unsigned char insert_hrd_params;
+
+ unsigned short intra_refresh;
+
+ unsigned int chunks_per_mb;
+ unsigned int max_chunks;
+ unsigned int priority_chunks;
+ unsigned int mbps;
+
+ unsigned char multi_reference_p;
+ unsigned char ref_spacing;
+ unsigned char spatial_direct;
+
+ unsigned short vp_adaptive_rounding_disable;
+ unsigned short vp_adaptive_rounding_offsets[18][4];
+
+ unsigned int debug_crcs;
+
+ unsigned char enable_mvc;
+ unsigned short mvc_view_idx;
+ unsigned char high_latency;
+
+ unsigned short buffer_stride_bytes;
+ unsigned short buffer_height;
+ unsigned char disable_bh_rounding;
+ unsigned short source_width;
+ unsigned short source_frame_height;
+
+ unsigned char no_sequence_headers;
+ unsigned char auto_encode;
+ unsigned char slice_level;
+ unsigned char coded_header_per_slice;
+
+ unsigned char auto_expand_pipes;
+ unsigned char enable_lossless;
+ unsigned char lossless_8x8_prefilter;
+
+ unsigned char enable_scaler;
+ unsigned short crop_left;
+ unsigned short crop_right;
+ unsigned short crop_top;
+ unsigned short crop_bottom;
+
+#if SECURE_IO_PORTS
+ unsigned char secure_ctx_input;
+ unsigned char secure_ctx_output;
+#endif
+ /* Low latency encoding related */
+ unsigned char line_counter_enabled;
+};
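+
+/*
+ * Illustrative sketch (not from the original source): a caller would
+ * typically zero this structure and fill the mandatory stream-level fields
+ * before creating a stream, e.g.
+ *
+ *	struct img_video_params vp = { 0 };
+ *
+ *	vp.standard = IMG_STANDARD_H264;   (enum img_standard value, name assumed)
+ *	vp.format = IMG_CODEC_420_PL12;    (NV12 source)
+ *	vp.width = 1920;
+ *	vp.frame_height = 1080;
+ *	vp.slices_per_picture = 1;
+ */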
+
+/*
+ * Struct describing JPEG component info
+ */
+struct component_info {
+ unsigned int width; /* Width of the image component */
+ unsigned int stride; /* Stride of the image component */
+ unsigned int step; /* Step of the image component */
+ unsigned int height; /* Height of the image component */
+};
+
+/*
+ * Struct describing JPEG info
+ */
+struct img_unaligned_source_info {
+ unsigned int output_width; /* Width of the JPEG image */
+ unsigned int output_height; /* Height of the JPEG image */
+ unsigned int components; /* Number of components in the image (1 or 3) */
+ struct component_info comp_info[3]; /* Array containing component info */
+};
+
+/*
+ * Struct containing details of a reconstructed picture
+ */
+struct img_recon_done {
+ unsigned int poc; /* PicOrderCount */
+ void *buffer; /* Buffer containing reconstructed image */
+};
+
+/*
+ * Struct containing a feedback for one frame
+ *@is_coded: Was the frame coded or skipped completely?
+ *@is_skipped: Was the frame coded as a set of skipped MBs?
+ *@entire_frame: Was the frame encoded entirely or are there still slices to come?
+ *@frame_type: Frame type (IDR, I, P, B)
+ *@source_slot: Number of the source slot containing the source buffer
+ *@recon_idx: Number of the slot containing the reconstructed picture
+ *@*src_frame: Source buffer
+ *@*motion_search_statistics_buf: Buffer to contain Table 1 (8 byte Motion Search Data) of the
+ * selectable stats
+ *@*best_multipass_statistics_buf: Buffer to contain (optionally) Table 4 (64 bytes
+ * Multipass Motion Vectors) and table 3 (16 byte Multipass Parameters)
+ *@coded_package: Pointer to the coded package, containing the returned
+ * header and coded buffer information
+ *@recon_list: List of reconstructed pictures
+ *@bytes_coded: Size of the encoded slice in bytes
+ *@first_bu: Number of the first BU in the slice
+ *@storage_frame_num: Last 2 bits of the Frame number in Storage Order
+ *@slice_num: Number of the slice in a Slice Map supplied
+ *@slices_per_picture: Number of the slices in this picture
+ *@field: The field this slice belongs to
+ *@coded_slot_num: Slot number of the coded buffer
+ *@poc: PicOrderCount of the coded slice
+ *@patched_recon: Was the reconstructed picture written to a patched buffer?
+ *@slices_in_buffer: Number of slices contained in the coded buffer
+ *@last_frame_encoded: True if the last frame has been encoded
+ *@coded_buffer_count: Number of coded buffer used
+ *@host_ctx: Host context value
+ */
+struct img_feedback_element {
+ unsigned char is_coded;
+ unsigned char is_skipped;
+ unsigned char entire_frame;
+ enum img_frame_type frame_type;
+ unsigned char source_slot;
+ unsigned char recon_idx;
+ struct img_frame *src_frame;
+ struct img_buffer *motion_search_statistics_buf;
+ struct img_buffer *best_multipass_statistics_buf;
+
+ struct coded_package_host *coded_package;
+
+ struct list_item *recon_list;
+ unsigned int bytes_coded;
+ unsigned int first_bu;
+ unsigned char storage_frame_num;
+ unsigned char slice_num;
+ unsigned char slices_per_picture;
+ unsigned char field;
+ unsigned char coded_slot_num;
+
+ unsigned char active_coded_package_idx;
+
+ unsigned int poc;
+ unsigned char patched_recon;
+ unsigned char slices_in_buffer;
+ unsigned char last_frame_encoded;
+ unsigned char coded_buffer_count;
+
+ unsigned long long host_ctx;
+};
+
+/*
+ * Bit fields for img_enc_caps.core_features
+ */
+#define SCALER_SUPPORTED_MASK 0x00000080
+#define GENERATE_PERFORMANCE_STORE_MASK 0x00000100
+#define H264_LOSSLESS_SUPPORTED_MASK 0x00000200
+#define H264_CUSTOM_QUANT_SUPPORTED_MASK 0x00000400
+#define MPEG2_SUPPORTED_MASK 0x00000800
+#define SIGNATURES_SUPPORTED_SUBSET_MASK 0x00001000
+#define SIGNATURES_SUPPORTED_ALL_MASK 0x00002000
+#define H264_WEIGHTED_PRED_ME_SUPPORTED_MASK 0x00004000
+#define H264_WEIGHTED_PRED_SUPPORTED_MASK 0x00008000
+#define H264_2_REF_ON_P_PIC_SUPPORTED_MASK 0x00010000
+#define H264_SPATIAL_DIRECT_SUPPORTED_MASK 0x00020000
+#define H264_MULTIPASS_SUPPORTED_MASK 0x00040000
+#define H264_DEFAULT_TABLES_SUPPORTED_MASK 0x00080000
+#define H264_8X8_TRANSFORM_SUPPORTED_MASK 0x00100000
+#define H264_INTERLACED_SUPPORTED_MASK 0x00200000
+#define H264_B_PIC_SUPPORTED_MASK 0x00400000
+#define H264_16X8_8X16_SUPPORTED_MASK 0x00800000
+#define H264_CABAC_SUPPORTED_MASK 0x01000000
+#define SLAVE_JPEG_SUPPORTED_MASK 0x02000000
+#define JPEG_SUPPORTED_MASK 0x04000000
+#define H263_SUPPORTED_MASK 0x08000000
+#define MPEG4_SUPPORTED_MASK 0x10000000
+#define H264_SUPPORTED_MASK 0x20000000
+#define DMAC_SUPPORTED_MASK 0x40000000
+#define MMU_SUPPORTED_MASK 0x80000000
+
+/*
+ * Struct describing the capabilities of the encoder
+ */
+struct img_enc_caps {
+ unsigned short min_slices; /* Minimum slices to use */
+ unsigned short max_slices; /* Maximum slices to use */
+ unsigned short recommended_slices; /* Recommended number of slices */
+ unsigned short num_cores; /* Number of cores that will be used */
+ unsigned int core_features; /* Core features flags */
+ unsigned int core_revision; /* Core revision */
+ unsigned int max_height; /* Maximum height supported */
+ unsigned int max_width; /* Maximum width supported */
+ unsigned int min_height; /* Minimum height supported */
+ unsigned int min_width; /* Minimum width supported */
+ unsigned int max_mb_num; /* Maximum number of macro blocks */
+ unsigned int min_slice_height; /* Minimum number of rows that a slice can span
+ * (not including automatic slice breaks)
+ */
+ unsigned int max_bu_per_frame; /* Maximum number of basic units per frame */
+};
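+
+/*
+ * Illustrative use (assumed caller code, not part of this header): the
+ * feature bits above are tested against img_enc_caps.core_features, e.g.
+ *
+ *	if (caps.core_features & H264_CABAC_SUPPORTED_MASK)
+ *		video_params.cabac_enabled = 1;
+ */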
+
+/*
+ * Struct describing driver results message
+ */
+struct driver_tohost_msg {
+ enum mtx_message_id cmd_id;
+ enum mtx_cmd_id input_cmd_id;
+ unsigned int data;
+ struct vidio_ddbufinfo *command_data_buf;
+ struct img_feedback_element feedback;
+};
+
+/*
+ * Enum describing picture coding type
+ */
+enum h264_picture_coding_type {
+ I_FRAME = 0, /* An intra frame */
+ P_FRAME = 1, /* An inter frame */
+ B_FRAME = 2, /* A B frame */
+ FRAME_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Struct describing H264 VUI parameters
+ */
+struct h264_vui_params {
+ unsigned int time_scale; /* Time scale as defined in the H.264 specification */
+ unsigned int bit_rate_value_minus1; /* (bitrate / 64) - 1 */
+ unsigned int cbp_size_value_minus1; /* (bitrate * 1.5) / 16 */
+ unsigned char aspect_ratio_info_present_flag; /* aspect_ratio_info_present_flag as
+ * defined in the H.264 specification
+ */
+ unsigned char aspect_ratio_idc; /* as defined in the H.264 specification */
+ unsigned short sar_width;
+ unsigned short sar_height;
+ unsigned char cbr; /* CBR as defined in the H.264 specification */
+ /* as defined in the H.264 specification */
+ unsigned char initial_cpb_removal_delay_length_minus1;
+ unsigned char cpb_removal_delay_length_minus1; /* as defined in the H.264 specification */
+ unsigned char dpb_output_delay_length_minus1; /* as defined in the H.264 specification */
+ unsigned char time_offset_length; /* as defined in the H.264 specification */
+ unsigned char num_reorder_frames;
+ unsigned char max_dec_frame_buffering;
+};
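+
+/*
+ * Worked example (illustrative, assuming the (bitrate / 64) - 1 scaling
+ * noted above): for a 4 Mbit/s CBR stream,
+ * bit_rate_value_minus1 = (4000000 / 64) - 1 = 62499.
+ */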
+
+/*
+ * Struct describing H264 crop parameters
+ */
+struct h264_crop_params {
+ /* Flag indicating if cropping parameters are present */
+ unsigned char clip;
+ /* Number of pixels to crop from the left side /2 */
+ unsigned short left_crop_offset;
+ /* Number of pixels to crop from the right side /2 */
+ unsigned short right_crop_offset;
+ unsigned short top_crop_offset; /* Number of pixels to crop from the top /2 */
+ unsigned short bottom_crop_offset; /* Number of pixels to crop from the bottom /2 */
+};
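+
+/*
+ * Illustrative example: encoding 1920x1080 inside a 1920x1088 coded frame
+ * crops 8 luma rows from the bottom, i.e. clip = 1 and
+ * bottom_crop_offset = 8 / 2 = 4 (offsets are stored in 2-pixel units).
+ */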
+
+/*
+ * Enum describing Profile (H264)
+ */
+enum sh_profile_type {
+ SH_PROFILE_BP = 0, /* H.264 Baseline Profile */
+ SH_PROFILE_MP = 1, /* H.264 Main Profile */
+ SH_PROFILE_HP = 2, /* H.264 High Profile */
+ SH_PROFILE_H10P = 3, /* H.264 High 10P Profile */
+ SH_PROFILE_H422P = 4, /* H.264 High 4:2:2 Profile */
+ SH_PROFILE_H444P = 5, /* H.264 High 4:4:4 Profile */
+ SH_PROFILE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Enum describing level (H264)
+ */
+enum sh_level_type {
+ SH_LEVEL_1 = 10, /* H264 Level as specified in the specification */
+ SH_LEVEL_1B = 1, /* Special-case H264 Level */
+ SH_LEVEL_11 = 11, /* H264 Level as specified in the specification */
+ SH_LEVEL_12 = 12, /* H264 Level as specified in the specification */
+ SH_LEVEL_13 = 13, /* H264 Level as specified in the specification */
+ SH_LEVEL_2 = 20, /* H264 Level as specified in the specification */
+ SH_LEVEL_21 = 21, /* H264 Level as specified in the specification */
+ SH_LEVEL_22 = 22, /* H264 Level as specified in the specification */
+ SH_LEVEL_3 = 30, /* H264 Level as specified in the specification */
+ SH_LEVEL_31 = 31, /* H264 Level as specified in the specification */
+ SH_LEVEL_32 = 32, /* H264 Level as specified in the specification */
+ SH_LEVEL_4 = 40, /* H264 Level as specified in the specification */
+ SH_LEVEL_41 = 41, /* H264 Level as specified in the specification */
+ SH_LEVEL_42 = 42, /* H264 Level as specified in the specification */
+ SH_LEVEL_5 = 50, /* H264 Level as specified in the specification */
+ SH_LEVEL_51 = 51, /* H264 Level as specified in the specification */
+ SH_LEVEL_52 = 52, /* H264 Level as specified in the specification */
+ SH_LEVEL_FORCE32BITS = 0x7FFFFFFFU
+};
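+
+/*
+ * Note: level values follow the H.264 convention of level * 10
+ * (e.g. SH_LEVEL_31 = 31 encodes Level 3.1). Level 1b has no level * 10
+ * integer encoding, hence the special value SH_LEVEL_1B = 1.
+ */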
+
+/*
+ * Struct describing SPS (sequence) parameters (H264)
+ */
+struct h264_sequence_header_params {
+ enum sh_profile_type profile;
+ enum sh_level_type level;
+ unsigned char width_in_mbs_minus1;
+ unsigned char height_in_maps_units_minus1;
+ unsigned char log2_max_pic_order_cnt;
+ unsigned char max_num_ref_frames;
+ unsigned char gaps_in_frame_num_value;
+ unsigned char frame_mbs_only_flag;
+ unsigned char vui_params_present;
+ unsigned char seq_scaling_matrix_present_flag;
+
+ unsigned char use_default_scaling_list;
+ unsigned char is_lossless;
+ struct h264_vui_params vui_params;
+};
+
+/*
+ * Struct describing Bias parameters
+ */
+struct img_henc_debug_settings {
+ unsigned int serialized_communication_mode;
+};
+
+#define VXE_SERIALIZED_MODE_OFF (0)
+#define VXE_SERIALIZED_MODE_SYNCED (1)
+#define VXE_SERIALIZED_MODE_SERIAL (2)
+
+/*
+ * Struct describing input parameters to encode a video picture
+ */
+struct pic_params {
+ unsigned int flags; /* Picture parameter flags */
+ struct in_rc_params in_params; /* Rate control parameters */
+};
+
+/*
+ * Video encode context
+ *@void *dd_str_ctx: Pointer to device instance data
+ *@standard: Video standard
+ *@frame_height: target output height
+ *@picture_height: target output picture height (field height when interlaced)
+ *@buffer_stride_bytes: input buffer stride
+ *@buffer_height: input buffer height
+ *@format: Pixel format of the source surface
+ *@csc_preset: Colour space conversion to be performed on the source surface
+ *@pic_params: Picture level parameters (supplied by driver)
+ *@above_params[TOPAZHP_MAX_NUM_PIPES]: Above-row parameter buffers, one per pipe
+ *@recon_pictures[MAX_PIC_NODES]: Reference pictures (2 input and 1 output)
+ *@colocated[MAX_PIC_NODES]: Colocated vector stores (2 input and 1 output)
+ *@mv[MAX_MV]: Vector stores
+ *@inter_view_mv[2]: Inter-view vector stores
+ *@seq_header_mem: Partially coded Sequence header
+ *@subset_seq_header_mem: Partially coded Subset sequence header for H264 mvc
+ *@flat_gop_struct: Flat MiniGop structure
+ *@hierar_gop_struct: Hierarchical MiniGop structure
+ *@custom_quant[2]: Custom quantization values
+ *@custom_quant_regs4x4_sp[2]: Custom quantization register values for 4x4 Sp
+ *@vidio_ddbufinfo custom_quant_regs8x8_sp[2]: Custom quantization register values for 8x8 Sp
+ *@vidio_ddbufinfo custom_quant_regs4x4_q[2]: Custom quantization register values for 4x4 Q
+ *@vidio_ddbufinfo custom_quant_regs8x8_q[2]: Custom quantization register values for 8x8 Q
+ *@slice_map[MAX_SOURCE_SLOTS_SL]: Slice map of the source picture
+ *@firstpass_out_param_buf[MAX_SOURCE_SLOTS_SL]: Output Parameters of the First Pass
+ *@firstpass_out_best_multipass_param_buf[MAX_SOURCE_SLOTS_SL]: Output Selectable Best MV
+ * Parameters of the First Pass
+ *@mb_ctrl_in_params_buf[MAX_SOURCE_SLOTS_SL]: Input Parameters to the second pass
+ *@ipe_control: Common bits of the IPE control register for the entire picture
+ *@pred_comb_control: Common bits of the Predictor-combiner control register for the entire picture
+ *@cabac_enabled: FLAG to enable Cabac mode
+ *@cabac_bin_limit: Min Bin Limit after which the Topaz hardware would encode MB as IPCM
+ *@cabac_bin_flex: Max Flex-Limit, the Topaz-HW will encode MB as IPCM after (BinLimit+BinFlex)
+ *@vidio_ddbufinfo mv_settings_btable: Table of B-frame MV settings (MVSettingsB0 ... MVSettingsBn)
+ *@img_frame *source_slot_buff[MAX_SOURCE_SLOTS_SL]: Source slots
+ *@unsigned int source_slot_poc[MAX_SOURCE_SLOTS_SL]: POCs of frames in slots
+ *@unsigned char slots_in_use: Number of source slots
+ *@unsigned char slots_required: Number of source slots to be consumed
+ *@coded_package_host *coded_package[MAX_CODED_PACKAGES]: Collection of coded/header information
+ *@unsigned char encoder_idle: Indicates that the encoder is waiting for data;
+ * set to true at the start of encode
+ *@unsigned char enable_sel_stats_flags: Flags to enable selective first-pass
+ * statistics gathering by the
+ * hardware. Bit 1 - First Stage Motion Search Data, Bit 2
+ * - Best Multipass MB Decision Data, Bit 3 - Best Multipass
+ * Motion Vectors. (First stage Table 2 motion vectors are
+ * always switched on)
+ *@enable_inp_ctrl: Enable macro-block input control
+ *@enable_air: Enable Adaptive Intra Refresh
+ *@num_air_mbs: n = Max number of AIR MBs per frame, 0 = _ALL_ MBs over threshold will be marked
+ * as AIR Intras, -1 = Auto 10%
+ *@air_threshold: n = SAD threshold above which an MB is an AIR MB candidate,
+ * -1 = Auto adjusting threshold
+ *@air_skip_cnt: n = Number of MBs to skip in AIR Table between frames,
+ * -1 = Random (0 - NumAIRMbs) skip between frames in AIR table
+ *@enable_mvc: True if MVC is enabled. False by default
+ *@mvc_view_idx: View Idx of this MVC view
+ *@line_counter: Keep track of line counter activation
+ */
+struct img_video_context {
+ /* topaz dd str context handle */
+ void *dd_str_ctx;
+ unsigned int dd_ctx_num;
+
+ /* stream level params */
+ enum img_standard standard;
+ unsigned short width;
+ unsigned short frame_height;
+ unsigned short picture_height;
+ unsigned short buffer_stride_bytes;
+ unsigned short buffer_height;
+ unsigned char frame_rate;
+
+ unsigned short unrounded_width;
+ unsigned short unrounded_frame_height;
+
+ unsigned int debug_crcs;
+ enum img_format format;
+ enum img_csc_preset csc_preset;
+
+ /* Numbers of array elements that will be allocated */
+ int pic_nodes;
+ int mv_stores;
+
+ /* per core params */
+ struct pic_params pic_params;
+ struct vidio_ddbufinfo above_params[TOPAZHP_MAX_NUM_PIPES];
+ struct vidio_ddbufinfo recon_pictures[MAX_PIC_NODES];
+ struct vidio_ddbufinfo colocated[MAX_PIC_NODES];
+ struct vidio_ddbufinfo mv[MAX_MV];
+ struct vidio_ddbufinfo inter_view_mv[2];
+
+ /* partially coded headers supplied to HW */
+ /* SEI_INSERTION */
+ struct vidio_ddbufinfo aud_header_mem;
+ struct vidio_ddbufinfo sei_buffering_period_header_mem;
+ struct vidio_ddbufinfo sei_picture_timing_header_mem;
+
+ struct vidio_ddbufinfo seq_header_mem;
+ struct vidio_ddbufinfo subset_seq_header_mem;
+ struct vidio_ddbufinfo pichdr_template_mem[4];
+ struct vidio_ddbufinfo slice_params_template_mem[NUM_SLICE_TYPES];
+
+ unsigned int f_code;
+ struct vidio_ddbufinfo src_phys_addr;
+
+ /* WEIGHTED PREDICTION */
+ struct vidio_ddbufinfo weighted_prediction_mem[MAX_SOURCE_SLOTS_SL];
+ unsigned char weighted_prediction;
+ unsigned char weighted_bi_pred;
+
+ struct vidio_ddbufinfo flat_gop_struct;
+ struct vidio_ddbufinfo hierar_gop_struct;
+
+ struct vidio_ddbufinfo ltref_header[MAX_SOURCE_SLOTS_SL];
+
+ struct vidio_ddbufinfo custom_quant[2];
+ struct vidio_ddbufinfo custom_quant_regs4x4_sp[2];
+ struct vidio_ddbufinfo custom_quant_regs8x8_sp[2];
+ struct vidio_ddbufinfo custom_quant_regs4x4_q[2];
+ struct vidio_ddbufinfo custom_quant_regs8x8_q[2];
+ unsigned char custom_quant_slot;
+
+ struct img_buffer slice_map[MAX_SOURCE_SLOTS_SL];
+
+ struct img_buffer firstpass_out_param_buf[MAX_SOURCE_SLOTS_SL];
+
+ struct img_buffer firstpass_out_best_multipass_param_buf[MAX_SOURCE_SLOTS_SL];
+ struct img_buffer mb_ctrl_in_params_buf[MAX_SOURCE_SLOTS_SL];
+
+ /* these values set at picture level & written in at slice */
+ unsigned int ipe_control;
+ unsigned int pred_comb_control;
+ unsigned char cabac_enabled;
+ unsigned int cabac_bin_limit;
+ unsigned int cabac_bin_flex;
+
+ unsigned int first_pic_flags;
+ unsigned int non_first_pic_flags;
+
+ unsigned char is_interlaced;
+ unsigned char is_interleaved;
+ unsigned char top_field_first;
+ unsigned char arbitrary_so;
+ unsigned char slices_per_picture;
+ unsigned char deblock_idc;
+ unsigned int kick_size;
+ unsigned int kicks_per_bu;
+ unsigned int vop_time_resolution;
+ unsigned int idr_period;
+ unsigned int intra_cnt;
+ unsigned char multi_reference_p;
+ unsigned char spatial_direct;
+
+ struct img_mv_settings mv_settings_idr;
+ struct img_mv_settings mv_settings_non_b[MAX_BFRAMES + 1];
+
+ /* | MVSettingsB0 | MVSettingsB1 | ... | MVSettingsBn | */
+
+ struct vidio_ddbufinfo mv_settings_btable;
+ struct vidio_ddbufinfo mv_settings_hierarchical;
+
+ /* Source slots */
+ struct img_frame *source_slot_buff[MAX_SOURCE_SLOTS_SL];
+ unsigned int source_slot_poc[MAX_SOURCE_SLOTS_SL];
+ unsigned char slots_in_use;
+ unsigned char slots_required;
+
+ /* Coded slots */
+ struct coded_package_host *coded_package[MAX_CODED_PACKAGES];
+ unsigned int coded_buffer_max_size;
+ unsigned char coded_package_max_num;
+
+ unsigned int frame_count;
+ unsigned int flush_at_frame;
+ unsigned int flushed_at_frame;
+ unsigned int encode_sent;
+ unsigned int encode_requested;
+ unsigned int frames_encoded;
+ unsigned char encoder_idle;
+ unsigned char aborted;
+
+ struct list_item *ref_frame;
+ unsigned int recon_poc;
+ unsigned int next_recon;
+
+ struct vidio_ddbufinfo *recon_buffer;
+ struct vidio_ddbufinfo *patched_recon_buffer;
+
+ struct img_rc_params rc_params;
+ enum img_frame_type frame_type;
+
+ unsigned int buffers_status_reg;
+
+ unsigned char insert_seq_header;
+ unsigned char output_reconstructed;
+
+ unsigned int encode_pic_processing;
+ unsigned char extra_wb_retrieved;
+
+ unsigned char enable_sel_stats_flags;
+
+ unsigned char enable_inp_ctrl;
+ unsigned char enable_air;
+ int num_air_mbs;
+ int air_threshold;
+ short air_skip_cnt;
+
+ unsigned char enable_host_bias;
+ unsigned char enable_host_qp;
+
+ unsigned char custom_scaling;
+ unsigned char pps_scaling;
+ unsigned char h264_8x8_transform;
+ unsigned char h264_intra_constrained;
+ unsigned int vert_mv_limit;
+ unsigned int intra_pred_modes;
+ unsigned char limit_num_vectors;
+ unsigned char disable_bit_stuffing;
+ unsigned char coded_skipped_index;
+ unsigned char inter_intra_index;
+ struct vidio_ddbufinfo mtx_enc_ctx_mem;
+ /* SEI_INSERTION */
+ unsigned char insert_hrd_params;
+ unsigned int chunks_per_mb;
+ unsigned int max_chunks;
+ unsigned int priority_chunks;
+
+ unsigned char source_slot_reserved;
+ unsigned char coded_package_slot_reserved;
+ void *encode_pic_signal;
+
+ unsigned char highest_storage_number;
+ unsigned char vid_ctx_num;
+ unsigned char enable_mvc;
+ unsigned short mvc_view_idx;
+ unsigned char high_latency;
+ unsigned int mbps;
+
+ unsigned char no_sequence_headers;
+ unsigned int next_slice;
+ unsigned char auto_encode;
+ unsigned char slice_level;
+ unsigned char coded_header_per_slice;
+
+ /* Scaler specific values */
+ unsigned char enable_scaler;
+ unsigned short crop_left;
+ unsigned short crop_right;
+ unsigned short crop_top;
+ unsigned short crop_bottom;
+ unsigned short source_width;
+ unsigned short source_frame_height;
+
+#if SECURE_IO_PORTS
+ unsigned char secure_ctx_input;
+ unsigned char secure_ctx_output;
+#endif
+ unsigned char line_counter;
+};
+
+/*
+ * Encoder context
+ *@codec: encode codec
+ *@video: Video encode context
+ *@base_pipe: The first of a contiguous set of pipes to use for the context encode
+ *@pipes_to_use: The number of contiguous pipes (starting with base_pipe) to
+ * use for the context encode
+ *@requested_base_pipe: The first of a contiguous set of pipes requested for the context encode
+ *@requested_pipes_to_use: The number of contiguous pipes (starting with requested_base_pipe)
+ * to use for the context encode
+ *@auto_expand_pipes: Automatically expand a context's pipe allocation
+ * when new pipes become available
+ *@sync_first_pass: true if never synced
+ */
+struct img_enc_context {
+ enum img_codec codec;
+ struct img_video_context *video;
+
+ unsigned char base_pipe;
+ unsigned char pipes_to_use;
+
+ unsigned char requested_base_pipe;
+ unsigned char requested_pipes_to_use;
+ unsigned char auto_expand_pipes;
+
+ unsigned char ctx_num;
+
+ unsigned char sync_first_pass;
+ unsigned int core_rev;
+
+ struct img_enc_caps caps;
+ struct img_henc_debug_settings *debug_settings;
+};
+
+/*
+ * Struct containing details of a reconstructed picture
+ */
+struct img_recon_node {
+ unsigned int poc; /* PicOrderCount */
+ void *buffer; /* Buffer containing reconstructed image */
+};
+
+/*
+ * This structure contains the topaz Context.
+ * @brief topaz Context
+ */
+struct topaz_core_context {
+ /* List of stream context structures */
+ struct topaz_dev_ctx *dev_handle;
+ struct lst_t topaz_stream_list;
+ vxe_cb vxe_str_processed_cb;
+ unsigned int num_pipes;
+ struct mutex *mutex;
+};
+
+struct topaz_stream_context {
+ void **link; /* List link (allows the structure to be part of a MeOS list).*/
+ struct topaz_core_context *core_ctx;
+ struct img_enc_context *enc_ctx;
+ unsigned int stream_id; /* DMAN allocated device ID. */
+ struct vxe_enc_ctx *vxe_ctx;
+ void *mmu_ctx; /* stream specific MMU context */
+};
+
+/*
+ * Function pointer type for picture management functions
+ */
+typedef void (*pic_mgmt_func)(void *app_context, unsigned int frame_num);
+
+/*
+ * @function init_topaz_core
+ * @brief Initialise the encoder hardware
+ * @details Reset the hardware and set up registers, etc.
+ */
+int init_topaz_core(void *dev_handle, unsigned int *num_pipes,
+ unsigned int mmu_flags, void *callback);
+
+/*
+ * @function deinit_topaz_core
+ */
+int deinit_topaz_core(void);
+
+/*
+ * @function topaz_stream_create
+ * @brief Create an encoder context
+ * @details Set up an encoder context with the given parameters
+ */
+int topaz_stream_create(void *vxe_ctx, struct img_video_params *video_params,
+ unsigned char base_pipe,
+ unsigned char pipes_to_use, struct img_rc_params *rc_params,
+ void **topaz_str_context);
+
+int topaz_end_of_stream(void *topaz_str_ctx, unsigned int frame_cnt);
+
+int topaz_flush_stream(void *topaz_str_ctx, unsigned int frame_cnt);
+
+int topaz_stream_destroy(void *topaz_str_ctx);
+
+/*
+ * Load the given context onto the hardware
+ */
+int topaz_load_context(void *topaz_str_ctx);
+
+/*
+ * Store the context from the hardware into given location
+ */
+int topaz_store_context(void *topaz_str_ctx);
+
+/*
+ * Destroy the given context on the hardware
+ */
+int topaz_destroy_context(void *topaz_str_ctx);
+
+/*
+ * Get the capabilities of the encoder for the given requirements.
+ * @param standard : Standard setting
+ * @param width : Target output width
+ * @param height : Target output height
+ * @param caps : Pointer to caps structure to be filled in.
+ */
+int topaz_get_encoder_caps(enum img_standard standard, unsigned short width,
+ unsigned short height, struct img_enc_caps *caps);
+
+int topaz_stream_map_buf_sg(void *topaz_str_ctx, enum venc_buf_type buf_type,
+ struct vidio_ddbufinfo *buf_info, void *sgt);
+
+int topaz_stream_unmap_buf_sg(void *topaz_str_ctx, struct vidio_ddbufinfo *buf_info);
+
+/*
+ * Prepare a partially coded H264 Sequence Header (SPS).
+ * @param mb_width : Width of the sequence in MBs
+ * @param mb_height : Height of the sequence in MBs
+ * @param vui_params_present : IMG_TRUE to include VUI parameters
+ * @param params : Pointer to VUI parameters structure
+ * @param crop : Pointer to crop parameter structure
+ * @param sh_params : Pointer to sequence header params structure
+ */
+int topaz_h264_prepare_sequence_header(void *topaz_str_ctx, unsigned int mb_width,
+ unsigned int mb_height,
+ unsigned char vui_params_present,
+ struct h264_vui_params *params,
+ struct h264_crop_params *crop,
+ struct h264_sequence_header_params *sh_params,
+ unsigned char mvc_sps);
+/*
+ * Prepare a partially coded H264 Picture Header (PPS).
+ * @param cqp_offset : Chroma QP offset
+ */
+int topaz_h264_prepare_picture_header(void *topaz_str_ctx, signed char cqp_offset);
+
+/*
+ * Prepare an AUD header.
+ */
+int topaz_h264_prepare_aud_header(void *topaz_str_ctx);
+
+/*
+ * Set offsets and strides for YUV components.
+ * @param frame : Source frame
+ */
+int topaz_set_component_offsets(void *topaz_str_ctx, struct img_frame *frame);
+
+/*
+ * Reserves a slot to be used by a subsequent call to topaz_send_coded_package().
+ * If internally allocated coded buffers are being used, it also gets a buffer
+ * from the encoder's internal list.
+ */
+int topaz_reserve_coded_package_slot(void *topaz_str_ctx);
+
+/*
+ * Submits a buffer to the encoder to receive coded data along with a coded header buffer.
+ * A VP8 non coded buffer can also be sent.
+ * @param coded_buffer : Pointer to the coded package to send to FW
+ */
+int topaz_send_coded_package(void *topaz_str_ctx, struct img_coded_buffer *coded_buffer);
+
+/*
+ * Returns the number of empty source slots available
+ * @return signed char: Number of empty source slots (negative number indicates an error)
+ */
+signed char topaz_query_empty_source_slots(void *topaz_str_ctx);
+
+/*
+ * Returns the number of empty coded buffer slots available
+ * @return signed char: Number of empty coded slots (negative number indicates an error)
+ */
+signed char topaz_query_empty_coded_slots(void *topaz_str_ctx);
+
+/*
+ * Reserves a slot to be used by a subsequent call to topaz_send_source_frame().
+ * If internally allocated source buffers are being used, it also gets a buffer
+ * from the encoder's internal list.
+ * @param src_slot_num : Pointer to receive slot number
+ */
+int topaz_reserve_source_slot(void *topaz_str_ctx, unsigned char *src_slot_num);
+
+/*
+ * Submits a frame to the encoder for processing
+ * @param src_frame : Pointer to the source frame
+ * @param frame_num : Frame number of the given frame
+ * @param ctx_value : Value which will be inserted into the coded data buffer header
+ */
+int topaz_send_source_frame(void *topaz_str_ctx,
+ struct img_frame *src_frame,
+ unsigned int frame_num,
+ unsigned long long ctx_value);
+
+/*
+ * Indicates that there are no more source frames to send. It need not be the
+ * last command, but it informs the driver of the total length of the video stream.
+ */
+int topaz_end_of_stream(void *topaz_str_ctx, unsigned int frame_count);
+
+/*
+ * Indicates that the encoder should be flushed after the specified number of
+ * frames have been encoded.
+ * @param frame_count: Number of frames which should have been encoded
+ * when the flush is complete
+ */
+int topaz_flush_stream(void *topaz_str_ctx, unsigned int frame_count);
+
+/*
+ * Get the maximum coded data length for the given parameters, which can be used
+ * to determine the size of the coded data buffer.
+ */
+unsigned int topaz_get_coded_buffer_max_size(void *topaz_str_ctx, enum img_standard standard,
+ unsigned short width, unsigned short height,
+ struct img_rc_params *rc_params);
+
+unsigned int topaz_get_coded_package_max_num(void *topaz_str_ctx, enum img_standard standard,
+ unsigned short width, unsigned short height,
+ struct img_rc_params *rc_params);
+
+/*
+ * Tell the firmware to encode a frame.
+ */
+int topaz_encode_frame(void *topaz_str_ctx);
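+
+/*
+ * Typical call sequence (illustrative sketch; error handling and buffer
+ * setup omitted, all functions declared above):
+ *
+ *	topaz_reserve_source_slot(ctx, &slot);
+ *	topaz_send_source_frame(ctx, frame, frame_num, host_ctx);
+ *	topaz_reserve_coded_package_slot(ctx);
+ *	topaz_send_coded_package(ctx, coded_buffer);
+ *	topaz_encode_frame(ctx);
+ */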
+
+/*
+ * Tells whether or not a pipe is being used by any context.
+ * If it is being used then it returns the id (1 or 2) of the context that is using it.
+ * Else it returns zero as the context id.
+ */
+int topaz_get_pipe_usage(unsigned char pipe, unsigned char *ctx_id);
+
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.c b/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.c
new file mode 100644
index 000000000000..d88ddf5ca508
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.c
@@ -0,0 +1,1487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * encoder utility function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "fw_headers/defs.h"
+#include "img_errors.h"
+#include "reg_headers/topazhp_core_regs.h"
+#include "reg_headers/topaz_coreext_regs.h"
+#include "reg_headers/topazhp_multicore_regs_old.h"
+#include "topaz_api.h"
+
+#define MV_OFFSET_IN_TABLE(distance, \
+ position) ((distance) * MV_ROW_STRIDE + (position) * sizeof(struct img_mv_settings))
+#define DEFAULT_MVCALC_CONFIG ((0x00040303) | (MASK_TOPAZHP_CR_MVCALC_JITTER_POINTER_RST))
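+
+/*
+ * Illustrative example: MV_OFFSET_IN_TABLE(2, 1) evaluates to
+ * 2 * MV_ROW_STRIDE + 1 * sizeof(struct img_mv_settings), i.e. the second
+ * position entry of the third distance row of the MV settings table.
+ */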
+
+/*
+ * Calculates the correct number of macroblocks per kick and kicks per BU
+ */
+void calculate_kick_and_bu_size(unsigned int width_in_mbs,
+ unsigned int height_in_mbs,
+ unsigned char is_interlaced,
+ unsigned int max_bu_per_frame,
+ unsigned int *kick_size,
+ unsigned int *kicks_per_bu,
+ unsigned int *min_slice_height)
+{
+ unsigned int kick_size_local, kicks_per_bu_local, bu_per_frame, min_slice_height_local;
+
+ /*
+ * A basic unit is either an integer number of rows, or an integer number of
+ * basic units fits in a row. We calculate the ideal kick size first, then
+ * decide how many kicks there will be for each basic unit
+ */
+
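+ /*
+ * Worked example (illustrative): a 1920x1088 progressive frame gives
+ * width_in_mbs = 120, which is divisible by 3 and > 30, so
+ * kick_size = 40 and kicks_per_bu = 3; bu_per_frame starts at
+ * height_in_mbs = 68 and is only halved while it exceeds max_bu_per_frame.
+ */
+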
+ /* Default to 1 kick per row */
+ kick_size_local = width_in_mbs;
+ kicks_per_bu_local = 1;
+ min_slice_height_local = 1;
+
+ /* See if we can use a smaller kick size */
+ if (!(kick_size_local % 3) && kick_size_local > 30) {
+ kick_size_local /= 3;
+ kicks_per_bu_local = 3;
+ } else if (!(kick_size_local % 2) && (kick_size_local > 20)) {
+ kick_size_local /= 2;
+ kicks_per_bu_local = 2;
+ }
+
+ IMG_DBG_ASSERT((kick_size_local < 256) && ("Kick Size can't be bigger than 255" != NULL));
+
+ /* Now calculate how many kicks we do per BU */
+ bu_per_frame = height_in_mbs * (is_interlaced ? 2 : 1);
+
+ while (bu_per_frame > max_bu_per_frame) {
+ /* we have too many BUs so double up the number
+ * of rows per BU so we can half the number of BUs
+ */
+ kicks_per_bu_local *= 2;
+ /* if we had an odd number of rows then the last BU will be half height */
+ bu_per_frame = (bu_per_frame + 1) / 2;
+ min_slice_height_local *= 2;
+ }
+
+ /* if we can afford to have 2 BUs per row then do it */
+ if ((bu_per_frame < (max_bu_per_frame / 2)) && kicks_per_bu_local == 2) {
+ kicks_per_bu_local = 1;
+ bu_per_frame *= 2;
+ }
+
+ /* if we can afford to have 3 BUs per row then do it */
+ if ((bu_per_frame < (max_bu_per_frame / 3)) && kicks_per_bu_local == 3) {
+ kicks_per_bu_local = 1;
+ bu_per_frame *= 3;
+ }
+
+ *kick_size = kick_size_local;
+ *kicks_per_bu = kicks_per_bu_local;
+ *min_slice_height = min_slice_height_local;
+}
+
+/*
+ * Calculates the stride based on the input format and width
+ */
+unsigned int calculate_stride(enum img_format format, ushort requested_stride_bytes, ushort width)
+{
+ ushort stride_bytes;
+
+ if (requested_stride_bytes) {
+ stride_bytes = requested_stride_bytes;
+ } else {
+ switch (format) {
+ case IMG_CODEC_Y0UY1V_8888:
+ case IMG_CODEC_Y0VY1U_8888:
+ case IMG_CODEC_UY0VY1_8888:
+ case IMG_CODEC_VY0UY1_8888:
+ stride_bytes = width << 1;
+ break;
+ case IMG_CODEC_ABCX:
+ case IMG_CODEC_XBCA:
+ stride_bytes = width << 2;
+ break;
+ case IMG_CODEC_ABC565:
+ stride_bytes = width << 1;
+ break;
+ default:
+ stride_bytes = width;
+ break;
+ }
+ }
+
+ switch (format) {
+ case IMG_CODEC_420_YUV:
+ case IMG_CODEC_420_YV12:
+ case IMG_CODEC_420_PL8:
+ case IMG_CODEC_422_YUV:
+ case IMG_CODEC_422_YV12:
+ case IMG_CODEC_422_PL8:
+ /* although luma stride is the same as chroma stride, the chroma
+ * start address is at half the stride, so we need 128-byte alignment
+ */
+ case IMG_CODEC_420_IMC2:
+ /* although luma stride is the same as chroma stride, the chroma
+ * start address is at half the stride, so we need 128-byte alignment
+ */
+ case IMG_CODEC_422_IMC2:
+
+ /*
+ * All strides need to be 64-byte aligned.
+ * Chroma stride is half the luma stride, so the luma stride must
+ * remain 64-byte aligned when divided by 2 (hence 128-byte alignment)
+ */
+ return ALIGN_128(stride_bytes);
+ default:
+ /* Stride needs to be 64-byte aligned */
+ return ALIGN_64(stride_bytes);
+ }
+}
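+
+/*
+ * Illustrative example: a 720-pixel-wide IMG_CODEC_420_YUV source with no
+ * requested stride yields stride_bytes = 720, which ALIGN_128() rounds up
+ * to 768 so that the chroma rows starting at half the stride remain
+ * 64-byte aligned.
+ */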
+
+/*
+ * Patch HW profile based on the profile specified by the user
+ */
+void patch_hw_profile(struct img_video_params *video_params, struct img_video_context *video)
+{
+ unsigned int ipe_control = 0;
+ unsigned int pred_comb_control = 0;
+ struct img_encode_features *enc_features = &video_params->enc_features;
+
+ /* disable_intra4x4 */
+ if (enc_features->disable_intra4x4)
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_INTRA4X4_DISABLE);
+
+ /* disable_intra8x8 */
+ if (enc_features->disable_intra8x8)
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_INTRA8X8_DISABLE);
+
+ /* disable_intra16x16, check if at least one of the other Intra mode is enabled */
+ if (enc_features->disable_intra16x16 &&
+ (!(enc_features->disable_intra8x8) || !(enc_features->disable_intra4x4)))
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_INTRA16X16_DISABLE);
+
+ if (video_params->mbps)
+ video->mbps = video_params->mbps;
+
+ if (enc_features->restrict_inter4x4)
+ ipe_control |= F_ENCODE(1, TOPAZHP_CR_IPE_MV_NUMBER_RESTRICTION);
+
+ if (enc_features->disable_inter8x8)
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_INTER8X8_DISABLE);
+
+ if (enc_features->disable_bpic_ref1)
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_B_PIC1_DISABLE);
+ else if (enc_features->disable_bpic_ref0)
+ pred_comb_control |= F_ENCODE(1, TOPAZHP_CR_B_PIC0_DISABLE);
+
+ /* save predictor combiner control in video encode parameter set */
+ video->pred_comb_control = pred_comb_control;
+
+ /* set blocksize */
+ ipe_control |= F_ENCODE(enc_features->min_blk_sz, TOPAZHP_CR_IPE_BLOCKSIZE);
+
+ if (enc_features->enable_8x16_mv_detect)
+ ipe_control |= F_ENCODE(1, TOPAZHP_CR_IPE_8X16_ENABLE);
+
+ if (enc_features->enable_16x8_mv_detect)
+ ipe_control |= F_ENCODE(1, TOPAZHP_CR_IPE_16X8_ENABLE);
+
+ if (enc_features->disable_bframes)
+ video->rc_params.bframes = 0;
+
+ if (enc_features->restricted_intra_pred)
+ video->intra_pred_modes = 0xff0f;
+
+ /* save IPE-control register */
+ video->ipe_control = ipe_control;
+}
+
+/*
+ * Set offsets and strides for YUV components of source picture
+ */
+int topaz_set_component_offsets(void *enc_ctx_handle, struct img_frame *frame)
+{
+ struct img_enc_context *enc;
+ struct img_video_context *video;
+ enum img_format format;
+ ushort stride_bytes;
+ ushort picture_height;
+
+ if (!enc_ctx_handle)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ /* if source slot is NULL then it's just the next portion of slices */
+ if (!frame)
+ return IMG_ERROR_UNDEFINED;
+
+ enc = (struct img_enc_context *)enc_ctx_handle;
+ video = enc->video;
+
+ format = video->format;
+ picture_height = video->buffer_height >> (video->is_interlaced ? 1 : 0);
+ stride_bytes = video->buffer_stride_bytes;
+
+ /*
+ * 3 Components: Y, U, V
+ * Y component is always at the beginning
+ */
+ frame->y_component_offset = 0;
+ frame->src_y_stride_bytes = stride_bytes;
+
+ /* Assume for now that field 0 comes first */
+ frame->field0_y_offset = 0;
+ frame->field0_u_offset = 0;
+ frame->field0_v_offset = 0;
+
+ switch (format) {
+ case IMG_CODEC_420_YUV:
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+
+ frame->u_component_offset = stride_bytes * picture_height;
+ frame->v_component_offset = stride_bytes * picture_height + (stride_bytes / 2) *
+ (picture_height / 2);
+ break;
+
+ case IMG_CODEC_420_PL8:
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_420_PL12:
+ case IMG_CODEC_420_PL21:
+ frame->src_uv_stride_bytes = stride_bytes;
+
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_420_YV12:
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+ frame->u_component_offset = stride_bytes * picture_height + (stride_bytes / 2) *
+ (picture_height / 2);
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_420_PL12_PACKED:
+ case IMG_CODEC_420_PL21_PACKED:
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = stride_bytes * picture_height;
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_420_IMC2: /* IMC2 */
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = stride_bytes * picture_height + (stride_bytes / 2);
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_422_YUV:
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+ frame->u_component_offset = stride_bytes * picture_height;
+ frame->v_component_offset = stride_bytes * picture_height + (stride_bytes / 2) *
+ picture_height;
+ break;
+
+ case IMG_CODEC_422_YV12: /* YV16 */
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+ frame->u_component_offset = stride_bytes * picture_height + (stride_bytes / 2) *
+ picture_height;
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_422_PL8:
+ frame->src_uv_stride_bytes = stride_bytes / 2;
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_422_IMC2: /* IMC2 */
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = stride_bytes * picture_height + (stride_bytes / 2);
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_422_PL12:
+ case IMG_CODEC_422_PL21:
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_444_YUV:
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = stride_bytes * picture_height;
+ frame->v_component_offset = stride_bytes * picture_height + stride_bytes *
+ picture_height;
+ break;
+
+ case IMG_CODEC_444_YV12: /* YV16 */
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = stride_bytes * picture_height + stride_bytes *
+ picture_height;
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_444_PL8:
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_444_IMC2: /* IMC2 */
+ frame->src_uv_stride_bytes = stride_bytes * 2;
+ frame->u_component_offset = stride_bytes * picture_height + stride_bytes;
+ frame->v_component_offset = stride_bytes * picture_height;
+ break;
+
+ case IMG_CODEC_444_PL12:
+ case IMG_CODEC_444_PL21:
+ frame->src_uv_stride_bytes = stride_bytes * 2;
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ case IMG_CODEC_Y0UY1V_8888:
+ case IMG_CODEC_Y0VY1U_8888:
+ case IMG_CODEC_UY0VY1_8888:
+ case IMG_CODEC_VY0UY1_8888:
+ case IMG_CODEC_ABCX:
+ case IMG_CODEC_XBCA:
+ case IMG_CODEC_ABC565:
+ frame->src_uv_stride_bytes = stride_bytes;
+ frame->u_component_offset = 0;
+ frame->v_component_offset = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (video->is_interlaced) {
+ if (video->is_interleaved) {
+ switch (format) {
+ case IMG_CODEC_420_IMC2:
+ case IMG_CODEC_422_IMC2:
+ frame->v_component_offset *= 2;
+ frame->u_component_offset = frame->v_component_offset +
+ (stride_bytes / 2);
+ break;
+ case IMG_CODEC_444_IMC2:
+ frame->v_component_offset *= 2;
+ frame->u_component_offset = frame->v_component_offset +
+ stride_bytes;
+ break;
+
+ default:
+ frame->u_component_offset *= 2;
+ frame->v_component_offset *= 2;
+ break;
+ }
+
+ frame->field1_y_offset = frame->field0_y_offset + frame->src_y_stride_bytes;
+ frame->field1_u_offset = frame->field0_u_offset +
+ frame->src_uv_stride_bytes;
+ frame->field1_v_offset = frame->field0_v_offset +
+ frame->src_uv_stride_bytes;
+
+ frame->src_y_stride_bytes *= 2;
+ frame->src_uv_stride_bytes *= 2;
+ } else {
+ unsigned int y_field_size, c_field_size;
+
+ switch (format) {
+ case IMG_CODEC_420_YUV:
+ case IMG_CODEC_420_YV12:
+ case IMG_CODEC_420_IMC2:
+ case IMG_CODEC_420_PL12_PACKED:
+ case IMG_CODEC_420_PL21_PACKED:
+ /* In packed formats, including PL12 packed, the field offsets
+ * should be calculated in the following manner
+ */
+ y_field_size = picture_height * stride_bytes * 3 / 2;
+ c_field_size = y_field_size;
+ break;
+ case IMG_CODEC_420_PL8:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes / 4;
+ break;
+ case IMG_CODEC_420_PL12:
+ case IMG_CODEC_420_PL21:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes / 2;
+ break;
+ case IMG_CODEC_422_YUV:
+ case IMG_CODEC_422_YV12:
+ case IMG_CODEC_422_IMC2:
+ y_field_size = picture_height * stride_bytes * 2;
+ c_field_size = y_field_size;
+ break;
+ case IMG_CODEC_422_PL8:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes / 2;
+ break;
+ case IMG_CODEC_422_PL12:
+ case IMG_CODEC_422_PL21:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes;
+ break;
+ case IMG_CODEC_Y0UY1V_8888:
+ case IMG_CODEC_UY0VY1_8888:
+ case IMG_CODEC_Y0VY1U_8888:
+ case IMG_CODEC_VY0UY1_8888:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = y_field_size;
+ break;
+ case IMG_CODEC_444_YUV:
+ case IMG_CODEC_444_YV12:
+ case IMG_CODEC_444_IMC2:
+ y_field_size = picture_height * stride_bytes * 3;
+ c_field_size = y_field_size;
+ break;
+ case IMG_CODEC_444_PL8:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes;
+ break;
+ case IMG_CODEC_444_PL12:
+ case IMG_CODEC_444_PL21:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = picture_height * stride_bytes * 2;
+ break;
+ case IMG_CODEC_ABCX:
+ case IMG_CODEC_XBCA:
+ case IMG_CODEC_ABC565:
+ y_field_size = picture_height * stride_bytes;
+ c_field_size = y_field_size;
+ break;
+ default:
+ y_field_size = picture_height * stride_bytes * 3 / 2;
+ c_field_size = y_field_size;
+ break;
+ }
+
+ frame->field1_y_offset = y_field_size;
+ frame->field1_u_offset = c_field_size;
+ frame->field1_v_offset = c_field_size;
+ }
+ } else {
+ frame->field1_y_offset = frame->field0_y_offset;
+ frame->field1_u_offset = frame->field0_u_offset;
+ frame->field1_v_offset = frame->field0_v_offset;
+ }
+ return IMG_SUCCESS;
+}
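+
+/*
+ * Illustrative example: a progressive IMG_CODEC_420_YUV buffer with a
+ * 768-byte stride and a 576-line buffer height gives
+ * u_component_offset = 768 * 576 = 442368 and
+ * v_component_offset = 442368 + (768 / 2) * (576 / 2) = 552960.
+ */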
+
+void topaz_setup_input_csc(struct img_video_context *video,
+ struct img_vxe_scaler_setup *scaler_setup,
+ struct img_vxe_csc_setup *csc_setup,
+ enum img_csc_preset csc_preset)
+{
+#define CSC_MINUS_1_16(X) TOPAZHP_EXT_CR_CSC_SOURCE_MOD_0 ## X ## _MINUS_1_16
+#define CSC_MINUS_1_2(X) TOPAZHP_EXT_CR_CSC_SOURCE_MOD_0 ## X ## _MINUS_1_2
+#define CSC_UNSIGNED(X) TOPAZHP_EXT_CR_CSC_SOURCE_MOD_0 ## X ## _UNSIGNED
+
+ if (csc_preset != IMG_CSC_NONE &&
+ (video->format == IMG_CODEC_ABCX ||
+ video->format == IMG_CODEC_XBCA || video->format == IMG_CODEC_ABC565)) {
+ unsigned char source_mode[IMG_CSC_PRESETS][3] = {
+ /* IMG_CSC_NONE - No colour-space conversion */
+ {CSC_MINUS_1_16(0), CSC_MINUS_1_2(1), CSC_MINUS_1_16(2)},
+
+ /* IMG_CSC_709_TO_601 - ITU BT.709 YUV to be converted to ITU BT.601 YUV */
+ {CSC_MINUS_1_16(0), CSC_MINUS_1_2(1), CSC_MINUS_1_16(2)},
+
+ /* IMG_CSC_601_TO_709 - ITU BT.601 YUV to be
+ * converted to ITU BT.709 YUV
+ */
+ {CSC_MINUS_1_16(0), CSC_MINUS_1_2(1), CSC_MINUS_1_16(2)},
+
+ /* IMG_CSC_RGB_TO_601_ANALOG - RGB to be
+ * converted to ITU BT.601 YUV
+ */
+ { CSC_UNSIGNED(0), CSC_UNSIGNED(1), CSC_UNSIGNED(2)},
+
+ /* IMG_CSC_RGB_TO_601_DIGITAL - RGB to be
+ * converted to ITU BT.601 YCbCr RS
+ */
+ { CSC_UNSIGNED(0), CSC_UNSIGNED(1), CSC_UNSIGNED(2)},
+
+ /* IMG_CSC_RGB_TO_601_DIGITAL_FS - RGB to be
+ * converted to ITU BT.601 YCbCr FS
+ */
+ { CSC_UNSIGNED(0), CSC_UNSIGNED(1), CSC_UNSIGNED(2)},
+
+ /* IMG_CSC_RGB_TO_709 - RGB to be converted to ITU BT.709 YUV */
+ { CSC_UNSIGNED(0), CSC_UNSIGNED(1), CSC_UNSIGNED(2)},
+
+ /* IMG_CSC_YIQ_TO_601 - YIQ to be converted to ITU BT.601 YUV */
+ {CSC_MINUS_1_16(0), CSC_MINUS_1_2(1), CSC_MINUS_1_16(2)},
+
+ /* IMG_CSC_YIQ_TO_709 - YIQ to be converted to ITU BT.709 YUV */
+ {CSC_MINUS_1_16(0), CSC_MINUS_1_2(1), CSC_MINUS_1_16(2)},
+
+ /* IMG_CSC_BRG_TO_601 - RGB to be converted to ITU BT.601 YUV */
+ {0, 0, 0},
+
+ /* IMG_CSC_RBG_TO_601 - RGB to be converted to ITU BT.601 YUV */
+ {0, 0, 0},
+
+ /* IMG_CSC_BGR_TO_601 - RGB to be converted to ITU BT.601 YUV */
+ {0, 0, 0},
+
+ /* IMG_CSC_UYV_TO_YUV - UYV to be converted to YUV */
+ {CSC_MINUS_1_2(0), CSC_MINUS_1_16(1), CSC_MINUS_1_2(2)},
+ /*{ CSC_UNSIGNED(0), CSC_UNSIGNED(1), CSC_UNSIGNED(2)}, */
+ };
+
+ int coeffs[IMG_CSC_PRESETS][3][3] = {
+ /* IMG_CSC_NONE - No colour-space conversion */
+ {
+ { 1024, 0, 0 },
+ { 0, 1024, 0 },
+ { 0, 0, 1024 }
+ },
+
+ /* IMG_CSC_709_TO_601 - ITU BT.709 YUV to be converted to ITU BT.601 YUV */
+ {
+ { 1024, (int)(0.15941 * 1024), (int)(0.11649 * 1024) },
+ { 0, (int)(-0.07844 * 1024), (int)(0.98985 * 1024) },
+ { 0, (int)(0.9834 * 1024), (int)(-0.10219 * 1024) }
+ },
+
+ /* IMG_CSC_601_TO_709 - ITU BT.601 YUV to be converted to ITU BT.709 YUV */
+ {
+ { 1024, (int)(-0.17292 * 1024), (int)(-0.13554 * 1024) },
+ { 0, (int)(0.08125 * 1024), (int)(1.01864 * 1024) },
+ { 0, (int)(1.02532 * 1024), (int)(0.10586 * 1024) }
+ },
+
+ /* IMG_CSC_RGB_TO_601_ANALOG - RGB to be converted to ITU BT.601 YUV */
+ { /* R G B */
+ { (int)(219 * 0.299 * 4.0157),
+ (int)(219 * 0.587 * 4.0157),
+ (int)(219 * 0.114 * 4.0157) },
+ { (int)(224 * -0.14713 * 4.0157),
+ (int)(224 * -0.28886 * 4.0157),
+ (int)(224 * 0.446 * 4.0157) },
+ { (int)(224 * 0.615 * 4.0157),
+ (int)(224 * -0.51499 * 4.0157),
+ (int)(224 * -0.10001 * 4.0157) }
+ }, /* A B C */
+
+ /* IMG_CSC_RGB_TO_601_DIGITAL - RGB to be
+ * converted to ITU BT.601 YCbCr reduced scale
+ */
+ { /* R G B */
+ { (int)(219 * 0.299 * 4.0157),
+ (int)(219 * 0.587 * 4.0157),
+ (int)(219 * 0.114 * 4.0157) },
+ { (int)(224 * -0.172 * 4.0157),
+ (int)(224 * -0.339 * 4.0157),
+ (int)(224 * 0.511 * 4.0157) },
+ { (int)(224 * 0.511 * 4.0157),
+ (int)(224 * -0.428 * 4.0157),
+ (int)(224 * -0.083 * 4.0157) }
+ }, /* A B C */
+
+ /* IMG_CSC_RGB_TO_601_DIGITAL_FS - RGB to be
+ * converted to ITU BT.601 YCbCr full scale
+ */
+ { /* R G B */
+ { (int)(219 * 0.257 * 4.0157),
+ (int)(219 * 0.504 * 4.0157),
+ (int)(219 * 0.098 * 4.0157) },
+ { (int)(224 * -0.148 * 4.0157),
+ (int)(224 * -0.291 * 4.0157),
+ (int)(224 * 0.439 * 4.0157) },
+ { (int)(224 * 0.439 * 4.0157),
+ (int)(224 * -0.368 * 4.0157),
+ (int)(224 * -0.071 * 4.0157) }
+ }, /* A B C */
+
+ /* IMG_CSC_RGB_TO_709 - RGB to be converted to ITU BT.709 YUV */
+ {
+ { (int)(219 * 0.2215 * 4.0157), (int)(219 * 0.7154 * 4.0157),
+ (int)(219 * 0.0721 * 4.0157) },
+ { (int)(224 * -0.1145 * 4.0157), (int)(224 * -0.3855 * 4.0157),
+ (int)(224 * 0.5 * 4.0157) },
+ { (int)(224 * 0.5016 * 4.0157), (int)(224 * -0.4556 * 4.0157),
+ (int)(224 * -0.0459 * 4.0157) }
+ },
+
+ /* IMG_CSC_YIQ_TO_601 - YIQ to be converted to ITU BT.601 YUV */
+ {
+ { 1024, 0, 0 },
+ { 0, (int)(0.83885 * 1024), (int)(-0.54475 * 1024) },
+ { 0, (int)(0.54484 * 1024), (int)(0.83896 * 1024) }
+ },
+
+ /* IMG_CSC_YIQ_TO_709 - YIQ to be converted to ITU BT.709 YUV */
+ {
+ { 1024, (int)(-0.20792 * 1024), (int)(0.07122 * 1024) },
+ { 0, (int)(0.89875 * 1024), (int)(-0.48675 * 1024) },
+ { 0, (int)(0.64744 * 1024), (int)(0.80255 * 1024) }
+ },
+
+ /*
+ * IMG_CSC_BRG_TO_601 - RGB to be converted to ITU BT.601 YUV
+ * Entries have been reordered to provide support for xRGB format
+ */
+ { /* B R G */
+ { (int)(219 * 0.114 * 4.0157),
+ (int)(219 * 0.299 * 4.0157),
+ (int)(219 * 0.587 * 4.0157)},
+ { (int)(224 * 0.446 * 4.0157),
+ (int)(224 * -0.14713 * 4.0157),
+ (int)(224 * -0.28886 * 4.0157)},
+ { (int)(224 * -0.10001 * 4.0157),
+ (int)(224 * 0.615 * 4.0157),
+ (int)(224 * -0.51499 * 4.0157)}
+ }, /* A B C */
+
+ /*
+ * IMG_CSC_RBG_TO_601 - RGB to be converted to ITU BT.601 YUV
+ * Entries have been reordered to provide support for xBGR format
+ */
+ { /* R B G */
+ { (int)(219 * 0.299 * 4.0157),
+ (int)(219 * 0.114 * 4.0157),
+ (int)(219 * 0.587 * 4.0157)},
+ { (int)(224 * -0.14713 * 4.0157),
+ (int)(224 * 0.446 * 4.0157),
+ (int)(224 * -0.28886 * 4.0157)},
+ { (int)(224 * 0.615 * 4.0157),
+ (int)(224 * -0.10001 * 4.0157),
+ (int)(224 * -0.51499 * 4.0157)}
+ }, /* A B C */
+
+ /*
+ * IMG_CSC_BGR_TO_601 - RGB to be converted to ITU BT.601 YUV
+ * Entries have been reordered to provide support for BGRx format
+ */
+ { /* B G R */
+ { (int)(219 * 0.114 * 4.0157),
+ (int)(219 * 0.587 * 4.0157),
+ (int)(219 * 0.299 * 4.0157)},
+ { (int)(224 * 0.446 * 4.0157),
+ (int)(224 * -0.28886 * 4.0157),
+ (int)(224 * -0.14713 * 4.0157)},
+ { (int)(224 * -0.10001 * 4.0157),
+ (int)(224 * -0.51499 * 4.0157),
+ (int)(224 * 0.615 * 4.0157)},
+ }, /* A B C */
+
+ /* IMG_CSC_UYV_TO_YUV - UYV to YUV */
+ {
+ { 0, 1024, 0 },
+ { 1024, 0, 0 },
+ { 0, 0, 1024 }
+ },
+ };
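+
+ /*
+ * Note (illustrative): the coefficients are Q10 fixed point
+ * (1024 represents 1.0) and 4.0157 approximates 1024 / 255, so
+ * e.g. the BT.601 luma-from-R term above truncates to
+ * (int)(219 * 0.299 * 4.0157) = 262.
+ */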
+
+ unsigned int index = csc_preset;
+
+ IMG_DBG_ASSERT(index < IMG_CSC_PRESETS);
+
+ if (index >= IMG_CSC_PRESETS)
+ return;
+
+#define SRC_MOD(X) TOPAZHP_EXT_CR_CSC_SOURCE_MOD_0 ## X
+#define OUT_MOD(X) TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_0 ## X
+
+#define SOURCE_Y_ARRAY csc_setup->csc_source_y
+#define SRC_Y_PARAM(X) TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_Y
+
+#define SOURCE_CBCR_ARRAY csc_setup->csc_source_cbcr
+#define SRC_CB_PARAM(X) TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CB
+#define SRC_CR_PARAM(X) TOPAZHP_EXT_CR_CSC_SOURCE_SRC_TO_CR
+
+#define CLIP_VALUE 255
+
+ scaler_setup->scaler_control |= F_ENCODE(1,
+ TOPAZHP_EXT_CR_ENABLE_COLOUR_SPACE_CONVERSION);
+
+ csc_setup->csc_output_clip[0] =
+ F_ENCODE(TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_00_ADD_1_16, OUT_MOD(0)) |
+ F_ENCODE(CLIP_VALUE, TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP) |
+ F_ENCODE(0, TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP);
+
+ csc_setup->csc_output_clip[1] =
+ F_ENCODE(TOPAZHP_EXT_CR_CSC_OUTPUT_MOD_01_ADD_1_2, OUT_MOD(1)) |
+ F_ENCODE(CLIP_VALUE, TOPAZHP_EXT_CR_CSC_OUTPUT_MAX_CLIP) |
+ F_ENCODE(0, TOPAZHP_EXT_CR_CSC_OUTPUT_MIN_CLIP);
+
+ SOURCE_Y_ARRAY[0] = F_ENCODE(source_mode[index][0], SRC_MOD(0)) |
+ F_ENCODE(coeffs[index][0][0], SRC_Y_PARAM(0));
+ SOURCE_CBCR_ARRAY[0] = F_ENCODE(coeffs[index][1][0], SRC_CB_PARAM(0)) |
+ F_ENCODE(coeffs[index][2][0], SRC_CR_PARAM(0));
+
+ SOURCE_Y_ARRAY[1] = F_ENCODE(source_mode[index][1], SRC_MOD(1)) |
+ F_ENCODE(coeffs[index][0][1], SRC_Y_PARAM(1));
+ SOURCE_CBCR_ARRAY[1] = F_ENCODE(coeffs[index][1][1], SRC_CB_PARAM(1)) |
+ F_ENCODE(coeffs[index][2][1], SRC_CR_PARAM(1));
+
+ SOURCE_Y_ARRAY[2] = F_ENCODE(source_mode[index][2], SRC_MOD(2)) |
+ F_ENCODE(coeffs[index][0][2], SRC_Y_PARAM(2));
+ SOURCE_CBCR_ARRAY[2] = F_ENCODE(coeffs[index][1][2], SRC_CB_PARAM(2)) |
+ F_ENCODE(coeffs[index][2][2], SRC_CR_PARAM(2));
+ }
+}
+
+/*
+ * Calculate buffer strides
+ */
+unsigned int topaz_get_packed_buffer_strides(ushort buffer_stride_bytes,
+ enum img_format format,
+ unsigned char enable_scaler,
+ unsigned char is_interlaced,
+ unsigned char is_interleaved)
+{
+ ushort src_y_stride_bytes;
+ ushort src_uv_stride_bytes = 0;
+
+ /* 3 Components: Y, U, V */
+ src_y_stride_bytes = buffer_stride_bytes;
+
+ switch (format) {
+ case IMG_CODEC_420_YUV:
+ case IMG_CODEC_420_PL8:
+ case IMG_CODEC_420_YV12:
+ src_uv_stride_bytes = src_y_stride_bytes / 2;
+ break;
+
+ case IMG_CODEC_422_YUV: /* Odd-numbered chroma rows unused if scaler not present */
+ case IMG_CODEC_422_YV12: /* Odd-numbered chroma rows unused if scaler not present */
+ case IMG_CODEC_422_PL8: /* Odd-numbered chroma rows unused if scaler not present */
+ if (!enable_scaler)
+ /* Skip alternate lines of chroma for 4:2:2 if scaler disabled/not present */
+ src_uv_stride_bytes = src_y_stride_bytes;
+ else
+ src_uv_stride_bytes = src_y_stride_bytes / 2;
+ break;
+ /* Interleaved chroma pixels (and unused odd-numbered chroma rows if scaler not present) */
+ case IMG_CODEC_422_IMC2:
+ /* Interleaved chroma rows (and unused odd-numbered chroma rows if scaler not present) */
+ case IMG_CODEC_422_PL12:
+ /* Interleaved chroma rows (and unused odd-numbered chroma rows if scaler not present) */
+ case IMG_CODEC_422_PL21:
+ if (!enable_scaler)
+ /* Skip alternate lines of chroma for 4:2:2 if scaler disabled/not present */
+ src_uv_stride_bytes = src_y_stride_bytes * 2;
+ else
+ src_uv_stride_bytes = src_y_stride_bytes;
+ break;
+
+ case IMG_CODEC_420_PL12: /* Interleaved chroma pixels */
+ case IMG_CODEC_420_PL21:
+
+ case IMG_CODEC_420_PL12_PACKED: /* Interleaved chroma pixels */
+ case IMG_CODEC_420_PL21_PACKED: /* Interleaved chroma pixels */
+ case IMG_CODEC_420_IMC2: /* Interleaved chroma rows */
+ case IMG_CODEC_Y0UY1V_8888: /* Interleaved luma and chroma pixels */
+ case IMG_CODEC_Y0VY1U_8888: /* Interleaved luma and chroma pixels */
+ case IMG_CODEC_UY0VY1_8888: /* Interleaved luma and chroma pixels */
+ case IMG_CODEC_VY0UY1_8888: /* Interleaved luma and chroma pixels */
+ case IMG_CODEC_ABCX: /* Interleaved pixels of unknown colour space */
+ case IMG_CODEC_XBCA: /* Interleaved pixels of unknown colour space */
+ case IMG_CODEC_ABC565: /* Packed pixels of unknown colour space */
+ src_uv_stride_bytes = src_y_stride_bytes;
+ break;
+
+ case IMG_CODEC_444_YUV: /* Unusable if scaler not present */
+ case IMG_CODEC_444_YV12: /* Unusable if scaler not present */
+ case IMG_CODEC_444_PL8: /* Unusable if scaler not present */
+ src_uv_stride_bytes = src_y_stride_bytes;
+ break;
+
+ /* Interleaved chroma pixels (unusable if scaler not present) */
+ case IMG_CODEC_444_IMC2:
+ /* Interleaved chroma rows (unusable if scaler not present) */
+ case IMG_CODEC_444_PL12:
+ /* Interleaved chroma rows (unusable if scaler not present) */
+ case IMG_CODEC_444_PL21:
+ src_uv_stride_bytes = src_y_stride_bytes * 2;
+ break;
+
+ default:
+ break;
+ }
+
+ if (is_interlaced && is_interleaved) {
+ src_y_stride_bytes *= 2;
+ src_uv_stride_bytes *= 2;
+ }
+ return F_ENCODE(src_y_stride_bytes >> 6, MTX_MSG_PICMGMT_STRIDE_Y) |
+ F_ENCODE(src_uv_stride_bytes >> 6, MTX_MSG_PICMGMT_STRIDE_UV);
+}
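+
+/*
+ * Illustrative example: a 768-byte luma stride for IMG_CODEC_420_YUV gives
+ * src_uv_stride_bytes = 384; both are multiples of 64, so the packed return
+ * value carries 768 >> 6 = 12 in the Y stride field and 384 >> 6 = 6 in the
+ * UV stride field.
+ */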
+
+/*
+ * Setup the registers for scaling candidate motion vectors to take into account
+ * how far away (temporally) the reference pictures are
+ */
+#define RESTRICT16x16_FLAGS (0x1)
+#define RESTRICT8x8_FLAGS (0x2)
+
+void update_driver_mv_scaling(unsigned int frame_num, unsigned int ref0_num, unsigned int ref1_num,
+ unsigned int pic_flags, unsigned int *mv_calc_below_handle,
+ unsigned int *mv_calc_colocated_handle,
+ unsigned int *mv_calc_config_handle)
+{
+ unsigned int mv_calc_config = 0;
+ unsigned int mv_calc_colocated = F_ENCODE(0x10, TOPAZHP_CR_TEMPORAL_BLEND);
+ unsigned int mv_calc_below = 0;
+
+ /* If b picture calculate scaling factor for colocated motion vectors */
+ if (pic_flags & ISINTERB_FLAGS) {
+ int tb, td, tx;
+ int dist_scale;
+
+ /* calculation taken from H264 spec */
+ tb = (frame_num * 2) - (ref1_num * 2);
+ td = (ref0_num * 2) - (ref1_num * 2);
+ tx = (16384 + abs(td / 2)) / td;
+ dist_scale = (tb * tx + 32) >> 6;
+ if (dist_scale > 1023)
+ dist_scale = 1023;
+
+ if (dist_scale < -1024)
+ dist_scale = -1024;
+
+ mv_calc_colocated |= F_ENCODE(dist_scale, TOPAZHP_CR_COL_DIST_SCALE_FACT);
+
+ /*
+ * We assume the below temporal MVs are from the latest reference frame
+ * rather than the most recently encoded B-frame (as Bs aren't references).
+ * Fwd temporal is the same as the colocated MV scale
+ */
+ mv_calc_below |= F_ENCODE(dist_scale, TOPAZHP_CR_PIC0_DIST_SCALE_FACTOR);
+
+ /* Bkwd temporal needs to be scaled by the reciprocal
+ * amount in the other direction
+ */
+ tb = (frame_num * 2) - (ref0_num * 2);
+ td = (ref0_num * 2) - (ref1_num * 2);
+ tx = (16384 + abs(td / 2)) / td;
+ dist_scale = (tb * tx + 32) >> 6;
+ if (dist_scale > 1023)
+ dist_scale = 1023;
+
+ if (dist_scale < -1024)
+ dist_scale = -1024;
+
+ mv_calc_below |= F_ENCODE(dist_scale, TOPAZHP_CR_PIC1_DIST_SCALE_FACTOR);
+ } else {
+ /* Don't scale the temporal below mvs */
+ mv_calc_below |= F_ENCODE(1 << 8, TOPAZHP_CR_PIC0_DIST_SCALE_FACTOR);
+
+ if (ref0_num != ref1_num) {
+ int ref0_dist, ref1_dist;
+ int scale;
+
+ /*
+ * Distance to second reference picture may be different when
+ * using multiple reference frames on P. Scale based on difference
+ * in temporal distance to ref pic 1 compared to distance to ref pic 0
+ */
+ ref0_dist = (frame_num - ref0_num);
+ ref1_dist = (frame_num - ref1_num);
+ scale = (ref1_dist << 8) / ref0_dist;
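+ /* e.g. ref0_dist = 1, ref1_dist = 2 gives scale = 512, i.e. 2.0 in Q8 */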
+
+ if (scale > 1023)
+ scale = 1023;
+ if (scale < -1024)
+ scale = -1024;
+
+ mv_calc_below |= F_ENCODE(scale, TOPAZHP_CR_PIC1_DIST_SCALE_FACTOR);
+ } else {
+ mv_calc_below |= F_ENCODE(1 << 8, TOPAZHP_CR_PIC1_DIST_SCALE_FACTOR);
+ }
+ }
+
+ if (frame_num > 0) {
+ int ref0_distance, ref1_distance;
+ int jitter0, jitter1;
+
+ ref0_distance = abs((int)frame_num - (int)ref0_num);
+ ref1_distance = abs((int)frame_num - (int)ref1_num);
+
+ if (!(pic_flags & ISINTERB_FLAGS)) {
+ jitter0 = ref0_distance * 1;
+ jitter1 = jitter0 > 1 ? 1 : 2;
+ } else {
+ jitter0 = ref1_distance * 1;
+ jitter1 = ref0_distance * 1;
+ }
+
+ /* Hardware can only cope with 1 - 4 jitter factors */
+ jitter0 = (jitter0 > 4) ? 4 : (jitter0 < 1) ? 1 : jitter0;
+ jitter1 = (jitter1 > 4) ? 4 : (jitter1 < 1) ? 1 : jitter1;
+
+ IMG_DBG_ASSERT(jitter0 > 0 && jitter0 <= 4 && jitter1 > 0 && jitter1 <= 4);
+
+ mv_calc_config |= F_ENCODE(jitter0 - 1, TOPAZHP_CR_MVCALC_IPE0_JITTER_FACTOR) |
+ F_ENCODE(jitter1 - 1, TOPAZHP_CR_MVCALC_IPE1_JITTER_FACTOR);
+ }
+
+ mv_calc_config |= F_ENCODE(1, TOPAZHP_CR_MVCALC_DUP_VEC_MARGIN);
+ mv_calc_config |= F_ENCODE(7, TOPAZHP_CR_MVCALC_GRID_MB_X_STEP);
+ mv_calc_config |= F_ENCODE(13, TOPAZHP_CR_MVCALC_GRID_MB_Y_STEP);
+ mv_calc_config |= F_ENCODE(3, TOPAZHP_CR_MVCALC_GRID_SUB_STEP);
+ mv_calc_config |= F_ENCODE(1, TOPAZHP_CR_MVCALC_GRID_DISABLE);
+
+ mv_calc_config |= F_ENCODE(1, TOPAZHP_CR_MVCALC_NO_PSEUDO_DUPLICATES);
+
+ *mv_calc_below_handle = mv_calc_below;
+ *mv_calc_colocated_handle = mv_calc_colocated;
+ *mv_calc_config_handle = mv_calc_config;
+}
+
+void prepare_mv_estimates(struct img_enc_context *enc)
+{
+ struct img_video_context *vid_ctx = enc->video;
+ unsigned int distance;
+ unsigned int distance_b;
+ unsigned int position;
+ struct img_mv_settings *host_mv_settings_b_table;
+ struct img_mv_settings *host_mv_settings_hierarchical;
+ unsigned char hierarchical;
+
+ /* IDR */
+ vid_ctx->mv_settings_idr.mv_calc_config = DEFAULT_MVCALC_CONFIG; /* default based on TRM */
+ vid_ctx->mv_settings_idr.mv_calc_colocated = 0x00100100; /* default based on TRM */
+ vid_ctx->mv_settings_idr.mv_calc_below = 0x01000100; /* default based on TRM */
+
+ update_driver_mv_scaling(0, 0, 0, 0, &vid_ctx->mv_settings_idr.mv_calc_below,
+ &vid_ctx->mv_settings_idr.mv_calc_colocated,
+ &vid_ctx->mv_settings_idr.mv_calc_config);
+
+ /* NonB (I or P) */
+ for (distance = 1; distance <= MAX_BFRAMES + 1; distance++) {
+ /* default based on TRM */
+ vid_ctx->mv_settings_non_b[distance - 1].mv_calc_config = DEFAULT_MVCALC_CONFIG;
+ /* default based on TRM */
+ vid_ctx->mv_settings_non_b[distance - 1].mv_calc_colocated = 0x00100100;
+ /* default based on TRM */
+ vid_ctx->mv_settings_non_b[distance - 1].mv_calc_below = 0x01000100;
+
+ update_driver_mv_scaling
+ (distance, 0, 0, 0,
+ &vid_ctx->mv_settings_non_b[distance - 1].mv_calc_below,
+ &vid_ctx->mv_settings_non_b[distance - 1].mv_calc_colocated,
+ &vid_ctx->mv_settings_non_b[distance - 1].mv_calc_config);
+ }
+
+ hierarchical = (bool)(vid_ctx->mv_settings_hierarchical.cpu_virt);
+
+ host_mv_settings_b_table = (struct img_mv_settings *)(vid_ctx->mv_settings_btable.cpu_virt);
+
+ if (hierarchical)
+ host_mv_settings_hierarchical =
+ (struct img_mv_settings *)(vid_ctx->mv_settings_hierarchical.cpu_virt);
+
+ for (distance_b = 0; distance_b < MAX_BFRAMES; distance_b++) {
+ for (position = 1; position <= distance_b + 1; position++) {
+ struct img_mv_settings *mv_element =
+ (struct img_mv_settings *)((unsigned char *)host_mv_settings_b_table +
+ MV_OFFSET_IN_TABLE(distance_b, position - 1));
+
+ mv_element->mv_calc_config =
+ /* default based on TRM */
+ (DEFAULT_MVCALC_CONFIG | MASK_TOPAZHP_CR_MVCALC_GRID_DISABLE);
+
+ mv_element->mv_calc_colocated = 0x00100100;/* default based on TRM */
+ mv_element->mv_calc_below = 0x01000100; /* default based on TRM */
+
+ update_driver_mv_scaling(position, distance_b + 2, 0, ISINTERB_FLAGS,
+ &mv_element->mv_calc_below,
+ &mv_element->mv_calc_colocated,
+ &mv_element->mv_calc_config);
+ }
+ }
+
+ if (hierarchical) {
+ for (distance_b = 0; distance_b < MAX_BFRAMES; distance_b++)
+ memcpy(host_mv_settings_hierarchical + distance_b,
+ (unsigned char *)host_mv_settings_b_table +
+ MV_OFFSET_IN_TABLE(distance_b, distance_b >> 1),
+ sizeof(struct img_mv_settings));
+ }
+}
+
+/*
+ * Generates the video pic params template
+ */
+void adjust_pic_flags(struct img_enc_context *enc, struct img_rc_params *rc_params,
+ unsigned char first_pic, unsigned int *flags)
+{
+ unsigned int flags_local;
+ struct pic_params *pic_params = &enc->video->pic_params;
+
+ flags_local = pic_params->flags;
+
+ if (!rc_params->rc_enable || !first_pic)
+ flags_local = 0;
+
+ *flags = flags_local;
+}
+
+/*
+ * Sets up RC Data
+ */
+void setup_rc_data(struct img_video_context *video, struct pic_params *pic_params,
+ struct img_rc_params *rc_params)
+{
+ int tmp_qp = 0;
+ int buffer_size_in_frames;
+ short max_qp = MAX_QP_H264;
+ short min_qp = 0;
+ int mul_of_8mbits;
+ int framerate, scale = 1;
+ int l1, l2, l3, l4, l5, scaled_bpp;
+
+ /* If Bit Rate and Basic Units are not specified then set to default values. */
+ if (rc_params->bits_per_second == 0 && !video->enable_mvc)
+ rc_params->bits_per_second = 640000; /* 640 kbps */
+
+ if (!rc_params->bu_size)
+ /* BU = 1 Frame */
+ rc_params->bu_size = (video->picture_height >> 4) * (video->width >> 4);
+
+ if (!rc_params->frame_rate)
+ rc_params->frame_rate = 30; /* fps */
+
+ /* Calculate Bits per Pixel */
+ if (video->width <= 176)
+ framerate = 30;
+ else
+ framerate = rc_params->frame_rate;
+
+ mul_of_8mbits = rc_params->bits_per_second / 8000000;
+
+ if (mul_of_8mbits == 0)
+ scale = 256;
+ else if (mul_of_8mbits > 127)
+ scale = 1;
+ else
+ scale = 128 / mul_of_8mbits;
+
+ scaled_bpp = (scale * rc_params->bits_per_second) /
+ (framerate * video->width * video->frame_height);
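+
+ /*
+ * Worked example: 8 Mbps gives mul_of_8mbits = 1 and scale = 128, so
+ * 1920x1080 at 30 fps yields scaled_bpp = (128 * 8000000) /
+ * (30 * 1920 * 1080) = 16, i.e. 0.125 bits per pixel once the scale
+ * is divided back out.
+ */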
+
+ pic_params->in_params.se_init_qp_i = rc_params->initial_qp_i;
+
+ pic_params->in_params.mb_per_row = (video->width >> 4);
+ pic_params->in_params.mb_per_bu = rc_params->bu_size;
+ pic_params->in_params.mb_per_frm = ((unsigned int)(video->width >> 4)) *
+ (video->frame_height >> 4);
+ pic_params->in_params.bu_per_frm = (pic_params->in_params.mb_per_frm) /
+ rc_params->bu_size;
+
+ pic_params->in_params.intra_period = rc_params->intra_freq;
+ pic_params->in_params.bframes = rc_params->bframes;
+ pic_params->in_params.bit_rate = rc_params->bits_per_second;
+
+ pic_params->in_params.frm_skip_disable = rc_params->disable_frame_skipping;
+
+ pic_params->in_params.bits_per_frm =
+ (rc_params->bits_per_second + rc_params->frame_rate / 2) / rc_params->frame_rate;
+
+ pic_params->in_params.bits_per_bu = pic_params->in_params.bits_per_frm /
+ (4 * pic_params->in_params.bu_per_frm);
+
+ /* Disable VCM hardware */
+ pic_params->in_params.disable_vcm_hardware = rc_params->disable_vcm_hardware;
+ /* Codec-dependent fields */
+ if (video->standard == IMG_STANDARD_H264) {
+ pic_params->in_params.mode.h264.transfer_rate =
+ (rc_params->transfer_bits_per_second + rc_params->frame_rate / 2) /
+ rc_params->frame_rate;
+ pic_params->in_params.mode.h264.hierarchical_mode = rc_params->hierarchical;
+
+ pic_params->in_params.mode.h264.enable_slice_bob =
+ (unsigned char)rc_params->enable_slice_bob;
+ pic_params->in_params.mode.h264.max_slice_bob =
+ (unsigned char)rc_params->max_slice_bob;
+ pic_params->in_params.mode.h264.slice_bob_qp =
+ (unsigned char)rc_params->slice_bob_qp;
+ }
+
+ if (pic_params->in_params.bits_per_frm) {
+ buffer_size_in_frames =
+ (rc_params->buffer_size + (pic_params->in_params.bits_per_frm / 2)) /
+ pic_params->in_params.bits_per_frm;
+ } else {
+ IMG_DBG_ASSERT(video->enable_mvc && ("Can happen only in MVC mode" != NULL));
+ /* Assigning a more or less `normal` value, to be overridden by the MVC RC module */
+ buffer_size_in_frames = 30;
+ }
+
+ /* select thresholds and initial Qps etc that are codec dependent */
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ /* Setup MAX and MIN Quant Values */
+ pic_params->in_params.max_qp = (rc_params->max_qp > 0) &&
+ (rc_params->max_qp < max_qp) ? rc_params->max_qp : max_qp;
+
+ if (rc_params->min_qp == 0) {
+ if (scaled_bpp >= (scale >> 1)) {
+ tmp_qp = 4;
+ } else if (scaled_bpp > ((scale << 1) / 15)) {
+ tmp_qp = (22 * scale) - (40 * scaled_bpp);
+ tmp_qp = tmp_qp / scale;
+ } else {
+ tmp_qp = (30 * scale) - (100 * scaled_bpp);
+ tmp_qp = tmp_qp / scale;
+ }
+
+ /* Adjust minQp up for small buffer size and down for large buffer size */
+ if (buffer_size_in_frames < 5) {
+ tmp_qp += 2;
+ } else if (buffer_size_in_frames > 40) {
+ if (tmp_qp >= 1)
+ tmp_qp -= 1;
+ }
+ /* for HD content allow a lower minQp as bitrate is
+ * more easily controlled in this case
+ */
+ if (pic_params->in_params.mb_per_frm > 2000)
+ tmp_qp -= 6;
+ } else {
+ tmp_qp = rc_params->min_qp;
+ }
+
+ min_qp = 2;
+
+ if (tmp_qp < min_qp)
+ pic_params->in_params.min_qp = min_qp;
+ else
+ pic_params->in_params.min_qp = tmp_qp;
+
+ /* Calculate Initial QP if it has not been specified */
+ tmp_qp = pic_params->in_params.se_init_qp_i;
+ if (pic_params->in_params.se_init_qp_i == 0) {
+ l1 = scale / 20;
+ l2 = scale / 5;
+ l3 = (scale * 2) / 5;
+ l4 = (scale * 4) / 5;
+ l5 = (scale * 1011) / 1000;
+
+ tmp_qp = pic_params->in_params.min_qp;
+
+ pic_params->in_params.se_init_qp_i = tmp_qp;
+ if (scaled_bpp < l1)
+ tmp_qp = (45 * scale) - (78 * scaled_bpp);
+ else if (scaled_bpp < l2)
+ tmp_qp = (44 * scale) - (73 * scaled_bpp);
+ else if (scaled_bpp < l3)
+ tmp_qp = (34 * scale) - (25 * scaled_bpp);
+ else if (scaled_bpp < l4)
+ tmp_qp = (32 * scale) - (20 * scaled_bpp);
+ else if (scaled_bpp < l5)
+ tmp_qp = (25 * scale) - (10 * scaled_bpp);
+ else
+ tmp_qp = (18 * scale) - (5 * scaled_bpp);
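+
+ /*
+ * Worked example: with scale = 128 and scaled_bpp = 16
+ * (0.125 bpp) the second band applies, so tmp_qp =
+ * (44 * 128) - (73 * 16) = 4464, which divides back down
+ * to an initial QP of 34 before the buffer size and GOP
+ * adjustments below.
+ */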
+
+ /*
+ * Adjust se_init_qp_i up for a small buffer size, a low
+ * frame rate or a small GOP size
+ */
+ if (buffer_size_in_frames < 20 || rc_params->intra_freq < 20)
+ tmp_qp += 2 * scale;
+
+ /* for very small buffers increase initial Qp even more */
+ if (buffer_size_in_frames < 5)
+ tmp_qp += 8 * scale;
+
+ /* start on a lower initial Qp for HD content
+ * as the coding is more efficient
+ */
+ if (pic_params->in_params.mb_per_frm > 2000)
+ tmp_qp -= 2 * scale;
+
+ if (pic_params->in_params.intra_period == 1) {
+ /* for very small GOPS start with a much higher initial Qp */
+ tmp_qp += 12 * scale;
+ } else if (pic_params->in_params.intra_period < 5) {
+ tmp_qp += 6 * scale;
+ }
+
+ tmp_qp = tmp_qp / scale;
+ }
+
+ max_qp = 49;
+
+ if (tmp_qp > max_qp)
+ tmp_qp = max_qp;
+
+ if (tmp_qp < pic_params->in_params.min_qp)
+ tmp_qp = pic_params->in_params.min_qp;
+
+ pic_params->in_params.se_init_qp_i = tmp_qp;
+
+ if (scaled_bpp <= ((3 * scale) / 10))
+ pic_params->flags |= ISRC_I16BIAS;
+ break;
+
+ default:
+ /* the NO RC cases will fall here */
+ break;
+ }
+
+ if (video->rc_params.rc_mode == IMG_RCMODE_VBR) {
+ pic_params->in_params.mb_per_bu = pic_params->in_params.mb_per_frm;
+ pic_params->in_params.bu_per_frm = 1;
+
+ /* Initialize the parameters of fluid flow traffic model. */
+ pic_params->in_params.buffer_size = rc_params->buffer_size;
+
+ /* VBR shouldn't skip frames */
+ pic_params->in_params.frm_skip_disable = TRUE;
+
+ /*
+ * These scale factors are used only by rate control, to avoid
+ * overflow in its fixed-point calculations; they are chosen
+ * according to the bit rate.
+ */
+ if (rc_params->bits_per_second < 640000)
+ pic_params->in_params.scale_factor = 2; /* related to complexity */
+ else if (rc_params->bits_per_second < 2000000) /* 2 Mbits */
+ pic_params->in_params.scale_factor = 4;
+ else if (rc_params->bits_per_second < 8000000) /* 8 Mbits */
+ pic_params->in_params.scale_factor = 6;
+ else
+ pic_params->in_params.scale_factor = 8;
+ } else {
+ /* Set up Input Parameters that are mode dependent */
+ switch (video->standard) {
+ case IMG_STANDARD_H264:
+ /*
+ * H264 CBR RC: Initialize the parameters of fluid flow traffic model.
+ */
+ pic_params->in_params.buffer_size = rc_params->buffer_size;
+
+ /* HRD consideration - These values are used by H.264 reference code. */
+ if (rc_params->bits_per_second < 1000000) /* 1 Mbits/s */
+ pic_params->in_params.scale_factor = 0;
+ else if (rc_params->bits_per_second < 2000000) /* 2 Mbits/s */
+ pic_params->in_params.scale_factor = 1;
+ else if (rc_params->bits_per_second < 4000000) /* 4 Mbits/s */
+ pic_params->in_params.scale_factor = 2;
+ else if (rc_params->bits_per_second < 8000000) /* 8 Mbits/s */
+ pic_params->in_params.scale_factor = 3;
+ else
+ pic_params->in_params.scale_factor = 4;
+
+ if (video->rc_params.rc_mode == IMG_RCMODE_VCM)
+ pic_params->in_params.buffer_size_frames = buffer_size_in_frames;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (rc_params->sc_detect_disable)
+ pic_params->flags |= ISSCENE_DISABLED;
+
+ pic_params->in_params.initial_delay = rc_params->initial_delay;
+ pic_params->in_params.initial_level = rc_params->initial_level;
+ rc_params->initial_qp_i = pic_params->in_params.se_init_qp_i;
+
+ /* The rate control uses this value to adjust
+ * the reaction rate to larger than expected frames
+ */
+ if (video->standard == IMG_STANDARD_H264) {
+ if (pic_params->in_params.bits_per_frm) {
+ const int bits_per_gop =
+ (rc_params->bits_per_second / rc_params->frame_rate) *
+ rc_params->intra_freq;
+
+ pic_params->in_params.mode.h264.rc_scale_factor = (bits_per_gop * 256) /
+ (pic_params->in_params.buffer_size -
+ pic_params->in_params.initial_level);
+ } else {
+ pic_params->in_params.mode.h264.rc_scale_factor = 0;
+ }
+ }
+}
+
+void topaz_setup_input_format(struct img_video_context *video,
+ struct img_vxe_scaler_setup *scaler_setup)
+{
+ const unsigned int scaler_coeff_regs_no_crop[] = {0xFE083802U, 0xF9103700U,
+ 0xF31A3300U, 0xF1232C00U};
+
+ if (video->enable_scaler) {
+ unsigned int pitch_x, pitch_y;
+ int phase;
+
+ pitch_x = (((unsigned int)(video->source_width - video->crop_left -
+ video->crop_right)) << 13) / video->unrounded_width;
+
+ pitch_y = (((unsigned int)(video->source_frame_height - video->crop_top -
+ video->crop_bottom)) << 13) / video->unrounded_frame_height;
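+
+ /*
+ * The pitches are Q13 fixed-point source/output ratios: e.g.
+ * scaling a 1920-wide source to 1280 with no crop gives
+ * pitch_x = (1920 << 13) / 1280 = 12288, i.e. 1.5. Ratios at or
+ * above 0x8000 (4.0) fall back to the bilinear filter path
+ * below, which halves the pitch first.
+ */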
+
+ /* Input size */
+ scaler_setup->scaler_input_size_reg =
+ F_ENCODE(video->source_width - 1,
+ TOPAZHP_EXT_CR_SCALER_INPUT_WIDTH_MIN1) |
+ F_ENCODE((video->source_frame_height >>
+ (video->is_interlaced ? 1 : 0)) - 1,
+ TOPAZHP_EXT_CR_SCALER_INPUT_HEIGHT_MIN1);
+
+ scaler_setup->scaler_crop_reg = F_ENCODE(video->crop_left,
+ TOPAZHP_EXT_CR_SCALER_INPUT_CROP_HOR) |
+ F_ENCODE(video->crop_top,
+ TOPAZHP_EXT_CR_SCALER_INPUT_CROP_VER);
+
+ /* Scale factors */
+ scaler_setup->scaler_pitch_reg = 0;
+
+ if (pitch_x > 0x7FFF) {
+ scaler_setup->scaler_pitch_reg |= F_ENCODE(1,
+ TOPAZHP_EXT_CR_SCALER_HOR_BILINEAR_FILTER);
+ pitch_x >>= 1;
+ }
+
+ if (pitch_x > 0x7FFF)
+ pitch_x = 0x7FFF;
+
+ if (pitch_y > 0x7FFF) {
+ scaler_setup->scaler_pitch_reg |= F_ENCODE(1U,
+ TOPAZHP_EXT_CR_SCALER_VER_BILINEAR_FILTER);
+ pitch_y >>= 1;
+ }
+
+ if (pitch_y > 0x7FFF)
+ pitch_y = 0x7FFF;
+
+ scaler_setup->scaler_pitch_reg |=
+ F_ENCODE(pitch_x, TOPAZHP_EXT_CR_SCALER_INPUT_HOR_PITCH) |
+ F_ENCODE(pitch_y, TOPAZHP_EXT_CR_SCALER_INPUT_VER_PITCH);
+
+ /*
+ * Coefficients
+ * With no crop, the coefficients remain the same.
+ * If crop is desired, new values will need to be calculated.
+ */
+ for (phase = 0; phase < 4; phase++)
+ scaler_setup->hor_scaler_coeff_regs[phase] =
+ scaler_coeff_regs_no_crop[phase];
+
+ for (phase = 0; phase < 4; phase++)
+ scaler_setup->ver_scaler_coeff_regs[phase] =
+ scaler_coeff_regs_no_crop[phase];
+
+ scaler_setup->scaler_control = F_ENCODE(1, TOPAZHP_EXT_CR_SCALER_ENABLE);
+
+ switch (video->format) {
+ case IMG_CODEC_420_YUV:
+ case IMG_CODEC_420_PL8:
+ case IMG_CODEC_420_YV12:
+ case IMG_CODEC_420_IMC2:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL111YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_420_PL12:
+ case IMG_CODEC_420_PL12_PACKED:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL12YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_420_PL21:
+ case IMG_CODEC_420_PL21_PACKED:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_420PL12YCRCB8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_422_YUV:
+ case IMG_CODEC_422_PL8:
+ case IMG_CODEC_422_YV12:
+ case IMG_CODEC_422_IMC2:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL111YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_422_PL12:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL12YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_422_PL21:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422PL12YCRCB8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_Y0UY1V_8888:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3YCBYCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_Y0VY1U_8888:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3YCRYCB8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_UY0VY1_8888:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3CBYCRY8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_VY0UY1_8888:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_422IL3CRYCBY8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_444_YUV:
+ case IMG_CODEC_444_PL8:
+ case IMG_CODEC_444_YV12:
+ case IMG_CODEC_444_IMC2:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL111YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_444_PL12:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL12YCBCR8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_444_PL21:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444PL12YCRCB8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_ABCX:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL4ABCX8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_XBCA:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL4XBCA8,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ case IMG_CODEC_ABC565:
+ scaler_setup->input_scaler_control =
+ F_ENCODE(TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT_444IL3RGB565,
+ TOPAZHP_EXT_CR_INPUT_FRAME_STORE_FORMAT);
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Disable Scaling */
+ scaler_setup->scaler_control = 0;
+ }
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.h b/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.h
new file mode 100644
index 000000000000..c000a3778045
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_api_utils.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * topaz utility header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include "topaz_api.h"
+
+/*
+ * Calculates the correct number of macroblocks per kick and kicks per BU
+ */
+void calculate_kick_and_bu_size(unsigned int width_in_mbs,
+ unsigned int height_in_mbs,
+ unsigned char is_interlaced,
+ unsigned int max_bu_per_frame,
+ unsigned int *kick_size,
+ unsigned int *kicks_per_bu,
+ unsigned int *min_slice_height);
+
+unsigned int calculate_stride(enum img_format format,
+ unsigned short requested_stride_bytes,
+ unsigned short width);
+
+void topaz_setup_input_format(struct img_video_context *video,
+ struct img_vxe_scaler_setup *scaler_setup);
+
+void topaz_setup_input_csc(struct img_video_context *video,
+ struct img_vxe_scaler_setup *scaler_setup,
+ struct img_vxe_csc_setup *csc_setup,
+ enum img_csc_preset csc_preset);
+
+unsigned int topaz_get_packed_buffer_strides(unsigned short buffer_stride_bytes,
+ enum img_format format,
+ unsigned char enable_scaler,
+ unsigned char is_interlaced,
+ unsigned char is_interleaved);
+
+void prepare_mv_estimates(struct img_enc_context *enc);
+
+void adjust_pic_flags(struct img_enc_context *enc, struct img_rc_params *prc_params,
+ unsigned char first_pic, unsigned int *flags);
+
+void setup_rc_data(struct img_video_context *video, struct pic_params *pic_params,
+ struct img_rc_params *rc_params);
+
+void patch_hw_profile(struct img_video_params *video_params, struct img_video_context *video);
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_color_formats.h b/drivers/media/platform/vxe-vxd/encoder/topaz_color_formats.h
new file mode 100644
index 000000000000..2187e520ac2a
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_color_formats.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * colour format plane size calculation for buffer allocation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include "topaz_api.h"
+#include "fw_headers/defs.h"
+
+void plane_size(enum img_format color_format, unsigned int stride,
+ unsigned int height, unsigned int *y_size, unsigned int *u_size,
+ unsigned int *v_size)
+{
+ *y_size = *u_size = *v_size = 0;
+
+ switch (color_format) {
+ case IMG_CODEC_420_PL8:
+ /* allocate frame for 4:2:0 planar format */
+ *y_size = stride * height;
+ *u_size = stride * height / 4;
+ *v_size = stride * height / 4;
+ break;
+ case IMG_CODEC_420_PL12:
+ /* allocate frame for 4:2:0 planar format (chroma interleaved) */
+ *y_size = stride * height;
+ *u_size = stride * height / 2;
+ break;
+ case IMG_CODEC_422_YUV:
+ case IMG_CODEC_422_YV12:
+ case IMG_CODEC_422_IMC2:
+ /* allocate frame for 4:2:2 format */
+ *y_size = stride * height * 2;
+ break;
+ case IMG_CODEC_422_PL8:
+ /* allocate frame for 4:2:2 planar format */
+ *y_size = stride * height;
+ *u_size = stride * height / 2;
+ *v_size = stride * height / 2;
+ break;
+ case IMG_CODEC_422_PL12:
+ /* allocate frame for 4:2:2 planar format (chroma interleaved) */
+ *y_size = stride * height;
+ *u_size = stride * height;
+ break;
+ case IMG_CODEC_Y0UY1V_8888:
+ case IMG_CODEC_UY0VY1_8888:
+ case IMG_CODEC_Y0VY1U_8888:
+ case IMG_CODEC_VY0UY1_8888:
+ /* allocate frame for 4:2:2 format */
+ *y_size = stride * height;
+ break;
+ case IMG_CODEC_444_YUV:
+ case IMG_CODEC_444_YV12:
+ case IMG_CODEC_444_IMC2:
+ /* allocate frame for 4:4:4 format */
+ *y_size = stride * height * 3;
+ break;
+ case IMG_CODEC_444_PL8:
+ /* allocate frame for 4:4:4 planar format */
+ *y_size = stride * height;
+ *u_size = stride * height;
+ *v_size = stride * height;
+ break;
+ case IMG_CODEC_444_PL12:
+ /* allocate frame for 4:4:4 planar format (chroma interleaved) */
+ *y_size = stride * height;
+ *u_size = stride * height * 2;
+ break;
+ case IMG_CODEC_ABCX:
+ case IMG_CODEC_XBCA:
+ case IMG_CODEC_ABC565:
+ /* allocate frame for RGB interleaved format */
+ *y_size = stride * height;
+ break;
+ case IMG_CODEC_420_YUV:
+ case IMG_CODEC_420_YV12:
+ case IMG_CODEC_420_IMC2:
+ case IMG_CODEC_420_PL12_PACKED:
+ case IMG_CODEC_420_PL21_PACKED:
+ /* allocate frame for 4:2:0 format */
+ *y_size = stride * height * 3 / 2;
+ break;
+ default:
+ *y_size = 0;
+ *u_size = 0;
+ *v_size = 0;
+ break;
+ }
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_device.c b/drivers/media/platform/vxe-vxd/encoder/topaz_device.c
new file mode 100644
index 000000000000..6304d1156a7a
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_device.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Encoder device function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/delay.h>
+#include <linux/time64.h>
+#include <linux/jiffies.h>
+
+#include "work_queue.h"
+#include "fw_headers/defs.h"
+#include "fw_headers/vxe_common.h"
+#include "target.h"
+#include "target_config.h"
+#include "topaz_device.h"
+#include "topazmmu.h"
+#include "vid_buf.h"
+#include "vxe_public_regdefs.h"
+#include "img_errors.h"
+
+#ifdef DEBUG_ENCODER_DRIVER
+static char command_string[][38] = {
+ "MTX_CMDID_NULL",
+ "MTX_CMDID_SHUTDOWN",
+ "MTX_CMDID_DO_HEADER",
+ "MTX_CMDID_ENCODE_FRAME",
+ "MTX_CMDID_START_FRAME",
+ "MTX_CMDID_ENCODE_SLICE",
+ "MTX_CMDID_END_FRAME",
+ "MTX_CMDID_SETVIDEO",
+ "MTX_CMDID_GETVIDEO",
+ "MTX_CMDID_DO_CHANGE_PIPEWORK",
+#if SECURE_IO_PORTS
+ "MTX_CMDID_SECUREIO",
+#endif
+ "MTX_CMDID_PICMGMT",
+ "MTX_CMDID_RC_UPDATE",
+ "MTX_CMDID_PROVIDE_SOURCE_BUFFER",
+ "MTX_CMDID_PROVIDE_REF_BUFFER",
+ "MTX_CMDID_PROVIDE_CODEDPACKAGE_BUFFER",
+ "MTX_CMDID_ABORT",
+ "MTX_CMDID_SETQUANT",
+ "MTX_CMDID_SETUP_INTERFACE",
+ "MTX_CMDID_ISSUEBUFF",
+ "MTX_CMDID_SETUP",
+ "MTX_CMDID_UPDATE_SOURCE_FORMAT",
+ "MTX_CMDID_UPDATE_CSC",
+ "MTX_CMDID_ENDMARKER"
+};
+#endif
+
+DECLARE_WAIT_QUEUE_HEAD(event_wait_queue);
+
+#define TOPAZ_DEV_SPIN_LOCK_NAME "topaz_dev"
+/* max syncStatus value used (at least 4 * MAX_TOPAZ_CMDS_QUEUED) */
+#define MAX_TOPAZ_CMD_COUNT (0x1000)
+
+#define COMM_WB_DATA_BUF_SIZE (64)
+
+/* Semaphore locks */
+#define COMM_LOCK_TX 0x01
+#define COMM_LOCK_RX 0x02
+#define COMM_LOCK_BOTH (COMM_LOCK_TX | COMM_LOCK_RX)
+
+static unsigned int topaz_timeout_retries = 817000;
+
+#define TOPAZ_TIMEOUT_JPEG (50000)
+#define TOPAZ_TIMEOUT_RETRIES (topaz_timeout_retries)
+
+unsigned short g_load_method = MTX_LOADMETHOD_DMA; /* This is the load method used */
+
+unsigned int g_core_rev;
+unsigned int g_core_des1;
+void *g_lock;
+
+struct vidio_ddbufinfo *g_aps_wb_data_info;
+
+static unsigned char g_pipe_usage[TOPAZHP_MAX_NUM_PIPES] = { 0 };
+
+/* Order MUST match with topaz_mem_space_idx enum */
+struct mem_space topaz_mem_space[] = {
+ /* Multicore sync RAM */
+ { "REG_TOPAZHP_MULTICORE", MEMSPACE_REGISTER,
+ {{0x00000000, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_DMAC", MEMSPACE_REGISTER,
+ {{0x00000400, 0x000000ff, TARGET_NO_IRQ}}},
+ { "REG_COMMS", MEMSPACE_REGISTER,
+ {{0x00000500, 0x000000ff, TARGET_NO_IRQ}}},
+ { "REG_MTX", MEMSPACE_REGISTER,
+ {{0x00000800, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_MMU", MEMSPACE_REGISTER,
+ {{0x00000C00, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_TEST", MEMSPACE_REGISTER,
+ {{0xFFFF0000, 0x000001ff, TARGET_NO_IRQ}}},
+ { "REGMTXRAM", MEMSPACE_REGISTER,
+ {{0x80000000, 0x0000ffff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_CORE_0", MEMSPACE_REGISTER,
+ {{0x00001000, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_VLC_CORE_0", MEMSPACE_REGISTER,
+ {{0x00001400, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_DEBLOCKER_CORE_0", MEMSPACE_REGISTER,
+ {{0x00001800, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_COREEXT_0", MEMSPACE_REGISTER,
+ {{0x00001C00, 0x000003ff, TARGET_NO_IRQ}}},
+
+ { "REG_TOPAZHP_CORE_1", MEMSPACE_REGISTER,
+ {{0x00002000, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_VLC_CORE_1", MEMSPACE_REGISTER,
+ {{0x00002400, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_DEBLOCKER_CORE_1", MEMSPACE_REGISTER,
+ {{0x00002800, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_COREEXT_1", MEMSPACE_REGISTER,
+ {{0x00002C00, 0x000003ff, TARGET_NO_IRQ}}},
+
+ { "REG_TOPAZHP_CORE_2", MEMSPACE_REGISTER,
+ {{0x00003000, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_VLC_CORE_2", MEMSPACE_REGISTER,
+ {{0x00003400, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_DEBLOCKER_CORE_2", MEMSPACE_REGISTER,
+ {{0x00003800, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_COREEXT_2", MEMSPACE_REGISTER,
+ {{0x00003C00, 0x000003ff, TARGET_NO_IRQ}}},
+
+ { "REG_TOPAZHP_CORE_3", MEMSPACE_REGISTER,
+ {{0x00004000, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_VLC_CORE_3", MEMSPACE_REGISTER,
+ {{0x00004400, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_DEBLOCKER_CORE_3", MEMSPACE_REGISTER,
+ {{0x00004800, 0x000003ff, TARGET_NO_IRQ}}},
+ { "REG_TOPAZHP_COREEXT_3", MEMSPACE_REGISTER,
+ {{0x00004C00, 0x000003ff, TARGET_NO_IRQ}}},
+
+ { "FW", MEMSPACE_MEMORY,
+ {{0x00000000, 0x00800000, 0 }}},
+ { "SYSMEM", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "MEMSYSMEM", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "MEM", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "FB", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "MEMDMAC_00", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "MEMDMAC_01", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+ { "MEMDMAC_02", MEMSPACE_MEMORY,
+ {{0x00000000, 0, 0 }}},
+};
+
+#define MEMORYSPACES_NUM ARRAY_SIZE(topaz_mem_space)
+
+static struct target_config topaz_target_config = {
+ MEMORYSPACES_NUM,
+ &topaz_mem_space[0]
+};
+
+/*
+ * topazdd_int_enable
+ */
+static void topazdd_int_enable(struct topaz_dev_ctx *ctx, unsigned int mask)
+{
+ unsigned int reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(ctx->lock, flags);
+
+ /* config interrupts on Topaz core */
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB);
+
+ /* set enable interrupt bits */
+ reg |= mask;
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB, reg);
+
+ spin_unlock_irqrestore(ctx->lock, (unsigned long)flags);
+}
+
+/*
+ * topazdd_int_disable
+ */
+static void topazdd_int_disable(struct topaz_dev_ctx *ctx, unsigned int mask)
+{
+ unsigned int reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(ctx->lock, flags);
+
+ /* config interrupts on Topaz core */
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB);
+
+ /* clear enable interrupt bits */
+ reg &= ~mask;
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB, reg);
+
+ spin_unlock_irqrestore(ctx->lock, (unsigned long)flags);
+}
+
+/*
+ * Get the number of pipes present
+ */
+unsigned int topazdd_get_num_pipes(struct topaz_dev_ctx *ctx)
+{
+ static unsigned int g_pipes_avail;
+
+ if (!ctx->multi_core_mem_addr)
+ return 0;
+
+ if (g_pipes_avail == 0) {
+ /* get the actual number of cores */
+ g_pipes_avail = VXE_RD_REG32(ctx->multi_core_mem_addr,
+ TOPAZHP_TOP_CR_MULTICORE_HW_CFG);
+ g_pipes_avail = (g_pipes_avail & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED);
+ IMG_DBG_ASSERT(g_pipes_avail != 0);
+ }
+
+ return g_pipes_avail;
+}
+
+unsigned int topazdd_get_core_rev(void)
+{
+ return g_core_rev;
+}
+
+unsigned int topazdd_get_core_des1(void)
+{
+ return g_core_des1;
+}
+
+static void wbfifo_clear(struct img_comm_socket *sock)
+{
+ sock->in_fifo_producer = 0;
+ sock->in_fifo_consumer = 0;
+}
+
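+/*
+ * The writeback FIFO is a single-producer/single-consumer ring that keeps
+ * one slot empty: producer == consumer means empty, and the ring is full
+ * when advancing the producer (with wraparound) would hit the consumer,
+ * so no separate element count is needed.
+ */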
+static unsigned char wbfifo_add(struct img_comm_socket *sock, struct img_writeback_msg *msg)
+{
+ unsigned int new_producer = sock->in_fifo_producer + 1;
+
+ if (new_producer == COMM_INCOMING_FIFO_SIZE)
+ new_producer = 0;
+
+ if (new_producer == sock->in_fifo_consumer)
+ return FALSE;
+
+ memcpy(&sock->in_fifo[sock->in_fifo_producer], msg, sizeof(struct img_writeback_msg));
+
+ sock->in_fifo_producer = new_producer;
+
+ return TRUE;
+}
+
+static unsigned char wbfifo_is_empty(struct img_comm_socket *sock)
+{
+ return (sock->in_fifo_producer == sock->in_fifo_consumer);
+}
+
+static unsigned char wbfifo_get(struct img_comm_socket *sock, struct img_writeback_msg *msg)
+{
+ if (wbfifo_is_empty(sock))
+ return FALSE;
+
+ memcpy(msg, &sock->in_fifo[sock->in_fifo_consumer], sizeof(struct img_writeback_msg));
+
+ sock->in_fifo_consumer++;
+
+ if (sock->in_fifo_consumer == COMM_INCOMING_FIFO_SIZE)
+ sock->in_fifo_consumer = 0;
+
+ return TRUE;
+}
+
+unsigned char topazdd_is_idle(struct img_comm_socket *sock)
+{
+ if (sock->msgs_sent == sock->ack_recv && wbfifo_is_empty(sock))
+ return TRUE;
+
+ return FALSE;
+}
+
+static void set_auto_clock_gating(struct topaz_dev_ctx *ctx, struct img_fw_context *fw_ctx,
+ unsigned char gating)
+{
+ unsigned int reg;
+
+ reg = F_ENCODE(1U, TOPAZHP_TOP_CR_WRITES_CORE_ALL);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, reg);
+
+ reg = F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE) |
+ F_ENCODE(gating, TOPAZHP_CR_TOPAZHP_INPUT_SCALER_AUTO_CLK_GATE);
+
+ VXE_WR_REG32(ctx->hp_core_reg_addr[0], TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING, reg);
+
+ reg = 0;
+ reg = VXE_RD_REG32(ctx->hp_core_reg_addr[0], TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING);
+
+ /* Disable LRITC clocks */
+ reg = F_INSERT(reg, 1, TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE);
+
+ VXE_WR_REG32(ctx->hp_core_reg_addr[0], TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING, reg);
+
+ reg = F_ENCODE(0, TOPAZHP_TOP_CR_WRITES_CORE_ALL);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, reg);
+}
+
+static void comm_lock(struct topaz_dev_ctx *ctx, unsigned int flags)
+{
+ if (flags & COMM_LOCK_TX)
+ mutex_lock_nested(ctx->comm_tx_mutex, SUBCLASS_TOPAZDD_TX);
+}
+
+static void comm_unlock(struct topaz_dev_ctx *ctx, unsigned int flags)
+{
+ if (flags & COMM_LOCK_TX)
+ mutex_unlock((struct mutex *)ctx->comm_tx_mutex);
+}
+
+int comm_prepare_fw(struct img_fw_context *fw_ctx, enum img_codec codec)
+{
+ if (fw_ctx->populated || fw_ctx->initialized)
+ return IMG_SUCCESS;
+
+ return mtx_populate_fw_ctx(codec, fw_ctx);
+}
+
+static unsigned int H264_RCCONFIG_TABLE_5[27] = {
+ 0x00000007, 0x00000006, 0x00000006, 0x00000006, 0x00000006, 0x00000005, 0x00000005,
+ 0x00000005, 0x00000005,
+ 0x00000005, 0x00000005, 0x00000004, 0x00000004,
+ 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000005,
+ 0x00000005, 0x00000005,
+ 0x00000005, 0x00000005, 0x00000005, 0x00000006,
+ 0x00000006,
+};
+
+static unsigned int H264_RCCONFIG_TABLE_6[27] = {
+ 0x00000018, 0x00000018, 0x00000018, 0x00000018, 0x00000018, 0x00000018, 0x00000018,
+ 0x00000018, 0x00000024,
+ 0x00000030, 0x00000030, 0x0000003c, 0x0000003c,
+ 0x00000048, 0x00000048, 0x00000054, 0x00000060, 0x0000006c, 0x000000c8, 0x00000144,
+ 0x00000180, 0x00000210,
+ 0x000002a0, 0x00000324, 0x0000039c, 0x00000414,
+ 0x00000450,
+};
+
+static unsigned int H264_RCCONFIG_TABLE_7[27] = {
+ 0x00000014, 0x00000014, 0x00000014, 0x00000014, 0x00000014, 0x00000014, 0x00000032,
+ 0x00000064, 0x000000d2,
+ 0x000001a4, 0x000001a4, 0x000001bd, 0x000001d6,
+ 0x000001ef, 0x00000208, 0x00000217, 0x00000226, 0x0000023a, 0x000002cb, 0x0000035c,
+ 0x00000384, 0x000003e8,
+ 0x000004b0, 0x00000578, 0x00000640, 0x00000708,
+ 0x000007d0,
+};
+
+static unsigned int MPEG_RCCONFIG_TABLE_7[17] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000003c,
+ 0x000000b4, 0x0000012c,
+ 0x000001a4, 0x0000021c, 0x00000294, 0x0000030c,
+ 0x00000384, 0x000003fc, 0x00000474, 0x000004ec,
+};
+
+/*
+ * Load the tables for H.264
+ */
+void comm_load_h264_tables(struct topaz_dev_ctx *ctx)
+{
+ int n;
+ unsigned int pipe, pipe_cnt;
+
+ pipe_cnt = topazdd_get_num_pipes(ctx);
+
+ for (n = 26; n >= 0; n--) {
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE4, 0);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE5,
+ H264_RCCONFIG_TABLE_5[n]);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE6,
+ H264_RCCONFIG_TABLE_6[n]);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE7,
+ H264_RCCONFIG_TABLE_7[n]);
+ }
+
+ for (pipe = 0; pipe < pipe_cnt; pipe++) {
+ VXE_WR_REG32(ctx->hp_core_reg_addr[pipe], TOPAZHP_CR_RC_CONFIG_REG8, 0x00000006);
+ VXE_WR_REG32(ctx->hp_core_reg_addr[pipe], TOPAZHP_CR_RC_CONFIG_REG9, 0x00000406);
+ }
+}
+
+/*
+ * Load the tables for mpeg4
+ */
+void comm_load_tables(struct topaz_dev_ctx *ctx)
+{
+ int n;
+ unsigned int pipe;
+
+ for (n = 16; n > 0; n--) {
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE4, 0);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE6, 0);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_RC_CONFIG_TABLE7,
+ MPEG_RCCONFIG_TABLE_7[n]);
+ }
+
+ for (pipe = 0; pipe < topazdd_get_num_pipes(ctx); pipe++)
+ VXE_WR_REG32(ctx->hp_core_reg_addr[pipe], TOPAZHP_CR_RC_CONFIG_REG8, 0x00000006);
+}
+
+/*
+ * Load bias tables
+ */
+static int comm_load_bias(struct topaz_dev_ctx *ctx, unsigned int codec_mask)
+{
+ if ((codec_mask & CODEC_MASK_H263) || (codec_mask & CODEC_MASK_MPEG2) ||
+ (codec_mask & CODEC_MASK_MPEG4))
+ comm_load_tables(ctx);
+
+ if ((codec_mask & CODEC_MASK_H264) || (codec_mask & CODEC_MASK_H264MVC))
+ comm_load_h264_tables(ctx);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * Loads MTX firmware
+ */
+void topaz_setup_firmware(struct topaz_dev_ctx *ctx,
+ struct img_fw_context *fw_ctx,
+ enum mtx_load_method load_method,
+ enum img_codec codec, unsigned char num_pipes)
+{
+ unsigned int reg;
+ unsigned int secure_reg;
+ int ret;
+
+ fw_ctx->initialized = FALSE;
+
+ /* Reset the MTXs and upload the code. */
+ /* Start each MTX in turn; MUST start with the master to enable comms to the other cores */
+
+#if SECURE_IO_PORTS
+ /* reset SECURE_CONFIG register to allow loading FW without security.
+ * Default option is secure.
+ */
+
+ secure_reg = 0x000F0F0F;
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_SECURE_CONFIG, secure_reg);
+#endif
+
+ ret = comm_prepare_fw(fw_ctx, codec);
+
+ if (ret != IMG_SUCCESS) {
+ pr_err("Failed to populate firmware context. Error code: %i\n", ret);
+ return;
+ }
+
+ /* initialise the MTX */
+ mtx_initialize(ctx, fw_ctx);
+
+ /* clear TOHOST register now so that our ISR doesn't see any
+ * intermediate value before the FW has output anything
+ */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOHOST << 2), 0);
+
+ /* clear BOOTSTATUS register. Firmware will write to
+ * this to indicate firmware boot progress
+ */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_BOOTSTATUS << 2), 0);
+
+ /* Soft reset of MTX */
+ reg = 0;
+ reg = F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_SRST, reg);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_SRST, 0x0);
+
+ if (fw_ctx->initialized) {
+ set_auto_clock_gating(ctx, fw_ctx, 1);
+ mtx_load(ctx, fw_ctx, load_method);
+
+ /* flush the command FIFO */
+ reg = 0;
+ reg = F_ENCODE(1, TOPAZHP_TOP_CR_CMD_FIFO_FLUSH);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_FLUSH, reg);
+
+ /* we do not want to run in secure FW mode, so write a placeholder
+ * to the FIFO that the firmware will know to ignore
+ */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+ TOPAZHP_NON_SECURE_FW_MARKER);
+
+ /* Clear FW_IDLE_STATUS register */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, MTX_SCRATCHREG_IDLE, 0);
+
+ /* turn on MTX */
+ mtx_start(fw_ctx);
+ /* get MTX Clk Freq */
+
+ mtx_kick(fw_ctx, 1);
+
+ /*
+ * We do not need to do this POLL here as it is safe to continue without it.
+ * We do it because it serves to warn us that there is a problem if the
+ * firmware doesn't start for some reason
+ */
+ VXE_POLL_REG32_ISEQ(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_BOOTSTATUS << 2), TOPAZHP_FW_BOOT_SIGNAL,
+ 0xffffffff, TOPAZ_TIMEOUT_RETRIES);
+ }
+}
+
+static int comm_send(struct img_comm_socket *sock, struct mtx_tomtx_msg *msg, unsigned int *wb_val)
+{
+ struct topaz_dev_ctx *ctx;
+ struct img_fw_context *fw_ctx;
+ unsigned int space_avail;
+ unsigned int cmd_word;
+ unsigned int writeback_val;
+ enum mtx_cmd_id cmd_id = (enum mtx_cmd_id)(msg->cmd_id & 0x7F);
+
+ ctx = sock->ctx;
+ fw_ctx = &ctx->fw_ctx;
+
+ /* mark the context as active in case we need to save its state later */
+ fw_ctx->active_ctx_mask |= (1 << sock->id);
+
+ space_avail = VXE_RD_REG32(ctx->multi_core_mem_addr,
+ TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE);
+
+ space_avail = F_DECODE(space_avail, TOPAZHP_TOP_CR_CMD_FIFO_SPACE);
+
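+ /* each command occupies four FIFO words: cmd, data, address and writeback */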
+ if (space_avail < 4)
+ return IMG_ERROR_RETRY;
+
+ /* Write command to FIFO */
+ cmd_word = F_ENCODE(sock->id, MTX_MSG_CORE) | msg->cmd_id;
+
+ if (msg->cmd_id & MTX_CMDID_PRIORITY) {
+ /* increment the command counter */
+ sock->high_cmd_cnt++;
+
+ /* Prepare high priority command */
+ cmd_word |= F_ENCODE(1, MTX_MSG_PRIORITY) |
+ F_ENCODE(((sock->low_cmd_cnt - 1) & 0xff) | (sock->high_cmd_cnt << 8),
+ MTX_MSG_COUNT);
+ } else {
+ /* Prepare low priority command */
+ cmd_word |= F_ENCODE(sock->low_cmd_cnt & 0xff, MTX_MSG_COUNT);
+ }
+
+ /* write command into FIFO */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE, cmd_word);
+
+ /* Write data to FIFO */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE, msg->data);
+
+ if (msg->command_data_buf) {
+ /* Write address */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+ msg->command_data_buf->dev_virt);
+ } else {
+ /* Write nothing */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE, 0);
+ }
+
+ /* Write writeback value to FIFO */
+
+ /* prepare Writeback value */
+
+ /* We don't actually use this value, but it may be useful to customers */
+ if (msg->cmd_id & MTX_CMDID_PRIORITY) {
+ /* HIGH priority command */
+
+ writeback_val = sock->high_cmd_cnt << 24;
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+ writeback_val);
+ } else {
+ /* LOW priority command */
+ writeback_val = sock->low_cmd_cnt << 16;
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+ writeback_val);
+
+ /* increment the command counter */
+ sock->low_cmd_cnt++;
+ }
+
+ if (wb_val)
+ *wb_val = writeback_val;
+
+ sock->last_sync = writeback_val;
+
+ switch (cmd_id) {
+ case MTX_CMDID_PROVIDE_CODEDPACKAGE_BUFFER:
+ {
+#ifdef DEBUG_ENCODER_DRIVER
+ unsigned int slot;
+
+ slot = F_DECODE(msg->data, MTX_MSG_PROVIDE_CODEDPACKAGE_BUFFER_SLOT);
+ pr_debug("MSG_TX[%d]: %s(%d) %s %s cmd: %#08x cmd_word: %#08x data: %#08x: addr: 0x%p writeback_val: %#08x\n",
+ sock->id, command_string[cmd_id], slot,
+ (msg->cmd_id & MTX_CMDID_PRIORITY ? "(PRIORITY)" : "(NORMAL)"),
+ (msg->cmd_id & MTX_CMDID_WB_INTERRUPT ? "(Interrupt)" : "(NO Interrupt)"),
+ (msg->cmd_id), cmd_word, (msg->data), msg->command_data_buf,
+ writeback_val);
+#endif
+ break;
+ }
+#ifdef ENABLE_PROFILING
+ case MTX_CMDID_ENCODE_FRAME:
+ {
+ struct timespec64 time;
+
+ ktime_get_real_ts64(&time);
+
+ sock->fw_lat.start_time = timespec64_to_ns((const struct timespec64 *)&time);
+ }
+#endif
+ default:
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_debug("MSG_TX[%d]: %s %s %s cmd: %#08x cmd_word: %#08x data: %#08x addr: 0x%p writeback_val: %#08x\n",
+ sock->id, command_string[cmd_id],
+ (msg->cmd_id & MTX_CMDID_PRIORITY ? "(PRIORITY)" : "(NORMAL)"),
+ (msg->cmd_id & MTX_CMDID_WB_INTERRUPT ? "(Interrupt)" : "(NO Interrupt)"),
+ (msg->cmd_id), cmd_word, (msg->data), msg->command_data_buf,
+ writeback_val);
+#endif
+ break;
+ }
+#ifdef DEBUG_ENCODER_DRIVER
+ if (msg->command_data_buf) {
+ int i;
+
+ pr_debug("Has msg->command_data_buf cpu_virt=0x%p dev_virt=%#08x\n",
+ msg->command_data_buf->cpu_virt, msg->command_data_buf->dev_virt);
+
+ for (i = 0; i < 350; i++) {
+ pr_debug("MSG_TX %03d %#08x\n", i,
+ ((unsigned int *)msg->command_data_buf->cpu_virt)[i]);
+ }
+ }
+#endif
+
+ /* kick the master MTX */
+ mtx_kick(fw_ctx, 1);
+
+ sock->msgs_sent++;
+
+ return IMG_SUCCESS;
+}
+
+int topazdd_send_msg(void *dd_str_ctx, enum mtx_cmd_id cmd_id,
+ unsigned int data, struct vidio_ddbufinfo *cmd_data_buf,
+ unsigned int *wb_val)
+{
+ struct mtx_tomtx_msg *msg;
+ struct img_comm_socket *sock;
+ int err;
+
+ if (!dd_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ sock = (struct img_comm_socket *)dd_str_ctx;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ IMG_DBG_ASSERT(msg);
+ if (!msg)
+ return IMG_ERROR_UNDEFINED;
+
+ msg->command_data_buf = cmd_data_buf;
+ msg->cmd_id = cmd_id;
+ msg->data = data;
+
+ if (!wb_val) {
+ comm_lock(sock->ctx, COMM_LOCK_TX);
+ err = comm_send(sock, msg, NULL);
+ comm_unlock(sock->ctx, COMM_LOCK_TX);
+ } else {
+ unsigned int ret_wb_val;
+
+ comm_lock(sock->ctx, COMM_LOCK_TX);
+ err = comm_send(sock, msg, &ret_wb_val);
+ comm_unlock(sock->ctx, COMM_LOCK_TX);
+
+ if (err == IMG_SUCCESS)
+ *wb_val = ret_wb_val;
+ }
+
+ kfree(msg);
+ return err;
+}
+
+#define WAIT_FOR_SYNC_RETRIES 1200
+#define WAIT_FOR_SYNC_TIMEOUT 1
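+/* worst case this bounds the sync wait to roughly 1.2 s (1200 ~1 ms retries) */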
+
+static int wait_event_obj(void *event, unsigned char uninterruptible, unsigned int timeout)
+{
+ struct event *p_event = (struct event *)event;
+ int ret;
+
+ IMG_DBG_ASSERT(event);
+ if (!event)
+ return IMG_ERROR_GENERIC_FAILURE;
+
+ if (uninterruptible) {
+ if (timeout == (unsigned int)(-1)) {
+ ret = 0;
+ wait_event(event_wait_queue, p_event->signalled);
+ } else {
+ ret = wait_event_timeout(event_wait_queue, p_event->signalled, timeout);
+ if (!ret)
+ return IMG_ERROR_TIMEOUT;
+ }
+ } else {
+ if (timeout == (unsigned int)(-1)) {
+ ret = wait_event_interruptible(event_wait_queue, p_event->signalled);
+ } else {
+ ret = wait_event_interruptible_timeout(event_wait_queue,
+ p_event->signalled, timeout);
+ if (!ret)
+ return IMG_ERROR_TIMEOUT;
+ }
+ }
+
+ /* If there are signals pending... */
+ if (ret == -ERESTARTSYS)
+ return IMG_ERROR_INTERRUPTED;
+
+ /* If there was no signal...*/
+ IMG_DBG_ASSERT(p_event->signalled);
+
+ /* Clear signal pending...*/
+ p_event->signalled = FALSE;
+
+ return IMG_SUCCESS;
+}
+
+static int topazdd_wait_on_sync(struct img_comm_socket *sock, unsigned int wb_val)
+{
+ unsigned int retries = 0;
+
+ if (!sock)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ while (wait_event_obj(sock->event, TRUE, WAIT_FOR_SYNC_TIMEOUT) != IMG_SUCCESS) {
+ if (retries == WAIT_FOR_SYNC_RETRIES) {
+ /*
+ * We shouldn't wait any longer than that!
+ * If the hardware locked up, we will get stuck otherwise.
+ */
+ pr_err("TIMEOUT: %s timed out waiting for writeback 0x%08x.\n",
+ __func__, sock->sync_wb_val);
+ return IMG_ERROR_TIMEOUT;
+ }
+
+ msleep(WAIT_FOR_SYNC_TIMEOUT);
+ retries++;
+ }
+
+ return IMG_SUCCESS;
+}
+
+int topazdd_send_msg_with_sync(void *dd_str_ctx, enum mtx_cmd_id cmd_id,
+ unsigned int data,
+ struct vidio_ddbufinfo *cmd_data_buf)
+{
+ struct img_comm_socket *sock;
+ unsigned int wb_val = 0;
+
+ if (!dd_str_ctx)
+ return IMG_ERROR_INVALID_CONTEXT;
+
+ sock = (struct img_comm_socket *)dd_str_ctx;
+
+ mutex_lock_nested(sock->sync_wb_mutex, SUBCLASS_TOPAZDD);
+ topazdd_send_msg(dd_str_ctx, cmd_id, data, cmd_data_buf, &wb_val);
+ sock->sync_waiting = TRUE;
+ sock->sync_wb_val = wb_val;
+ mutex_unlock((struct mutex *)sock->sync_wb_mutex);
+
+ return topazdd_wait_on_sync(sock, wb_val);
+}
+
+static void stream_worker(void *work)
+{
+ struct img_comm_socket *sock = NULL;
+ struct img_writeback_msg msg;
+ struct event *p_event;
+
+ work = get_work_buff(work, FALSE);
+ sock = container_of(work, struct img_comm_socket, work);
+
+ while (wbfifo_get(sock, &msg)) {
+ if (F_DECODE(msg.cmd_word, MTX_MSG_MESSAGE_ID) == MTX_MESSAGE_ACK)
+ sock->ack_recv++;
+
+ mutex_lock_nested(sock->sync_wb_mutex, SUBCLASS_TOPAZDD);
+ if (sock->sync_waiting && msg.writeback_val == sock->sync_wb_val) {
+ sock->sync_waiting = FALSE;
+ mutex_unlock((struct mutex *)sock->sync_wb_mutex);
+ /* signal the waiting sync event */
+ p_event = (struct event *)sock->event;
+
+ IMG_DBG_ASSERT(sock->event);
+ if (!sock->event)
+ return;
+
+ p_event->signalled = TRUE;
+ wake_up(&event_wait_queue);
+ return;
+ }
+ mutex_unlock((struct mutex *)sock->sync_wb_mutex);
+
+ if (sock->cb)
+ sock->cb(&msg, sock->str_ctx);
+ }
+}
+
+int topazdd_create_stream_context(struct topaz_dev_ctx *ctx, enum img_codec codec,
+ enc_cb cb, void *cb_priv,
+ void **dd_str_ctx, struct vidio_ddbufinfo **wb_data_info)
+{
+ struct img_comm_socket *p_sock;
+ struct event *p_event;
+
+ p_sock = kmalloc(sizeof(*p_sock), GFP_KERNEL);
+ IMG_DBG_ASSERT(p_sock);
+ if (!p_sock)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ p_sock->sync_wb_mutex = kzalloc(sizeof(*p_sock->sync_wb_mutex), GFP_KERNEL);
+ if (!p_sock->sync_wb_mutex) {
+ kfree(p_sock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ mutex_init(p_sock->sync_wb_mutex);
+
+ /* Allocate a Sync structure...*/
+ p_event = kzalloc(sizeof(*p_event), GFP_KERNEL);
+ IMG_DBG_ASSERT(p_event);
+ if (!p_event) {
+ mutex_destroy(p_sock->sync_wb_mutex);
+ kfree(p_sock->sync_wb_mutex);
+ p_sock->sync_wb_mutex = NULL;
+ kfree(p_sock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ p_sock->event = (void *)p_event;
+
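+ /* 0xa5a5a5a5 % 0x1000 keeps just the low 12 bits, i.e. 0x5a5 */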
+ p_sock->low_cmd_cnt = 0xa5a5a5a5 % MAX_TOPAZ_CMD_COUNT;
+ p_sock->high_cmd_cnt = 0;
+ p_sock->msgs_sent = 0;
+ p_sock->ack_recv = 0;
+ p_sock->codec = codec;
+ p_sock->ctx = ctx;
+ p_sock->cb = cb;
+ p_sock->str_ctx = (struct topaz_stream_context *)cb_priv;
+
+ init_work(&p_sock->work, stream_worker, HWA_ENCODER);
+ if (!p_sock->work) {
+ kfree(p_event);
+ mutex_destroy(p_sock->sync_wb_mutex);
+ kfree(p_sock->sync_wb_mutex);
+ p_sock->sync_wb_mutex = NULL;
+ kfree(p_sock);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ wbfifo_clear(p_sock);
+
+ *wb_data_info = g_aps_wb_data_info;
+
+ *dd_str_ctx = (void *)p_sock;
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("topazdd context created with codec %d\n", codec);
+#endif
+
+ return IMG_SUCCESS;
+}
+
+static int topaz_upload_firmware(struct topaz_dev_ctx *ctx, enum img_codec codec)
+{
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("Loading firmware.\n");
+#endif
+ /* Upload FW */
+ /* load and start MTX cores */
+ ctx->fw_ctx.load_method = (enum mtx_load_method)g_load_method;
+
+ topaz_setup_firmware(ctx, &ctx->fw_ctx, ctx->fw_ctx.load_method,
+ codec, topazdd_get_num_pipes(ctx));
+
+ if (!ctx->fw_ctx.initialized) {
+ pr_err("\nERROR: Firmware cannot be loaded!\n");
+ return IMG_ERROR_UNDEFINED;
+ }
+
+ comm_load_bias(ctx, ctx->fw_ctx.supported_codecs);
+ /* initialise read offset of firmware output fifo */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOMTX << 2), 0);
+
+ ctx->fw_uploaded = codec;
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("firmware uploaded!\n");
+#endif
+ return IMG_SUCCESS;
+}
+
+int topazdd_setup_stream_ctx(void *dd_str_ctx, unsigned short height,
+ unsigned short width, unsigned char *ctx_num,
+ unsigned int *used_sock)
+{
+ unsigned char idx;
+ struct img_fw_context *fw_ctx;
+ struct img_comm_socket *sock;
+ int res = IMG_ERROR_UNDEFINED;
+ unsigned int codec_mask = 0;
+
+ sock = (struct img_comm_socket *)dd_str_ctx;
+
+ comm_lock(sock->ctx, COMM_LOCK_BOTH);
+
+ fw_ctx = &sock->ctx->fw_ctx;
+
+ switch (sock->codec) {
+ case IMG_CODEC_JPEG:
+ codec_mask = CODEC_MASK_JPEG;
+ break;
+ case IMG_CODEC_H264_NO_RC:
+ case IMG_CODEC_H264_VBR:
+ case IMG_CODEC_H264_CBR:
+ case IMG_CODEC_H264_VCM:
+ case IMG_CODEC_H264_ERC:
+ codec_mask = CODEC_MASK_H264;
+ break;
+ case IMG_CODEC_H263_NO_RC:
+ case IMG_CODEC_H263_VBR:
+ case IMG_CODEC_H263_CBR:
+ case IMG_CODEC_H263_ERC:
+ codec_mask = CODEC_MASK_H263;
+ break;
+ case IMG_CODEC_MPEG4_NO_RC:
+ case IMG_CODEC_MPEG4_VBR:
+ case IMG_CODEC_MPEG4_CBR:
+ case IMG_CODEC_MPEG4_ERC:
+ codec_mask = CODEC_MASK_MPEG4;
+ break;
+ case IMG_CODEC_MPEG2_NO_RC:
+ case IMG_CODEC_MPEG2_VBR:
+ case IMG_CODEC_MPEG2_CBR:
+ case IMG_CODEC_MPEG2_ERC:
+ codec_mask = CODEC_MASK_MPEG2;
+ break;
+
+ case IMG_CODEC_H264MVC_NO_RC:
+ case IMG_CODEC_H264MVC_VBR:
+ case IMG_CODEC_H264MVC_CBR:
+ case IMG_CODEC_H264MVC_ERC:
+ codec_mask = CODEC_MASK_H264MVC;
+ break;
+ default:
+ IMG_DBG_ASSERT("Impossible use case!\n" == NULL);
+ break;
+ }
+ /* Only do the following checks if some other firmware is loaded */
+ if (sock->ctx->fw_uploaded != IMG_CODEC_NONE &&
+ (sock->ctx->fw_uploaded != sock->codec || /* Different firmware is uploaded */
+ /* We currently only support one JPEG context to be encoded at the same time */
+ (sock->ctx->fw_uploaded == IMG_CODEC_JPEG && sock->ctx->used_socks))) {
+ if (!(fw_ctx->supported_codecs & codec_mask)) {
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+ res = IMG_ERROR_UNDEFINED;
+ pr_err("\nERROR: Incompatible firmware context types!. Required codec: 0x%x Loaded FW : 0x%x\n",
+ codec_mask, fw_ctx->supported_codecs);
+ return res;
+ }
+ }
+
+ if (fw_ctx->initialized && sock->ctx->used_socks >= fw_ctx->num_contexts) {
+ /* the firmware can't support any more contexts */
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+ pr_err("\nERROR: Firmware context limit reached!\n");
+ return IMG_ERROR_UNDEFINED;
+ }
+
+ /* Search for an Available socket. */
+ IMG_DBG_ASSERT(TOPAZHP_MAX_POSSIBLE_STREAMS < (1 << 8));
+ for (idx = 0; idx < TOPAZHP_MAX_POSSIBLE_STREAMS; idx++) {
+ if (!(sock->ctx->socks[idx])) {
+ unsigned int index = idx;
+
+ sock->id = idx;
+ *ctx_num = idx;
+ *used_sock = index;
+ sock->ctx->socks[idx] = sock;
+ sock->ctx->used_socks++;
+ break;
+ }
+ }
+
+ if (idx == TOPAZHP_MAX_POSSIBLE_STREAMS) {
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+ return IMG_ERROR_INVALID_SIZE;
+ }
+
+ if (sock->codec == IMG_CODEC_JPEG) {
+ topaz_timeout_retries = TOPAZ_TIMEOUT_JPEG;
+ } else {
+ unsigned int mbs_per_pic = (height * width) / 256;
+
+ if (topaz_timeout_retries < (mbs_per_pic + 10) * 100)
+ topaz_timeout_retries = (mbs_per_pic + 10) * 100;
+ }
+
+ if (sock->ctx->fw_uploaded == IMG_CODEC_NONE) {
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("Loading a different firmware.\n");
+#endif
+ res = topaz_upload_firmware(sock->ctx, (enum img_codec)sock->codec);
+ if (res != IMG_SUCCESS) {
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+ res = IMG_ERROR_UNDEFINED;
+ pr_err("\nERROR: Firmware cannot be loaded!\n");
+ return res;
+ }
+ }
+
+ res = IMG_SUCCESS;
+
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+
+ return res;
+}
+
+void topazdd_destroy_stream_ctx(void *dd_str_ctx)
+{
+ unsigned int idx;
+ struct img_comm_socket *sock;
+
+ sock = (struct img_comm_socket *)dd_str_ctx;
+
+ WARN_ON((!sock));
+ if (!sock) {
+ pr_err("topazdd_destroy_sock: invalid sock\n");
+ return;
+ }
+
+ flush_work(sock->work);
+
+ mutex_lock_nested(sock->sync_wb_mutex, SUBCLASS_TOPAZDD);
+ comm_lock(sock->ctx, COMM_LOCK_BOTH);
+ for (idx = 0; idx < TOPAZHP_MAX_POSSIBLE_STREAMS; idx++) {
+ if (sock->ctx->socks[idx] == sock) {
+ sock->ctx->used_socks--;
+ break;
+ }
+ }
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("topazdd sock context closed\n");
+#endif
+
+ /* Flush the MMU table cache (so that we can't accidentally access
+ * the freed device memory due to a cache/table mismatch).
+ */
+ topaz_core_mmu_flush_cache();
+
+ /*
+ * if idx == TOPAZHP_MAX_POSSIBLE_STREAMS then opening the socket succeeded
+ * but setting it up failed (maybe incompatible firmware)
+ */
+ if (idx != TOPAZHP_MAX_POSSIBLE_STREAMS) {
+ /*
+ * Abort the stream first.
+ * This function can be called as a result of abnormal process
+ * exit, and since the hardware might be encoding some frame it
+ * means that the hardware still needs the context resources
+ * (buffers mapped to the hardware, etc), so we need to make
+ * sure that hardware encoding is aborted first before releasing
+ * the resources.
+ * This is important if you're doing several encodes
+ * simultaneously because releasing the resources too early will
+ * cause a page-fault that will halt all simultaneous encodes
+ * not just the one that caused the page-fault.
+ */
+ struct mtx_tomtx_msg msg;
+ unsigned int wb_val = 0;
+
+ wbfifo_clear(sock);
+
+ msg.cmd_id = (enum mtx_cmd_id)(MTX_CMDID_ABORT | MTX_CMDID_PRIORITY |
+ MTX_CMDID_WB_INTERRUPT);
+ msg.data = 0;
+ msg.command_data_buf = NULL;
+ comm_send(sock, &msg, &wb_val);
+ sock->sync_waiting = TRUE;
+ sock->sync_wb_val = wb_val;
+ mutex_unlock((struct mutex *)sock->sync_wb_mutex);
+
+ topazdd_wait_on_sync(sock, wb_val);
+ /*
+ * Clear the slot only here, not any sooner: we still need it in
+ * case we had to abort the stream.
+ */
+ sock->ctx->socks[idx] = NULL;
+ }
+
+ comm_unlock(sock->ctx, COMM_LOCK_BOTH);
+ kfree(sock->event);
+ mutex_destroy(sock->sync_wb_mutex);
+ kfree(sock->sync_wb_mutex);
+ sock->sync_wb_mutex = NULL;
+ kfree(sock->work);
+ kfree(sock);
+}
+
+/*
+ * topazdd_int_clear
+ */
+static void topazdd_int_clear(struct topaz_dev_ctx *ctx, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(ctx->lock, flags);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR, mask);
+
+ spin_unlock_irqrestore(ctx->lock, flags);
+}
+
+unsigned char topazdd_get_pipe_usage(unsigned char pipe)
+{
+ IMG_DBG_ASSERT(pipe < TOPAZHP_MAX_NUM_PIPES);
+ if (pipe >= TOPAZHP_MAX_NUM_PIPES)
+ return 0;
+
+ return g_pipe_usage[pipe];
+}
+
+void topazdd_set_pipe_usage(unsigned char pipe, unsigned char val)
+{
+ IMG_DBG_ASSERT(pipe < TOPAZHP_MAX_NUM_PIPES);
+ if (pipe < TOPAZHP_MAX_NUM_PIPES)
+ g_pipe_usage[pipe] = val;
+}
+
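+/*
+ * The host and the MTX firmware share a writeback FIFO of WB_FIFO_SIZE
+ * entries. The firmware advances the producer index in a scratch register
+ * after each message it writes; the host dispatches entries and advances
+ * the consumer index. Both indices wrap at WB_FIFO_SIZE.
+ */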
+static unsigned int comm_get_consumer(struct topaz_dev_ctx *ctx)
+{
+ unsigned int reg;
+
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOMTX << 2));
+
+ return F_DECODE(reg, WB_CONSUMER);
+}
+
+static void comm_set_consumer(struct topaz_dev_ctx *ctx, unsigned int consumer)
+{
+ unsigned int reg;
+
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOMTX << 2));
+
+ reg = F_INSERT(reg, consumer, WB_CONSUMER);
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOMTX << 2), reg);
+}
+
+static unsigned int comm_get_producer(struct topaz_dev_ctx *ctx)
+{
+ unsigned int reg;
+
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOHOST << 2));
+
+ return F_DECODE(reg, WB_PRODUCER);
+}
+
+static void comm_set_producer(struct topaz_dev_ctx *ctx, unsigned int producer)
+{
+ unsigned int reg;
+
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOHOST << 2));
+
+ reg = F_INSERT(reg, producer, WB_PRODUCER);
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+ (MTX_SCRATCHREG_TOHOST << 2), reg);
+}
+
+static int topazdd_init_comms(struct topaz_dev_ctx *ctx, unsigned int mmu_flags)
+{
+ unsigned int num_cores;
+ unsigned int i;
+ unsigned int reg;
+
+ num_cores = topazdd_get_num_pipes(ctx);
+
+ for (i = 0; i < num_cores; i++) {
+ unsigned int offset = REG_TOPAZHP_CORE_0 + (i * 4);
+
+ ctx->hp_core_reg_addr[i] = (void *)topaz_mem_space[offset].cpu_addr;
+
+ offset = REG_TOPAZHP_VLC_CORE_0 + (i * 4);
+ ctx->vlc_reg_addr[i] = (void *)topaz_mem_space[offset].cpu_addr;
+ }
+
+ if (topaz_mmu_device_create(&ctx->topaz_mmu_ctx, mmu_flags) != IMG_SUCCESS) {
+ pr_err("\nERROR: Could not initialize MMU with selected parameters!\n");
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Soft-reset the cores before starting up MMU support (if the MMU is switched on) */
+ reg = (F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET));
+
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_SRST, reg);
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_SRST, 0x0);
+
+ for (i = 0; i < num_cores; i++) {
+ unsigned int reset_bits = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET) |
+ F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET);
+
+#ifdef TOPAZHP /* TODO: TOPAZHP does not seem to be defined by the build, even though this is a TopazHP driver */
+ reset_bits |= F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET(1)) |
+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET(1));
+#endif
+
+ VXE_WR_REG32(ctx->hp_core_reg_addr[i], TOPAZHP_CR_TOPAZHP_SRST, reset_bits);
+
+ VXE_WR_REG32(ctx->hp_core_reg_addr[i], TOPAZHP_CR_TOPAZHP_SRST, 0);
+ }
+
+ ctx->topaz_mmu_ctx.ptd_phys_addr = ctx->ptd;
+ topaz_core_mmu_hw_setup(&ctx->topaz_mmu_ctx, ctx->multi_core_mem_addr);
+
+ ctx->fw_uploaded = IMG_CODEC_NONE;
+
+ g_core_rev = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_TOPAZHP_CORE_REV);
+ g_core_rev &=
+ (MASK_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV | MASK_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV |
+ MASK_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV);
+ g_core_des1 = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_TOPAZHP_CORE_DES1);
+
+ ctx->comm_tx_mutex = kzalloc(sizeof(*ctx->comm_tx_mutex), GFP_KERNEL);
+ if (!(ctx->comm_tx_mutex))
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ mutex_init(ctx->comm_tx_mutex);
+
+ ctx->comm_rx_mutex = kzalloc(sizeof(*ctx->comm_rx_mutex), GFP_KERNEL);
+ if (!ctx->comm_rx_mutex) {
+ mutex_destroy(ctx->comm_tx_mutex);
+ kfree(ctx->comm_tx_mutex);
+ ctx->comm_tx_mutex = NULL;
+ pr_err("Memory allocation failed for mutex\n");
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ mutex_init(ctx->comm_rx_mutex);
+
+ g_aps_wb_data_info = kmalloc_array(WB_FIFO_SIZE, sizeof(*g_aps_wb_data_info), GFP_KERNEL);
+ if (!g_aps_wb_data_info) {
+ mutex_destroy(ctx->comm_rx_mutex);
+ kfree(ctx->comm_rx_mutex);
+ ctx->comm_rx_mutex = NULL;
+
+ mutex_destroy(ctx->comm_tx_mutex);
+ kfree(ctx->comm_tx_mutex);
+ ctx->comm_tx_mutex = NULL;
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Allocate WB buffers */
+ for (i = 0; i < WB_FIFO_SIZE; i++) {
+ struct vidio_ddbufinfo *mem_info = &g_aps_wb_data_info[i];
+
+ if (topaz_mmu_alloc(ctx->topaz_mmu_ctx.mmu_context_handle,
+ ctx->vxe_arg, MMU_GENERAL_HEAP_ID, 1,
+ (enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE),
+ COMM_WB_DATA_BUF_SIZE, 64, mem_info)) {
+ pr_err("mmu_alloc failed!\n");
+ while (i--) /* release the WB buffers allocated so far */
+ topaz_mmu_free(ctx->vxe_arg, &g_aps_wb_data_info[i]);
+ kfree(g_aps_wb_data_info);
+ g_aps_wb_data_info = NULL;
+ mutex_destroy(ctx->comm_rx_mutex);
+ kfree(ctx->comm_rx_mutex);
+ mutex_destroy(ctx->comm_tx_mutex);
+ kfree(ctx->comm_tx_mutex);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ /* Initialise the COMM registers */
+ comm_set_producer(ctx, 0);
+
+ /* Must reset the Consumer register too,
+ * otherwise the COMM stack may be initialised incorrectly
+ */
+ comm_set_consumer(ctx, 0);
+
+ for (i = 0; i < TOPAZHP_MAX_POSSIBLE_STREAMS; i++)
+ ctx->socks[i] = NULL;
+
+ ctx->used_socks = 0;
+ ctx->initialized = TRUE;
+
+ return 0;
+}
+
+static void topazdd_deinit_comms(struct topaz_dev_ctx *ctx)
+{
+ unsigned int idx;
+ struct img_fw_context *fw_ctx;
+
+ fw_ctx = &ctx->fw_ctx;
+
+ if (fw_ctx && fw_ctx->initialized) {
+ /* Stop the MTX */
+ mtx_stop(fw_ctx);
+ mtx_wait_for_completion(fw_ctx);
+ }
+
+ if (g_aps_wb_data_info) {
+ for (idx = 0; idx < WB_FIFO_SIZE; idx++) {
+ struct vidio_ddbufinfo *mem_info = &g_aps_wb_data_info[idx];
+
+ topaz_mmu_free(ctx->vxe_arg, mem_info);
+ }
+ kfree(g_aps_wb_data_info);
+ g_aps_wb_data_info = NULL;
+ }
+
+ /* Close all of the opened sockets */
+ for (idx = 0; idx < TOPAZHP_MAX_POSSIBLE_STREAMS; idx++) {
+ if (ctx->socks[idx])
+ topazdd_destroy_stream_ctx(ctx->socks[idx]);
+ }
+
+ mutex_destroy(ctx->comm_tx_mutex);
+ kfree(ctx->comm_tx_mutex);
+ ctx->comm_tx_mutex = NULL;
+
+ mutex_destroy(ctx->comm_rx_mutex);
+ kfree(ctx->comm_rx_mutex);
+ ctx->comm_rx_mutex = NULL;
+
+ if (fw_ctx && fw_ctx->initialized)
+ mtx_deinitialize(fw_ctx);
+
+ topaz_mmu_device_destroy(&ctx->topaz_mmu_ctx);
+
+ ctx->fw_uploaded = IMG_CODEC_NONE;
+ ctx->initialized = FALSE;
+}
+
+static void setup_topaz_mem(unsigned long long reg_base, unsigned int reg_size)
+{
+ unsigned int idx;
+
+ /* set up the kernel virtual address for mem space access */
+ for (idx = 0; idx < topaz_target_config.num_mem_spaces; idx++) {
+ unsigned long long offset = topaz_target_config.mem_spaces[idx].reg.addr;
+
+ topaz_target_config.mem_spaces[idx].cpu_addr = reg_base + offset;
+ }
+}
+
+/*
+ * topazdd_init
+ */
+int topazdd_init(unsigned long long reg_base, unsigned int reg_size, unsigned int mmu_flags,
+ void *vxe_arg, unsigned int ptd, void **data)
+{
+ struct topaz_dev_ctx *ctx;
+ int ret;
+ spinlock_t **lock; /* spinlock */
+
+ setup_topaz_mem(reg_base, reg_size);
+
+ /* Allocate and zero the device structure...*/
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ IMG_DBG_ASSERT(ctx);
+ if (!ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ lock = (spinlock_t **)&ctx->lock;
+ *lock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+
+ if (!(*lock)) {
+ pr_err("Memory allocation failed for spin-lock\n");
+ kfree(ctx);
+ return IMG_ERROR_OUT_OF_MEMORY;
+ }
+ spin_lock_init(*lock);
+ g_lock = ctx->lock;
+
+ *data = ctx;
+ ctx->initialized = FALSE;
+
+ ctx->multi_core_mem_addr = (void *)topaz_mem_space[REG_TOPAZHP_MULTICORE].cpu_addr;
+
+ if (!ctx->multi_core_mem_addr) {
+ kfree(ctx->lock);
+ kfree(ctx);
+ return IMG_ERROR_DEVICE_NOT_FOUND;
+ }
+
+ /* Now enable interrupts */
+ topazdd_int_enable(ctx, (MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX |
+ MASK_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN |
+ MASK_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT |
+ MASK_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT_B));
+
+ ctx->vxe_arg = vxe_arg;
+ ctx->ptd = ptd;
+
+ ret = topazdd_init_comms(ctx, mmu_flags);
+ if (ret) {
+ topazdd_int_disable(ctx, ~0);
+ kfree(ctx->lock);
+ kfree(ctx);
+ return ret;
+ }
+
+ comm_lock(ctx, COMM_LOCK_BOTH);
+ ret = topaz_upload_firmware(ctx, IMG_CODEC_H264_NO_RC);
+ comm_unlock(ctx, COMM_LOCK_BOTH);
+
+ if (ret) {
+ topazdd_deinit_comms(ctx);
+ topazdd_int_disable(ctx, ~0);
+ kfree(ctx->lock);
+ kfree(ctx);
+ return ret;
+ }
+
+ /* Device now initialised...*/
+ ctx->initialized = TRUE;
+
+ /* Return success...*/
+ return IMG_SUCCESS;
+}
+
+/*
+ * topazdd_deinit
+ */
+void topazdd_deinit(void *data)
+{
+ struct topaz_dev_ctx *ctx = data;
+ unsigned int reg;
+
+ /* If the interrupt was defined then it is also safe to clear interrupts
+ * and reset the core....
+ */
+ if (ctx->initialized) {
+ topazdd_deinit_comms(ctx);
+
+ /* Disable interrupts...*/
+ topazdd_int_disable(ctx, ~0);
+
+ /* disable interrupts on Topaz core */
+ reg =
+ VXE_RD_REG32(ctx->multi_core_mem_addr,
+ TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB);
+
+ reg &= ~MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX;
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB, reg);
+
+ /* clear interrupt - just in case */
+ VXE_WR_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR,
+ MASK_TOPAZHP_TOP_CR_INTCLR_MTX);
+
+ g_lock = NULL;
+ kfree(ctx->lock);
+ }
+
+ kfree(data);
+}
+
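+/*
+ * Drain the shared writeback FIFO: for every entry between the consumer and
+ * the producer index, sync the backing buffer to the host, hand the message
+ * to the owning socket's software FIFO and kick its worker.
+ */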
+static int comm_dispatch_in_msg(struct topaz_dev_ctx *ctx)
+{
+ unsigned int hw_fifo_producer;
+ unsigned int hw_fifo_consumer;
+
+ hw_fifo_consumer = comm_get_consumer(ctx);
+ hw_fifo_producer = comm_get_producer(ctx);
+
+ while (hw_fifo_consumer != hw_fifo_producer) {
+ struct img_writeback_msg *wb_msg;
+ unsigned char conn_id;
+ struct vidio_ddbufinfo *mem_info = &g_aps_wb_data_info[hw_fifo_consumer];
+ enum mtx_cmd_id cmd_id;
+
+ /* Update corresponding memory region */
+ topaz_update_host_mem(ctx->vxe_arg, mem_info);
+ wb_msg = (struct img_writeback_msg *)(mem_info->cpu_virt);
+
+ /* Copy to the corresponding SW fifo */
+ conn_id = F_DECODE(wb_msg->cmd_word, MTX_MSG_CORE);
+
+ /* Find corresponding Buffer Addr */
+ cmd_id = (enum mtx_cmd_id)F_DECODE(wb_msg->cmd_word, MTX_MSG_MESSAGE_ID);
+#ifdef DEBUG_ENCODER_DRIVER
+ if ((unsigned int)cmd_id == (unsigned int)MTX_MESSAGE_ACK) {
+ pr_debug("MSG_RX[%d]: 0x%03X %s (ACK) cmd_word: %#08x data: %#08x extra_data: %#08x writeback_val: %#08x\n",
+ F_DECODE(wb_msg->cmd_word, MTX_MSG_CORE),
+ hw_fifo_producer & 0x1f,
+ command_string[wb_msg->cmd_word & 0x1f],
+ wb_msg->cmd_word, wb_msg->data,
+ wb_msg->extra_data, wb_msg->writeback_val);
+ } else {
+#ifdef ENABLE_PROFILING
+ struct timespec64 time;
+
+ ktime_get_real_ts64(&time);
+ ctx->socks[conn_id]->fw_lat.end_time =
+ timespec64_to_ns((const struct timespec64 *)&time);
+ pr_err("fw encode time is %llu us for msg_id x%0x\n",
+ div_s64(ctx->socks[conn_id]->fw_lat.end_time -
+ ctx->socks[conn_id]->fw_lat.start_time, 1000),
+ wb_msg->writeback_val);
+#endif
+ pr_debug("MSG_RX[%d]: 0x%03X CODED_BUFFER cmd_word: %#08x coded_package_consumed: %d\n",
+ F_DECODE(wb_msg->cmd_word, MTX_MSG_CORE),
+ hw_fifo_producer & 0x1f,
+ wb_msg->cmd_word,
+ wb_msg->coded_package_consumed_idx);
+ }
+#endif
+
+ /* If corresponding socket still exists, call the callback */
+ if (ctx->socks[conn_id]) {
+ wbfifo_add(ctx->socks[conn_id], wb_msg);
+ schedule_work(ctx->socks[conn_id]->work);
+ }
+
+ /* Activate corresponding FIFO
+ * proceed to the next one
+ */
+ hw_fifo_consumer++;
+
+ if (hw_fifo_consumer == WB_FIFO_SIZE)
+ hw_fifo_consumer = 0;
+
+ comm_set_consumer(ctx, hw_fifo_consumer);
+
+ /*
+ * We need to update the producer because we might have received a new
+ * message meanwhile. This new message won't trigger an interrupt and
+ * consequently will be lost till another message arrives
+ */
+ hw_fifo_producer = comm_get_producer(ctx);
+ }
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * topazdd_threaded_isr
+ */
+unsigned char topazdd_threaded_isr(void *inst_data)
+{
+ struct topaz_dev_ctx *ctx = *(struct topaz_dev_ctx **)inst_data;
+
+ /* Bail out if the device is not initialised...*/
+ if (!ctx || !ctx->initialized)
+ return FALSE;
+
+ /* Now dispatch the messages */
+ comm_dispatch_in_msg(ctx);
+
+ /* Signal this interrupt has been handled...*/
+ return TRUE;
+}
+
+/*
+ * topazdd_isr
+ */
+irqreturn_t topazdd_isr(void *inst_data)
+{
+ unsigned int reg;
+ unsigned int mmu_fault_mask = MASK_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT;
+
+ struct topaz_dev_ctx *ctx = *(struct topaz_dev_ctx **)inst_data;
+
+ /* More requesters with topaz hp */
+ mmu_fault_mask |= MASK_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT_B;
+
+ /* Bail out if the device is not initialised...*/
+ if (!ctx || !ctx->initialized)
+ return IRQ_NONE;
+
+ /* read device interrupt status */
+ reg = VXE_RD_REG32(ctx->multi_core_mem_addr, TOPAZHP_TOP_CR_MULTICORE_INT_STAT);
+
+ /* if interrupts enabled and fired...*/
+ if (((reg & MASK_TOPAZHP_TOP_CR_INT_STAT_MTX) == (MASK_TOPAZHP_TOP_CR_INT_STAT_MTX))) {
+ /* Clear interrupt source...*/
+ topazdd_int_clear(ctx, MASK_TOPAZHP_TOP_CR_INTCLR_MTX);
+
+ /* Signal this interrupt has been handled...*/
+ return IRQ_WAKE_THREAD;
+ }
+
+ /* if a page fault ever happened */
+ if (reg & (mmu_fault_mask)) {
+ static unsigned char dump_once = TRUE;
+
+ if (dump_once) {
+ VXE_WR_REG32(ctx->multi_core_mem_addr,
+ TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB, 0);
+
+ dump_once = FALSE; /* only on first page fault for readability */
+ }
+
+ /* Clear interrupt source...*/
+ topazdd_int_clear(ctx, mmu_fault_mask);
+
+ /* Interrupt served; we might never reach this point on kernel crashes */
+ return IRQ_HANDLED;
+ }
+
+ /* Signal that this device was not the interrupt source...*/
+ return IRQ_NONE;
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/topaz_device.h b/drivers/media/platform/vxe-vxd/encoder/topaz_device.h
new file mode 100644
index 000000000000..6da9e4e7d9bf
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topaz_device.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * topaz driver data structures
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#if !defined(__TOPAZ_DEVICE_H__)
+#define __TOPAZ_DEVICE_H__
+
+#include <linux/interrupt.h>
+
+#include "fw_headers/topazscfwif.h"
+#include "fw_headers/mtx_fwif.h"
+#include "topazmmu.h"
+#include "vid_buf.h"
+#include "topaz_api.h"
+
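+/*
+ * Bitmask of the codec families a loaded firmware build supports; this is
+ * checked against img_fw_context.supported_codecs when a stream is set up.
+ */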
+# define CODEC_MASK_JPEG 0x0001
+# define CODEC_MASK_MPEG2 0x0002
+# define CODEC_MASK_MPEG4 0x0004
+# define CODEC_MASK_H263 0x0008
+# define CODEC_MASK_H264 0x0010
+# define CODEC_MASK_H264MVC 0x0020
+# define CODEC_MASK_VP8 0x0040
+# define CODEC_MASK_H265 0x0080
+# define CODEC_MASK_FAKE 0x007F
+
+struct img_comm_socket;
+
+/*!
+ ****************************************************************************
+ Event object structure
+ ****************************************************************************
+ */
+struct event {
+ unsigned char signalled;
+};
+
+/* prototype for callback for incoming message */
+typedef void (*enc_cb)(struct img_writeback_msg *msg, void *priv);
+
+#ifdef ENABLE_PROFILING
+struct enc_fw_latency {
+ unsigned int start_time;
+ unsigned int end_time;
+};
+#endif
+
+struct mtx_tohost_msg {
+ enum mtx_message_id cmd_id;
+ unsigned int input_cmd_word;
+ unsigned char coded_pkg_idx;
+ unsigned int wb_val;
+ unsigned int data;
+ struct vidio_ddbufinfo *command_data_buf;
+};
+
+struct mtx_tomtx_msg {
+ enum mtx_cmd_id cmd_id;
+ unsigned int data;
+ struct vidio_ddbufinfo *command_data_buf;
+};
+
+/*
+ * This structure contains the device context.
+ */
+struct topaz_dev_ctx {
+ /* Parent context, needed to pass to mmu_alloc */
+ void *vxe_arg;
+
+ /* KM addresses for mem spaces */
+ void *multi_core_mem_addr;
+ void *hp_core_reg_addr[TOPAZHP_MAX_NUM_PIPES];
+ void *vlc_reg_addr[TOPAZHP_MAX_NUM_PIPES];
+
+ unsigned char initialized; /* Indicates that the device driver has been initialised */
+
+ unsigned int used_socks;
+ struct img_comm_socket *socks[TOPAZHP_MAX_POSSIBLE_STREAMS];
+
+ unsigned int fw_uploaded;
+ struct img_fw_context fw_ctx;
+
+ void *lock; /* basic device level spinlock */
+ struct mutex *comm_tx_mutex;
+ struct mutex *comm_rx_mutex;
+
+ unsigned int ptd;
+ struct topaz_mmu_context topaz_mmu_ctx;
+};
+
+#define COMM_INCOMING_FIFO_SIZE (WB_FIFO_SIZE * 2)
+struct img_comm_socket {
+ unsigned char id;
+ unsigned int low_cmd_cnt; /* count of low-priority commands sent to TOPAZ */
+ unsigned int high_cmd_cnt; /* count of high-priority commands sent to TOPAZ */
+ unsigned int last_sync; /* Last sync value sent */
+ struct img_writeback_msg in_fifo[COMM_INCOMING_FIFO_SIZE];
+ unsigned int in_fifo_consumer;
+ unsigned int in_fifo_producer;
+ void *work;
+
+ enc_cb cb; /* User-provided callback function */
+ struct topaz_stream_context *str_ctx; /* User-provided callback data */
+
+ void *event;
+ unsigned char sync_waiting;
+ unsigned int sync_wb_val;
+ struct mutex *sync_wb_mutex;
+
+ unsigned int msgs_sent;
+ unsigned int ack_recv;
+ unsigned char is_serialized;
+
+ unsigned int codec;
+
+ struct topaz_dev_ctx *ctx;
+#ifdef ENABLE_PROFILING
+ struct enc_fw_latency fw_lat;
+#endif
+};
+
+unsigned char topazdd_threaded_isr(void *data);
+irqreturn_t topazdd_isr(void *data);
+
+int topazdd_init(unsigned long long reg_base, unsigned int reg_size,
+ unsigned int mmu_flags,
+ void *vxe_arg, unsigned int ptd, void **data);
+void topazdd_deinit(void *data);
+unsigned int topazdd_get_num_pipes(struct topaz_dev_ctx *ctx);
+unsigned int topazdd_get_core_rev(void);
+unsigned int topazdd_get_core_des1(void);
+unsigned char topazdd_is_idle(struct img_comm_socket *sock);
+
+int topazdd_upload_firmware(struct topaz_dev_ctx *ctx, enum img_codec codec);
+int topazdd_create_stream_context(struct topaz_dev_ctx *ctx, enum img_codec codec, enc_cb cb,
+ void *cb_priv, void **dd_str_ctx,
+ struct vidio_ddbufinfo **wb_data_info);
+void topazdd_destroy_stream_ctx(void *dd_str_ctx);
+int topazdd_setup_stream_ctx(void *dd_str_ctx, unsigned short height,
+ unsigned short width, unsigned char *ctx_num,
+ unsigned int *used_sock);
+int topazdd_send_msg(void *dd_str_ctx, enum mtx_cmd_id cmd_id,
+ unsigned int data, struct vidio_ddbufinfo *cmd_data_buf,
+ unsigned int *wb_val);
+int topazdd_send_msg_with_sync(void *dd_str_ctx, enum mtx_cmd_id cmd_id,
+ unsigned int data,
+ struct vidio_ddbufinfo *cmd_data_buf);
+
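+/*
+ * Illustrative call sequence (a sketch only, error handling omitted; `cb`
+ * and `priv` stand for a caller-supplied writeback handler and its data):
+ *
+ *	void *dd, *str;
+ *	struct vidio_ddbufinfo *wb;
+ *	unsigned char ctx_num;
+ *	unsigned int sock;
+ *
+ *	topazdd_init(reg_base, reg_size, mmu_flags, vxe, ptd, &dd);
+ *	topazdd_create_stream_context(dd, IMG_CODEC_JPEG, cb, priv, &str, &wb);
+ *	topazdd_setup_stream_ctx(str, height, width, &ctx_num, &sock);
+ *	...
+ *	topazdd_destroy_stream_ctx(str);
+ *	topazdd_deinit(dd);
+ */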
+extern unsigned int mmu_control_val;
+
+#endif /* __TOPAZ_DEVICE_H__ */
diff --git a/drivers/media/platform/vxe-vxd/encoder/topazmmu.c b/drivers/media/platform/vxe-vxd/encoder/topazmmu.c
new file mode 100644
index 000000000000..184bcb75ab7b
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topazmmu.c
@@ -0,0 +1,741 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * topaz mmu function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/types.h>
+
+#include "fw_headers/defs.h"
+#include "img_errors.h"
+#include "img_mem.h"
+#include "img_mem_man.h"
+#include "talmmu_api.h"
+#include "topazmmu.h"
+#include "vxe_public_regdefs.h"
+
+int use_extended_addressing;
+unsigned int mmu_control_val;
+unsigned char device_initialized = FALSE;
+
+/*
+ * This determines the size of the single general MMU heap we are using;
+ * it is set arbitrarily large at present.
+ */
+#define GENERALMMUHEAPLENGTH 0x40000000
+
+/*
+ * This describes the heaps - the separate areas mapped by the MMU
+ * Currently we only use a single large heap as Topaz Core has no
+ * MMU specific memory features.
+ */
+struct talmmu_heap_info mmu_heap_info[HEAP_ID_NO_OF_HEAPS] = {
+ { MMU_GENERAL_HEAP_ID, TALMMU_HEAP_PERCONTEXT, TALMMU_HEAPFLAGS_NONE, "MEMSYSMEM",
+ 0x00400000, GENERALMMUHEAPLENGTH }
+};
+
+/* This describes the memory being mapped by the MMU */
+struct talmmu_devmem_info mmu_device_memory_info = {
+ /* ui32DeviceId */
+ 1,
+ /* eMMUType */
+ TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR,
+ /* eDevFlags */
+ TALMMU_DEVFLAGS_NONE,
+ /* pszPageDirMemSpaceName */
+ "MEMSYSMEM",
+ /* pszPageTableMemSpaceName */
+ "MEMSYSMEM",
+ /* ui32PageSize */
+ 4096,
+ /* ui32PageTableDirAlignment */
+ 0
+};
+
+/*
+ * mmu template is global. so we don't need to worry about maintaining device
+ * context
+ */
+void *mmu_template;
+
+/*
+ * The stream context is global; this can be extended in future to handle a list of streams.
+ */
+struct mmu_str_context *str_ctx;
+
+/*
+ * Called once during initialization to initialize the MMU hardware, create
+ * the template and define the MMU heap.
+ * This is where talmmu is initialised and the template created.
+ *
+ * NOTE: We are not taking care of alignment here; this needs to be
+ * updated in mmu_device_memory_info.
+ */
+int topaz_mmu_device_create(struct topaz_mmu_context *mmu_context, unsigned int mmu_flags)
+{
+ void *topaz_multi_core_regid;
+ unsigned int hw_rev;
+ int result, i;
+
+ use_extended_addressing = (mmu_flags & MMU_EXTENDED_ADDR_FLAG);
+
+ /* Initialize TALMMU API and create a template */
+ result = talmmu_init();
+ IMG_DBG_ASSERT(result == 0);
+
+ if (result != 0) {
+ pr_err("talmmu_init failed!\n");
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ /*
+ * We are reading the register and finding the mmu type, if needed this
+ * can be passed from the upper layers directly.
+ */
+
+ topaz_multi_core_regid = (void *)topaz_mem_space[REG_TOPAZHP_MULTICORE].cpu_addr;
+
+ hw_rev = VXE_RD_REG32(topaz_multi_core_regid, TOPAZHP_TOP_CR_TOPAZHP_CORE_REV);
+ hw_rev &=
+ (MASK_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV | MASK_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV |
+ MASK_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV);
+
+ if (use_extended_addressing) {
+ unsigned int reg_val;
+
+ /* Versions 3.6 and above may be 32-bit, 36-bit or 40-bit */
+ reg_val = VXE_RD_REG32(topaz_multi_core_regid, TOPAZHP_TOP_CR_MULTICORE_HW_CFG);
+
+ switch (F_DECODE(reg_val, TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE)) {
+ case 0:
+ mmu_device_memory_info.mmu_type = TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR;
+ break;
+ case 4:
+ mmu_device_memory_info.mmu_type = TALMMU_MMUTYPE_4K_PAGES_36BIT_ADDR;
+ break;
+ case 8:
+ mmu_device_memory_info.mmu_type = TALMMU_MMUTYPE_4K_PAGES_40BIT_ADDR;
+ break;
+ default:
+ pr_err("Unsupported MMU mode requested\n");
+ return IMG_ERROR_NOT_SUPPORTED;
+ }
+ }
+
+ result = talmmu_devmem_template_create(&mmu_device_memory_info, &mmu_template);
+ IMG_DBG_ASSERT(result == 0);
+ if (result != 0) {
+ pr_err("talmmu_devmem_template_create failed!\n");
+ talmmu_deinit();
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ /* Add heaps to the template */
+ for (i = 0; i < HEAP_ID_NO_OF_HEAPS; i++) {
+ result = talmmu_devmem_heap_add(mmu_template, &mmu_heap_info[i]);
+ IMG_DBG_ASSERT(result == 0);
+ if (result != 0) {
+ pr_err("talmmu_devmem_heap_add failed!\n");
+ talmmu_devmem_template_destroy(mmu_template);
+ talmmu_deinit();
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+ }
+
+ /* Create a context from the template */
+ /* (Template, User allocated user ID) */
+ result = talmmu_devmem_ctx_create(mmu_template, 1, &mmu_context->mmu_context_handle);
+ IMG_DBG_ASSERT(result == 0);
+ if (result != 0) {
+ pr_err("talmmu_devmem_ctx_create failed!\n");
+ talmmu_devmem_template_destroy(mmu_template);
+ talmmu_deinit();
+ return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+ }
+
+ topaz_core_mmu_flush_cache();
+
+ /* Initialise stream list. */
+ lst_init(&mmu_context->str_list);
+
+ device_initialized = TRUE;
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to destroy the MMU device context.
+ * NOTE: Destroying the device automatically destroys any streams and
+ * frees any memory allocated using topaz_mmu_stream_alloc().
+ */
+int topaz_mmu_device_destroy(struct topaz_mmu_context *mmu_context)
+{
+ unsigned int result = 0;
+ struct mmu_str_context *str_ctx;
+
+ /* Destroy all streams associated with the device. */
+ str_ctx = lst_first(&mmu_context->str_list);
+ while (str_ctx) {
+ /* topaz_mmu_stream_destroy() removes the stream from the list. */
+ topaz_mmu_stream_destroy(mmu_context, str_ctx);
+
+ /* See if there are more streams. */
+ str_ctx = lst_first(&mmu_context->str_list);
+ }
+
+ /* Destroy the device context */
+ result = talmmu_devmem_ctx_destroy(mmu_context->mmu_context_handle);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Destroy the template. */
+ return talmmu_devmem_template_destroy(mmu_template);
+}
+
+/*
+ * This function is used to create and initialize the MMU stream context.
+ */
+int topaz_mmu_stream_create(struct topaz_mmu_context *mmu_context, unsigned int km_str_id,
+ void *vxe_enc_ctx_arg, void **mmu_str_ctx)
+{
+ struct mmu_str_context *str_ctx;
+
+ /* Validate inputs. */
+ if (!device_initialized)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Allocate a stream context structure */
+ str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+ if (!str_ctx)
+ return IMG_ERROR_OUT_OF_MEMORY;
+
+ str_ctx->km_str_id = km_str_id;
+ str_ctx->int_reg_num = 32;
+ str_ctx->vxe_enc_context = (struct vxe_enc_ctx *)vxe_enc_ctx_arg;
+
+ /* copy the mmu context created earlier */
+ str_ctx->mmu_context_handle = mmu_context->mmu_context_handle;
+
+ *mmu_str_ctx = str_ctx;
+
+ /* Add stream to list. */
+ lst_add(&mmu_context->str_list, str_ctx);
+
+ return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to destroy the MMU stream context.
+ * NOTE: Destroying the stream automatically frees any memory allocated
+ * using topaz_mmu_stream_alloc().
+ */
+int topaz_mmu_stream_destroy(struct topaz_mmu_context *mmu_context,
+ struct mmu_str_context *str_ctx)
+{
+ /* Validate inputs. */
+ if (!str_ctx)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Remove the stream from the list. */
+ lst_remove(&mmu_context->str_list, str_ctx);
+
+ kfree(str_ctx);
+
+ return IMG_SUCCESS;
+}
+
+static unsigned int set_attributes(enum sys_emem_attrib mem_attrib)
+{
+ unsigned int attrib = 0;
+
+ if (mem_attrib & SYS_MEMATTRIB_CACHED)
+ attrib |= MEM_ATTR_CACHED;
+
+ if (mem_attrib & SYS_MEMATTRIB_UNCACHED)
+ attrib |= MEM_ATTR_UNCACHED;
+
+ if (mem_attrib & SYS_MEMATTRIB_WRITECOMBINE)
+ attrib |= MEM_ATTR_WRITECOMBINE;
+
+ if (mem_attrib & SYS_MEMATTRIB_SECURE)
+ attrib |= MEM_ATTR_SECURE;
+
+ return attrib;
+}
+
+int topaz_mmu_alloc(void *mmu_context_handle, struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ enum topaz_mmu_eheap_id heap_id, unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib, unsigned int size, unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ int result = 0;
+ void *devmem_heap_hndl;
+ struct vxe_enc_ctx *ctx;
+ struct vxe_dev *vxe;
+ unsigned int flags = 0;
+ unsigned int attributes = 0;
+
+ if (!mmu_context_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+ /* Allocate memory */
+ ctx = vxe_enc_ctx_arg;
+ vxe = ctx->dev;
+
+ attributes = set_attributes(mem_attrib);
+
+ result = img_mem_alloc(vxe->dev, ctx->mem_ctx, mem_heap_id,
+ size, (enum mem_attr)attributes, (int *)&ddbuf_info->buff_id);
+ if (result != IMG_SUCCESS)
+ goto error_alloc;
+
+ ddbuf_info->is_internal = 1;
+
+ /* TODO need to check more on attributes from memmgr_km */
+ if (mem_attrib & SYS_MEMATTRIB_SECURE) {
+ ddbuf_info->cpu_virt = NULL;
+ } else {
+ /* Map the buffer to CPU */
+ result = img_mem_map_km(ctx->mem_ctx, ddbuf_info->buff_id);
+ if (result) {
+ dev_err(vxe->dev, "%s: failed to map buf to cpu!(%d)\n",
+ __func__, result);
+ goto error_get_heap_handle;
+ }
+ ddbuf_info->cpu_virt = img_mem_get_kptr(ctx->mem_ctx, ddbuf_info->buff_id);
+ }
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, mmu_context_handle, &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ goto error_get_heap_handle;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(mmu_context_handle, devmem_heap_hndl,
+ size, alignment, &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ goto error_mem_map_ext_mem;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ goto error_get_dev_virt_addr;
+
+ result = img_mmu_map(ctx->mmu_ctx, ctx->mem_ctx, ddbuf_info->buff_id, ddbuf_info->dev_virt,
+ flags);
+ if (result != IMG_SUCCESS)
+ goto error_map_dev;
+
+ return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+ talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ ddbuf_info->hndl_memory = NULL;
+error_mem_map_ext_mem:
+error_get_heap_handle:
+ img_mem_free(ctx->mem_ctx, ddbuf_info->buff_id);
+error_alloc:
+ return result;
+}
+
+/*
+ * mmu_stream_malloc
+ */
+int topaz_mmu_stream_alloc(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ unsigned int mem_heap_id, enum sys_emem_attrib mem_attrib,
+ unsigned int size, unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct mmu_str_context *str_ctx;
+
+ /* Validate inputs. */
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct mmu_str_context *)mmu_str_hndl;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_GENERAL_HEAP_ID:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /* Allocate device memory. */
+ return (topaz_mmu_alloc(str_ctx->mmu_context_handle, str_ctx->vxe_enc_context,
+ heap_id, mem_heap_id, mem_attrib, size, alignment, ddbuf_info));
+}
+
+/*
+ * mmu_stream_map_ext_sg
+ */
+int topaz_mmu_stream_map_ext_sg(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ void *sgt, unsigned int size, unsigned int alignment,
+ enum sys_emem_attrib mem_attrib, void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info, unsigned int *buff_id)
+{
+ int result;
+ void *devmem_heap_hndl;
+ struct mmu_str_context *str_ctx;
+ struct vxe_enc_ctx *ctx;
+ struct vxe_dev *vxe;
+
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct mmu_str_context *)mmu_str_hndl;
+
+ ctx = str_ctx->vxe_enc_context;
+ vxe = ctx->dev;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_GENERAL_HEAP_ID:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ if (!str_ctx->mmu_context_handle)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+ result = img_mem_import(vxe->dev, ctx->mem_ctx, ddbuf_info->buf_size,
+ (enum mem_attr)set_attributes(mem_attrib), (int *)buff_id);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ ddbuf_info->buff_id = *buff_id;
+ ddbuf_info->is_internal = 0;
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /* Update to the rounded-up buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* Secure buffers have no CPU mapping. */
+ if (mem_attrib & SYS_MEMATTRIB_SECURE)
+ ddbuf_info->cpu_virt = NULL;
+ else
+ ddbuf_info->cpu_virt = cpu_linear_addr;
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, str_ctx->mmu_context_handle, &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(str_ctx->mmu_context_handle, devmem_heap_hndl,
+ size, alignment, &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ goto error_get_dev_virt_addr;
+
+ result = img_mmu_map_sg(ctx->mmu_ctx, ctx->mem_ctx, ddbuf_info->buff_id, sgt,
+ ddbuf_info->dev_virt, mem_attrib);
+ if (result != IMG_SUCCESS)
+ goto error_map_dev;
+
+ return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+ talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+ ddbuf_info->hndl_memory = NULL;
+ return result;
+}
+
+/*
+ * topaz_mmu_stream_map_ext
+ */
+int topaz_mmu_stream_map_ext(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ unsigned int buff_id, unsigned int size, unsigned int alignment,
+ enum sys_emem_attrib mem_attrib, void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ int result = 0;
+ void *devmem_heap_hndl;
+ struct vxe_enc_ctx *ctx;
+ struct mmu_str_context *str_ctx;
+
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct mmu_str_context *)mmu_str_hndl;
+
+ /* Check if device level heap. */
+ switch (heap_id) {
+ case MMU_GENERAL_HEAP_ID:
+ break;
+
+ default:
+ return IMG_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* Round size up to next multiple of physical pages */
+ if ((size % HOST_MMU_PAGE_SIZE) != 0)
+ size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+ ddbuf_info->buff_id = buff_id;
+ ddbuf_info->is_internal = 0;
+
+ ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+ /* Set buffer size. */
+ ddbuf_info->buf_size = size;
+
+ /* The address of the buffer is expected to be at least page aligned. */
+ ddbuf_info->cpu_virt = cpu_linear_addr;
+
+ /* Get heap handle */
+ result = talmmu_get_heap_handle(heap_id, str_ctx->mmu_context_handle,
+ &devmem_heap_hndl);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Allocate device "virtual" memory. */
+ result = talmmu_devmem_addr_alloc(str_ctx->mmu_context_handle,
+ devmem_heap_hndl, size, alignment,
+ &ddbuf_info->hndl_memory);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /* Get the device virtual address. */
+ result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+ if (result != IMG_SUCCESS)
+ return result;
+
+ /*
+ * Map device memory (allocated from outside VDEC)
+ * into the stream PTD.
+ */
+ ctx = str_ctx->vxe_enc_context;
+
+ return img_mmu_map(ctx->mmu_ctx, ctx->mem_ctx, ddbuf_info->buff_id, ddbuf_info->dev_virt,
+ mem_attrib);
+}
+
+/*
+ * topaz_mmu_free
+ */
+int topaz_mmu_free(struct vxe_enc_ctx *vxe_enc_ctx_arg, struct vidio_ddbufinfo *ddbuf_info)
+{
+ int result = 0;
+ struct vxe_enc_ctx *ctx;
+
+ /* Validate inputs. */
+ if (!ddbuf_info)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ /* Unmap the memory mapped to the device */
+ ctx = vxe_enc_ctx_arg;
+ result = img_mmu_unmap(ctx->mmu_ctx, ctx->mem_ctx, ddbuf_info->buff_id);
+
+ /*
+ * Unmapping the memory mapped to the device - done
+ * Free the memory.
+ */
+ result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+
+ if (ddbuf_info->is_internal)
+ img_mem_free(ctx->mem_ctx, ddbuf_info->buff_id);
+
+ return result;
+}
+
+/*
+ * topaz_mmu_free_mem.
+ * This should be used only to free the stream memory.
+ */
+int topaz_mmu_stream_free(void *mmu_str_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct mmu_str_context *str_ctx;
+
+ if (!mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct mmu_str_context *)mmu_str_hndl;
+
+ return topaz_mmu_free(str_ctx->vxe_enc_context, ddbuf_info);
+}
+
+/*
+ * topaz_mmu_free_mem_sg.
+ * This should be used only to free the stream memory.
+ */
+int topaz_mmu_stream_free_sg(void *mmu_str_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+ int result = 0;
+ struct vxe_enc_ctx *ctx;
+ struct mmu_str_context *str_ctx;
+
+ /* Validate inputs. */
+ if (!ddbuf_info || !mmu_str_hndl)
+ return IMG_ERROR_INVALID_PARAMETERS;
+
+ str_ctx = (struct mmu_str_context *)mmu_str_hndl;
+
+ /* Unmap the memory mapped to the device */
+ ctx = str_ctx->vxe_enc_context;
+
+ result = img_mmu_unmap(ctx->mmu_ctx, ctx->mem_ctx, ddbuf_info->buff_id);
+
+ /*
+ * Unmapping the memory mapped to the device - done
+ * Free the memory.
+ */
+ result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+
+ /*
+ * For buffers from the external memory manager, just clean up the
+ * idr list and the buffer objects.
+ */
+ img_mem_free_bufid(ctx->mem_ctx, ddbuf_info->buff_id);
+
+ return result;
+}
+
+int topaz_update_device_mem(struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct vxe_enc_ctx *ctx = vxe_enc_ctx_arg;
+
+ return img_mem_sync_cpu_to_device(ctx->mem_ctx,
+ ddbuf_info->buff_id);
+}
+
+int topaz_update_host_mem(struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ struct vidio_ddbufinfo *ddbuf_info)
+{
+ struct vxe_enc_ctx *ctx = vxe_enc_ctx_arg;
+
+ return img_mem_sync_device_to_cpu(ctx->mem_ctx,
+ ddbuf_info->buff_id);
+}
+
+/*
+ * Called for each Topaz core when MMU support is activated, sets up the MMU
+ * hardware for the specified core.
+ */
+int topaz_core_mmu_hw_setup(struct topaz_mmu_context *mmu_context, void *core_reg)
+{
+ unsigned int cmd;
+
+ /* Bypass all requesters while MMU is being configured */
+ cmd = F_ENCODE(1, TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ);
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, cmd);
+
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0), mmu_context->ptd_phys_addr);
+
+ cmd = VXE_RD_REG32(core_reg, TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0));
+
+#ifdef DEBUG_ENCODER_DRIVER
+ pr_info("Page table directory at physical address 0x%08x\n", cmd);
+#endif
+ /*
+ * Set up the Index Register (to point to the base register)
+ * We're setting all fields to zero (all flags pointing to directory bank 0)
+ */
+ cmd = 0;
+
+ /* Now enable MMU access for all requesters
+ * 36-bit actually means "not 32-bit"
+ */
+ cmd = F_ENCODE(use_extended_addressing ? 1 : 0, TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING);
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL2, cmd);
+
+ mmu_control_val = F_ENCODE(0, TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ);
+ cmd = F_ENCODE(0, TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ);
+
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, cmd);
+
+ return 0;
+}
+
+/*
+ * topaz_core_mmu_flush_cache
+ */
+int topaz_core_mmu_flush_cache(void)
+{
+ static void *core_reg;
+ unsigned int reg_value;
+ unsigned long flags;
+
+ if (!core_reg)
+ core_reg = (void *)topaz_mem_space[REG_TOPAZHP_MULTICORE].cpu_addr;
+
+ /* TODO: we can have a global or a local lock based on need */
+ spin_lock_irqsave(g_lock, flags);
+
+ reg_value = VXE_RD_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0);
+
+ /* PAUSE */
+ reg_value |= F_ENCODE(1, TOPAZHP_TOP_CR_MMU_PAUSE);
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, reg_value);
+
+ {
+ unsigned int i, mem_req_reg;
+
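+ /*
+ * The MMU must report no outstanding memory requests for ten
+ * consecutive reads before it is safe to invalidate the cache.
+ */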
+wait_till_idle:
+ for (i = 0; i < 10; i++) {
+ mem_req_reg = VXE_RD_REG32(core_reg, TOPAZHP_TOP_CR_MMU_MEM_REQ);
+ if (mem_req_reg != 0)
+ goto wait_till_idle;
+ }
+ }
+
+ /* Set invalidate */
+ reg_value |= F_ENCODE(1, TOPAZHP_TOP_CR_MMU_INVALDC);
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, reg_value);
+
+ /* Clear invalidate */
+ reg_value &= ~((unsigned int)F_ENCODE(1, TOPAZHP_TOP_CR_MMU_INVALDC));
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, reg_value);
+
+ /* UNPAUSE */
+ reg_value &= ~((unsigned int)F_ENCODE(1, TOPAZHP_TOP_CR_MMU_PAUSE));
+ VXE_WR_REG32(core_reg, TOPAZHP_TOP_CR_MMU_CONTROL0, reg_value);
+
+ /* TODO: we can have a global or a local lock based on need */
+ spin_unlock_irqrestore(g_lock, flags);
+
+ return 0;
+}
diff --git a/drivers/media/platform/vxe-vxd/encoder/topazmmu.h b/drivers/media/platform/vxe-vxd/encoder/topazmmu.h
new file mode 100644
index 000000000000..733d1ba7529f
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/topazmmu.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * topaz mmu header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef TOPAZ_MMU_H_
+#define TOPAZ_MMU_H_
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "talmmu_api.h"
+#include "vxe_enc.h"
+#include "img_mem.h"
+#include "target_config.h"
+
+/* Page size of the device MMU */
+#define DEV_MMU_PAGE_SIZE (0x1000)
+/* Page alignment of the device MMU */
+#define DEV_MMU_PAGE_ALIGNMENT (0x1000)
+
+#define HOST_MMU_PAGE_SIZE PAGE_SIZE
+
+/*
+ * This structure contains the stream context.
+ * @brief MMU Stream Context
+ * @mmu_context_handle: Handle for the MMU context.
+ * @km_str_id: Stream ID used in communication with the new KM interface.
+ * @vxe_enc_context: Encoder context, needed to access mem_ctx.
+ */
+struct mmu_str_context {
+ void **link; /* to maintain this context in a singly linked list */
+ void *mmu_context_handle;
+ unsigned int int_reg_num;
+ unsigned int km_str_id;
+ /* vxe encoder context. Need in stream context to access mem_ctx. */
+ struct vxe_enc_ctx *vxe_enc_context;
+ struct lst_t ddbuf_list;
+};
+
+struct topaz_mmu_context {
+ void *mmu_context_handle;
+ unsigned int ptd_phys_addr;
+ struct lst_t str_list;
+};
+
+/*
+ * This type defines the MMU heaps.
+ * @0: General heap ID.
+ */
+enum topaz_mmu_eheap_id {
+ MMU_GENERAL_HEAP_ID = 0x00,
+ /* Do not remove - keeps count of size */
+ HEAP_ID_NO_OF_HEAPS
+};
+
+/* Function definitions */
+
+/*
+ * Called once during initialization to initialize the MMU hardware, create
+ * the template and define the MMU heap.
+ * This is where talmmu is initialised and the template created.
+ *
+ * NOTE: We are not taking care of alignment here; this needs to be
+ * updated in mmu_device_memory_info.
+ */
+int topaz_mmu_device_create(struct topaz_mmu_context *mmu_context, unsigned int mmu_flags);
+
+/*
+ * @Function mmu_device_destroy
+ * @Description
+ * This function is used to destroy the MMU device context.
+ * NOTE: Destroying the device automatically destroys any streams and
+ * frees any memory allocated using topaz_mmu_stream_alloc().
+ * @Return IMG_SUCCESS or an error code.
+ */
+int topaz_mmu_device_destroy(struct topaz_mmu_context *mmu_context);
+
+/*
+ * @Function mmu_stream_create
+ * @Description
+ * This function is used to create and initialize the MMU stream context.
+ * @Input km_str_id : Stream Id used in communication with KM driver.
+ * @Return IMG_SUCCESS or an error code.
+ *
+ * Context ID is 1, since we are creating a single stream.
+ */
+int topaz_mmu_stream_create(struct topaz_mmu_context *mmu_context, unsigned int km_str_id,
+ void *vxe_enc_ctx_arg, void **mmu_str_ctx);
+
+/*
+ * @Function mmu_stream_destroy
+ * @Description
+ * This function is used to destroy the MMU stream context.
+ * NOTE: Destroying the stream automatically frees any memory allocated
+ * using topaz_mmu_stream_alloc().
+ * @Input str_ctx : The MMU stream handle.
+ * @Return IMG_SUCCESS or an error code.
+ */
+int topaz_mmu_stream_destroy(struct topaz_mmu_context *mmu_context,
+ struct mmu_str_context *str_ctx);
+
+int topaz_mmu_alloc(void *mmu_context_handle, struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ enum topaz_mmu_eheap_id heap_id, unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib, unsigned int size, unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info);
+/*
+ * @Function mmu_stream_malloc
+ */
+int topaz_mmu_stream_alloc(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ unsigned int mem_heap_id,
+ enum sys_emem_attrib mem_attrib,
+ unsigned int size,
+ unsigned int alignment,
+ struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function mmu_stream_map_ext_sg
+ */
+int topaz_mmu_stream_map_ext_sg(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ void *sgt,
+ unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info,
+ unsigned int *buff_id);
+
+/*
+ * @Function mmu_stream_map_ext
+ */
+int topaz_mmu_stream_map_ext(void *mmu_str_hndl, enum topaz_mmu_eheap_id heap_id,
+ unsigned int buff_id, unsigned int size,
+ unsigned int alignment,
+ enum sys_emem_attrib mem_attrib,
+ void *cpu_linear_addr,
+ struct vidio_ddbufinfo *ddbuf_info);
+
+/* topaz core mmu hardware setup */
+int topaz_core_mmu_hw_setup(struct topaz_mmu_context *mmu_context, void *core_reg);
+
+/* topaz core mmu flush cache */
+int topaz_core_mmu_flush_cache(void);
+
+/*
+ * @Function mmu_free
+ *
+ * Free memory allocated with mmu_alloc
+ */
+int topaz_mmu_free(struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function mmu_free_mem.
+ *
+ * NOTE : This should be used only to free the stream memory.
+ */
+int topaz_mmu_stream_free(void *mmu_str_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function mmu_free_mem_sg.
+ *
+ * NOTE : This should be used only to free the stream memory.
+ */
+int topaz_mmu_stream_free_sg(void *mmu_str_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function update_device_mem
+ *
+ * Update the memory to the device
+ */
+int topaz_update_device_mem(struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function update_host_mem
+ *
+ * Update the memory to the host
+ */
+int topaz_update_host_mem(struct vxe_enc_ctx *vxe_enc_ctx_arg,
+ struct vidio_ddbufinfo *ddbuf_info);
+
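+/*
+ * Illustrative lifecycle (a sketch only, error handling omitted): one
+ * device context serves many stream contexts, and device-visible buffers
+ * are allocated through the stream:
+ *
+ *	struct topaz_mmu_context mmu;
+ *	void *str;
+ *	struct vidio_ddbufinfo buf;
+ *
+ *	topaz_mmu_device_create(&mmu, mmu_flags);
+ *	topaz_mmu_stream_create(&mmu, km_str_id, enc_ctx, &str);
+ *	topaz_mmu_stream_alloc(str, MMU_GENERAL_HEAP_ID, heap_id, attrib,
+ *			       size, align, &buf);
+ *	...
+ *	topaz_mmu_stream_free(str, &buf);
+ *	topaz_mmu_device_destroy(&mmu); /* also destroys remaining streams */
+ */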
+/* Global */
+extern struct mem_space topaz_mem_space[];
+extern void *g_lock;
+
+#endif /* TOPAZ_MMU_H_ */
diff --git a/drivers/media/platform/vxe-vxd/encoder/vxe_enc.c b/drivers/media/platform/vxe-vxd/encoder/vxe_enc.c
new file mode 100644
index 000000000000..0c338bd1c37b
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/vxe_enc.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Encoder Interface API function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_mem_man.h"
+#include "topazmmu.h"
+#include "vxe_enc.h"
+
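+/*
+ * Type-explicit max: each argument is evaluated exactly once, and the
+ * caller names the comparison type to avoid signedness surprises.
+ */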
+#define MAX(a, b, type) ({ \
+ type __a = (a); \
+ type __b = (b); \
+ (((__a) >= (__b)) ? (__a) : (__b)); })
+
+void mmu_callback(enum mmu_callback_type callback_type,
+ int buff_id, void *data)
+{
+ topaz_core_mmu_flush_cache();
+}
+
+int vxe_init_mem(struct vxe_dev *vxe)
+{
+ int ret;
+
+ /* Create memory management context for HW buffers */
+ ret = img_mem_create_ctx(&vxe->drv_ctx.mem_ctx);
+ if (ret) {
+ dev_err(vxe->dev, "%s: failed to create mem context (err:%d)!\n",
+ __func__, ret);
+ goto create_mem_context_failed;
+ }
+
+ ret = img_mmu_ctx_create(vxe->dev, 40 /* mmu_addr_width is 40 */,
+ vxe->drv_ctx.mem_ctx, vxe->drv_ctx.internal_heap_id,
+ mmu_callback, vxe, &vxe->drv_ctx.mmu_ctx);
+ if (ret) {
+ dev_err(vxe->dev, "%s:%d: failed to create mmu ctx\n",
+ __func__, __LINE__);
+ goto create_mmu_context_failed;
+ }
+
+ ret = img_mmu_get_ptd(vxe->drv_ctx.mmu_ctx, &vxe->drv_ctx.ptd);
+ if (ret) {
+ dev_err(vxe->dev, "%s:%d: failed to get PTD\n",
+ __func__, __LINE__);
+ goto get_ptd_failed;
+ }
+
+ return 0;
+
+get_ptd_failed:
+ img_mmu_ctx_destroy(vxe->drv_ctx.mmu_ctx);
+create_mmu_context_failed:
+ img_mem_destroy_ctx(vxe->drv_ctx.mem_ctx);
+create_mem_context_failed:
+ return ret;
+}
+
+void vxe_deinit_mem(struct vxe_dev *vxe)
+{
+ if (vxe->drv_ctx.mmu_ctx) {
+ img_mmu_ctx_destroy(vxe->drv_ctx.mmu_ctx);
+ vxe->drv_ctx.mmu_ctx = NULL;
+ }
+
+ if (vxe->drv_ctx.mem_ctx) {
+ img_mem_destroy_ctx(vxe->drv_ctx.mem_ctx);
+ vxe->drv_ctx.mem_ctx = NULL;
+ }
+
+ /* Deinitialize memory management component */
+ while (!list_empty(&vxe->drv_ctx.heaps)) {
+ struct vxe_heap *heap;
+
+ heap = list_first_entry(&vxe->drv_ctx.heaps, struct vxe_heap, list);
+ list_del(&heap->list);
+ img_mem_del_heap(heap->id);
+ kfree(heap);
+ }
+
+ vxe->drv_ctx.internal_heap_id = VXE_INVALID_ID;
+
+ img_mem_exit();
+}
+
+void vxe_create_ctx(struct vxe_dev *vxe, struct vxe_enc_ctx *ctx)
+{
+ ctx->mem_ctx = vxe->drv_ctx.mem_ctx;
+ ctx->mmu_ctx = vxe->drv_ctx.mmu_ctx;
+}
+
+int calculate_h264_level(unsigned int width, unsigned int height, unsigned int framerate,
+ unsigned char rc_enable, unsigned int bitrate,
+ unsigned char lossless,
+ enum sh_profile_type profile_type,
+ unsigned int max_num_ref_frames)
+{
+ unsigned int level = 0, mbf = 0, mbs = 0, temp_level = 0, dpb_mbs;
+ unsigned int num = 1, den = 1;
+ unsigned int lossless_min_level = 320;
+
+ mbf = (width * height) / 256;
+ mbs = mbf * framerate;
+
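+	/*
+	 * Worked example: 1920x1080 at 30 fps gives mbf = 8100 and
+	 * mbs = 243000; the ladders below map those to level 4.0 by frame
+	 * size and 4.2 by macroblock rate, and the larger value wins.
+	 */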
+ if (mbf > 36864) {
+ pr_warn("WARNING: Frame size is too high for maximum supported level!\n");
+ level = 520;
+ } else if (mbf > 22080) {
+ level = 510;
+ } else if (mbf > 8704) {
+ level = 500;
+ } else if (mbf > 8192) {
+ level = 420;
+ } else if (mbf > 5120) {
+ level = 400;
+ } else if (mbf > 3600) {
+ level = 320;
+ } else if (mbf > 1620) {
+ level = 310;
+ } else if (mbf > 792) {
+ level = 220;
+ } else if (mbf > 396) {
+ level = 210;
+ } else if (mbf > 99) {
+ level = 110;
+ } else {
+ level = 100;
+ }
+
+ dpb_mbs = mbf * max_num_ref_frames;
+
+ if (dpb_mbs > 184320) {
+ pr_warn("ERROR: Decoded picture buffer is too high for supported level!\n");
+ return -1;
+ } else if (dpb_mbs > 110400) {
+ temp_level = 510;
+ } else if (dpb_mbs > 34816) {
+ temp_level = 500;
+ } else if (dpb_mbs > 32768) {
+ temp_level = 420;
+ } else if (dpb_mbs > 20480) {
+ temp_level = 400;
+ } else if (dpb_mbs > 18000) {
+ temp_level = 320;
+ } else if (dpb_mbs > 8100) {
+ temp_level = 310;
+ } else if (dpb_mbs > 4752) {
+ temp_level = 220;
+ } else if (dpb_mbs > 2376) {
+ temp_level = 210;
+ } else if (dpb_mbs > 900) {
+ temp_level = 120;
+ } else if (dpb_mbs > 396) {
+ temp_level = 110;
+ } else {
+ temp_level = 100;
+ }
+
+ level = MAX(level, temp_level, unsigned int);
+
+ /* now restrict based on the number of macroblocks per second */
+ if (mbs > 2073600) {
+ pr_err("ERROR: Macroblock processing rate is too high for supported level!\n");
+ return -1;
+ } else if (mbs > 983040) {
+ temp_level = 520;
+ } else if (mbs > 589824) {
+ temp_level = 510;
+ } else if (mbs > 522240) {
+ temp_level = 500;
+ } else if (mbs > 245760) {
+ temp_level = 420;
+ } else if (mbs > 216000) {
+ temp_level = 400;
+ } else if (mbs > 108000) {
+ temp_level = 320;
+ } else if (mbs > 40500) {
+ temp_level = 310;
+ } else if (mbs > 20250) {
+ temp_level = 300;
+ } else if (mbs > 19800) {
+ temp_level = 220;
+ } else if (mbs > 11880) {
+ temp_level = 210;
+ } else if (mbs > 6000) {
+ temp_level = 130;
+ } else if (mbs > 3000) {
+ temp_level = 120;
+ } else if (mbs > 1485) {
+ temp_level = 110;
+ } else {
+ temp_level = 100;
+ }
+
+ level = MAX(level, temp_level, unsigned int);
+
+ if (rc_enable) {
+ /*
+ * SH_PROFILE_H10P and SH_PROFILE_H422P are
+ * not valid choices for HW_3_X, skipping
+ */
+ if (profile_type == SH_PROFILE_HP) {
+ num = 5;
+ den = 4;
+ } else if (profile_type == SH_PROFILE_H444P) {
+ num = 4;
+ den = 1;
+ }
+
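+		/*
+		 * num/den scale the Baseline bitrate limits by the profile
+		 * factor: 1.25x for High profile, 4x for High 4:4:4.
+		 */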
+ if (bitrate > ((135000000 * num) / den))
+ temp_level = 510;
+ else if (bitrate > ((50000000 * num) / den))
+ temp_level = 500;
+ else if (bitrate > ((20000000 * num) / den))
+ temp_level = 410;
+ else if (bitrate > ((14000000 * num) / den))
+ temp_level = 320;
+ else if (bitrate > ((10000000 * num) / den))
+ temp_level = 310;
+ else if (bitrate > ((4000000 * num) / den))
+ temp_level = 300;
+ else if (bitrate > ((2000000 * num) / den))
+ temp_level = 210;
+ else if (bitrate > ((768000 * num) / den))
+ temp_level = 200;
+ else if (bitrate > ((384000 * num) / den))
+ temp_level = 130;
+ else if (bitrate > ((192000 * num) / den))
+ temp_level = 120;
+ else if (bitrate > ((128000 * num) / den))
+ temp_level = 110;
+ else if (bitrate > ((64000 * num) / den))
+ temp_level = 101;
+ else
+ temp_level = 100;
+
+ level = MAX(level, temp_level, unsigned int);
+ } else {
+ level = 510;
+ }
+
+ if (lossless)
+ level = MAX(level, lossless_min_level, unsigned int);
+
+ return level;
+}
+
+enum sh_profile_type find_h264_profile(unsigned char lossless,
+ unsigned char h264_use_default_scaling_list,
+ unsigned int custom_quant_mask,
+ unsigned char h264_8x8_transform,
+ unsigned char enable_mvc,
+ unsigned int b_frame_count,
+ unsigned char interlaced,
+ unsigned char h264_cabac,
+ unsigned int weighted_prediction_mode,
+ unsigned int weighted_implicit_bi_pred)
+{
+ enum sh_profile_type profile = SH_PROFILE_BP;
+
+ if (lossless)
+ profile = SH_PROFILE_H444P;
+ else if (h264_use_default_scaling_list || custom_quant_mask ||
+ h264_8x8_transform || enable_mvc)
+ profile = SH_PROFILE_HP;
+ else if ((b_frame_count > 0) || interlaced || h264_cabac ||
+ weighted_prediction_mode || weighted_implicit_bi_pred)
+ profile = SH_PROFILE_MP;
+
+ return profile;
+}
+
+void vxe_fill_default_src_frame_params(struct vxe_buffer *buf)
+{
+ buf->src_frame.component_count = 0; /* Unset in IMG */
+ buf->src_frame.format = IMG_CODEC_420_YUV; /* Unset in IMG */
+ buf->src_frame.component_offset[0] = 0;
+ buf->src_frame.component_offset[1] = 0;
+ buf->src_frame.component_offset[2] = 0;
+ buf->src_frame.bottom_component_offset[0] = 0; /* Unset in IMG */
+ buf->src_frame.bottom_component_offset[1] = 0; /* Unset in IMG */
+ buf->src_frame.bottom_component_offset[2] = 0; /* Unset in IMG */
+ buf->src_frame.component_info[0].step = 0;
+ buf->src_frame.component_info[0].width = 0;
+ buf->src_frame.component_info[0].height = 0;
+ buf->src_frame.component_info[0].phys_width = 0;
+ buf->src_frame.component_info[0].phys_height = 0;
+ buf->src_frame.component_info[1].step = 0;
+ buf->src_frame.component_info[1].width = 0;
+ buf->src_frame.component_info[1].height = 0;
+ buf->src_frame.component_info[1].phys_width = 0;
+ buf->src_frame.component_info[1].phys_height = 0;
+ buf->src_frame.component_info[2].step = 0;
+ buf->src_frame.component_info[2].width = 0;
+ buf->src_frame.component_info[2].height = 0;
+ buf->src_frame.component_info[2].phys_width = 0;
+ buf->src_frame.component_info[2].phys_height = 0;
+ buf->src_frame.field0_y_offset = 0;
+ buf->src_frame.field1_y_offset = 0;
+ buf->src_frame.field0_u_offset = 0;
+ buf->src_frame.field1_u_offset = 0;
+ buf->src_frame.field0_v_offset = 0;
+ buf->src_frame.field1_v_offset = 0;
+ buf->src_frame.imported = FALSE;
+}
+
+void vxe_fill_default_params(struct vxe_enc_ctx *ctx)
+{
+ int i, j;
+ unsigned short h264_rounding_offsets[18][4] = {
+ {683, 683, 683, 683}, /* 0 I-Slice - INTRA4 LUMA */
+ {683, 683, 683, 683}, /* 1 P-Slice - INTRA4 LUMA */
+ {683, 683, 683, 683}, /* 2 B-Slice - INTRA4 LUMA */
+
+ {683, 683, 683, 683}, /* 3 I-Slice - INTRA8 LUMA */
+ {683, 683, 683, 683}, /* 4 P-Slice - INTRA8 LUMA */
+ {683, 683, 683, 683}, /* 5 B-Slice - INTRA8 LUMA */
+
+ {341, 341, 341, 341}, /* 6 P-Slice - INTER8 LUMA */
+ {341, 341, 341, 341}, /* 7 B-Slice - INTER8 LUMA */
+
+ {683, 683, 683, 000}, /* 8 I-Slice - INTRA16 LUMA */
+ {683, 683, 683, 000}, /* 9 P-Slice - INTRA16 LUMA */
+ {683, 683, 683, 000}, /* 10 B-Slice - INTRA16 LUMA */
+
+ {341, 341, 341, 341}, /* 11 P-Slice - INTER16 LUMA */
+ {341, 341, 341, 341}, /* 12 B-Slice - INTER16 LUMA */
+
+ {683, 683, 683, 000}, /* 13 I-Slice - INTRA16 CR */
+ {683, 683, 683, 000}, /* 14 P-Slice - INTRA16 CR */
+ {683, 683, 683, 000}, /* 15 B-Slice - INTRA16 CR */
+
+ {341, 341, 341, 000}, /* 16 P-Slice - INTER16 CHROMA */
+ {341, 341, 341, 000} /* 17 B-Slice - INTER16 CHROMA */
+ };
+
+ ctx->vparams.csc_preset = IMG_CSC_NONE;
+ ctx->vparams.slices_per_picture = 1;
+ ctx->vparams.is_interleaved = FALSE;
+ ctx->vparams.constrained_intra = FALSE;
+ ctx->vparams.h264_8x8 = TRUE;
+ ctx->vparams.bottom_field_first = FALSE;
+ ctx->vparams.arbitrary_so = FALSE;
+ ctx->vparams.cabac_enabled = TRUE;
+ ctx->vparams.cabac_bin_limit = 2800;
+ ctx->vparams.cabac_bin_flex = 2800;
+ ctx->vparams.deblock_idc = 0;
+ ctx->vparams.output_reconstructed = FALSE;
+ ctx->vparams.f_code = 4;
+ ctx->vparams.fine_y_search_size = 2;
+ ctx->vparams.no_offscreen_mv = FALSE;
+ ctx->vparams.idr_period = 1800; /* 60 * 30fps */
+ ctx->vparams.intra_cnt = 30;
+ ctx->vparams.vop_time_resolution = 15;
+ ctx->vparams.enc_features.disable_bpic_ref1 = FALSE;
+ ctx->vparams.enc_features.disable_bpic_ref0 = FALSE;
+ ctx->vparams.enc_features.disable_bframes = FALSE;
+ ctx->vparams.enc_features.restricted_intra_pred = FALSE;
+ ctx->vparams.enable_sel_stats_flags = 0;
+ ctx->vparams.enable_inp_ctrl = FALSE;
+ ctx->vparams.enable_air = FALSE;
+ ctx->vparams.num_air_mbs = -1;
+ ctx->vparams.air_threshold = -1;
+ ctx->vparams.air_skip_cnt = -1;
+ ctx->vparams.enable_cumulative_biases = FALSE;
+ ctx->vparams.enable_host_bias = TRUE;
+ ctx->vparams.enable_host_qp = FALSE;
+ ctx->vparams.use_default_scaling_list = FALSE;
+ ctx->vparams.use_custom_scaling_lists = 0;
+ ctx->vparams.pps_scaling = 0;
+ ctx->vparams.disable_bit_stuffing = TRUE;
+ ctx->vparams.coded_skipped_index = 3;
+ ctx->vparams.inter_intra_index = 3;
+ ctx->vparams.mpeg2_intra_dc_precision = 0;
+ ctx->vparams.carc = 0;
+ ctx->vparams.carc_baseline = 0;
+ ctx->vparams.carc_threshold = 1;
+ ctx->vparams.carc_cutoff = 15;
+ ctx->vparams.carc_neg_range = 5;
+ ctx->vparams.carc_neg_scale = 12;
+ ctx->vparams.carc_pos_range = 5;
+ ctx->vparams.carc_pos_scale = 12;
+ ctx->vparams.carc_shift = 3;
+ ctx->vparams.weighted_prediction = FALSE;
+ ctx->vparams.vp_weighted_implicit_bi_pred = 0;
+ ctx->vparams.insert_hrd_params = FALSE;
+ ctx->vparams.intra_refresh = 0;
+ ctx->vparams.chunks_per_mb = 64;
+ ctx->vparams.max_chunks = 160;
+ ctx->vparams.priority_chunks = 64;
+ ctx->vparams.mbps = 0;
+ ctx->vparams.multi_reference_p = FALSE;
+ ctx->vparams.ref_spacing = 0;
+ ctx->vparams.spatial_direct = FALSE;
+ ctx->vparams.vp_adaptive_rounding_disable = 0;
+
+ for (i = 0; i < 18; i++) {
+ for (j = 0; j < 4; j++) {
+ ctx->vparams.vp_adaptive_rounding_offsets[i][j] =
+ h264_rounding_offsets[i][j];
+ }
+ }
+
+ ctx->vparams.debug_crcs = 0;
+ ctx->vparams.enable_mvc = FALSE;
+ ctx->vparams.mvc_view_idx = 65535;
+ ctx->vparams.high_latency = TRUE;
+ ctx->vparams.disable_bh_rounding = FALSE;
+ ctx->vparams.no_sequence_headers = FALSE;
+ ctx->vparams.auto_encode = FALSE;
+ ctx->vparams.slice_level = FALSE;
+ ctx->vparams.coded_header_per_slice = FALSE;
+ ctx->vparams.auto_expand_pipes = FALSE;
+ ctx->vparams.enable_lossless = FALSE;
+ ctx->vparams.lossless_8x8_prefilter = FALSE;
+ ctx->vparams.enable_scaler = FALSE;
+ ctx->vparams.line_counter_enabled = FALSE;
+
+ ctx->rc.initial_qp_i = 0;
+ ctx->rc.initial_qp_p = 0;
+ ctx->rc.initial_qp_b = 0;
+
+ ctx->rc.min_qp = 0;
+ ctx->rc.max_qp = 0;
+ ctx->rc.rc_enable = TRUE;
+
+ ctx->rc.hierarchical = FALSE;
+
+ ctx->rc.enable_slice_bob = FALSE;
+ ctx->rc.max_slice_bob = 2;
+ ctx->rc.slice_bob_qp = 44;
+
+ ctx->rc.qcp_offset = 0;
+ ctx->rc.sc_detect_disable = FALSE;
+ ctx->rc.slice_byte_limit = 0;
+ ctx->rc.slice_mb_limit = 0;
+ ctx->rc.rc_mode = IMG_RCMODE_VBR;
+ ctx->rc.rc_vcm_mode = IMG_RC_VCM_MODE_DEFAULT;
+ ctx->rc.rc_cfs_max_margin_perc = 9;
+ ctx->rc.disable_frame_skipping = FALSE;
+ ctx->rc.disable_vcm_hardware = FALSE;
+
+ ctx->s_fmt_flags = 0;
+
+ ctx->above_mb_params_sgt[0].sgl = NULL;
+ ctx->above_mb_params_sgt[1].sgl = NULL;
+}
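+
+/*
+ * A minimal usage sketch (an assumed open() ordering, not something
+ * this patch mandates): pair vxe_create_ctx() with the defaults above
+ * before any S_FMT handling.
+ *
+ *	vxe_create_ctx(vxe, ctx);
+ *	vxe_fill_default_params(ctx);
+ */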
+
+unsigned int vxe_get_sizeimage(int w, int h, struct vxe_enc_fmt *fmt, unsigned char plane_id)
+{
+ return (ALIGN_16(w) * ALIGN_16(h) * fmt->size_num[plane_id] / fmt->size_den[plane_id]);
+}
+
+unsigned int vxe_get_stride(int w, struct vxe_enc_fmt *fmt)
+{
+ return ALIGN(w * fmt->bytes_pp, HW_ALIGN);
+}
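+
+/*
+ * Worked example (illustrative; values assume the NV12 format entry,
+ * where bytes_pp = 1, size_num[0] = 3 and size_den[0] = 2):
+ *
+ *	vxe_get_stride(1920, fmt) = ALIGN(1920 * 1, 64) = 1920
+ *	vxe_get_sizeimage(1920, 1080, fmt, 0)
+ *		= ALIGN_16(1920) * ALIGN_16(1080) * 3 / 2
+ *		= 1920 * 1088 * 3 / 2 = 3133440 bytes
+ */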
diff --git a/drivers/media/platform/vxe-vxd/encoder/vxe_enc.h b/drivers/media/platform/vxe-vxd/encoder/vxe_enc.h
new file mode 100644
index 000000000000..1df659a32c60
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/vxe_enc.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * encoder interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VXE_ENC_H
+#define _VXE_ENC_H
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include "topaz_api.h"
+
+#define HW_ALIGN 64
+#define MB_SIZE 16
+#define VXE_INVALID_ID (-1)
+#define OCM_RAM_POOL_CHUNK_SIZE (32 * 1024)
+
+enum {
+ Q_ENC_DATA_SRC = 0,
+ Q_ENC_DATA_DST = 1,
+ Q_ENC_DATA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum {
+ IMG_ENC_FMT_TYPE_CAPTURE = 0x01,
+ IMG_ENC_FMT_TYPE_OUTPUT = 0x10,
+ IMG_ENC_FMT_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum vxe_map_flags {
+ VXE_MAP_FLAG_NONE = 0x0,
+ VXE_MAP_FLAG_READ_ONLY = 0x1,
+ VXE_MAP_FLAG_WRITE_ONLY = 0x2,
+ VXE_MAP_FLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct vxe_enc_fmt - contains info for each supported video format
+ */
+struct vxe_enc_fmt {
+ unsigned int fourcc;
+ unsigned int num_planes;
+ unsigned int type;
+ union {
+ enum img_standard std;
+ enum img_format fmt;
+ };
+ unsigned int min_bufs;
+ unsigned int size_num[MAX_PLANES];
+ unsigned int size_den[MAX_PLANES];
+ unsigned int bytes_pp;
+ enum img_csc_preset csc_preset;
+};
+
+/*
+ * struct vxe_buffer - contains info for all buffers
+ */
+struct vxe_buffer {
+ struct v4l2_m2m_buffer buffer;
+ unsigned int index;
+ unsigned int buf_map_id;
+ struct vidio_ddbufinfo buf_info;
+ union {
+ struct img_frame src_frame;
+ struct img_coded_buffer coded_buffer;
+ };
+ struct img_buffer y_buffer;
+ struct img_buffer u_buffer;
+ struct img_buffer v_buffer;
+ unsigned char src_slot_num;
+ unsigned char mapped;
+};
+
+/*
+ * struct vxe_heap - node for heaps list
+ * @id: heap id
+ * @list: entry in <struct vxe_drv_ctx:heaps>
+ */
+struct vxe_heap {
+ int id;
+ struct list_head list;
+};
+
+/* Driver context */
+struct vxe_drv_ctx {
+ /* Available memory heaps. List of <struct vxe_heap> */
+ struct list_head heaps;
+ /* heap id for all internal allocations */
+ int internal_heap_id;
+ /* Memory Management context for driver */
+ struct mem_ctx *mem_ctx;
+ /* MMU context for driver */
+ struct mmu_ctx *mmu_ctx;
+ /* PTD */
+ unsigned int ptd;
+};
+
+/*
+ * struct vxe_dev - The struct containing encoder driver internal parameters.
+ */
+struct vxe_dev {
+ void *dev;
+ struct video_device *vfd;
+ struct v4l2_device ti_vxe_dev;
+ struct platform_device *plat_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct mutex *mutex;
+ int module_irq;
+ struct idr *streams;
+ void __iomem *reg_base;
+ void *topaz_dev_ctx;
+ struct vxe_drv_ctx drv_ctx;
+ /* dummy context for MMU mappings and allocations */
+ struct vxe_enc_ctx *ctx;
+ unsigned int num_pipes;
+
+ /* The variables defined below are used in RTOS only. */
+ /* Worker queue handle and its semaphore handle */
+ void *vxe_worker_queue_handle;
+ void *vxe_worker_queue_sem_handle;
+
+ /* On-chip memory pool for the above-MB params structures */
+ /* Only 2 instances (up to 1080p resolution) are supported, so they can share this pool */
+ void *ocm_ram_chunk[2]; /* each chunk is 32 KB */
+ void *ram_chunk_owner[2];
+};
+
+#define S_FMT_FLAG_OUT_RECV 0x1
+#define S_FMT_FLAG_CAP_RECV 0x2
+#define S_FMT_FLAG_STREAM_CREATED 0x4
+
+/*
+ * struct vxe_enc_q_data - contains queue data information
+ *
+ * @fmt: format info
+ * @width: frame width
+ * @height: frame height
+ * @bytesperline: bytes per line in memory
+ * @size_image: image size in memory
+ * @streaming: nonzero while streaming is enabled on this queue
+ */
+struct vxe_enc_q_data {
+ struct vxe_enc_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline[MAX_PLANES];
+ unsigned int size_image[MAX_PLANES];
+ unsigned char streaming;
+};
+
+#ifdef ENABLE_PROFILING
+struct enc_drv_latency {
+ unsigned int start_time;
+ unsigned int end_time;
+};
+#endif
+
+/*
+ * struct vxe_enc_ctx - The struct containing stream context parameters.
+ */
+struct vxe_enc_ctx {
+ struct v4l2_fh fh;
+ struct vxe_dev *dev;
+ void **enc_context;
+ void *topaz_str_context;
+ struct mutex *mutex;
+ unsigned char core_streaming;
+ struct img_enc_caps caps;
+ struct img_rc_params rc;
+ struct img_video_params vparams;
+ struct vxe_enc_q_data out_queue;
+ struct vxe_enc_q_data cap_queue;
+ struct mem_ctx *mem_ctx;
+ struct mmu_ctx *mmu_ctx;
+ /* list of open slots */
+ unsigned char s_fmt_flags;
+ struct h264_vui_params vui_params;
+ struct h264_crop_params crop_params;
+ struct h264_sequence_header_params sh_params;
+ unsigned char eos;
+ unsigned char flag_last;
+ unsigned int coded_packages_per_frame; /* How many slices per frame */
+ unsigned int available_coded_packages;
+ unsigned int available_source_frames;
+ unsigned int frames_encoding;
+ unsigned int frame_num;
+ unsigned int last_frame_num;
+
+ /* The variables below are used only in RTOS */
+ void *mm_return_resource; /* Placeholder for callback to the application */
+ void *stream_worker_queue_handle;
+ void *stream_worker_queue_sem_handle;
+ void *work;
+ struct vxe_enc_q_data q_data[2];
+
+ struct sg_table above_mb_params_sgt[2];
+
+#ifdef ENABLE_PROFILING
+ struct enc_drv_latency drv_lat;
+#endif
+};
+
+int vxe_init_mem(struct vxe_dev *vxe);
+void vxe_deinit_mem(struct vxe_dev *vxe);
+void vxe_create_ctx(struct vxe_dev *vxe, struct vxe_enc_ctx *ctx);
+int calculate_h264_level(unsigned int width, unsigned int height, unsigned int framerate,
+ unsigned char rc_enable, unsigned int bitrate,
+ unsigned char lossless,
+ enum sh_profile_type profile_type,
+ unsigned int max_num_ref_frames);
+enum sh_profile_type find_h264_profile(unsigned char lossless,
+ unsigned char h264_use_default_scaling_list,
+ unsigned int custom_quant_mask,
+ unsigned char h264_8x8_transform,
+ unsigned char enable_mvc,
+ unsigned int b_frame_count,
+ unsigned char interlaced,
+ unsigned char h264_cabac,
+ unsigned int weighted_prediction_mode,
+ unsigned int weighted_implicit_bi_pred);
+void vxe_fill_default_src_frame_params(struct vxe_buffer *buf);
+void vxe_fill_default_params(struct vxe_enc_ctx *ctx);
+unsigned int vxe_get_sizeimage(int w, int h, struct vxe_enc_fmt *fmt, unsigned char plane_id);
+unsigned int vxe_get_stride(int w, struct vxe_enc_fmt *fmt);
+
+#endif /* _VXE_ENC_H */
diff --git a/drivers/media/platform/vxe-vxd/encoder/vxe_public_regdefs.h b/drivers/media/platform/vxe-vxd/encoder/vxe_public_regdefs.h
new file mode 100644
index 000000000000..41fa841a8f55
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/vxe_public_regdefs.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * encoder public register definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef __VXE_PUBLIC_REGDEFS_H__
+#define __VXE_PUBLIC_REGDEFS_H__
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+
+/* Write to the register */
+#define VXE_WR_REG32(base, offs, val) \
+ (iowrite32((val), (void *)((offs) + (unsigned long)(base))))
+
+/* Read the register */
+#define VXE_RD_REG32(base, offs) \
+ (ioread32((void *)((base) + (offs))))
+
+#define VXE_POLL_REG32_ISEQ(base, offs, val, mask, cnt) \
+ (ioreg32_poll_iseq((unsigned long)(base) + (offs), val, mask, cnt))
+
+#define REG_BASE_HOST 0x00000000
+#define REG_OFFSET_TOPAZ_MTX 0x00000800
+#define REG_START_TOPAZ_MTX_HOST (REG_BASE_HOST + REG_OFFSET_TOPAZ_MTX)
+
+static inline int ioreg32_poll_iseq(unsigned long addr,
+ unsigned int req_val, unsigned int mask, unsigned int cnt)
+{
+ unsigned int count, val;
+ unsigned int res = 0;
+
+ /* Add 10 extra iterations for the initial high-frequency poll phase. */
+ cnt += 10;
+
+ /*
+ * High-frequency loop (designed for shorter hardware latency such as
+ * reset).
+ */
+ for (count = 0; count < cnt; count++) {
+ /* Read from the device */
+ val = ioread32((void *)addr);
+ val = (val & mask);
+
+ if (val == req_val) {
+ res = 0;
+ break;
+ }
+
+ /*
+ * Sleep to wait for hardware.
+ * Period is selected to allow for high-frequency polling
+ * (5us, e.g. reset) over the first 10 iterations, then
+ * reverting to a lower-frequency (100us, e.g. DMA) for the
+ * remainder.
+ */
+ if (count < 10)
+ usleep_range(5, 5);
+ else
+ usleep_range(100, 100);
+ }
+
+ if (res || count >= cnt) {
+ pr_info("Poll failed!\n");
+ res = -1;
+ }
+
+ return res;
+}
+
+/*
+ * DMAC configuration values:
+ */
+/* The maximum number of channels in the SoC */
+#define DMAC_MAX_CHANNELS (1)
+
+/* Register CR_TOPAZHP_CORE_REV */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_REV 0x03D0
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 8
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 16
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0xFF000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 24
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0
+
+/* Register CR_TOPAZHP_CORE_DES1 */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_DES1 0x03E0
+
+/* Register CR_MULTICORE_HW_CFG */
+#define TOPAZHP_TOP_CR_MULTICORE_HW_CFG 0x0058
+#define MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0x00000700
+#define SHIFT_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 8
+#define REGNUM_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0x00070000
+#define SHIFT_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 16
+#define REGNUM_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0x0F000000
+#define SHIFT_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 24
+#define REGNUM_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0
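+
+/*
+ * Every register in this file follows the same MASK_/SHIFT_ pattern;
+ * an illustrative field read (the variable names are ours):
+ *
+ *	cfg = VXE_RD_REG32(reg_base, TOPAZHP_TOP_CR_MULTICORE_HW_CFG);
+ *	num_cores = (cfg & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED) >>
+ *		    SHIFT_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED;
+ */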
+
+/* Register CR_MULTICORE_SRST */
+#define TOPAZHP_TOP_CR_MULTICORE_SRST 0x0000
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0
+
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 1
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0
+
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 2
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0
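+
+/*
+ * Illustrative reset sequence (a sketch, not code from this patch):
+ * pulse the MTX soft-reset bit, then use the poll helper above to
+ * wait for it to read back as zero.
+ *
+ *	VXE_WR_REG32(reg_base, TOPAZHP_TOP_CR_MULTICORE_SRST,
+ *		     MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET);
+ *	VXE_WR_REG32(reg_base, TOPAZHP_TOP_CR_MULTICORE_SRST, 0);
+ *	VXE_POLL_REG32_ISEQ(reg_base, TOPAZHP_TOP_CR_MULTICORE_SRST, 0,
+ *			    MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET,
+ *			    10);
+ */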
+
+/* Register CR_MULTICORE_INT_STAT */
+#define TOPAZHP_TOP_CR_MULTICORE_INT_STAT 0x0004
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_DMAC 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0x00000078
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 8
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 16
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT_B 0x1E000000
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT_B 25
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT_B 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT_B 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 30
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 31
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0
+
+/* Register CR_MULTICORE_HOST_INT_ENAB */
+#define TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB 0x000C
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0x00000078
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 8
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 16
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT_B 0x1E000000
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT_B 25
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT_B 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT_B 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 31
+#define REGNUM_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0
+
+/* Register CR_MULTICORE_INT_CLEAR */
+#define TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR 0x0010
+#define MASK_TOPAZHP_TOP_CR_INTCLR_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_DMAC 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MTX 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0x00000078
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT_B 0x1E000000
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT_B 25
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT_B 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT_B 0
+
+/* Register CR_TOPAZ_CMD_FIFO_FLUSH */
+#define TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_FLUSH 0x0078
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0x0078
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0
+
+/* Register CR_MULTICORE_CMD_FIFO_WRITE */
+#define TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE 0x0060
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0x0060
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0
+
+/* Register CR_MULTICORE_CMD_FIFO_WRITE_SPACE */
+#define TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE 0x0064
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0x0064
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0
+
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0x00000100
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_FULL 8
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0x0064
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0
+
+/* Register CR_MULTICORE_IDLE_PWR_MAN */
+#define TOPAZHP_TOP_CR_MULTICORE_IDLE_PWR_MAN 0x0118
+#define MASK_TOPAZHP_TOP_CR_TOPAZ_IDLE_DISABLE 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZ_IDLE_DISABLE 0
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZ_IDLE_DISABLE 0x0118
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZ_IDLE_DISABLE 0
+
+/* Register CR_FIRMWARE_REG_1 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_1 0x0100
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0x0100
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0
+
+/* Register CR_FIRMWARE_REG_2 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_2 0x0104
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_2 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_2 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_2 0x0104
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_2 0
+
+/* Register CR_FIRMWARE_REG_3 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_3 0x0108
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_3 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_3 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_3 0x0108
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_3 0
+
+/* Register CR_FIRMWARE_REG_4 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_4 0x0300
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0x0300
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0
+
+/* Register CR_FIRMWARE_REG_5 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_5 0x0304
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_5 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_5 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_5 0x0304
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_5 0
+
+/* Register CR_FIRMWARE_REG_6 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_6 0x0308
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_6 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_6 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_6 0x0308
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_6 0
+
+/* Register CR_FIRMWARE_REG_7 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_7 0x030C
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_7 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_7 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_7 0x030C
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_7 0
+
+/* Register CR_MTX_DEBUG_MSTR */
+#define TOPAZHP_TOP_CR_MTX_DEBUG_MSTR 0x0044
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0x00000003
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 2
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0x00000018
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 3
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0x00000F00
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 8
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0x000F0000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 16
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0x0F000000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 24
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0
+
+/* Register CR_MULTICORE_CORE_SEL_0 */
+#define TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0 0x0050
+#define MASK_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0x00000007
+#define SHIFT_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0
+#define REGNUM_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0
+
+#define MASK_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_WRITES_MTX_ALL 30
+#define REGNUM_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0
+
+#define MASK_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_WRITES_CORE_ALL 31
+#define REGNUM_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0
+
+/* Register CR_TOPAZHP_AUTO_CLOCK_GATING */
+#define TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING 0x0024
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0x00000200
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 9
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0x00000400
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 10
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0x00000800
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 11
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0x00001000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 12
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0x00002000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 13
+#define REGNUM_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_AUTO_CLK_GATE 0x00008000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_AUTO_CLK_GATE 15
+#define REGNUM_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_AUTO_CLK_GATE 0
+
+/* Register CR_TOPAZHP_MAN_CLOCK_GATING */
+#define TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING 0x0028
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0x00000100
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 8
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0x00000200
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 9
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0x00000400
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 10
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0x00000800
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 11
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0x00001000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 12
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0x00002000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 13
+#define REGNUM_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0x00004000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 14
+#define REGNUM_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_MAN_CLK_GATE 0x00008000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_MAN_CLK_GATE 15
+#define REGNUM_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_INPUT_SCALER_MAN_CLK_GATE 0
+
+/* Register CR_TOPAZHP_SRST */
+#define TOPAZHP_CR_TOPAZHP_SRST 0x0000
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0x00000100
+#define SHIFT_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 8
+#define REGNUM_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SCALER_SOFT_RESET 0x00000200
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SCALER_SOFT_RESET 9
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SCALER_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SCALER_SOFT_RESET 0
+
+/* Register CR_MMU_STATUS */
+#define TOPAZHP_TOP_CR_MMU_STATUS 0x001C
+#define MASK_TOPAZHP_TOP_CR_MMU_PF_N_RW 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PF_N_RW 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PF_N_RW 0x001C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PF_N_RW 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0xFFFFF000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 12
+#define REGNUM_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0x001C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0
+
+/* Register CR_MMU_MEM_REQ */
+#define TOPAZHP_TOP_CR_MMU_MEM_REQ 0x0020
+#define MASK_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0
+#define REGNUM_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0x0020
+#define SIGNED_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0
+
+/* Register CR_MMU_CONTROL0 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL0 0x0024
+#define MASK_TOPAZHP_TOP_CR_MMU_NOREORDER 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_NOREORDER 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_NOREORDER 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_NOREORDER 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_PAUSE 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PAUSE 1
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PAUSE 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PAUSE 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_FLUSH 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_MMU_FLUSH 2
+#define REGNUM_TOPAZHP_TOP_CR_MMU_FLUSH 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_FLUSH 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_INVALDC 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_MMU_INVALDC 3
+#define REGNUM_TOPAZHP_TOP_CR_MMU_INVALDC 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_INVALDC 0
+
+#define MASK_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0x00000700
+#define SHIFT_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 8
+#define REGNUM_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0x00010000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 16
+#define REGNUM_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0
+
+/* Register CR_MMU_CONTROL1 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL1 0x0028
+#define MASK_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_ADT_TTE 0x000FF000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_ADT_TTE 12
+#define REGNUM_TOPAZHP_TOP_CR_MMU_ADT_TTE 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_ADT_TTE 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0x0FF00000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_BEST_COUNT 20
+#define REGNUM_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0xF0000000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 28
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0
+
+/* Register CR_MMU_CONTROL2 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL2 0x002C
+#define MASK_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0x002C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 3
+#define REGNUM_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0x002C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0
+
+/* Table MMU_DIR_LIST_BASE */
+
+/* Register CR_MMU_DIR_LIST_BASE */
+#define TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFFFF0
+#define SHIFT_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 4
+#define REGNUM_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
+#define SIGNED_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0
+
+/* Number of entries in table MMU_DIR_LIST_BASE */
+
+#define TOPAZHP_TOP_MMU_DIR_LIST_BASE_SIZE_UINT32 1
+#define TOPAZHP_TOP_MMU_DIR_LIST_BASE_NUM_ENTRIES 1
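+
+/*
+ * Illustrative only: programming directory-list entry 0, assuming
+ * 'ptd' holds a suitably aligned device address (e.g. the ptd field
+ * of struct vxe_drv_ctx):
+ *
+ *	VXE_WR_REG32(reg_base, TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0),
+ *		     ptd & MASK_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR);
+ */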
+
+/* Table MMU_TILE */
+
+/* Register CR_MMU_TILE */
+#define TOPAZHP_TOP_CR_MMU_TILE(X) (0x0038 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0x00FFF000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR 12
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_STRIDE 0x07000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_STRIDE 24
+#define REGNUM_TOPAZHP_TOP_CR_TILE_STRIDE 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_STRIDE 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_ENABLE 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_ENABLE 28
+#define REGNUM_TOPAZHP_TOP_CR_TILE_ENABLE 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_ENABLE 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_128BYTE_INTERLEAVE 0x20000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_128BYTE_INTERLEAVE 29
+#define REGNUM_TOPAZHP_TOP_CR_TILE_128BYTE_INTERLEAVE 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_128BYTE_INTERLEAVE 0
+
+/* Number of entries in table MMU_TILE */
+
+#define TOPAZHP_TOP_MMU_TILE_SIZE_UINT32 2
+#define TOPAZHP_TOP_MMU_TILE_NUM_ENTRIES 2
+
+/* Table MMU_TILE_EXT */
+
+/* Register CR_MMU_TILE_EXT */
+#define TOPAZHP_TOP_CR_MMU_TILE_EXT(X) (0x0080 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 8
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0
+
+/* Number of entries in table MMU_TILE_EXT */
+
+#define TOPAZHP_TOP_MMU_TILE_EXT_SIZE_UINT32 2
+#define TOPAZHP_TOP_MMU_TILE_EXT_NUM_ENTRIES 2
+
+#define TOPAZHP_CR_PROC_ESB_ACCESS_WORD0 0x00F0
+
+/* Register CR_PROC_ESB_ACCESS_CONTROL */
+#define TOPAZHP_CR_PROC_ESB_ACCESS_CONTROL 0x00EC
+#define MASK_TOPAZHP_CR_PROC_ESB_ADDR 0x00003FF0
+#define SHIFT_TOPAZHP_CR_PROC_ESB_ADDR 4
+#define REGNUM_TOPAZHP_CR_PROC_ESB_ADDR 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_ADDR 0
+
+#define MASK_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0x00010000
+#define SHIFT_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 16
+#define REGNUM_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0
+
+#define MASK_TOPAZHP_CR_PROC_ESB_OP_VALID 0x00020000
+#define SHIFT_TOPAZHP_CR_PROC_ESB_OP_VALID 17
+#define REGNUM_TOPAZHP_CR_PROC_ESB_OP_VALID 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_OP_VALID 0
+
+#define MASK_TOPAZHP_CR_PROC_ACCESS_FLAG 0x03000000
+#define SHIFT_TOPAZHP_CR_PROC_ACCESS_FLAG 24
+#define REGNUM_TOPAZHP_CR_PROC_ACCESS_FLAG 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ACCESS_FLAG 0
+
+/* Register CR_SECURE_CONFIG */
+#define TOPAZHP_TOP_CR_SECURE_CONFIG 0x0200
+
+/* Register CR_VLC_MPEG4_CFG */
+#define TOPAZ_VLC_CR_VLC_MPEG4_CFG 0x0064
+#define MASK_TOPAZ_VLC_CR_RSIZE 0x00000007
+#define SHIFT_TOPAZ_VLC_CR_RSIZE 0
+#define REGNUM_TOPAZ_VLC_CR_RSIZE 0x0064
+#define SIGNED_TOPAZ_VLC_CR_RSIZE 0
+
+/* RC Config registers and tables */
+#define TOPAZHP_TOP_CR_RC_CONFIG_TABLE7 0x012C
+#define TOPAZHP_TOP_CR_RC_CONFIG_TABLE6 0x0124
+#define TOPAZHP_TOP_CR_RC_CONFIG_TABLE4 0x0128
+#define TOPAZHP_TOP_CR_RC_CONFIG_TABLE5 0x0130
+#define TOPAZHP_CR_RC_CONFIG_REG8 0x0344
+#define TOPAZHP_CR_RC_CONFIG_REG9 0x0184
+#define TOPAZHP_CR_JMCOMP_RC_STATS 0x0340
+
+/* Register CR_TOPAZHP_CORE_DES1 */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_DES1 0x03E0
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0x00000080
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 7
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0x00000100
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 8
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0x00000200
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 9
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0x00000400
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 10
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0x00000800
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 11
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0x00001000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 12
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0x00002000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 13
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0x00004000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 14
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0x00008000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 15
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0x00010000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 16
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0x00020000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 17
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0x00040000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 18
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0x00080000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 19
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0x00100000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 20
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0x00200000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 21
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0x00400000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 22
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0x00800000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 23
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0x01000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 24
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0x02000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 25
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0x04000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 26
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0x08000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 27
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 28
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0x20000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 29
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 30
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 31
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0
+#endif
diff --git a/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.c b/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.c
new file mode 100644
index 000000000000..e3b32588ba05
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG Encoder v4l2 Driver Interface function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ * David Huang <d-huang@ti.com>
+ *
+ * Re-written for upstreaming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/interrupt.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "fw_headers/vxe_common.h"
+#include "img_mem_man.h"
+#include "target_config.h"
+#include "topaz_device.h"
+#include "vxe_enc.h"
+#include "vxe_v4l2.h"
+#include "img_errors.h"
+
+#define IMG_VXE_ENC_MODULE_NAME "vxe-enc"
+
+static struct heap_config vxe_enc_heap_configs[] = {
+ {
+ .type = MEM_HEAP_TYPE_UNIFIED,
+ .options.unified = {
+ .gfp_type = __GFP_DMA32 | __GFP_ZERO,
+ },
+ .to_dev_addr = NULL,
+ },
+};
+
+static struct vxe_enc_fmt vxe_enc_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .fmt = IMG_CODEC_420_PL12,
+ .min_bufs = 2,
+ .size_num[0] = 3,
+ .size_den[0] = 2,
+ .bytes_pp = 1,
+ .csc_preset = IMG_CSC_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .fmt = IMG_CODEC_ABCX,
+ .min_bufs = 2,
+ .size_num[0] = 1,
+ .size_den[0] = 1,
+ .bytes_pp = 4,
+ .csc_preset = IMG_CSC_RGB_TO_601_ANALOG,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .std = IMG_STANDARD_H264,
+ .min_bufs = 1,
+ .size_num[0] = 1,
+ .size_den[0] = 1,
+ .bytes_pp = 1,
+ .csc_preset = IMG_CSC_NONE,
+ },
+};
+
+/* Note: Arrange in order of ascending CID # to simplify QUERYCTRL */
+static struct vxe_ctrl controls[] = {
+ {
+ /*
+ * idr_period
+ *
+ * Period between IDR frames. Default to 60 * framerate.
+ * Since default framerate is 30fps, default to 1800 frames
+ * between IDR frames. An IDR frame is a special H.264 I-frame:
+ * no frame after it may reference any frame before it.
+ *
+ * This period is in number of frames.
+ * ex. Default: 1800
+ * Every 1800 frames is an IDR frame. At 30fps this means there
+ * is an IDR frame every 60 seconds.
+ */
+ .cid = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "GOP size",
+ .minimum = 1,
+ .maximum = 7200,
+ .step = 1,
+ .default_value = 1800,
+ .compound = FALSE,
+ },
+ {
+ /*
+ * bits_per_second
+ *
+ * Bits per second for the encode. This will be the final
+ * bitrate of the encoded stream. Warning: setting this too
+ * low results in severe quality loss and choppy output.
+ *
+ * This is specified in bits per second
+ */
+ .cid = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Video Bitrate",
+ .minimum = 50000,
+ .maximum = 100000000,
+ .step = 1,
+ .default_value = 500000,
+ .compound = FALSE,
+ },
+ {
+ /*
+ * intra_freq
+ *
+ * Period between I-frames. I-frames are complete frames that
+ * do not need to reference any other frames to decode. Named
+ * intra_freq instead of intra_period due to naming in
+ * underlying topaz_api layers.
+ *
+ * Despite its name, this value is the period between I-frames.
+ * ex. Default: 30
+ * This means there is an I-frame every 30 frames. At 30fps
+ * this would mean one I-frame every second.
+ */
+ .cid = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H264 I period",
+ .minimum = 1,
+ .maximum = 600,
+ .step = 1,
+ .default_value = 30,
+ .compound = FALSE,
+ },
+};
+
+static struct v4l2_fract frmivals[] = {
+ {
+ .numerator = 1,
+ .denominator = 960,
+ },
+ {
+ .numerator = 1,
+ .denominator = 1,
+ },
+};
+
+static struct vxe_enc_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct vxe_enc_ctx, fh);
+}
+
+static void vxe_eos(struct vxe_enc_ctx *ctx)
+{
+ struct v4l2_event event = {};
+ struct vb2_v4l2_buffer *vb;
+
+ event.type = V4L2_EVENT_EOS;
+ v4l2_event_queue_fh(&ctx->fh, &event);
+ /*
+ * If a capture buffer is available, dequeue with FLAG_LAST
+ * else, mark for next qbuf to handle
+ */
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ vb->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_set_plane_payload(&vb->vb2_buf, 0, 0);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ } else {
+ ctx->flag_last = TRUE;
+ }
+
+ topaz_flush_stream(ctx->topaz_str_context, ctx->last_frame_num);
+}
+
+static void vxe_return_resource(void *ctx_handle, enum vxe_cb_type type,
+ void *img_buf_ref, unsigned int size,
+ unsigned int coded_frm_cnt)
+{
+ struct vxe_enc_ctx *ctx = ctx_handle;
+ struct device *dev = ctx->dev->dev;
+ struct vxe_buffer *buf;
+#ifdef ENABLE_PROFILING
+ struct timespec64 time;
+#endif
+
+ switch (type) {
+ case VXE_CB_CODED_BUFF_READY:
+ if (!img_buf_ref)
+ dev_err(dev, "VXE_CB_STRUNIT_PROCESSED had no buffer\n");
+
+ buf = container_of((struct img_coded_buffer *)img_buf_ref,
+ struct vxe_buffer, coded_buffer);
+ vb2_set_plane_payload(&buf->buffer.vb.vb2_buf, 0, size);
+#ifdef ENABLE_PROFILING
+ ktime_get_real_ts64(&time);
+ ctx->drv_lat.end_time = timespec64_to_ns((const struct timespec64 *)&time);
+
+ pr_err("driver encode time is %llu us\n", div_s64(ctx->drv_lat.end_time -
+ ctx->drv_lat.start_time, 1000));
+#endif
+
+ v4l2_m2m_buf_done(&buf->buffer.vb, VB2_BUF_STATE_DONE);
+
+ if ((coded_frm_cnt == ctx->last_frame_num) && (coded_frm_cnt != 0)) {
+ vxe_eos(ctx);
+ ctx->eos = TRUE;
+ }
+ if (ctx->frames_encoding < 2)
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ break;
+ case VXE_CB_SRC_FRAME_RELEASE:
+		if (!img_buf_ref) {
+			dev_err(dev, "VXE_CB_SRC_FRAME_RELEASE had no buffer\n");
+			break;
+		}
+
+ buf = container_of((struct img_frame *)img_buf_ref, struct vxe_buffer, src_frame);
+ vb2_set_plane_payload(&buf->buffer.vb.vb2_buf, 0, size);
+ v4l2_m2m_buf_done(&buf->buffer.vb, VB2_BUF_STATE_DONE);
+ ctx->frames_encoding--;
+ break;
+ case VXE_CB_ERROR_FATAL:
+ break;
+ default:
+ break;
+ }
+}
+
+static void device_run(void *priv)
+{
+ struct vxe_enc_ctx *ctx = priv;
+ struct device *dev = ctx->dev->dev;
+ struct vb2_v4l2_buffer *dst_vbuf, *src_vbuf;
+ struct vxe_buffer *buf;
+ int ret = 0;
+#ifdef ENABLE_PROFILING
+ struct timespec64 time;
+#endif
+
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXE_V4L2);
+ while (((topaz_query_empty_coded_slots(ctx->topaz_str_context) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0)) &&
+ ((topaz_query_empty_source_slots(ctx->topaz_str_context) > 0) &&
+ (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0))) {
+#ifdef ENABLE_PROFILING
+ ktime_get_real_ts64(&time);
+ ctx->drv_lat.start_time = timespec64_to_ns((const struct timespec64 *)&time);
+#endif
+ /*
+ * Submit src and dst buffers one to one
+ * Note: Will have to revisit for B frame support
+ */
+		dst_vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+		if (!dst_vbuf)
+			dev_err(dev, "Next dst buffer is null\n");
+
+ src_vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!src_vbuf)
+ dev_err(dev, "Next src buffer is null\n");
+
+ /* Handle EOS */
+ if (ctx->eos && (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) == 0)) {
+ pr_debug("%s eos found\n", __func__);
+ ret = topaz_end_of_stream(ctx->topaz_str_context, ctx->frame_num + 1);
+ if (ret)
+ dev_err(dev, "Failed to send EOS to topaz %d\n",
+ ret);
+ ctx->last_frame_num = ctx->frame_num + 1;
+ }
+
+ /* Submit coded package */
+ buf = container_of(dst_vbuf, struct vxe_buffer, buffer.vb);
+ ret = topaz_reserve_coded_package_slot(ctx->topaz_str_context);
+ if (ret)
+ dev_err(dev, "Failed to reserve coded package slot %d\n", ret);
+ ret = topaz_send_coded_package(ctx->topaz_str_context, &buf->coded_buffer);
+ if (ret)
+ dev_err(dev, "Failed to send coded package %d\n",
+ ret);
+ if (!ret)
+ ctx->available_coded_packages++;
+
+ /* Submit source frame */
+ buf = container_of(src_vbuf, struct vxe_buffer, buffer.vb);
+ ret = topaz_reserve_source_slot(ctx->topaz_str_context, &buf->src_slot_num);
+ if (ret)
+ dev_err(dev, "Failed to reserve source slot %d\n",
+ ret);
+ ret = topaz_send_source_frame(ctx->topaz_str_context, &buf->src_frame,
+ ctx->frame_num, (unsigned long)ctx);
+ if (ret)
+ dev_err(dev, "Failed to send source frame %d\n",
+ ret);
+ ctx->frame_num++;
+ if (!ret)
+ ctx->available_source_frames++;
+ }
+
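+	/*
+	 * Phase two: each source/coded pair queued above is handed to the
+	 * firmware; one encode command consumes one of each.
+	 */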
+ while ((ctx->available_source_frames > 0) && (ctx->available_coded_packages > 0)) {
+ pr_debug("Calling topaz_encode_frame #src=%d #coded=%d frames_encoding=%d\n",
+ ctx->available_source_frames,
+ ctx->available_coded_packages,
+ ctx->frames_encoding);
+ ret = topaz_encode_frame(ctx->topaz_str_context);
+ if (ret) {
+ dev_err(dev, "Failed to send encode_frame command %d\n",
+ ret);
+ } else {
+ /* TODO: Account for scenarios where these are not 1 */
+ ctx->available_source_frames--;
+ ctx->available_coded_packages--;
+ ctx->frames_encoding++;
+ }
+ }
+
+ mutex_unlock((struct mutex *)ctx->mutex);
+}
+
+static int job_ready(void *priv)
+{
+ struct vxe_enc_ctx *ctx = priv;
+
+ /*
+ * In normal play, check if we can
+ * submit any source or coded buffers
+ */
+ if (((topaz_query_empty_source_slots(ctx->topaz_str_context) > 0) &&
+ (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0)) &&
+ ((topaz_query_empty_coded_slots(ctx->topaz_str_context) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0)) && ctx->core_streaming)
+ return 1;
+
+ /*
+ * In EOS state, we only need to know
+ * that coded buffers are available
+ */
+ if (ctx->eos && (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0) &&
+ (topaz_query_empty_coded_slots(ctx->topaz_str_context) > 0) && ctx->core_streaming)
+ return 1;
+
+ /*
+ * Since we're allowing device_run for both submissions and actual
+ * encodes, say job ready if buffers are ready in fw
+ */
+ if (ctx->available_source_frames > 0 && ctx->available_coded_packages > 0
+ && ctx->core_streaming)
+ return 1;
+
+ return 0;
+}
+
+static void job_abort(void *priv)
+{
+ /* TODO: stub */
+ struct vxe_enc_ctx *ctx = priv;
+
+ ctx->core_streaming = FALSE;
+}
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static struct vxe_enc_q_data *get_queue(struct vxe_enc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &ctx->out_queue;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return &ctx->cap_queue;
+ default:
+ return NULL;
+ }
+}
+
+static int vxe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ int i;
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vxe_enc_q_data *queue;
+
+ queue = get_queue(ctx, vq->type);
+ if (!queue)
+ return -EINVAL;
+
+ if (*nplanes) {
+ /* This is being called from CREATEBUFS, perform validation */
+ if (*nplanes != queue->fmt->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] != queue->size_image[i])
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ *nplanes = queue->fmt->num_planes;
+
+ if (V4L2_TYPE_IS_OUTPUT(queue->fmt->type)) {
+ *nbuffers = max(*nbuffers, queue->fmt->min_bufs);
+ } else {
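+		/*
+		 * CAPTURE: let the firmware size the coded-buffer pool and
+		 * the worst-case coded buffer for this resolution and
+		 * rate-control setup.
+		 */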
+ *nbuffers = topaz_get_coded_package_max_num(ctx->topaz_str_context,
+ queue->fmt->std,
+ queue->width,
+ queue->height,
+ &ctx->rc);
+ for (i = 0; i < *nplanes; i++) {
+ queue->size_image[i] =
+ topaz_get_coded_buffer_max_size(ctx->topaz_str_context,
+ queue->fmt->std,
+ queue->width,
+ queue->height,
+ &ctx->rc);
+ }
+ }
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = queue->size_image[i];
+
+ return 0;
+}
+
+static int vxe_buf_init(struct vb2_buffer *vb)
+{
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = ctx->dev->dev;
+ struct vxe_enc_q_data *queue;
+ void *sgt;
+ int i, num_planes, ret;
+ struct vxe_buffer *buf = container_of(vb, struct vxe_buffer,
+ buffer.vb.vb2_buf);
+
+ queue = get_queue(ctx, vb->vb2_queue->type);
+ if (!queue) {
+ dev_err(dev, "Invalid queue type %d\n",
+ vb->vb2_queue->type);
+ return -EINVAL;
+ }
+
+ num_planes = queue->fmt->num_planes;
+
+ for (i = 0; i < num_planes; i++) {
+ if (vb2_plane_size(vb, i) < queue->size_image[i]) {
+ dev_err(dev, "data will not fit into plane(%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long)queue->size_image[i]);
+ return -EINVAL;
+ }
+ }
+
+ buf->buf_info.cpu_virt = vb2_plane_vaddr(vb, 0);
+ buf->buf_info.buf_size = vb2_plane_size(vb, 0);
+
+ sgt = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sgt) {
+ dev_err(dev, "Could not get sg_table from plane 0\n");
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->type)) {
+ ret = topaz_stream_map_buf_sg(ctx->topaz_str_context, VENC_BUFTYPE_PICTURE,
+ &buf->buf_info, sgt);
+ if (ret) {
+ dev_err(dev, "OUTPUT core_stream_map_buf_sg failed\n");
+ return ret;
+ }
+ pr_debug("Picture buffer mapped successfully, buf_id[%d], dev_virt[%x]\n",
+ buf->buf_info.buff_id, buf->buf_info.dev_virt);
+
+ vxe_fill_default_src_frame_params(buf);
+
+ buf->y_buffer.mem_info = buf->buf_info;
+ buf->y_buffer.lock = BUFFER_FREE;
+ buf->y_buffer.size = 0; /* IMG has 0 */
+ buf->y_buffer.bytes_written = 0;
+
+ /* TODO Fill U/V img buffers if necessary */
+ buf->src_frame.y_plane_buffer = &buf->y_buffer;
+ buf->src_frame.u_plane_buffer = NULL;
+ buf->src_frame.v_plane_buffer = NULL;
+ buf->src_frame.y_component_offset = 0;
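+		/*
+		 * U and V share one component offset (the chroma plane
+		 * starts right after Y); this assumes an interleaved-UV
+		 * source layout such as NV12.
+		 */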
+ buf->src_frame.u_component_offset = queue->bytesperline[0] * queue->height;
+ buf->src_frame.v_component_offset = queue->bytesperline[0] * queue->height;
+
+ buf->src_frame.width_bytes = queue->bytesperline[0];
+ buf->src_frame.height = queue->height;
+ buf->src_frame.src_y_stride_bytes = queue->bytesperline[0];
+ buf->src_frame.src_uv_stride_bytes = queue->bytesperline[0];
+ } else {
+ ret = topaz_stream_map_buf_sg(ctx->topaz_str_context,
+ VENC_BUFTYPE_BITSTREAM,
+ &buf->buf_info, sgt);
+ if (ret) {
+ dev_err(dev, "CAPTURE core_stream_map_buf_sg failed\n");
+ return ret;
+ }
+ pr_debug("Bit-stream buffer mapped successfully, buf_id[%d], dev_virt[%x]\n",
+ buf->buf_info.buff_id, buf->buf_info.dev_virt);
+
+ buf->coded_buffer.mem_info = buf->buf_info;
+ buf->coded_buffer.lock = BUFFER_FREE;
+ buf->coded_buffer.size = vb2_plane_size(vb, 0);
+ buf->coded_buffer.bytes_written = 0;
+ }
+
+ return 0;
+}
+
+static int vxe_buf_prepare(struct vb2_buffer *vb)
+{
+#ifdef DEBUG_ENCODER_DRIVER
+ int i;
+ struct vxe_buffer *buf = container_of(vb, struct vxe_buffer,
+ buffer.vb.vb2_buf);
+
+ pr_info("%s printing contents of buffer %d at 0x%p\n",
+ __func__, vb->index, buf->buf_info.cpu_virt);
+ for (i = 0; i < 1536; i = i + 8) {
+ pr_info("[%d] 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n",
+ ((i + 1) / 8),
+ ((char *)buf->buf_info.cpu_virt)[i + 0],
+ ((char *)buf->buf_info.cpu_virt)[i + 1],
+ ((char *)buf->buf_info.cpu_virt)[i + 2],
+ ((char *)buf->buf_info.cpu_virt)[i + 3],
+ ((char *)buf->buf_info.cpu_virt)[i + 4],
+ ((char *)buf->buf_info.cpu_virt)[i + 5],
+ ((char *)buf->buf_info.cpu_virt)[i + 6],
+ ((char *)buf->buf_info.cpu_virt)[i + 7]);
+ }
+#endif
+ return 0;
+}
+
+static void vxe_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXE_V4L2);
+ if (ctx->flag_last && (!V4L2_TYPE_IS_OUTPUT(vb->type))) {
+ /*
+ * If EOS came and we did not have a buffer ready
+ * to service it, service now that we have a buffer
+ */
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, 0);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ } else {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+ mutex_unlock((struct mutex *)ctx->mutex);
+}
+
+static void vxe_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vxe_buffer *buf = container_of(vb, struct vxe_buffer,
+ buffer.vb.vb2_buf);
+
+ pr_debug("%s Unmapping buffer %d\n", __func__, buf->index);
+ topaz_stream_unmap_buf_sg(ctx->topaz_str_context, &buf->buf_info);
+}
+
+static int vxe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vq);
+	struct vxe_enc_q_data *queue;
+
+	ctx->core_streaming = TRUE;
+
+ queue = get_queue(ctx, vq->type);
+ queue->streaming = TRUE;
+
+ return 0;
+}
+
+static void vxe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vxe_enc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ struct vb2_v4l2_buffer *vb;
+ struct vxe_enc_q_data *queue;
+
+ queue = get_queue(ctx, vq->type);
+ /* Unmap all buffers in v4l2 from mmu */
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXE_V4L2);
+ ctx->core_streaming = FALSE;
+ if (!V4L2_TYPE_IS_OUTPUT(queue->fmt->type)) {
+ while (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vb)
+ dev_err(dev, "Next dst buffer is null\n");
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+			if (!vb)
+				dev_err(dev, "Next src buffer is null\n");
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ }
+ }
+ mutex_unlock(ctx->mutex);
+}
+
+static const struct vb2_ops vxe_video_ops = {
+ .queue_setup = vxe_queue_setup,
+ .buf_init = vxe_buf_init,
+ .buf_prepare = vxe_buf_prepare,
+ .buf_queue = vxe_buf_queue,
+ .buf_cleanup = vxe_buf_cleanup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vxe_start_streaming,
+ .stop_streaming = vxe_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct vxe_enc_ctx *ctx = priv;
+ struct vxe_dev *vxe = ctx->dev;
+ int ret = 0;
+
+ /* src_vq */
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct vxe_buffer);
+ src_vq->ops = &vxe_video_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = vxe->mutex;
+ src_vq->dev = vxe->ti_vxe_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ /* dst_vq */
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct vxe_buffer);
+ dst_vq->ops = &vxe_video_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = vxe->mutex;
+ dst_vq->dev = vxe->ti_vxe_dev.dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vxe_open(struct file *file)
+{
+ struct vxe_dev *vxe = video_drvdata(file);
+ struct vxe_enc_ctx *ctx;
+ int i, ret = 0;
+
+ dev_dbg(vxe->dev, "%s:%d vxe %p\n", __func__, __LINE__, vxe);
+
+ mutex_lock((struct mutex *)vxe->mutex);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock((struct mutex *)vxe->mutex);
+ return -ENOMEM;
+ }
+
+	ctx->mutex = kzalloc(sizeof(*ctx->mutex), GFP_KERNEL);
+	if (!ctx->mutex) {
+		kfree(ctx);
+		mutex_unlock((struct mutex *)vxe->mutex);
+		return -ENOMEM;
+	}
+
+ mutex_init(ctx->mutex);
+
+ ctx->dev = vxe;
+ ctx->s_fmt_flags = 0;
+ ctx->eos = FALSE;
+ ctx->flag_last = FALSE;
+ ctx->available_coded_packages = 0;
+ ctx->available_source_frames = 0;
+ ctx->frames_encoding = 0;
+ ctx->frame_num = 0;
+ ctx->out_queue.streaming = FALSE;
+ ctx->cap_queue.streaming = FALSE;
+
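+	/* Default to the first listed format of each queue type */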
+ for (i = 0; i < ARRAY_SIZE(vxe_enc_formats); i++) {
+ if (vxe_enc_formats[i].type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ctx->out_queue.fmt = &vxe_enc_formats[i];
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(vxe_enc_formats); i++) {
+ if (vxe_enc_formats[i].type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ctx->cap_queue.fmt = &vxe_enc_formats[i];
+ break;
+ }
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vxe->m2m_dev, ctx, &queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		goto exit;
+	}
+
+ vxe_fill_default_params(ctx);
+
+ v4l2_fh_add(&ctx->fh);
+
+ vxe_create_ctx(vxe, ctx);
+
+ /* TODO: Add stream id creation */
+exit:
+ mutex_unlock((struct mutex *)vxe->mutex);
+ return ret;
+}
+
+static int vxe_release(struct file *file)
+{
+ struct vxe_dev *vxe = video_drvdata(file);
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ /* TODO Need correct API */
+
+ mutex_lock((struct mutex *)vxe->mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ if (ctx->s_fmt_flags & S_FMT_FLAG_STREAM_CREATED)
+ topaz_stream_destroy(ctx->topaz_str_context);
+ ctx->topaz_str_context = NULL;
+
+ mutex_destroy(ctx->mutex);
+ kfree(ctx->mutex);
+ ctx->mutex = NULL;
+ kfree(ctx);
+
+ mutex_unlock((struct mutex *)vxe->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vxe_enc_fops = {
+ .owner = THIS_MODULE,
+ .open = vxe_open,
+ .release = vxe_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int vxe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, IMG_VXE_ENC_MODULE_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, IMG_VXE_ENC_MODULE_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", IMG_VXE_ENC_MODULE_NAME);
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static struct vxe_enc_fmt *find_format(struct v4l2_format *f)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vxe_enc_formats); ++i) {
+ if (vxe_enc_formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ vxe_enc_formats[i].type == f->type)
+ return &vxe_enc_formats[i];
+ }
+ return NULL;
+}
+
+static int vxe_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
+{
+ int i, index = 0;
+ struct vxe_enc_fmt *fmt = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(vxe_enc_formats); ++i) {
+ if (vxe_enc_formats[i].type == f->type) {
+ if (index == f->index) {
+ fmt = &vxe_enc_formats[i];
+ break;
+ }
+ index++;
+ }
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vxe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct vxe_enc_q_data *queue;
+ int i;
+
+ pix_mp = &f->fmt.pix_mp;
+
+ queue = get_queue(ctx, f->type);
+ if (!queue)
+ return -EINVAL;
+
+ pix_mp->width = queue->width;
+ pix_mp->height = queue->height;
+ pix_mp->pixelformat = queue->fmt->fourcc;
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ for (i = 0; i < queue->fmt->num_planes; i++) {
+ pix_mp->plane_fmt[i].sizeimage = queue->size_image[i];
+ pix_mp->plane_fmt[i].bytesperline = queue->bytesperline[i];
+ }
+ pix_mp->num_planes = queue->fmt->num_planes;
+
+ return 0;
+}
+
+static int vxe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct vxe_enc_fmt *fmt;
+ struct vxe_enc_q_data *queue;
+ int i;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = pix_mp->plane_fmt;
+ struct img_rc_params rc;
+
+ fmt = find_format(f);
+ if (!fmt)
+ return -EINVAL;
+
+ queue = get_queue(ctx, f->type);
+ if (!queue)
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ pix_mp->num_planes = fmt->num_planes;
+ pix_mp->flags = 0;
+ for (i = 0; i < fmt->num_planes; i++) {
+ plane_fmt[i].bytesperline = vxe_get_stride(pix_mp->width, fmt);
+ plane_fmt[i].sizeimage = vxe_get_sizeimage(plane_fmt[i].bytesperline,
+ pix_mp->height, fmt, i);
+ }
+ } else {
+ pix_mp->flags = 0;
+		/*
+		 * A worst-case estimate of sizeimage would be
+		 *   ALIGN(pix_mp->width, HW_ALIGN) *
+		 *   ALIGN(pix_mp->height, HW_ALIGN) * 2;
+		 * but query the firmware for the exact maximum instead.
+		 */
+ /* TODO: This is the only thing that matters here, make sure this is correct */
+ rc.initial_qp_i = 18;
+ plane_fmt[0].bytesperline = 0;
+ plane_fmt[0].sizeimage = topaz_get_coded_buffer_max_size(NULL, fmt->std,
+ pix_mp->width,
+ pix_mp->height,
+ &rc);
+ }
+
+ if (pix_mp->field == V4L2_FIELD_ANY)
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int vxe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct vxe_enc_fmt *fmt;
+ struct vxe_enc_q_data *queue;
+ int i, ret = 0;
+ unsigned int level_h264;
+ static int base_pipe;
+
+ ret = vxe_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ fmt = find_format(f);
+ if (!fmt)
+ return -EINVAL;
+
+ queue = get_queue(ctx, f->type);
+ if (!queue)
+ return -EINVAL;
+
+ pix_mp = &f->fmt.pix_mp;
+
+ queue->fmt = fmt;
+ queue->width = pix_mp->width;
+ queue->height = pix_mp->height;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ ctx->vparams.format = fmt->fmt;
+ ctx->vparams.source_width = pix_mp->width;
+ ctx->vparams.source_frame_height = pix_mp->height;
+ ctx->vparams.csc_preset = fmt->csc_preset;
+ if (ctx->vparams.csc_preset != IMG_CSC_NONE)
+ ctx->vparams.enable_scaler = TRUE;
+
+ pr_debug("img_video_params: format=%d\n", ctx->vparams.format);
+ pr_debug("img_video_params: source_width=%d\n", ctx->vparams.source_width);
+ pr_debug("img_video_params: source_frame_height=%d\n",
+ ctx->vparams.source_frame_height);
+ pr_debug("img_video_params: csc_preset=%d\n", ctx->vparams.csc_preset);
+ pr_debug("img_video_params: enable_scaler=%s\n",
+ ctx->vparams.enable_scaler ? "true" : "false");
+
+ for (i = 0; i < fmt->num_planes; i++) {
+ queue->bytesperline[i] = vxe_get_stride(queue->width, fmt);
+ queue->size_image[i] = vxe_get_sizeimage(pix_mp->plane_fmt[i].bytesperline,
+ queue->height, fmt, i);
+ }
+
+ /* Rate Control parameters */
+ ctx->rc.transfer_bits_per_second = ctx->rc.bits_per_second;
+		ctx->rc.bu_size = -1414812757; /* Matches the (seemingly uninitialized) IMG reference value */
+ ctx->rc.buffer_size = ctx->rc.transfer_bits_per_second;
+
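+		/*
+		 * Start the hypothetical decoder buffer at 3/16 of its size,
+		 * rounded to a whole frame's worth of bits and clamped to at
+		 * least one frame, so early frames cannot underflow it.
+		 */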
+ ctx->rc.initial_level = (3 * ctx->rc.buffer_size) >> 4;
+ ctx->rc.initial_level = ((ctx->rc.initial_level +
+ ((ctx->rc.bits_per_second /
+ ctx->rc.frame_rate) / 2)) /
+ (ctx->rc.bits_per_second /
+ ctx->rc.frame_rate)) *
+ (ctx->rc.bits_per_second / ctx->rc.frame_rate);
+ ctx->rc.initial_level = max((unsigned int)ctx->rc.initial_level,
+ (unsigned int)(ctx->rc.bits_per_second /
+ ctx->rc.frame_rate));
+ ctx->rc.initial_delay = ctx->rc.buffer_size - ctx->rc.initial_level;
+ ctx->rc.bframes = 0;
+
+ pr_debug("img_rc_params: initial_level=%d\n", ctx->rc.initial_level);
+ pr_debug("img_rc_params: initial_delay=%d\n", ctx->rc.initial_delay);
+ /* TODO Figure out which lossless to use */
+ ctx->sh_params.profile = find_h264_profile
+ (FALSE,
+ ctx->vparams.use_default_scaling_list,
+ FALSE,
+ ctx->vparams.h264_8x8,
+ ctx->vparams.enable_mvc,
+ ctx->rc.bframes,
+ ctx->vparams.is_interlaced,
+ ctx->vparams.cabac_enabled,
+ ctx->vparams.weighted_prediction,
+ ctx->vparams.vp_weighted_implicit_bi_pred);
+ ctx->sh_params.max_num_ref_frames = 1; //TODO Need more logic
+
+ level_h264 = calculate_h264_level(pix_mp->width, pix_mp->height,
+ ctx->rc.frame_rate,
+ ctx->rc.rc_enable,
+ ctx->rc.bits_per_second,
+ /* TODO Figure out which lossless to use */
+ FALSE,
+ ctx->sh_params.profile,
+ ctx->sh_params.max_num_ref_frames);
+ pr_debug("level_h264=%d\n", level_h264);
+
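+		/* Clamp the vertical MV range according to the H.264 level tier */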
+ ctx->vparams.vert_mv_limit = 255;
+ if (level_h264 >= 110)
+ ctx->vparams.vert_mv_limit = 511;
+ if (level_h264 >= 210)
+ ctx->vparams.vert_mv_limit = 1023;
+ if (level_h264 >= 310)
+ ctx->vparams.vert_mv_limit = 2047;
+
+ if (level_h264 >= 300)
+ ctx->vparams.limit_num_vectors = TRUE;
+ else
+ ctx->vparams.limit_num_vectors = FALSE;
+
+ pr_debug("ctx->vparams.vert_mv_limit=%d\n", ctx->vparams.vert_mv_limit);
+ pr_debug("ctx->vparams.limit_num_vectors=%d\n", ctx->vparams.limit_num_vectors);
+
+ /* VUI parameters */
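+		/* H.264 timing: time_scale counts field-rate ticks, hence twice the frame rate */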
+ ctx->vui_params.time_scale = ctx->rc.frame_rate * 2;
+ ctx->vui_params.bit_rate_value_minus1 = (ctx->rc.bits_per_second / 64)
+ - 1;
+ ctx->vui_params.cbp_size_value_minus1 = (ctx->rc.buffer_size / 64) - 1;
+ ctx->vui_params.aspect_ratio_info_present_flag = FALSE; //unset
+ ctx->vui_params.aspect_ratio_idc = 0; //unset
+ ctx->vui_params.sar_width = 0; //unset
+ ctx->vui_params.sar_height = 0; //unset
+ ctx->vui_params.cbr = (ctx->rc.rc_mode == IMG_RCMODE_CBR) ?
+ TRUE : FALSE;
+ ctx->vui_params.initial_cpb_removal_delay_length_minus1 =
+ BPH_SEI_NAL_INITIAL_CPB_REMOVAL_DELAY_SIZE - 1;
+ ctx->vui_params.cpb_removal_delay_length_minus1 =
+ PTH_SEI_NAL_CPB_REMOVAL_DELAY_SIZE - 1;
+ ctx->vui_params.dpb_output_delay_length_minus1 =
+ PTH_SEI_NAL_DPB_OUTPUT_DELAY_SIZE - 1;
+ ctx->vui_params.time_offset_length = 24; //hard coded
+ ctx->vui_params.num_reorder_frames = 0; //TODO
+ ctx->vui_params.max_dec_frame_buffering = 0; //unset
+
+ pr_debug("h264_vui_params: time_scale=%d\n", ctx->vui_params.time_scale);
+ pr_debug("h264_vui_params: bit_rate_value_minus1=%d\n",
+ ctx->vui_params.bit_rate_value_minus1);
+ pr_debug("h264_vui_params: cbp_size_value_minus1=%d\n",
+ ctx->vui_params.cbp_size_value_minus1);
+ pr_debug("h264_vui_params: cbr=%d\n", ctx->vui_params.cbr);
+ pr_debug("h264_vui_params: initial_cpb_removal_delay_length_minus1=%d\n",
+ ctx->vui_params.initial_cpb_removal_delay_length_minus1);
+ pr_debug("h264_vui_params: cpb_removal_delay_length_minus1=%d\n",
+ ctx->vui_params.cpb_removal_delay_length_minus1);
+ pr_debug("h264_vui_params: dpb_output_delay_length_minus1=%d\n",
+ ctx->vui_params.dpb_output_delay_length_minus1);
+
+ /* Sequence Header parameters */
+ switch (level_h264) {
+ case 100:
+ ctx->sh_params.level = SH_LEVEL_1;
+ break;
+ case 101:
+ ctx->sh_params.level = SH_LEVEL_1B;
+ break;
+ case 110:
+ ctx->sh_params.level = SH_LEVEL_11;
+ break;
+ case 120:
+ ctx->sh_params.level = SH_LEVEL_12;
+ break;
+ case 130:
+ ctx->sh_params.level = SH_LEVEL_13;
+ break;
+ case 200:
+ ctx->sh_params.level = SH_LEVEL_2;
+ break;
+ case 210:
+ ctx->sh_params.level = SH_LEVEL_21;
+ break;
+ case 220:
+ ctx->sh_params.level = SH_LEVEL_22;
+ break;
+ case 300:
+ ctx->sh_params.level = SH_LEVEL_3;
+ break;
+ case 310:
+ ctx->sh_params.level = SH_LEVEL_31;
+ break;
+ case 320:
+ ctx->sh_params.level = SH_LEVEL_32;
+ break;
+ case 400:
+ ctx->sh_params.level = SH_LEVEL_4;
+ break;
+ case 410:
+ ctx->sh_params.level = SH_LEVEL_41;
+ break;
+ case 420:
+ ctx->sh_params.level = SH_LEVEL_42;
+ break;
+ case 500:
+ ctx->sh_params.level = SH_LEVEL_5;
+ break;
+ case 510:
+ ctx->sh_params.level = SH_LEVEL_51;
+ break;
+ case 520:
+ ctx->sh_params.level = SH_LEVEL_52;
+ break;
+ default:
+ pr_err("Error invalid h264 level %d\n", level_h264);
+ return -EINVAL;
+ }
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ ctx->sh_params.width_in_mbs_minus1 = ((queue->width +
+ (MB_SIZE - 1))/MB_SIZE)-1;
+ ctx->sh_params.height_in_maps_units_minus1 = ((queue->height +
+ (MB_SIZE - 1))/MB_SIZE) - 1;
+ pr_debug("h264_sequence_header_params: width_in_mbs_minus1=%d\n",
+ ctx->sh_params.width_in_mbs_minus1);
+ pr_debug("h264_sequence_header_params: height_in_maps_units_minus1=%d\n",
+ ctx->sh_params.height_in_maps_units_minus1);
+ }
+ ctx->sh_params.log2_max_pic_order_cnt = 6; //hard coded
+ ctx->sh_params.gaps_in_frame_num_value = FALSE;
+ ctx->sh_params.frame_mbs_only_flag = ctx->vparams.is_interlaced ?
+ FALSE : TRUE;
+ ctx->sh_params.vui_params_present = (ctx->rc.rc_mode == IMG_RCMODE_NONE)
+ ? FALSE : TRUE;
+ ctx->sh_params.seq_scaling_matrix_present_flag = FALSE;
+ ctx->sh_params.use_default_scaling_list = FALSE;
+ ctx->sh_params.is_lossless = FALSE;
+ ctx->sh_params.vui_params = ctx->vui_params;
+
+ pr_debug("h264_sequence_header_params: frame_mbs_only_flag=%d\n",
+ ctx->sh_params.frame_mbs_only_flag);
+ pr_debug("h264_sequence_header_params: vui_params_present=%d\n",
+ ctx->sh_params.vui_params_present);
+
+ ctx->s_fmt_flags |= S_FMT_FLAG_OUT_RECV;
+ } else {
+ for (i = 0; i < fmt->num_planes; i++) {
+ queue->bytesperline[i] = 0;
+ queue->size_image[i] =
+ topaz_get_coded_buffer_max_size(ctx->topaz_str_context,
+ queue->fmt->std,
+ queue->width,
+ queue->height,
+ &ctx->rc);
+ }
+ ctx->vparams.standard = fmt->std;
+ ctx->vparams.width = pix_mp->width;
+ /*
+ * Note: Do not halve height for interlaced.
+ * App should take care of this.
+ */
+ ctx->vparams.frame_height = pix_mp->height;
+
+ pr_debug("img_video_params: standard=%d\n", ctx->vparams.standard);
+ pr_debug("img_video_params: width=%d\n", ctx->vparams.width);
+ pr_debug("img_video_params: frame_height=%d\n", ctx->vparams.frame_height);
+
+ ctx->s_fmt_flags |= S_FMT_FLAG_CAP_RECV;
+ }
+ ctx->vparams.is_interlaced = FALSE;
+
+	ctx->vparams.intra_pred_modes = -1414812757; /* Matches the (seemingly uninitialized) IMG reference value */
+
+ ctx->vparams.buffer_stride_bytes = 0;
+ ctx->vparams.buffer_height = 0;
+
+ ctx->vparams.crop_left = 0;
+ ctx->vparams.crop_right = 0;
+ ctx->vparams.crop_top = 0;
+ ctx->vparams.crop_bottom = 0;
+
+ ctx->vparams.slices_per_picture = 1;
+
+ /* Crop parameters */
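+	/*
+	 * The coded size is rounded up to whole macroblocks; express the
+	 * padding as SPS cropping offsets, in the 2-pixel crop units used
+	 * for 4:2:0 content.
+	 */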
+ ctx->crop_params.clip = FALSE;
+ ctx->crop_params.left_crop_offset = 0;
+ ctx->crop_params.right_crop_offset = (((ctx->sh_params.width_in_mbs_minus1 + 1)*MB_SIZE) -
+ ctx->vparams.source_width)/2;
+ ctx->crop_params.top_crop_offset = 0;
+ ctx->crop_params.bottom_crop_offset = (((ctx->sh_params.height_in_maps_units_minus1 + 1)
+ *MB_SIZE) - ctx->vparams.source_frame_height)/2;
+	if (ctx->crop_params.right_crop_offset || ctx->crop_params.bottom_crop_offset)
+ ctx->crop_params.clip = TRUE;
+
+ pr_debug("s_fmt_flags=%#08x\n", ctx->s_fmt_flags);
+ if ((ctx->s_fmt_flags & S_FMT_FLAG_OUT_RECV) &&
+ (ctx->s_fmt_flags & S_FMT_FLAG_CAP_RECV)) {
+ pr_debug("Calling topaz_stream_create()\n");
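+		/* Alternate newly created streams between the two encode pipes */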
+ topaz_stream_create(ctx, &ctx->vparams, ((base_pipe++ % 2) ? 0 : 1), 2,
+ &ctx->rc, &ctx->topaz_str_context);
+
+ topaz_h264_prepare_sequence_header(ctx->topaz_str_context,
+ ctx->sh_params.width_in_mbs_minus1 + 1,
+ ctx->sh_params.height_in_maps_units_minus1 + 1,
+ TRUE, &ctx->vui_params,
+ &ctx->crop_params,
+ &ctx->sh_params, FALSE);
+ /* Note: cqp_offset looks unset in img */
+ topaz_h264_prepare_picture_header(ctx->topaz_str_context, 0);
+
+ topaz_load_context(ctx->topaz_str_context);
+
+ ctx->s_fmt_flags |= S_FMT_FLAG_STREAM_CREATED;
+ }
+
+ return 0;
+}
+
+static int vxe_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_EOS)
+ return -EINVAL;
+
+	return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int vxe_try_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *cmd)
+{
+	if (cmd->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+ return 0;
+}
+
+static int vxe_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *cmd)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+
+	if (cmd->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ mutex_lock_nested(ctx->mutex, SUBCLASS_VXE_V4L2);
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0 ||
+ !ctx->out_queue.streaming || !ctx->cap_queue.streaming) {
+ /* Buffers are still in queue for encode, set eos flag */
+ ctx->eos = TRUE;
+ mutex_unlock((struct mutex *)ctx->mutex);
+ } else if ((ctx->available_source_frames > 0) ||
+ (ctx->frames_encoding) > 0) {
+ /*
+ * Buffers are still in firmware for encode. Tell topaz
+ * that last frame sent is last frame in stream
+ */
+ topaz_end_of_stream(ctx->topaz_str_context, ctx->frame_num);
+ ctx->last_frame_num = ctx->frame_num;
+ mutex_unlock((struct mutex *)ctx->mutex);
+ } else {
+ /* All buffers are encoded, so issue dummy stream end */
+ mutex_unlock((struct mutex *)ctx->mutex);
+ vxe_eos(ctx);
+ }
+ return 0;
+}
+
+static int vxe_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *query)
+{
+ int i;
+
+ query->reserved[0] = 0;
+ query->reserved[1] = 0;
+
+ /* Enumerate controls */
+ if (query->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ query->id &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
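+		/* controls[] is sorted by CID, so the first larger CID is the next control */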
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ if (!controls[i].compound && controls[i].cid > query->id) {
+ query->id = controls[i].cid;
+ query->type = controls[i].type;
+ strncpy(query->name, controls[i].name, sizeof(query->name));
+ query->minimum = controls[i].minimum;
+ query->maximum = controls[i].maximum;
+ query->step = controls[i].step;
+ query->default_value = controls[i].default_value;
+ query->flags = 0;
+ return 0;
+ }
+ }
+ return -EINVAL;
+ }
+
+ /* Return info on requested control */
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ if (controls[i].cid == query->id) {
+ query->id = controls[i].cid;
+ query->type = controls[i].type;
+ strncpy(query->name, controls[i].name, sizeof(query->name));
+ query->minimum = controls[i].minimum;
+ query->maximum = controls[i].maximum;
+ query->step = controls[i].step;
+ query->default_value = controls[i].default_value;
+ query->flags = 0;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vxe_query_ext_ctrl(struct file *file, void *priv,
+ struct v4l2_query_ext_ctrl *query)
+{
+ unsigned int queryid;
+ int i, j;
+
+ query->reserved[0] = 0;
+ query->reserved[1] = 0;
+
+ /* Enumerate controls */
+ if ((query->id & V4L2_CTRL_FLAG_NEXT_CTRL) ||
+ (query->id & V4L2_CTRL_FLAG_NEXT_COMPOUND)) {
+ queryid = query->id;
+ queryid &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
+ queryid &= ~V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ if (((!controls[i].compound && (query->id & V4L2_CTRL_FLAG_NEXT_CTRL)) ||
+ (controls[i].compound &&
+ (query->id & V4L2_CTRL_FLAG_NEXT_COMPOUND))) &&
+ controls[i].cid > queryid) {
+ query->id = controls[i].cid;
+ query->type = controls[i].type;
+ strncpy(query->name, controls[i].name, sizeof(query->name));
+ query->minimum = controls[i].minimum;
+ query->maximum = controls[i].maximum;
+ query->step = controls[i].step;
+ query->default_value = controls[i].default_value;
+ /* Our supported controls use int values */
+ query->elem_size = 4;
+ query->elems = 1;
+ query->nr_of_dims = 0;
+ for (j = 0; j < V4L2_CTRL_MAX_DIMS; j++)
+ query->dims[j] = 0;
+ query->flags = 0;
+ return 0;
+ }
+ }
+ return -EINVAL;
+ }
+
+ /* Return info on requested control */
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ if (controls[i].cid == query->id) {
+ query->id = controls[i].cid;
+ query->type = controls[i].type;
+ strncpy(query->name, controls[i].name, sizeof(query->name));
+ query->minimum = controls[i].minimum;
+ query->maximum = controls[i].maximum;
+ query->step = controls[i].step;
+ query->default_value = controls[i].default_value;
+ /* Our supported controls use int values */
+ query->elem_size = 4;
+ query->elems = 1;
+ query->nr_of_dims = 0;
+ for (j = 0; j < V4L2_CTRL_MAX_DIMS; j++)
+ query->dims[j] = 0;
+ query->flags = 0;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vxe_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct device *dev = ctx->dev->dev;
+ struct v4l2_queryctrl query;
+ int i;
+
+ ctrls->reserved[0] = 0;
+ ctrls->reserved[1] = 0;
+
+ if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
+ for (i = 0; i < ctrls->count; i++) {
+ query.id = ctrls->controls[i].id;
+ if (vxe_queryctrl(NULL, NULL, &query)) {
+ dev_err(dev, "%s could not find default value for id=%#08x\n",
+ __func__, ctrls->controls[i].id);
+ return -EINVAL;
+ }
+ ctrls->controls[i].value = query.default_value;
+		}
+		return 0;
+	}
+
+ for (i = 0; i < ctrls->count; i++) {
+ ctrls->controls[i].reserved2[0] = 0;
+
+ switch (ctrls->controls[i].id) {
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrls->controls[i].size = 0;
+ ctrls->controls[i].value = ctx->vparams.idr_period;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctrls->controls[i].size = 0;
+ ctrls->controls[i].value = ctx->rc.bits_per_second;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ ctrls->controls[i].size = 0;
+ ctrls->controls[i].value = ctx->rc.intra_freq;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctrls->controls[i].size = 0;
+ ctrls->controls[i].value = ctx->sh_params.profile;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctrls->controls[i].size = 0;
+ ctrls->controls[i].value = ctx->sh_params.level;
+ break;
+ default:
+ dev_err(dev, "%s Invalid control id %#08x\n",
+ __func__, ctrls->controls[i].id);
+ ctrls->error_idx = ctrls->count;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vxe_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct device *dev = ctx->dev->dev;
+ struct v4l2_queryctrl query;
+ int i;
+
+ ctrls->reserved[0] = 0;
+ ctrls->reserved[1] = 0;
+
+ /* Can't write default values or support requests */
+ if (ctrls->which != V4L2_CTRL_WHICH_CUR_VAL)
+ return -EINVAL;
+
+ /* Cannot change values once context is created */
+ /* TODO: Handle controls after stream is created but before streamon */
+ if (ctx->s_fmt_flags & S_FMT_FLAG_STREAM_CREATED)
+ return -EBUSY;
+
+ for (i = 0; i < ctrls->count; i++) {
+ ctrls->controls[i].reserved2[0] = 0;
+
+ query.id = ctrls->controls[i].id;
+ if (vxe_queryctrl(NULL, NULL, &query)) {
+ dev_err(dev, "%s could not find control id=%#08x\n",
+ __func__, ctrls->controls[i].id);
+ ctrls->error_idx = i;
+ return -EINVAL;
+ }
+ if (ctrls->controls[i].value < query.minimum) {
+ dev_err(dev, "%s control id=%#08x value=%d less than minimum=%d\n",
+ __func__, ctrls->controls[i].id,
+ ctrls->controls[i].value, query.minimum);
+ ctrls->error_idx = i;
+ return -ERANGE;
+ }
+ if (ctrls->controls[i].value > query.maximum) {
+ dev_err(dev, "%s control id=%#08x value=%d greater than maximum=%d\n",
+ __func__, ctrls->controls[i].id,
+ ctrls->controls[i].value, query.maximum);
+ ctrls->error_idx = i;
+ return -ERANGE;
+ }
+ }
+
+ return 0;
+}
+
+static int vxe_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ struct device *dev = ctx->dev->dev;
+ int i;
+ int ret;
+
+ ctrls->reserved[0] = 0;
+ ctrls->reserved[1] = 0;
+
+ if (ctrls->which != V4L2_CTRL_WHICH_CUR_VAL)
+ return -EINVAL;
+
+ /* Verify first with try_ext_ctrls */
+ ret = vxe_try_ext_ctrls(file, priv, ctrls);
+ if (ret) {
+ /* Indicate verification stage error */
+ ctrls->error_idx = ctrls->count;
+ return ret;
+ }
+
+ /* Set all values in this set of commands */
+ for (i = 0; i < ctrls->count; i++) {
+ ctrls->controls[i].reserved2[0] = 0;
+
+ switch (ctrls->controls[i].id) {
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrls->controls[i].size = 0;
+ ctx->vparams.idr_period = ctrls->controls[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctrls->controls[i].size = 0;
+ ctx->rc.bits_per_second = ctrls->controls[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ ctrls->controls[i].size = 0;
+ ctx->rc.intra_freq = ctrls->controls[i].value;
+ ctx->vparams.intra_cnt = ctrls->controls[i].value;
+ break;
+ default:
+ dev_err(dev, "%s Invalid control id %#08x\n",
+ __func__, ctrls->controls[i].id);
+ ctrls->error_idx = i;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vxe_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = 1920;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = 1080;
+ fsize->stepwise.step_height = 1;
+
+ fsize->reserved[0] = 0;
+ fsize->reserved[1] = 0;
+
+ return 0;
+}
+
+static int vxe_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ if (fival->index)
+ return -EINVAL;
+
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+ fival->stepwise.min = frmivals[0];
+ fival->stepwise.max = frmivals[1];
+ fival->stepwise.step = frmivals[1];
+
+ fival->reserved[0] = 0;
+	fival->reserved[1] = 0;
+
+ return 0;
+}
+
+static int vxe_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+
+ if (V4L2_TYPE_IS_OUTPUT(parm->type)) {
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.output.timeperframe.numerator = 1;
+ parm->parm.output.timeperframe.denominator = ctx->rc.frame_rate;
+ } else {
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.numerator = 1;
+ parm->parm.capture.timeperframe.denominator = ctx->rc.frame_rate;
+ }
+
+ return 0;
+}
+
+static int vxe_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vxe_enc_ctx *ctx = file2ctx(file);
+ unsigned int num, den;
+
+ /* Cannot change values once context is created */
+ /* TODO: Handle controls after stream is created but before streamon */
+ if (ctx->s_fmt_flags & S_FMT_FLAG_STREAM_CREATED)
+ return -EBUSY;
+
+ if (V4L2_TYPE_IS_OUTPUT(parm->type)) {
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ num = parm->parm.output.timeperframe.numerator;
+ den = parm->parm.output.timeperframe.denominator;
+ } else {
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ num = parm->parm.capture.timeperframe.numerator;
+ den = parm->parm.capture.timeperframe.denominator;
+ }
+
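+	/* timeperframe is a period; the frame rate is its integer inverse */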
+	if (num && den)
+		ctx->rc.frame_rate = den / num;
+
+ if (V4L2_TYPE_IS_OUTPUT(parm->type)) {
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.output.timeperframe.numerator = 1;
+ parm->parm.output.timeperframe.denominator = ctx->rc.frame_rate;
+ } else {
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.numerator = 1;
+ parm->parm.capture.timeperframe.denominator = ctx->rc.frame_rate;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vxe_enc_ioctl_ops = {
+ .vidioc_querycap = vxe_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vxe_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vxe_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vxe_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vxe_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vxe_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vxe_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vxe_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vxe_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+
+ .vidioc_subscribe_event = vxe_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_encoder_cmd = vxe_try_cmd,
+ .vidioc_encoder_cmd = vxe_cmd,
+
+ .vidioc_queryctrl = vxe_queryctrl,
+ .vidioc_query_ext_ctrl = vxe_query_ext_ctrl,
+ .vidioc_g_ext_ctrls = vxe_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vxe_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vxe_try_ext_ctrls,
+
+ .vidioc_enum_framesizes = vxe_enum_framesizes,
+ .vidioc_enum_frameintervals = vxe_enum_frameintervals,
+
+ .vidioc_g_parm = vxe_g_parm,
+ .vidioc_s_parm = vxe_s_parm,
+};
+
+static const struct of_device_id vxe_enc_of_match[] = {
+	{ .compatible = "img,vxe384" },
+	{ /* end */ },
+};
+MODULE_DEVICE_TABLE(of, vxe_enc_of_match);
+
+static irqreturn_t soft_thread_irq(int irq, void *dev_data)
+{
+ unsigned char handled;
+
+ if (!dev_data)
+ return IRQ_NONE;
+
+ handled = topazdd_threaded_isr(dev_data);
+ if (handled)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t hard_isrcb(int irq, void *dev_data)
+{
+ if (!dev_data)
+ return IRQ_NONE;
+
+ return topazdd_isr(dev_data);
+}
+
+static int vxe_enc_probe(struct platform_device *pdev)
+{
+ struct vxe_dev *vxe;
+ struct resource *res;
+ const struct of_device_id *of_dev_id;
+ struct video_device *vfd;
+ int ret;
+ int module_irq;
+ struct vxe_enc_ctx *ctx;
+ struct heap_config *heap_configs = vxe_enc_heap_configs;
+ int num_heaps = ARRAY_SIZE(vxe_enc_heap_configs);
+ unsigned int i;
+
+ of_dev_id = of_match_device(vxe_enc_of_match, &pdev->dev);
+ if (!of_dev_id) {
+ dev_err(&pdev->dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to set 40-bit DMA mask\n", __func__);
+		return ret;
+	}
+
+ vxe = devm_kzalloc(&pdev->dev, sizeof(*vxe), GFP_KERNEL);
+ if (!vxe)
+ return -ENOMEM;
+
+ vxe->dev = &pdev->dev;
+ vxe->plat_dev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vxe->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vxe->reg_base))
+ return PTR_ERR(vxe->reg_base);
+
+ module_irq = platform_get_irq(pdev, 0);
+ if (module_irq < 0)
+ return -ENXIO;
+ vxe->module_irq = module_irq;
+
+	ret = img_mem_init(vxe->dev);
+	if (ret) {
+		dev_err(vxe->dev, "Failed to initialize memory\n");
+		return ret;
+	}
+
+ INIT_LIST_HEAD(&vxe->drv_ctx.heaps);
+ vxe->drv_ctx.internal_heap_id = VXE_INVALID_ID;
+
+ /* Initialise memory management component */
+ for (i = 0; i < num_heaps; i++) {
+ struct vxe_heap *heap;
+#ifdef DEBUG_ENCODER_DRIVER
+ dev_info(vxe->dev, "%s: adding heap of type %d\n",
+ __func__, heap_configs[i].type);
+#endif
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap) {
+ ret = -ENOMEM;
+ goto heap_add_failed;
+ }
+
+ ret = img_mem_add_heap(&heap_configs[i], &heap->id);
+ if (ret < 0) {
+ dev_err(vxe->dev, "%s: failed to init heap (type %d)!\n",
+ __func__, heap_configs[i].type);
+ kfree(heap);
+ goto heap_add_failed;
+ }
+ list_add(&heap->list, &vxe->drv_ctx.heaps);
+
+ /* Implicitly, first heap is used for internal allocations */
+ if (vxe->drv_ctx.internal_heap_id < 0) {
+ vxe->drv_ctx.internal_heap_id = heap->id;
+			dev_info(vxe->dev, "%s: using heap %d for internal alloc\n",
+ __func__, vxe->drv_ctx.internal_heap_id);
+ }
+ }
+
+ /* Do not proceed if internal heap not defined */
+ if (vxe->drv_ctx.internal_heap_id < 0) {
+ dev_err(vxe->dev, "%s: failed to locate heap for internal alloc\n",
+ __func__);
+ ret = -EINVAL;
+ /* Loop registered heaps just for sanity */
+ goto heap_add_failed;
+ }
+
+	ret = vxe_init_mem(vxe);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to initialize memory\n");
+		return ret;
+	}
+
+	vxe->mutex = kzalloc(sizeof(*vxe->mutex), GFP_KERNEL);
+	if (!vxe->mutex) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+ mutex_init(vxe->mutex);
+
+ platform_set_drvdata(pdev, vxe);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to enable clock, status = %d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+	ret = devm_request_threaded_irq(&pdev->dev, module_irq, hard_isrcb,
+					soft_thread_irq, IRQF_SHARED,
+					IMG_VXE_ENC_MODULE_NAME, &vxe->topaz_dev_ctx);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ goto out_put_sync;
+ }
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto out_put_sync;
+	}
+ ctx->dev = vxe;
+
+ vxe_fill_default_params(ctx);
+
+ ctx->mem_ctx = vxe->drv_ctx.mem_ctx;
+ ctx->mmu_ctx = vxe->drv_ctx.mmu_ctx;
+
+ vxe->ctx = ctx;
+
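+	/* Bring up the Topaz device layer with the MMU in extended-address mode */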
+	ret = topazdd_init((unsigned long)vxe->reg_base, resource_size(res),
+ (MMU_USE_MMU_FLAG | MMU_EXTENDED_ADDR_FLAG),
+ ctx, vxe->drv_ctx.ptd, &vxe->topaz_dev_ctx);
+	if (ret)
+		goto out_free_ctx;
+
+ vxe->streams = kzalloc(sizeof(*vxe->streams), GFP_KERNEL);
+ if (!vxe->streams) {
+ ret = -ENOMEM;
+ goto topazdd_deinit;
+ }
+ idr_init(vxe->streams);
+
+ ret = init_topaz_core(vxe->topaz_dev_ctx, &vxe->num_pipes,
+ (MMU_USE_MMU_FLAG | MMU_EXTENDED_ADDR_FLAG),
+ vxe_return_resource);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize topaz core\n");
+ goto topazdd_deinit;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vxe->ti_vxe_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto topaz_core_deinit;
+ }
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ dev_err(&pdev->dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto out_v4l2_device;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s", IMG_VXE_ENC_MODULE_NAME);
+ vfd->fops = &vxe_enc_fops;
+ vfd->ioctl_ops = &vxe_enc_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->v4l2_dev = &vxe->ti_vxe_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vfd->lock = vxe->mutex;
+
+ vxe->vfd = vfd;
+ video_set_drvdata(vfd, vxe);
+
+	vxe->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(vxe->m2m_dev)) {
+		dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(vxe->m2m_dev);
+		goto out_vid_dev;
+	}
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto out_vid_reg;
+ }
+ v4l2_info(&vxe->ti_vxe_dev, "encoder registered as /dev/video%d\n",
+ vfd->num);
+
+ return 0;
+
+out_vid_reg:
+ v4l2_m2m_release(vxe->m2m_dev);
+out_vid_dev:
+ video_device_release(vfd);
+out_v4l2_device:
+ v4l2_device_unregister(&vxe->ti_vxe_dev);
+topaz_core_deinit:
+ deinit_topaz_core();
+topazdd_deinit:
+ topazdd_deinit(vxe->topaz_dev_ctx);
+out_free_ctx:
+	kfree(vxe->ctx);
+out_put_sync:
+ pm_runtime_put_sync(&pdev->dev);
+heap_add_failed:
+ while (!list_empty(&vxe->drv_ctx.heaps)) {
+ struct vxe_heap *heap;
+
+ heap = list_first_entry(&vxe->drv_ctx.heaps, struct vxe_heap, list);
+		list_del(&heap->list);
+ img_mem_del_heap(heap->id);
+ kfree(heap);
+ }
+ vxe->drv_ctx.internal_heap_id = VXE_INVALID_ID;
+
+exit:
+ pm_runtime_disable(&pdev->dev);
+ vxe_deinit_mem(vxe);
+
+ return ret;
+}
+
+static int vxe_enc_remove(struct platform_device *pdev)
+{
+ struct vxe_dev *vxe = platform_get_drvdata(pdev);
+
+	video_unregister_device(vxe->vfd);
+	v4l2_m2m_release(vxe->m2m_dev);
+	v4l2_device_unregister(&vxe->ti_vxe_dev);
+
+	deinit_topaz_core();
+	topazdd_deinit(vxe->topaz_dev_ctx);
+
+	/* The module IRQ is devm-managed and is released automatically */
+	kfree(vxe->ctx);
+	vxe_deinit_mem(vxe);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver vxe_enc_driver = {
+ .probe = vxe_enc_probe,
+ .remove = vxe_enc_remove,
+ .driver = {
+ .name = "img_enc",
+ .of_match_table = vxe_enc_of_match,
+ },
+};
+module_platform_driver(vxe_enc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IMG VXE384 video encoder driver");
diff --git a/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.h b/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.h
new file mode 100644
index 000000000000..d4dcc9a39e6c
--- /dev/null
+++ b/drivers/media/platform/vxe-vxd/encoder/vxe_v4l2.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * V4L2 interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ * Sunita Nadampalli <sunitan@ti.com>
+ *
+ * Re-written for upstreming
+ * Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VXE_V4L2_H
+#define _VXE_V4L2_H
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+/*
+ * struct vxe_ctrl - contains info for each supported v4l2 control
+ */
+struct vxe_ctrl {
+ unsigned int cid;
+ enum v4l2_ctrl_type type;
+ unsigned char name[32];
+ int minimum;
+ int maximum;
+ int step;
+ int default_value;
+ unsigned char compound;
+};
+
+extern struct mem_space topaz_mem_space[];
+
+#endif
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index fff7ddec6745..2773145a2226 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -681,12 +681,13 @@ stream_done:
static struct v4l2_mbus_framefmt *
__xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xcsi2rxss->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&xcsi2rxss->subdev,
+ sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xcsi2rxss->format;
default:
@@ -705,7 +706,7 @@ __xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
* Return: 0 on success
*/
static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
struct v4l2_mbus_framefmt *format;
@@ -713,7 +714,7 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&xcsi2rxss->lock);
for (i = 0; i < XCSI_MEDIA_PADS; i++) {
- format = v4l2_subdev_get_try_format(sd, cfg, i);
+ format = v4l2_subdev_get_try_format(sd, sd_state, i);
*format = xcsi2rxss->default_format;
}
mutex_unlock(&xcsi2rxss->lock);
@@ -732,13 +733,14 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
mutex_lock(&xcsi2rxss->lock);
- fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, cfg, fmt->pad,
+ fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, sd_state,
+ fmt->pad,
fmt->which);
mutex_unlock(&xcsi2rxss->lock);
@@ -759,7 +761,7 @@ static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
* Return: 0 on success
*/
static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
@@ -773,7 +775,7 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
* CSI format cannot be changed at runtime.
* Ensure that format to set is copied to over to CSI pad format
*/
- __format = __xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ __format = __xcsi2rxss_get_pad_format(xcsi2rxss, sd_state,
fmt->pad, fmt->which);
/* only sink pad format can be updated */
@@ -811,7 +813,7 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
* Return: -EINVAL or zero on success
*/
static int xcsi2rxss_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct xcsi2rxss_state *state = to_xcsi2rxssstate(sd);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 2a56201cb853..07074eda5f70 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -175,8 +175,8 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
struct xvip_dma *start)
{
struct media_graph graph;
- struct media_entity *entity = &start->video.entity;
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_pad *pad = start->video.entity.pads;
+ struct media_device *mdev = pad->entity->graph_obj.mdev;
unsigned int num_inputs = 0;
unsigned int num_outputs = 0;
int ret;
@@ -190,15 +190,15 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
return ret;
}
- media_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, pad);
- while ((entity = media_graph_walk_next(&graph))) {
+ while ((pad = media_graph_walk_next(&graph))) {
struct xvip_dma *dma;
- if (entity->function != MEDIA_ENT_F_IO_V4L)
+ if (pad->entity->function != MEDIA_ENT_F_IO_V4L)
continue;
- dma = to_xvip_dma(media_entity_to_video_device(entity));
+ dma = to_xvip_dma(media_entity_to_video_device(pad->entity));
if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
pipe->output = dma;
@@ -403,10 +403,10 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
* Use the pipeline object embedded in the first DMA object that starts
* streaming.
*/
- pipe = dma->video.entity.pipe
+ pipe = dma->video.entity.pads->pipe
? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
- ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ ret = media_pipeline_start(dma->video.entity.pads, &pipe->pipe);
if (ret < 0)
goto error;
@@ -432,7 +432,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
error_stop:
- media_pipeline_stop(&dma->video.entity);
+ media_pipeline_stop(dma->video.entity.pads);
error:
/* Give back all queued buffers to videobuf2. */
@@ -460,7 +460,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
/* Cleanup the pipeline and mark it as being stopped. */
xvip_pipeline_cleanup(pipe);
- media_pipeline_stop(&dma->video.entity);
+ media_pipeline_stop(dma->video.entity.pads);
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 2378bdae57ae..69ced71a5696 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -47,7 +47,7 @@ struct xvip_pipeline {
static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
{
- return container_of(e->pipe, struct xvip_pipeline, pipe);
+ return container_of(e->pads->pipe, struct xvip_pipeline, pipe);
}
/**
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index ed01bedb5db6..0f2d5a0edf0c 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -251,12 +251,13 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
static struct v4l2_mbus_framefmt *
__xtpg_get_pad_format(struct xtpg_device *xtpg,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xtpg->xvip.subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&xtpg->xvip.subdev,
+ sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xtpg->formats[pad];
default:
@@ -265,25 +266,26 @@ __xtpg_get_pad_format(struct xtpg_device *xtpg,
}
static int xtpg_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xtpg_device *xtpg = to_tpg(subdev);
- fmt->format = *__xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+ fmt->format = *__xtpg_get_pad_format(xtpg, sd_state, fmt->pad,
+ fmt->which);
return 0;
}
static int xtpg_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct xtpg_device *xtpg = to_tpg(subdev);
struct v4l2_mbus_framefmt *__format;
u32 bayer_phase;
- __format = __xtpg_get_pad_format(xtpg, cfg, fmt->pad, fmt->which);
+ __format = __xtpg_get_pad_format(xtpg, sd_state, fmt->pad, fmt->which);
/* In two pads mode the source pad format is always identical to the
* sink pad format.
@@ -306,7 +308,8 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
/* Propagate the format to the source pad. */
if (xtpg->npads == 2) {
- __format = __xtpg_get_pad_format(xtpg, cfg, 1, fmt->which);
+ __format = __xtpg_get_pad_format(xtpg, sd_state, 1,
+ fmt->which);
*__format = fmt->format;
}
@@ -318,12 +321,12 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
*/
static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -351,11 +354,11 @@ static int xtpg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct xtpg_device *xtpg = to_tpg(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
*format = xtpg->default_format;
if (xtpg->npads == 2) {
- format = v4l2_subdev_get_try_format(subdev, fh->pad, 1);
+ format = v4l2_subdev_get_try_format(subdev, fh->state, 1);
*format = xtpg->default_format;
}
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 6ad61b08a31a..b989fee8351d 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(xvip_cleanup_resources);
* is not valid.
*/
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct v4l2_mbus_framefmt *format;
@@ -260,7 +260,7 @@ int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, code->pad);
code->code = format->code;
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(xvip_enum_mbus_code);
* if the index or the code is not valid.
*/
int xvip_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
@@ -295,7 +295,7 @@ int xvip_enum_frame_size(struct v4l2_subdev *subdev,
if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index a528a32ea1dc..d0b0e0600952 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -125,10 +125,10 @@ const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt);
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code);
int xvip_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse);
static inline u32 xvip_read(struct xvip_device *xvip, u32 addr)
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 5e9fd902cd37..10724b0a868c 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -246,7 +246,7 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
vcap->sequence = 0;
/* Start the media pipeline */
- ret = media_pipeline_start(entity, &vcap->stream.pipe);
+ ret = media_pipeline_start(entity->pads, &vcap->stream.pipe);
if (ret) {
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
return ret;
@@ -254,7 +254,7 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
if (ret) {
- media_pipeline_stop(entity);
+ media_pipeline_stop(entity->pads);
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
return ret;
}
@@ -273,7 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
/* Stop the media pipeline */
- media_pipeline_stop(&vcap->vdev.entity);
+ media_pipeline_stop(vcap->vdev.entity.pads);
/* Release all active buffers */
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/test-drivers/vimc/vimc-debayer.c b/drivers/media/test-drivers/vimc/vimc-debayer.c
index c3f6fef34f68..2d06cdbacc76 100644
--- a/drivers/media/test-drivers/vimc/vimc-debayer.c
+++ b/drivers/media/test-drivers/vimc/vimc-debayer.c
@@ -150,17 +150,17 @@ static bool vimc_deb_src_code_is_valid(u32 code)
}
static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = sink_fmt_default;
for (i = 1; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = sink_fmt_default;
mf->code = vdeb->src_code;
}
@@ -169,7 +169,7 @@ static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (VIMC_IS_SRC(code->pad)) {
@@ -188,7 +188,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index)
@@ -213,14 +213,14 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
/* Get the current sink format */
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ *v4l2_subdev_get_try_format(sd, sd_state, 0) :
vdeb->sink_fmt;
/* Set the right code for the source pad */
@@ -251,7 +251,7 @@ static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
@@ -266,8 +266,8 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
sink_fmt = &vdeb->sink_fmt;
src_code = &vdeb->src_code;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- src_code = &v4l2_subdev_get_try_format(sd, cfg, 1)->code;
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ src_code = &v4l2_subdev_get_try_format(sd, sd_state, 1)->code;
}
/*
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 121fa7d62a2e..06880dd0b6ac 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -84,20 +84,20 @@ static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
}
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf;
struct v4l2_rect *r;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
*mf = sink_fmt_default;
- r = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ r = v4l2_subdev_get_try_crop(sd, sd_state, 0);
*r = crop_rect_default;
for (i = 1; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = sink_fmt_default;
mf->width = mf->width * sca_mult;
mf->height = mf->height * sca_mult;
@@ -107,7 +107,7 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 mbus_code = vimc_mbus_code_by_index(code->index);
@@ -128,7 +128,7 @@ static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct vimc_pix_map *vpix;
@@ -156,7 +156,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -164,8 +164,8 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
/* Get the current sink format */
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
} else {
format->format = vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
@@ -201,7 +201,7 @@ static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -216,8 +216,8 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
sink_fmt = &vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
}
/*
@@ -254,7 +254,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
}
static int vimc_sca_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -268,8 +268,8 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
sink_fmt = &vsca->sink_fmt;
crop_rect = &vsca->crop_rect;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
}
switch (sel->target) {
@@ -287,7 +287,7 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
}
static int vimc_sca_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
@@ -305,8 +305,8 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
crop_rect = &vsca->crop_rect;
sink_fmt = &vsca->sink_fmt;
} else {
- crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
}
switch (sel->target) {
diff --git a/drivers/media/test-drivers/vimc/vimc-sensor.c b/drivers/media/test-drivers/vimc/vimc-sensor.c
index ba5db5a150b4..74ab79cadb5d 100644
--- a/drivers/media/test-drivers/vimc/vimc-sensor.c
+++ b/drivers/media/test-drivers/vimc/vimc-sensor.c
@@ -42,14 +42,14 @@ static const struct v4l2_mbus_framefmt fmt_default = {
};
static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
*mf = fmt_default;
}
@@ -57,7 +57,7 @@ static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
}
static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
u32 mbus_code = vimc_mbus_code_by_index(code->index);
@@ -71,7 +71,7 @@ static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct vimc_pix_map *vpix;
@@ -93,14 +93,14 @@ static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
}
static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sen_device *vsen =
container_of(sd, struct vimc_sen_device, sd);
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) :
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) :
vsen->mbus_format;
return 0;
@@ -146,7 +146,7 @@ static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
}
static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
@@ -159,7 +159,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
mf = &vsen->mbus_format;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
}
/* Set the new format */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index caefac07af92..877e85a451cb 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -410,7 +410,7 @@ static int au0828_enable_source(struct media_entity *entity,
goto end;
}
- ret = __media_pipeline_start(entity, pipe);
+ ret = __media_pipeline_start(entity->pads, pipe);
if (ret) {
pr_err("Start Pipeline: %s->%s Error %d\n",
source->name, entity->name, ret);
@@ -501,12 +501,12 @@ static void au0828_disable_source(struct media_entity *entity)
return;
/* stop pipeline */
- __media_pipeline_stop(dev->active_link_owner);
+ __media_pipeline_stop(dev->active_link_owner->pads);
pr_debug("Pipeline stop for %s\n",
dev->active_link_owner->name);
ret = __media_pipeline_start(
- dev->active_link_user,
+ dev->active_link_user->pads,
dev->active_link_user_pipe);
if (ret) {
pr_err("Start Pipeline: %s->%s %d\n",
@@ -532,7 +532,7 @@ static void au0828_disable_source(struct media_entity *entity)
return;
/* stop pipeline */
- __media_pipeline_stop(dev->active_link_owner);
+ __media_pipeline_stop(dev->active_link_owner->pads);
pr_debug("Pipeline stop for %s\n",
dev->active_link_owner->name);
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 2e5913bccb38..1fa6f10ee157 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -398,7 +398,7 @@ static int s2250_s_ctrl(struct v4l2_ctrl *ctrl)
}
static int s2250_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 3dc17ebe14fa..133d20e40f82 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -441,3 +441,36 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
+
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+ unsigned int div)
+{
+ struct v4l2_ctrl *ctrl;
+ s64 freq;
+
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
+ if (ctrl) {
+ struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
+ int ret;
+
+ qm.index = v4l2_ctrl_g_ctrl(ctrl);
+
+ ret = v4l2_querymenu(handler, &qm);
+ if (ret)
+ return -ENOENT;
+
+ freq = qm.value;
+ } else {
+ if (!mul || !div)
+ return -ENOENT;
+
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl)
+ return -ENOENT;
+
+ freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
+ }
+
+ return freq > 0 ? freq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
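
The new v4l2_get_link_freq() helper above prefers the V4L2_CID_LINK_FREQ menu control and falls back to scaling V4L2_CID_PIXEL_RATE by mul/div. A minimal caller sketch, assuming a hypothetical CSI-2 receiver where mul is the bits per pixel and div is twice the lane count (example_get_link_freq, bpp and lanes are illustrative and not part of this patch):

    #include <media/v4l2-common.h>	/* v4l2_get_link_freq(), per this series */
    #include <media/v4l2-subdev.h>

    /* Hypothetical sketch: mul = bits per sample, div = 2 * lanes,
     * the usual CSI-2 pixel-rate to link-frequency conversion. */
    static int example_get_link_freq(struct v4l2_subdev *sensor_sd)
    {
    	unsigned int bpp = 16;	/* assumed bits per pixel */
    	unsigned int lanes = 2;	/* assumed number of data lanes */
    	s64 freq;

    	freq = v4l2_get_link_freq(sensor_sd->ctrl_handler, bpp, 2 * lanes);
    	if (freq < 0)
    		return (int)freq;	/* -ENOENT or -EINVAL from the helper */

    	/* freq now holds the link frequency in Hz */
    	return 0;
    }
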
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 6d6d30dbbe68..3c7cfc77f93b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/version.h>
+#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -1420,6 +1421,16 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
+ case V4L2_PIX_FMT_TI1210: descr = "10-bit YUV 4:2:0 (NV12)"; break;
+ case V4L2_PIX_FMT_TI1610: descr = "10-bit YUV 4:2:2 (NV16)"; break;
+ case V4L2_PIX_FMT_SRGGI10: descr = "10-bit Bayer RGBG/GIrGIr"; break;
+ case V4L2_PIX_FMT_SGRIG10: descr = "10-bit Bayer GRGB/IrGIrG"; break;
+ case V4L2_PIX_FMT_SBGGI10: descr = "10-bit Bayer BGRG/GIrGIr"; break;
+ case V4L2_PIX_FMT_SGBIG10: descr = "10-bit Bayer GBGR/IrGIrG"; break;
+ case V4L2_PIX_FMT_SGIRG10: descr = "10-bit Bayer GIrGIr/RGBG"; break;
+ case V4L2_PIX_FMT_SIGGR10: descr = "10-bit Bayer IrGIrG/GRGB"; break;
+ case V4L2_PIX_FMT_SGIBG10: descr = "10-bit Bayer GIrGIr/BGRG"; break;
+ case V4L2_PIX_FMT_SIGGB10: descr = "10-bit Bayer IrGIrG/GBGR"; break;
default:
/* Compressed formats */
@@ -3106,6 +3117,21 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
}
break;
}
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = parg;
+
+ if (routing->num_routes > 256)
+ return -EINVAL;
+
+ *user_ptr = u64_to_user_ptr(routing->routes);
+ *kernel_ptr = (void **)&routing->routes;
+ *array_size = sizeof(struct v4l2_subdev_route)
+ * routing->num_routes;
+ ret = 1;
+ break;
+ }
}
return ret;
@@ -3342,8 +3368,15 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
/*
* Some ioctls can return an error, but still have valid
* results that must be returned.
+ *
+	 * FIXME: subdev ioctls are partially handled here and partially in
+	 * v4l2-subdev.c, and the 'always_copy' flag can only be set for ioctls
+	 * defined here as part of the 'v4l2_ioctls' array.
+	 * VIDIOC_SUBDEV_G_ROUTING needs to return results to applications even
+	 * in case of failure, but it is not defined here as part of the
+	 * 'v4l2_ioctls' array, so insert an ad-hoc check to address that.
*/
- if (err < 0 && !always_copy)
+ if (err < 0 && !always_copy && cmd != VIDIOC_SUBDEV_G_ROUTING)
goto out;
out_array_args:
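
The check_array_args() addition above copies the route array between user and kernel space, and the ad-hoc check keeps the copy-out even when VIDIOC_SUBDEV_G_ROUTING fails with -ENOSPC and only num_routes is valid. A hedged userspace sketch of the resulting allocate-and-retry pattern (example_get_routes is hypothetical; the uAPI structures are assumed from this series rather than upstream headers):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>	/* routing uAPI as extended by this series */

    static int example_get_routes(int fd, struct v4l2_subdev_routing *routing)
    {
    	memset(routing, 0, sizeof(*routing));
    	routing->which = V4L2_SUBDEV_FORMAT_ACTIVE;

    	/* First call with num_routes == 0 to learn the required count */
    	if (ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, routing) == 0)
    		return 0;		/* no routes configured */
    	if (errno != ENOSPC)
    		return -1;

    	/* num_routes was copied back even though the ioctl failed */
    	routing->routes = (__u64)(uintptr_t)
    		calloc(routing->num_routes, sizeof(struct v4l2_subdev_route));
    	if (!routing->routes)
    		return -1;

    	return ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, routing);
    }
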
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index ba2f2b8dcc8c..55715b9fbe35 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -427,20 +427,20 @@ EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links);
/*
* pipeline_pm_use_count - Count the number of users of a pipeline
- * @entity: The entity
+ * @pad: Any pad along the pipeline
*
* Return the total number of users of all video device nodes in the pipeline.
*/
-static int pipeline_pm_use_count(struct media_entity *entity,
- struct media_graph *graph)
+static int pipeline_pm_use_count(struct media_pad *pad,
+ struct media_graph *graph)
{
int use = 0;
- media_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, pad);
- while ((entity = media_graph_walk_next(graph))) {
- if (is_media_entity_v4l2_video_device(entity))
- use += entity->use_count;
+ while ((pad = media_graph_walk_next(graph))) {
+ if (is_media_entity_v4l2_video_device(pad->entity))
+ use += pad->entity->use_count;
}
return use;
@@ -482,7 +482,7 @@ static int pipeline_pm_power_one(struct media_entity *entity, int change)
/*
* pipeline_pm_power - Apply power change to all entities in a pipeline
- * @entity: The entity
+ * @pad: Any pad along the pipeline
* @change: Use count change
*
* Walk the pipeline to update the use count and the power state of all non-node
@@ -490,30 +490,29 @@ static int pipeline_pm_power_one(struct media_entity *entity, int change)
*
* Return 0 on success or a negative error code on failure.
*/
-static int pipeline_pm_power(struct media_entity *entity, int change,
- struct media_graph *graph)
+static int pipeline_pm_power(struct media_pad *pad, int change,
+ struct media_graph *graph)
{
- struct media_entity *first = entity;
+ struct media_pad *tmp_pad, *first = pad;
int ret = 0;
if (!change)
return 0;
- media_graph_walk_start(graph, entity);
+ media_graph_walk_start(graph, pad);
- while (!ret && (entity = media_graph_walk_next(graph)))
- if (is_media_entity_v4l2_subdev(entity))
- ret = pipeline_pm_power_one(entity, change);
+ while (!ret && (pad = media_graph_walk_next(graph)))
+ if (is_media_entity_v4l2_subdev(pad->entity))
+ ret = pipeline_pm_power_one(pad->entity, change);
if (!ret)
return ret;
media_graph_walk_start(graph, first);
- while ((first = media_graph_walk_next(graph))
- && first != entity)
- if (is_media_entity_v4l2_subdev(first))
- pipeline_pm_power_one(first, -change);
+ while ((tmp_pad = media_graph_walk_next(graph)) && tmp_pad != pad)
+ if (is_media_entity_v4l2_subdev(tmp_pad->entity))
+ pipeline_pm_power_one(tmp_pad->entity, -change);
return ret;
}
@@ -531,7 +530,7 @@ static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use)
WARN_ON(entity->use_count < 0);
/* Apply power change to connected non-nodes. */
- ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
+ ret = pipeline_pm_power(entity->pads, change, &mdev->pm_count_walk);
if (ret < 0)
entity->use_count -= change;
@@ -557,8 +556,8 @@ int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
{
struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
- struct media_entity *source = link->source->entity;
- struct media_entity *sink = link->sink->entity;
+ struct media_pad *source = link->source;
+ struct media_pad *sink = link->sink;
int source_use;
int sink_use;
int ret = 0;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index fbf0dcb313c8..0b97c9083607 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -16,6 +16,7 @@
#include <linux/videodev2.h>
#include <linux/export.h>
#include <linux/version.h>
+#include <linux/sort.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -26,19 +27,22 @@
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
- if (sd->entity.num_pads) {
- fh->pad = v4l2_subdev_alloc_pad_config(sd);
- if (fh->pad == NULL)
- return -ENOMEM;
- }
+ struct v4l2_subdev_state *state;
+ static struct lock_class_key key;
+
+ state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ fh->state = state;
return 0;
}
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
- v4l2_subdev_free_pad_config(fh->pad);
- fh->pad = NULL;
+ __v4l2_subdev_state_free(fh->state);
+ fh->state = NULL;
}
static int subdev_open(struct file *file)
@@ -146,63 +150,97 @@ static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
return 0;
}
-static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_state_pads(struct v4l2_subdev *sd, u32 which,
+ struct v4l2_subdev_state *state)
{
- if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
+ if (sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED)
+ return 0;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_state_pad_stream(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u32 stream)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED))
+ return 0;
+
+ /*
+ * We need to take the state lock to access the format, but as we then
+	 * have to unlock, nothing prevents someone from changing the state
+	 * before this thread enters the driver's op and the driver has the
+	 * chance to lock the state.
+ */
+ v4l2_subdev_lock_state(state);
+
+ fmt = v4l2_state_get_stream_format(state, pad, stream);
+
+ v4l2_subdev_unlock_state(state);
+
+ if (!fmt)
return -EINVAL;
return 0;
}
static inline int check_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
if (!format)
return -EINVAL;
return check_which(format->which) ? : check_pad(sd, format->pad) ? :
- check_cfg(format->which, cfg);
+ check_state_pads(sd, format->which, state) ? :
+ check_state_pad_stream(sd, state, format->pad, format->stream);
}
static int call_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- return check_format(sd, cfg, format) ? :
- sd->ops->pad->get_fmt(sd, cfg, format);
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->get_fmt(sd, state, format);
}
static int call_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- return check_format(sd, cfg, format) ? :
- sd->ops->pad->set_fmt(sd, cfg, format);
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->set_fmt(sd, state, format);
}
static int call_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!code)
return -EINVAL;
return check_which(code->which) ? : check_pad(sd, code->pad) ? :
- check_cfg(code->which, cfg) ? :
- sd->ops->pad->enum_mbus_code(sd, cfg, code);
+ check_state_pads(sd, code->which, state) ? :
+ check_state_pad_stream(sd, state, code->pad, code->stream) ? :
+ sd->ops->pad->enum_mbus_code(sd, state, code);
}
static int call_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
if (!fse)
return -EINVAL;
return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
- check_cfg(fse->which, cfg) ? :
- sd->ops->pad->enum_frame_size(sd, cfg, fse);
+ check_state_pads(sd, fse->which, state) ? :
+ check_state_pad_stream(sd, state, fse->pad, fse->stream) ? :
+ sd->ops->pad->enum_frame_size(sd, state, fse);
}
static inline int check_frame_interval(struct v4l2_subdev *sd,
@@ -229,42 +267,44 @@ static int call_s_frame_interval(struct v4l2_subdev *sd,
}
static int call_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie)
{
if (!fie)
return -EINVAL;
return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
- check_cfg(fie->which, cfg) ? :
- sd->ops->pad->enum_frame_interval(sd, cfg, fie);
+ check_state_pads(sd, fie->which, state) ? :
+ check_state_pad_stream(sd, state, fie->pad, fie->stream) ? :
+ sd->ops->pad->enum_frame_interval(sd, state, fie);
}
static inline int check_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
if (!sel)
return -EINVAL;
return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
- check_cfg(sel->which, cfg);
+ check_state_pads(sd, sel->which, state) ? :
+ check_state_pad_stream(sd, state, sel->pad, sel->stream);
}
static int call_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
- return check_selection(sd, cfg, sel) ? :
- sd->ops->pad->get_selection(sd, cfg, sel);
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->get_selection(sd, state, sel);
}
static int call_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel)
{
- return check_selection(sd, cfg, sel) ? :
- sd->ops->pad->set_selection(sd, cfg, sel);
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->set_selection(sd, state, sel);
}
static inline int check_edid(struct v4l2_subdev *sd,
@@ -351,6 +391,59 @@ const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static struct v4l2_subdev_state *
+subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
+ unsigned int cmd, void *arg)
+{
+ u32 which;
+
+ switch (cmd) {
+ default:
+ return NULL;
+
+ case VIDIOC_SUBDEV_G_FMT:
+ case VIDIOC_SUBDEV_S_FMT: {
+ which = ((struct v4l2_subdev_format *)arg)->which;
+ break;
+ }
+ case VIDIOC_SUBDEV_G_CROP:
+ case VIDIOC_SUBDEV_S_CROP: {
+ which = ((struct v4l2_subdev_crop *)arg)->which;
+ break;
+ }
+ case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
+ which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
+ break;
+ }
+ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
+ which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
+ break;
+ }
+
+ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
+ which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
+ break;
+ }
+
+ case VIDIOC_SUBDEV_G_SELECTION:
+ case VIDIOC_SUBDEV_S_SELECTION: {
+ which = ((struct v4l2_subdev_selection *)arg)->which;
+ break;
+ }
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ which = ((struct v4l2_subdev_routing *)arg)->which;
+ break;
+ }
+ }
+
+ return which == V4L2_SUBDEV_FORMAT_TRY ?
+ subdev_fh->state :
+ v4l2_subdev_get_active_state(sd);
+}
+
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
@@ -358,15 +451,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_fh *vfh = file->private_data;
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
+ struct v4l2_subdev_state *state;
int rval;
+ state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
+
switch (cmd) {
case VIDIOC_SUBDEV_QUERYCAP: {
struct v4l2_subdev_capability *cap = arg;
memset(cap->reserved, 0, sizeof(cap->reserved));
cap->version = LINUX_VERSION_CODE;
- cap->capabilities = ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0;
+ cap->capabilities =
+ (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
+ ((sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED) ? V4L2_SUBDEV_CAP_MPLEXED : 0);
return 0;
}
@@ -482,7 +580,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
- return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->pad, format);
+ return v4l2_subdev_call(sd, pad, get_fmt, state, format);
}
case VIDIOC_SUBDEV_S_FMT: {
@@ -493,7 +591,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
- return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
+ return v4l2_subdev_call(sd, pad, set_fmt, state, format);
}
case VIDIOC_SUBDEV_G_CROP: {
@@ -507,7 +605,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh->pad, &sel);
+ sd, pad, get_selection, state, &sel);
crop->rect = sel.r;
@@ -529,7 +627,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sel.r = crop->rect;
rval = v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh->pad, &sel);
+ sd, pad, set_selection, state, &sel);
crop->rect = sel.r;
@@ -540,7 +638,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_mbus_code_enum *code = arg;
memset(code->reserved, 0, sizeof(code->reserved));
- return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
code);
}
@@ -548,7 +646,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_frame_size_enum *fse = arg;
memset(fse->reserved, 0, sizeof(fse->reserved));
- return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_frame_size, state,
fse);
}
@@ -573,7 +671,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_frame_interval_enum *fie = arg;
memset(fie->reserved, 0, sizeof(fie->reserved));
- return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->pad,
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
fie);
}
@@ -582,7 +680,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
- sd, pad, get_selection, subdev_fh->pad, sel);
+ sd, pad, get_selection, state, sel);
}
case VIDIOC_SUBDEV_S_SELECTION: {
@@ -593,7 +691,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
- sd, pad, set_selection, subdev_fh->pad, sel);
+ sd, pad, set_selection, state, sel);
}
case VIDIOC_G_EDID: {
@@ -657,6 +755,74 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
+ case VIDIOC_SUBDEV_G_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_krouting *krouting;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED))
+ return -ENOIOCTLCMD;
+
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ krouting = &state->routing;
+
+ if (routing->num_routes < krouting->num_routes) {
+ routing->num_routes = krouting->num_routes;
+ return -ENOSPC;
+ }
+
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ krouting->routes,
+ krouting->num_routes * sizeof(*krouting->routes));
+ routing->num_routes = krouting->num_routes;
+
+ return 0;
+ }
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_route *routes =
+ (struct v4l2_subdev_route *)(uintptr_t)routing->routes;
+ struct v4l2_subdev_krouting krouting = {};
+ unsigned int i;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED))
+ return -ENOIOCTLCMD;
+
+ if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routes[i];
+ const struct media_pad *pads = sd->entity.pads;
+
+ /* Do not check sink pad for source routes */
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) {
+ if (route->sink_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->sink_pad].flags &
+ MEDIA_PAD_FL_SINK))
+ return -EINVAL;
+ }
+
+ if (route->source_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->source_pad].flags &
+ MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+ }
+
+ krouting.num_routes = routing->num_routes;
+ krouting.routes = routes;
+
+ return v4l2_subdev_call(sd, pad, set_routing, state,
+ routing->which, &krouting);
+ }
+
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
@@ -740,6 +906,71 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.poll = subdev_poll,
};
+static int
+v4l2_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
+ const struct v4l2_subdev_krouting *routing)
+{
+ u32 num_configs = 0;
+ unsigned int i;
+ u32 format_idx = 0;
+
+ kvfree(stream_configs->configs);
+ stream_configs->configs = NULL;
+ stream_configs->num_configs = 0;
+
+ /* Count number of formats needed */
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ /*
+		 * Each route needs a format on both ends of the route, except
+		 * for source-only routes, which need only one format.
+ */
+ num_configs +=
+ (route->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE) ? 1 : 2;
+ }
+
+ if (num_configs) {
+ stream_configs->configs =
+ kvcalloc(num_configs, sizeof(*stream_configs->configs),
+ GFP_KERNEL);
+
+ if (!stream_configs->configs)
+ return -ENOMEM;
+
+ stream_configs->num_configs = num_configs;
+ }
+
+ /*
+	 * Fill in the 'pad' and 'stream' values for each item in the array
+	 * from the routing table.
+ */
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+ u32 idx;
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) {
+ idx = format_idx++;
+
+ stream_configs->configs[idx].pad = route->sink_pad;
+ stream_configs->configs[idx].stream = route->sink_stream;
+ }
+
+ idx = format_idx++;
+
+ stream_configs->configs[idx].pad = route->source_pad;
+ stream_configs->configs[idx].stream = route->source_stream;
+ }
+
+ return 0;
+}
+
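
To make the counting rule in v4l2_init_stream_configs() concrete, a hypothetical table (example_routes is illustrative only): one active sink-to-source route contributes a config for each end, and one active source-only route contributes a single config, so num_configs would be 3:

    /* Illustrative only: an active pad0 -> pad1 route (2 configs) plus an
     * active source-only route on pad 1 (1 config) => num_configs == 3. */
    static const struct v4l2_subdev_route example_routes[] = {
    	{
    		.sink_pad = 0,
    		.sink_stream = 0,
    		.source_pad = 1,
    		.source_stream = 0,
    		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
    	},
    	{
    		.source_pad = 1,
    		.source_stream = 1,
    		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE |
    			 V4L2_SUBDEV_ROUTE_FL_SOURCE,
    	},
    };
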
#ifdef CONFIG_MEDIA_CONTROLLER
int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
@@ -788,15 +1019,20 @@ EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
+ u32 stream,
struct v4l2_subdev_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_get_active_state(sd);
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
- return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ fmt->stream = stream;
+ return v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
}
WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
@@ -806,63 +1042,297 @@ v4l2_subdev_link_validate_get_format(struct media_pad *pad,
return -EINVAL;
}
-int v4l2_subdev_link_validate(struct media_link *link)
+static int cmp_u32(const void *a, const void *b)
{
- struct v4l2_subdev *sink;
- struct v4l2_subdev_format sink_fmt, source_fmt;
- int rval;
+ u32 a32 = *(u32 *)a;
+ u32 b32 = *(u32 *)b;
- rval = v4l2_subdev_link_validate_get_format(
- link->source, &source_fmt);
- if (rval < 0)
- return 0;
+ return a32 > b32 ? 1 : (a32 < b32 ? -1 : 0);
+}
- rval = v4l2_subdev_link_validate_get_format(
- link->sink, &sink_fmt);
- if (rval < 0)
+static int v4l2_link_validate_get_streams(struct media_link *link,
+ bool is_source, u32 *out_num_streams,
+ const u32 **out_streams,
+ bool *allocated)
+{
+ static const u32 default_streams[] = { 0 };
+ struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev *subdev;
+ u32 num_streams;
+ u32 *streams;
+ unsigned int i;
+ struct v4l2_subdev_state *state;
+
+ if (is_source)
+ subdev = media_entity_to_v4l2_subdev(link->source->entity);
+ else
+ subdev = media_entity_to_v4l2_subdev(link->sink->entity);
+
+ if (!(subdev->flags & V4L2_SUBDEV_FL_MULTIPLEXED)) {
+ *out_num_streams = 1;
+ *out_streams = default_streams;
+ *allocated = false;
return 0;
+ }
- sink = media_entity_to_v4l2_subdev(link->sink->entity);
+ state = v4l2_subdev_lock_active_state(subdev);
- rval = v4l2_subdev_call(sink, pad, link_validate, link,
- &source_fmt, &sink_fmt);
- if (rval != -ENOIOCTLCMD)
- return rval;
+ routing = &state->routing;
+
+ streams = kmalloc_array(routing->num_routes, sizeof(u32), GFP_KERNEL);
+
+ if (!streams) {
+ v4l2_subdev_unlock_state(state);
+ return -ENOMEM;
+ }
+
+ num_streams = 0;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+ int j;
+ u32 route_pad;
+ u32 route_stream;
+ u32 link_pad;
+
+ if (is_source) {
+ route_pad = route->source_pad;
+ route_stream = route->source_stream;
+ link_pad = link->source->index;
+ } else {
+ route_pad = route->sink_pad;
+ route_stream = route->sink_stream;
+ link_pad = link->sink->index;
+ }
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if (route_pad != link_pad)
+ continue;
+
+ /* look for duplicates... */
+ for (j = 0; j < num_streams; ++j) {
+ if (streams[j] == route_stream)
+ break;
+ }
- return v4l2_subdev_link_validate_default(
- sink, link, &source_fmt, &sink_fmt);
+ /* ...and drop the stream if we already have it */
+ if (j != num_streams)
+ continue;
+
+ streams[num_streams++] = route_stream;
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ sort(streams, num_streams, sizeof(u32), &cmp_u32, NULL);
+
+ *out_num_streams = num_streams;
+ *out_streams = streams;
+ *allocated = true;
+
+ return 0;
+}
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *sink_subdev =
+ media_entity_to_v4l2_subdev(link->sink->entity);
+ struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
+ u32 num_source_streams;
+ const u32 *source_streams;
+ bool source_allocated;
+ u32 num_sink_streams;
+ const u32 *sink_streams;
+ bool sink_allocated;
+ unsigned int sink_idx;
+ unsigned int source_idx;
+ int ret;
+
+ dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ ret = v4l2_link_validate_get_streams(link, true, &num_source_streams,
+ &source_streams,
+ &source_allocated);
+ if (ret)
+ return ret;
+
+ ret = v4l2_link_validate_get_streams(link, false, &num_sink_streams,
+ &sink_streams, &sink_allocated);
+ if (ret)
+ goto free_source;
+
+ /* It is ok to have more source streams than sink streams */
+ if (num_source_streams < num_sink_streams) {
+ dev_err(dev,
+ "Not enough source streams: %d < %d\n",
+ num_source_streams, num_sink_streams);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Validate source and sink stream formats */
+
+ source_idx = 0;
+
+ for (sink_idx = 0; sink_idx < num_sink_streams; ++sink_idx) {
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+ u32 stream;
+
+ stream = sink_streams[sink_idx];
+
+ for (; source_idx < num_source_streams; ++source_idx) {
+ if (source_streams[source_idx] == stream)
+ break;
+ }
+
+ if (source_idx == num_source_streams) {
+ dev_err(dev, "No source stream for sink stream %u\n",
+ stream);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
+ link->source->entity->name, link->source->index, stream,
+ link->sink->entity->name, link->sink->index, stream);
+
+ ret = v4l2_subdev_link_validate_get_format(link->source, stream,
+ &source_fmt);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->source->entity->name, link->source->index,
+ stream);
+ ret = 0;
+ continue;
+ }
+
+ ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
+ &sink_fmt);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->sink->entity->name, link->sink->index,
+ stream);
+ ret = 0;
+ continue;
+ }
+
+ /* TODO: add stream number to link_validate() */
+ ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (!ret)
+ continue;
+
+ if (ret != -ENOIOCTLCMD)
+ goto out;
+
+ ret = v4l2_subdev_link_validate_default(sink_subdev, link,
+ &source_fmt, &sink_fmt);
+
+ if (ret)
+ goto out;
+ }
+
+out:
+ if (sink_allocated)
+ kfree(sink_streams);
+
+free_source:
+ if (source_allocated)
+ kfree(source_streams);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
-struct v4l2_subdev_pad_config *
-v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd)
+bool v4l2_subdev_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
{
- struct v4l2_subdev_pad_config *cfg;
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct v4l2_subdev_krouting *routing;
+ unsigned int i;
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_active_state(sd);
+
+ routing = &state->routing;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
+ (route->source_pad == pad0 && route->sink_pad == pad1)) {
+ v4l2_subdev_unlock_state(state);
+ return true;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_has_route);
+
+struct v4l2_subdev_state *
+__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
+ struct lock_class_key *lock_key)
+{
+ struct v4l2_subdev_state *state;
int ret;
- if (!sd->entity.num_pads)
- return NULL;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
- cfg = kvmalloc_array(sd->entity.num_pads, sizeof(*cfg),
- GFP_KERNEL | __GFP_ZERO);
- if (!cfg)
- return NULL;
+ __mutex_init(&state->lock, lock_name, lock_key);
- ret = v4l2_subdev_call(sd, pad, init_cfg, cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- kvfree(cfg);
- return NULL;
+ /* Drivers that support streams do not need the legacy pad config */
+ if (!(sd->flags & V4L2_SUBDEV_FL_MULTIPLEXED) && sd->entity.num_pads) {
+ state->pads = kvmalloc_array(sd->entity.num_pads,
+ sizeof(*state->pads),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!state->pads) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
- return cfg;
+ ret = v4l2_subdev_call(sd, pad, init_cfg, state);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto err;
+
+ return state;
+
+err:
+	/* state is always valid here and kvfree() tolerates NULL */
+	kvfree(state->pads);
+
+ kfree(state);
+
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_pad_config);
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
-void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg)
+void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
- kvfree(cfg);
+ if (!state)
+ return;
+
+ mutex_destroy(&state->lock);
+
+ kvfree(state->routing.routes);
+ kvfree(state->stream_configs.configs);
+ kvfree(state->pads);
+ kfree(state);
}
-EXPORT_SYMBOL_GPL(v4l2_subdev_free_pad_config);
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
+
#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
@@ -891,3 +1361,203 @@ void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
+
+int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
+ struct lock_class_key *key)
+{
+ struct v4l2_subdev_state *state;
+
+ state = __v4l2_subdev_state_alloc(sd, name, key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ sd->state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
+
+void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
+{
+ __v4l2_subdev_state_free(sd->state);
+ sd->state = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
+
+struct v4l2_subdev_state *v4l2_subdev_lock_active_state(struct v4l2_subdev *sd)
+{
+ mutex_lock(&sd->state->lock);
+
+ return sd->state;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_lock_active_state);
+
+void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
+{
+ mutex_lock(&state->lock);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_lock_state);
+
+void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
+{
+ mutex_unlock(&state->lock);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_unlock_state);
+
+int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_krouting *dst = &state->routing;
+ const struct v4l2_subdev_krouting *src = routing;
+
+ lockdep_assert_held(&state->lock);
+
+ kvfree(dst->routes);
+ dst->routes = NULL;
+ dst->num_routes = 0;
+
+ if (src->num_routes > 0) {
+ dst->routes = kvmalloc_array(src->num_routes,
+ sizeof(*src->routes), GFP_KERNEL);
+ if (!dst->routes)
+ return -ENOMEM;
+
+ memcpy(dst->routes, src->routes,
+ src->num_routes * sizeof(*src->routes));
+ dst->num_routes = src->num_routes;
+ }
+
+ return v4l2_init_stream_configs(&state->stream_configs, dst);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
+
+int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing,
+ const struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+ int ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, routing);
+ if (ret)
+ return ret;
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i)
+ stream_configs->configs[i].fmt = *fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
+
+struct v4l2_mbus_framefmt *
+v4l2_state_get_stream_format(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ lockdep_assert_held(&state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_state_get_stream_format);
+
+int v4l2_state_find_opposite_end(struct v4l2_subdev_krouting *routing, u32 pad,
+ u32 stream, u32 *other_pad, u32 *other_stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (route->source_pad == pad &&
+ route->source_stream == stream) {
+ *other_pad = route->sink_pad;
+ *other_stream = route->sink_stream;
+ return 0;
+ }
+
+ if (route->sink_pad == pad && route->sink_stream == stream) {
+ *other_pad = route->source_pad;
+ *other_stream = route->source_stream;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_state_find_opposite_end);
+
+struct v4l2_mbus_framefmt *
+v4l2_state_get_opposite_stream_format(struct v4l2_subdev_state *state, u32 pad,
+ u32 stream)
+{
+ u32 other_pad, other_stream;
+ int ret;
+
+ ret = v4l2_state_find_opposite_end(&state->routing, pad, stream,
+ &other_pad, &other_stream);
+ if (ret)
+ return NULL;
+
+ return v4l2_state_get_stream_format(state, other_pad, other_stream);
+}
+EXPORT_SYMBOL_GPL(v4l2_state_get_opposite_stream_format);
+
+int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ v4l2_subdev_lock_state(state);
+
+ fmt = v4l2_state_get_stream_format(state, format->pad, format->stream);
+ if (!fmt) {
+ v4l2_subdev_unlock_state(state);
+ return -EINVAL;
+ }
+
+ format->format = *fmt;
+
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
+
+int v4l2_routing_simple_verify(const struct v4l2_subdev_krouting *routing)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routing->routes[i];
+
+ for (j = i + 1; j < routing->num_routes; ++j) {
+ const struct v4l2_subdev_route *r = &routing->routes[j];
+
+ if (route->sink_pad == r->sink_pad &&
+ route->sink_stream == r->sink_stream)
+ return -EINVAL;
+
+ if (route->source_pad == r->source_pad &&
+ route->source_stream == r->source_stream)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_routing_simple_verify);
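
Taken together, the helpers above suggest a driver-side pattern for a multiplexed subdev's set_routing operation: verify the table, then store it while resetting every resulting stream to a default format. A minimal sketch, assuming the driver sets V4L2_SUBDEV_FL_MULTIPLEXED and that set_routing has the signature used by the ioctl dispatch above (example_set_routing and the default format values are illustrative):

    #include <linux/media-bus-format.h>
    #include <media/v4l2-subdev.h>

    /* Hypothetical default applied to every stream after a routing change */
    static const struct v4l2_mbus_framefmt example_fmt_default = {
    	.width = 1920,
    	.height = 1080,
    	.code = MEDIA_BUS_FMT_UYVY8_1X16,
    	.field = V4L2_FIELD_NONE,
    	.colorspace = V4L2_COLORSPACE_SRGB,
    };

    static int example_set_routing(struct v4l2_subdev *sd,
    			       struct v4l2_subdev_state *state,
    			       enum v4l2_subdev_format_whence which,
    			       struct v4l2_subdev_krouting *routing)
    {
    	int ret;

    	/* Reject tables where two routes share a sink or source stream */
    	ret = v4l2_routing_simple_verify(routing);
    	if (ret)
    		return ret;

    	/*
    	 * 'which' selects TRY vs ACTIVE; the core already passed the
    	 * matching state, so only the lock is needed here (the set_routing
    	 * helper asserts it is held).
    	 */
    	v4l2_subdev_lock_state(state);

    	/* Copy the routing table and reset every stream to the default */
    	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing,
    					       &example_fmt_default);

    	v4l2_subdev_unlock_state(state);

    	return ret;
    }
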
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index cc2c83e1accf..a086e4a644d5 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -103,8 +103,8 @@ config TI_EMIF
temperature changes
config OMAP_GPMC
- bool "Texas Instruments OMAP SoC GPMC driver" if COMPILE_TEST
- depends on OF_ADDRESS
+ bool "Texas Instruments OMAP SoC GPMC driver"
+ depends on OF_ADDRESS || COMPILE_TEST
select GPIOLIB
help
This driver is for the General Purpose Memory Controller (GPMC)
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index f80c2ea39ca4..d9bf1c2ac319 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -233,6 +233,7 @@ struct gpmc_device {
struct irq_chip irq_chip;
struct gpio_chip gpio_chip;
int nirqs;
+ struct resource *data;
};
static struct irq_domain *gpmc_irq_domain;
@@ -1452,12 +1453,18 @@ static void gpmc_mem_exit(void)
}
}
-static void gpmc_mem_init(void)
+static void gpmc_mem_init(struct gpmc_device *gpmc)
{
int cs;
- gpmc_mem_root.start = GPMC_MEM_START;
- gpmc_mem_root.end = GPMC_MEM_END;
+ if (!gpmc->data) {
+ /* All legacy devices have same data IO window */
+ gpmc_mem_root.start = GPMC_MEM_START;
+ gpmc_mem_root.end = GPMC_MEM_END;
+ } else {
+ gpmc_mem_root.start = gpmc->data->start;
+ gpmc_mem_root.end = gpmc->data->end;
+ }
	/* Reserve all regions that have been set up by the bootloader */
for (cs = 0; cs < gpmc_cs_num; cs++) {
@@ -1884,6 +1891,7 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
{ .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
{ .compatible = "ti,am3352-gpmc" }, /* am335x devices */
+ { .compatible = "ti,am64-gpmc" },
{ }
};
@@ -2171,7 +2179,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
- if (of_device_is_compatible(child, "ti,omap2-nand")) {
+ if (of_device_is_compatible(child, "ti,omap2-nand") ||
+ of_node_name_eq(child, "nand")) {
/* NAND specific setup */
val = 8;
of_property_read_u32(child, "nand-bus-width", &val);
@@ -2398,13 +2407,25 @@ static int gpmc_probe(struct platform_device *pdev)
gpmc->dev = &pdev->dev;
platform_set_drvdata(pdev, gpmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res) {
+ /* legacy DT */
+ gpmc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+ } else {
+ gpmc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "data");
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't get data reg resource\n");
+ return -ENOENT;
+ }
- gpmc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gpmc_base))
- return PTR_ERR(gpmc_base);
+ gpmc->data = res;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -2458,7 +2479,7 @@ static int gpmc_probe(struct platform_device *pdev)
dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
GPMC_REVISION_MINOR(l));
- gpmc_mem_init();
+ gpmc_mem_init(gpmc);
rc = gpmc_gpio_init(gpmc);
if (rc)
goto gpio_init_failed;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b8847ae04d93..fe5215fc65cb 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1441,7 +1441,7 @@ config MFD_OMAP_USB_HOST
OMAP USB Host drivers.
config MFD_PALMAS
- bool "TI Palmas series chips"
+ tristate "TI Palmas series chips"
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1566,6 +1566,20 @@ config MFD_TI_LP873X
This driver can also be built as a module. If so, the module
will be called lp873x.
+config MFD_TPS6594X
+ tristate "TI TPS6594X Power Management IC"
+ depends on I2C && OF
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ If you say yes here then you get support for the TPS6594X series of
+ Power Management Integrated Circuits (PMIC).
+ These include voltage regulators, an RTC, and configurable
+ General Purpose Outputs (GPO) that are used in portable devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps6594x.
+
config MFD_TI_LP87565
tristate "TI LP87565 Power Management IC"
depends on I2C && OF
@@ -1595,6 +1609,21 @@ config MFD_TPS65218
This driver can also be built as a module. If so, the module
will be called tps65218.
+config MFD_TPS65219
+ tristate "TI TPS65219 Power Management chips"
+ depends on I2C && OF
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the TPS65219 series of
+ Power Management chips.
+ These include voltage regulators, GPIOs and other features
+ that are often used in portable devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps65219.
+
config MFD_TPS6586X
bool "TI TPS6586x Power Management chips"
depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 1780019d2474..3a091ce78b5e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -101,10 +101,12 @@ obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MFD_TPS65086) += tps65086.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
obj-$(CONFIG_MFD_TPS65218) += tps65218.o
+obj-$(CONFIG_MFD_TPS65219) += tps65219.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o
obj-$(CONFIG_MFD_TPS65912) += tps65912-core.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
+obj-$(CONFIG_MFD_TPS6594X) += tps6594x.o
obj-$(CONFIG_MFD_TPS68470) += tps68470.o
obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
new file mode 100644
index 000000000000..1440ed9bc2fc
--- /dev/null
+++ b/drivers/mfd/tps65219.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for TPS65219 Integrated power management chipsets
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+/*
+ * This implementation is derived from the tps65218 driver authored by
+ * J Keerthy <j-keerthy@ti.com>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65219.h>
+
+static struct i2c_client *tps65219_i2c_client;
+
+/**
+ * tps65219_warm_reset() - issue a warm reset to the SoC.
+ *
+ * @tps: Device to write to.
+ */
+static int tps65219_warm_reset(struct tps65219 *tps)
+{
+ dev_dbg(tps->dev, "warm reset");
+ return regmap_update_bits(tps->regmap, TPS65219_REG_MFP_CTRL,
+ TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK,
+ TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK);
+}
+
+/**
+ * tps65219_cold_reset() - issue a cold reset to the SoC.
+ *
+ * @tps: Device to write to.
+ */
+static int tps65219_cold_reset(struct tps65219 *tps)
+{
+ dev_dbg(tps->dev, "cold reset");
+ return regmap_update_bits(tps->regmap, TPS65219_REG_MFP_CTRL,
+ TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK,
+ TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK);
+}
+
+/**
+ * tps65219_soft_shutdown() - request a software shutdown of the SoC supplies.
+ *
+ * @tps: Device to write to.
+ */
+static int tps65219_soft_shutdown(struct tps65219 *tps)
+{
+ dev_dbg(tps->dev, "software shutdown");
+ return regmap_update_bits(tps->regmap, TPS65219_REG_MFP_CTRL,
+ TPS65219_MFP_I2C_OFF_REQ_MASK,
+ TPS65219_MFP_I2C_OFF_REQ_MASK);
+}
+
+/**
+ * pmic_rst_restart() - trigger a tps65219 warm or cold reset of the SoC.
+ *
+ * Triggered via the restart notifier chain.
+ */
+static int pmic_rst_restart(struct notifier_block *this,
+ unsigned long reboot_mode, void *cmd)
+{
+ struct tps65219 *tps;
+
+ tps = container_of(this, struct tps65219, nb);
+ if (!tps) {
+ pr_err("%s: pointer to tps65219 is invalid\n", __func__);
+ return -ENODEV;
+ }
+ if (reboot_mode == REBOOT_WARM)
+ tps65219_warm_reset(tps);
+ else
+ tps65219_cold_reset(tps);
+ return NOTIFY_DONE;
+}
+
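+/*
+ * Priority 200 registers this handler above the default restart
+ * handler priority (128), so the PMIC-driven reset is attempted
+ * before generic fallback handlers.
+ */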
+static struct notifier_block pmic_rst_restart_nb = {
+ .notifier_call = pmic_rst_restart,
+ .priority = 200,
+};
+
+/**
+ * pmic_do_poweroff() - trigger the tps65219 regulators' power-off sequence.
+ */
+static void pmic_do_poweroff(void)
+{
+ struct tps65219 *tps;
+
+ tps = dev_get_drvdata(&tps65219_i2c_client->dev);
+ tps65219_soft_shutdown(tps);
+}
+
+static const struct resource tps65219_pwrbutton_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_PB_FALLING_EDGE_DETECT, "falling"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_PB_RISING_EDGE_DETECT, "rising"),
+};
+
+static const struct resource tps65219_regulator_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_SCG, "LDO3_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_OC, "LDO3_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_UV, "LDO3_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO4_SCG, "LDO4_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO4_OC, "LDO4_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO4_UV, "LDO4_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_SCG, "LDO1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_OC, "LDO1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_UV, "LDO1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_SCG, "LDO2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_OC, "LDO2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_UV, "LDO2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_SCG, "BUCK3_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_OC, "BUCK3_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_NEG_OC, "BUCK3_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_UV, "BUCK3_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_SCG, "BUCK1_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_OC, "BUCK1_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_NEG_OC, "BUCK1_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_UV, "BUCK1_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_SCG, "BUCK2_SCG"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_OC, "BUCK2_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_NEG_OC, "BUCK2_NEG_OC"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_UV, "BUCK2_UV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV, "BUCK1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV, "BUCK2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV, "BUCK3_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_RV, "LDO1_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_RV, "LDO2_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_RV, "LDO3_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO4_RV, "LDO4_RV"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK1_RV_SD, "BUCK1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK2_RV_SD, "BUCK2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_BUCK3_RV_SD, "BUCK3_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO1_RV_SD, "LDO1_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO2_RV_SD, "LDO2_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO3_RV_SD, "LDO3_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_LDO4_RV_SD, "LDO4_RV_SD"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_TIMEOUT, "TIMEOUT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_3_WARM, "SENSOR_3_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_WARM, "SENSOR_2_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_WARM, "SENSOR_1_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_WARM, "SENSOR_0_WARM"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_3_HOT, "SENSOR_3_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_2_HOT, "SENSOR_2_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_1_HOT, "SENSOR_1_HOT"),
+ DEFINE_RES_IRQ_NAMED(TPS65219_INT_SENSOR_0_HOT, "SENSOR_0_HOT"),
+};
+
+#define TPS65219_MAX_CELLS 2
+
+static const struct mfd_cell tps65219_regulator_cell = {
+ .name = "tps65219-regulator",
+ .resources = tps65219_regulator_resources,
+ .num_resources = ARRAY_SIZE(tps65219_regulator_resources),
+};
+
+static const struct mfd_cell tps65219_pwrbutton_cell = {
+ .name = "tps65219-pwrbutton",
+ .resources = tps65219_pwrbutton_resources,
+ .num_resources = ARRAY_SIZE(tps65219_pwrbutton_resources),
+};
+
+static const struct regmap_config tps65219_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS65219_REG_FACTORY_CONFIG_2,
+};
+
+/*
+ * Mapping of main IRQ register bits to sub-IRQ register offsets so that we can
+ * access the correct sub-IRQ registers based on the bits set in the main IRQ
+ * register.
+ */
+/* Timeout Residual Voltage Shutdown */
+static unsigned int bit0_offsets[] = {TPS65219_TO_RV_POS};
+static unsigned int bit1_offsets[] = {TPS65219_RV_POS}; /* Residual Voltage */
+static unsigned int bit2_offsets[] = {TPS65219_SYS_POS}; /* System */
+static unsigned int bit3_offsets[] = {TPS65219_BUCK_1_2_POS}; /* Buck 1-2 */
+static unsigned int bit4_offsets[] = {TPS65219_BUCK_3_POS}; /* Buck 3 */
+static unsigned int bit5_offsets[] = {TPS65219_LDO_1_2_POS}; /* LDO 1-2 */
+static unsigned int bit6_offsets[] = {TPS65219_LDO_3_4_POS}; /* LDO 3-4 */
+static unsigned int bit7_offsets[] = {TPS65219_PB_POS}; /* Power Button */
+
+static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit5_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit6_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
+};
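+/*
+ * Conceptually, the regmap-irq core reads TPS65219_REG_INT_SOURCE and,
+ * for each main bit that is set, reads the sub-IRQ status register
+ * derived from status_base and the offset listed above. For example, a
+ * BUCK1 over-current sets main bit 3, so the status register at offset
+ * TPS65219_BUCK_1_2_POS is read and its pending nested IRQs are fired.
+ */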
+
+static struct regmap_irq tps65219_irqs[] = {
+ REGMAP_IRQ_REG(TPS65219_INT_LDO3_SCG, TPS65219_LDO_3_4_POS,
+ TPS65219_INT_LDO3_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO3_OC,
+ TPS65219_LDO_3_4_POS, TPS65219_INT_LDO3_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO3_UV, TPS65219_LDO_3_4_POS,
+ TPS65219_INT_LDO3_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO4_SCG, TPS65219_LDO_3_4_POS,
+ TPS65219_INT_LDO4_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO4_OC, TPS65219_LDO_3_4_POS,
+ TPS65219_INT_LDO4_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO4_UV, TPS65219_LDO_3_4_POS,
+ TPS65219_INT_LDO4_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO1_SCG,
+ TPS65219_LDO_1_2_POS, TPS65219_INT_LDO1_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO1_OC, TPS65219_LDO_1_2_POS,
+ TPS65219_INT_LDO1_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO1_UV, TPS65219_LDO_1_2_POS,
+ TPS65219_INT_LDO1_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO2_SCG, TPS65219_LDO_1_2_POS,
+ TPS65219_INT_LDO2_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO2_OC, TPS65219_LDO_1_2_POS,
+ TPS65219_INT_LDO2_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO2_UV, TPS65219_LDO_1_2_POS,
+ TPS65219_INT_LDO2_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_SCG, TPS65219_BUCK_3_POS,
+ TPS65219_INT_BUCK3_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_OC, TPS65219_BUCK_3_POS,
+ TPS65219_INT_BUCK3_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_NEG_OC, TPS65219_BUCK_3_POS,
+ TPS65219_INT_BUCK3_NEG_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_UV, TPS65219_BUCK_3_POS,
+ TPS65219_INT_BUCK3_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_SCG, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK2_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_OC, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK2_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_NEG_OC, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK2_NEG_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_UV, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK2_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_SCG, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK1_SCG_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_OC, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK1_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_NEG_OC, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK1_NEG_OC_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_UV, TPS65219_BUCK_1_2_POS,
+ TPS65219_INT_BUCK1_UV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_3_WARM,
+ TPS65219_SYS_POS, TPS65219_INT_SENSOR_3_WARM_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_WARM, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_2_WARM_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_WARM, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_1_WARM_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_WARM, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_0_WARM_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_3_HOT, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_3_HOT_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_2_HOT, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_2_HOT_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_1_HOT, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_1_HOT_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_SENSOR_0_HOT, TPS65219_SYS_POS,
+ TPS65219_INT_SENSOR_0_HOT_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV, TPS65219_RV_POS,
+ TPS65219_INT_BUCK1_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV, TPS65219_RV_POS,
+ TPS65219_INT_BUCK2_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV, TPS65219_RV_POS,
+ TPS65219_INT_BUCK3_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO1_RV, TPS65219_RV_POS,
+ TPS65219_INT_LDO1_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO2_RV, TPS65219_RV_POS,
+ TPS65219_INT_LDO2_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO3_RV, TPS65219_RV_POS,
+ TPS65219_INT_LDO3_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO4_RV, TPS65219_RV_POS,
+ TPS65219_INT_LDO4_RV_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK1_RV_SD,
+ TPS65219_TO_RV_POS, TPS65219_INT_BUCK1_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK2_RV_SD,
+ TPS65219_TO_RV_POS, TPS65219_INT_BUCK2_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_BUCK3_RV_SD, TPS65219_TO_RV_POS,
+ TPS65219_INT_BUCK3_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO1_RV_SD, TPS65219_TO_RV_POS,
+ TPS65219_INT_LDO1_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO2_RV_SD, TPS65219_TO_RV_POS,
+ TPS65219_INT_LDO2_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO3_RV_SD,
+ TPS65219_TO_RV_POS, TPS65219_INT_LDO3_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_LDO4_RV_SD, TPS65219_TO_RV_POS,
+ TPS65219_INT_LDO4_RV_SD_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_TIMEOUT, TPS65219_TO_RV_POS,
+ TPS65219_INT_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_PB_FALLING_EDGE_DETECT,
+ TPS65219_PB_POS, TPS65219_INT_PB_FALLING_EDGE_DET_MASK),
+ REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_PB_POS,
+ TPS65219_INT_PB_RISING_EDGE_DET_MASK),
+};
+
+static struct regmap_irq_chip tps65219_irq_chip = {
+ .name = "tps65219_irq",
+ .main_status = TPS65219_REG_INT_SOURCE,
+ .num_main_regs = 1,
+ .num_main_status_bits = 8,
+ .irqs = tps65219_irqs,
+ .num_irqs = ARRAY_SIZE(tps65219_irqs),
+ .status_base = TPS65219_REG_INT_LDO_3_4,
+ .ack_base = TPS65219_REG_INT_LDO_3_4,
+ .clear_ack = 1,
+ .num_regs = 8,
+ .sub_reg_offsets = &tps65219_sub_irq_offsets[0],
+};
+
+static const struct of_device_id of_tps65219_match_table[] = {
+ { .compatible = "ti,tps65219", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tps65219_match_table);
+
+static int tps65219_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct tps65219 *tps;
+ int ret;
+ unsigned int chipid;
+ bool pwr_button;
+ bool sys_pwr;
+ struct mfd_cell cells[TPS65219_MAX_CELLS];
+ int nr_cells = 0;
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+ tps->regmap = devm_regmap_init_i2c(client, &tps65219_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ ret = PTR_ERR(tps->regmap);
+ dev_err(tps->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65219_irq_chip,
+ &tps->irq_data);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid);
+ if (ret) {
+ dev_err(tps->dev, "Failed to read device ID: %d\n", ret);
+ return ret;
+ }
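+ /* The ID value is not checked further; the read mainly confirms the PMIC responds */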
+
+ memcpy(&cells[nr_cells++], &tps65219_regulator_cell,
+ sizeof(tps65219_regulator_cell));
+ pwr_button = of_property_read_bool(tps->dev->of_node, "power-button");
+ if (pwr_button)
+ memcpy(&cells[nr_cells++], &tps65219_pwrbutton_cell,
+ sizeof(tps65219_pwrbutton_cell));
+
+ ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, cells,
+ nr_cells, NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret) {
+ dev_err(tps->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
+
+ tps->nb = pmic_rst_restart_nb;
+ ret = register_restart_handler(&tps->nb);
+ if (ret) {
+ dev_err(tps->dev, "%s: cannot register restart handler, %d\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+
+ sys_pwr = of_property_read_bool(tps->dev->of_node,
+ "system-power-controller");
+
+ if (sys_pwr) {
+ if (pm_power_off)
+ dev_warn(tps->dev, "Setup as system-power-controller but pm_power_off function already registered, overwriting\n");
+ tps65219_i2c_client = client;
+ pm_power_off = &pmic_do_poweroff;
+ }
+ return ret;
+}
+
+static int tps65219_remove(struct i2c_client *client)
+{
+ struct tps65219 *tps = i2c_get_clientdata(client);
+
+ if (tps65219_i2c_client == client) {
+ pm_power_off = NULL;
+ tps65219_i2c_client = NULL;
+ }
+
+ return unregister_restart_handler(&tps->nb);
+}
+
+static const struct i2c_device_id tps65219_id_table[] = {
+ { "tps65219", TPS65219 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps65219_id_table);
+
+static struct i2c_driver tps65219_driver = {
+ .driver = {
+ .name = "tps65219",
+ .of_match_table = of_tps65219_match_table,
+ },
+ .probe = tps65219_probe,
+ .id_table = tps65219_id_table,
+ .remove = tps65219_remove,
+};
+
+module_i2c_driver(tps65219_driver);
+
+MODULE_AUTHOR("Jerome Neanne <jneanne@baylibre.com>");
+MODULE_DESCRIPTION("TPS65219 chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps6594x.c b/drivers/mfd/tps6594x.c
new file mode 100644
index 000000000000..0de7946b6b8b
--- /dev/null
+++ b/drivers/mfd/tps6594x.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for tps6594x PMIC chips
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Keerthy <j-keerthy@ti.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6594x.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+static const struct regmap_config tps6594x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6594X_REG_MAX,
+};
+
+static const struct mfd_cell tps6594x_cells[] = {
+ { .name = "tps6594x-gpio" },
+ { .name = "tps6594x-rtc" },
+};
+
+static struct tps6594x *tps;
+
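+/*
+ * Power-off sequence: arm both NSLEEP triggers, clear pending startup
+ * and misc interrupt sources, mask NSLEEP1/NSLEEP2 in CONFIG_1, then
+ * request the OFF state through the I2C0 FSM trigger.
+ */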
+static void tps6594x_power_off(void)
+{
+ regmap_write(tps->regmap, TPS6594X_FSM_NSLEEP_TRIGGERS,
+ TPS6594X_FSM_NSLEEP_NSLEEP1B | TPS6594X_FSM_NSLEEP_NSLEEP2B);
+
+ regmap_write(tps->regmap, TPS6594X_INT_STARTUP,
+ TPS6594X_INT_STARTUP_NPWRON_START_INT |
+ TPS6594X_INT_STARTUP_ENABLE_INT | TPS6594X_INT_STARTUP_RTC_INT |
+ TPS6594X_INT_STARTUP_SOFT_REBOOT_INT);
+
+ regmap_write(tps->regmap, TPS6594X_INT_MISC,
+ TPS6594X_INT_MISC_BIST_PASS_INT |
+ TPS6594X_INT_MISC_EXT_CLK_INT | TPS6594X_INT_MISC_TWARN_INT);
+
+ regmap_write(tps->regmap, TPS6594X_CONFIG_1,
+ TPS6594X_CONFIG_NSLEEP1_MASK | TPS6594X_CONFIG_NSLEEP2_MASK);
+
+ regmap_write(tps->regmap, TPS6594X_FSM_I2C_TRIGGERS,
+ TPS6594X_FSM_I2C_TRIGGERS_I2C0);
+}
+
+static int tps6594x_probe(struct i2c_client *client)
+{
+ struct tps6594x *ddata;
+ struct device_node *node = client->dev.of_node;
+ unsigned int otpid;
+ int ret;
+
+ ddata = devm_kzalloc(&client->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &client->dev;
+
+ ddata->regmap = devm_regmap_init_i2c(client, &tps6594x_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(ddata->dev,
+ "Failed to initialize register map: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(ddata->regmap, TPS6594X_REG_DEV_REV, &otpid);
+ if (ret) {
+ dev_err(ddata->dev, "Failed to read OTP ID\n");
+ return ret;
+ }
+
+ ddata->rev = otpid;
+ i2c_set_clientdata(client, ddata);
+
+ ret = mfd_add_devices(ddata->dev, PLATFORM_DEVID_AUTO, tps6594x_cells,
+ ARRAY_SIZE(tps6594x_cells), NULL, 0, NULL);
+ if (ret) {
+ dev_err(ddata->dev, "Failed to register cells\n");
+ return ret;
+ }
+
+ tps = ddata;
+
+ if (of_property_read_bool(node, "ti,system-power-controller"))
+ pm_power_off = tps6594x_power_off;
+
+ return 0;
+}
+
+static const struct of_device_id of_tps6594x_match_table[] = {
+ { .compatible = "ti,tps6594x", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tps6594x_match_table);
+
+static const struct i2c_device_id tps6594x_id_table[] = {
+ { "tps6594x", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps6594x_id_table);
+
+static struct i2c_driver tps6594x_driver = {
+ .driver = {
+ .name = "tps6594x",
+ .of_match_table = of_tps6594x_match_table,
+ },
+ .probe_new = tps6594x_probe,
+ .id_table = tps6594x_id_table,
+};
+module_i2c_driver(tps6594x_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("TPS6594X PMIC device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0d8099..501dde250309 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -423,6 +423,13 @@ config SRAM
config SRAM_EXEC
bool
+config SRAM_DMA_HEAP
+ bool "Export on-chip SRAM pools using DMA-Heaps"
+ depends on DMABUF_HEAPS && SRAM
+ help
+ This driver allows the export of on-chip SRAM marked as exportable
+ to userspace using the DMA-Heaps interface.
+
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
@@ -466,6 +473,11 @@ config HISI_HIKEY_USB
switching between the dual-role USB-C port and the USB-A host ports
using only one USB controller.
+config DMA_BUF_PHYS
+ tristate "DMA-BUF physical address user-space exporter"
+ help
+ Exports the CPU physical address of a DMA-BUF to user space.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e73330..4b0ac87c7631 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
+obj-$(CONFIG_SRAM_DMA_HEAP) += sram-dma-heap.o
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
@@ -57,3 +58,4 @@ obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
+obj-$(CONFIG_DMA_BUF_PHYS) += dma-buf-phys.o
diff --git a/drivers/misc/dma-buf-phys.c b/drivers/misc/dma-buf-phys.c
new file mode 100644
index 000000000000..c38cfa343dd7
--- /dev/null
+++ b/drivers/misc/dma-buf-phys.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA-BUF contiguous buffer physical address user-space exporter
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis <afd@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <uapi/linux/dma_buf_phys.h>
+
+#define DEVICE_NAME "dma-buf-phys"
+
+struct dma_buf_phys_priv {
+ struct miscdevice miscdev;
+};
+
+struct dma_buf_phys_file {
+ struct device *dev;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+};
+
+static int dma_buf_phys_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct device *dev = miscdev->this_device;
+ struct dma_buf_phys_file *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+ file->private_data = (void *)priv;
+
+ return 0;
+}
+
+static int dma_buf_phys_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf_phys_file *priv = file->private_data;
+
+ if (priv->attachment && priv->sgt)
+ dma_buf_unmap_attachment(priv->attachment, priv->sgt, DMA_BIDIRECTIONAL);
+ if (priv->dma_buf && priv->attachment)
+ dma_buf_detach(priv->dma_buf, priv->attachment);
+ if (priv->dma_buf)
+ dma_buf_put(priv->dma_buf);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int dma_buf_phys_convert(struct dma_buf_phys_file *priv, int fd, u64 *phys)
+{
+ struct device *dev = priv->dev;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ /* Attach as the parent device as it will have the correct DMA ops set */
+ attachment = dma_buf_attach(dma_buf, dev->parent);
+ if (IS_ERR(attachment)) {
+ ret = PTR_ERR(attachment);
+ goto fail_put;
+ }
+
+ sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ /* Without PAT only physically contiguous buffers can be supported */
+ if (sgt->orig_nents != 1) {
+ dev_err(dev, "DMA-BUF not contiguous\n");
+ ret = -EINVAL;
+ goto fail_unmap;
+ }
+
+ dma_addr = sg_dma_address(sgt->sgl);
+
+ *phys = dma_addr;
+
+ priv->dma_buf = dma_buf;
+ priv->attachment = attachment;
+ priv->sgt = sgt;
+
+ return 0;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attachment);
+fail_put:
+ dma_buf_put(dma_buf);
+
+ return ret;
+}
+
+static long dma_buf_phys_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct dma_buf_phys_file *priv = file->private_data;
+
+ switch (cmd) {
+ case DMA_BUF_PHYS_IOC_CONVERT:
+ {
+ struct dma_buf_phys_data data;
+ int ret;
+
+ /*
+ * TODO: this should likely be properly serialized, but I
+ * see no reason this file would ever need to be shared.
+ */
+ /* one attachment per file */
+ if (priv->dma_buf)
+ return -EBUSY;
+
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ ret = dma_buf_phys_convert(priv, data.fd, &data.phys);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
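+/*
+ * Sketch of the intended user-space usage (illustrative only, assuming
+ * the uapi header exposes struct dma_buf_phys_data with the 'fd' and
+ * 'phys' fields used above):
+ *
+ *	struct dma_buf_phys_data data = { .fd = dmabuf_fd };
+ *	int conv = open("/dev/dma-buf-phys", O_RDWR);
+ *	ioctl(conv, DMA_BUF_PHYS_IOC_CONVERT, &data);
+ *	// data.phys now holds the physical address; keep 'conv' open,
+ *	// since closing it drops the attachment backing that address
+ */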
+
+static const struct file_operations dma_buf_phys_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_buf_phys_open,
+ .release = dma_buf_phys_release,
+ .unlocked_ioctl = dma_buf_phys_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_buf_phys_ioctl,
+#endif
+};
+
+static int dma_buf_phys_probe(struct platform_device *pdev)
+{
+ struct dma_buf_phys_priv *priv;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+
+ /* No DMA restrictions */
+ dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s", DEVICE_NAME);
+ priv->miscdev.fops = &dma_buf_phys_fops;
+ priv->miscdev.parent = dev;
+ err = misc_register(&priv->miscdev);
+ if (err) {
+ dev_err(dev, "unable to register DMA-BUF to Phys misc device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dma_buf_phys_remove(struct platform_device *pdev)
+{
+ struct dma_buf_phys_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&priv->miscdev);
+
+ return 0;
+}
+
+static const struct of_device_id dma_buf_phys_of_match[] = {
+ { .compatible = "ti,dma-buf-phys", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dma_buf_phys_of_match);
+
+static struct platform_driver dma_buf_phys_driver = {
+ .probe = dma_buf_phys_probe,
+ .remove = dma_buf_phys_remove,
+ .driver = {
+ .name = "dma_buf_phys",
+ .of_match_table = dma_buf_phys_of_match,
+ }
+};
+module_platform_driver(dma_buf_phys_driver);
+
+MODULE_AUTHOR("Andrew Davis <afd@ti.com>");
+MODULE_DESCRIPTION("DMA-BUF contiguous buffer physical address user-space exporter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 6e5f544c9c73..80114f4c80ad 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -528,3 +528,4 @@ MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_ALIAS("spi:93xx46");
MODULE_ALIAS("spi:eeprom-93xx46");
+MODULE_ALIAS("spi:93lc46b");
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 48eec5fe7397..da5bed1588c4 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -68,8 +68,9 @@
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
-#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_TI_J7200 0xb00f
+#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
@@ -80,6 +81,9 @@
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
+#define is_j721e_pci_dev(pdev) \
+ ((pdev)->device == PCI_DEVICE_ID_TI_J721E)
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -833,9 +837,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
- err = -EINVAL;
- goto err_disable_irq;
+ if (!(is_am654_pci_dev(pdev) || is_j721e_pci_dev(pdev))) {
+ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
+ err = -EINVAL;
+ goto err_disable_irq;
+ }
}
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
@@ -873,9 +879,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
goto err_ida_remove;
}
- if (!pci_endpoint_test_request_irq(test)) {
- err = -EINVAL;
- goto err_kfree_test_name;
+ if (!(is_am654_pci_dev(pdev) || is_j721e_pci_dev(pdev))) {
+ if (!pci_endpoint_test_request_irq(test)) {
+ err = -EINVAL;
+ goto err_kfree_test_name;
+ }
}
misc_device = &test->miscdev;
@@ -992,6 +1000,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
@@ -1001,6 +1015,7 @@ static struct pci_driver pci_endpoint_test_driver = {
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
diff --git a/drivers/misc/sram-dma-heap.c b/drivers/misc/sram-dma-heap.c
new file mode 100644
index 000000000000..cf28c138f2c0
--- /dev/null
+++ b/drivers/misc/sram-dma-heap.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SRAM DMA-Heap userspace exporter
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+
+#include "sram.h"
+
+struct sram_dma_heap {
+ struct dma_heap *heap;
+ struct gen_pool *pool;
+};
+
+struct sram_dma_heap_buffer {
+ struct gen_pool *pool;
+ struct list_head attachments;
+ struct mutex attachments_lock;
+ unsigned long len;
+ void *vaddr;
+ phys_addr_t paddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct sram_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ kfree(a);
+ return -ENOMEM;
+ }
+ if (sg_alloc_table(table, 1, GFP_KERNEL)) {
+ kfree(table);
+ kfree(a);
+ return -ENOMEM;
+ }
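+ /* one contiguous entry covering the whole SRAM allocation */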
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(buffer->paddr)), buffer->len, 0);
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->attachments_lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->attachments_lock);
+
+ return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct sram_dma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->attachments_lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->attachments_lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+
+ if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents,
+ direction, DMA_ATTR_SKIP_CPU_SYNC))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
+ direction, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct sram_dma_heap_buffer *buffer = dmabuf->priv;
+
+ gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
+ kfree(buffer);
+}
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct sram_dma_heap_buffer *buffer = dmabuf->priv;
+ int ret;
+
+ /* SRAM mappings are not cached */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = vm_iomap_memory(vma, buffer->paddr, buffer->len);
+ if (ret)
+ pr_err("Could not map buffer to userspace\n");
+
+ return ret;
+}
+
+static void *dma_heap_vmap(struct dma_buf *dmabuf)
+{
+ struct sram_dma_heap_buffer *buffer = dmabuf->priv;
+
+ return buffer->vaddr;
+}
+
+static const struct dma_buf_ops sram_dma_heap_buf_ops = {
+ .attach = dma_heap_attach,
+ .detach = dma_heap_detach,
+ .map_dma_buf = dma_heap_map_dma_buf,
+ .unmap_dma_buf = dma_heap_unmap_dma_buf,
+ .release = dma_heap_dma_buf_release,
+ .mmap = dma_heap_mmap,
+ .vmap = dma_heap_vmap,
+};
+
+static int sram_dma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct sram_dma_heap *sram_dma_heap = dma_heap_get_drvdata(heap);
+ struct sram_dma_heap_buffer *buffer;
+
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ buffer->pool = sram_dma_heap->pool;
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->attachments_lock);
+ buffer->len = len;
+
+ buffer->vaddr = (void *)gen_pool_alloc(buffer->pool, buffer->len);
+ if (!buffer->vaddr) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ buffer->paddr = gen_pool_virt_to_phys(buffer->pool, (unsigned long)buffer->vaddr);
+ if (buffer->paddr == -1) {
+ ret = -ENOMEM;
+ goto free_pool;
+ }
+
+ /* create the dmabuf */
+ exp_info.ops = &sram_dma_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pool;
+ }
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pool:
+ gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
+free_buffer:
+ kfree(buffer);
+
+ return ret;
+}
+
+static struct dma_heap_ops sram_dma_heap_ops = {
+ .allocate = sram_dma_heap_allocate,
+};
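+/*
+ * Each exported pool appears as /dev/dma_heap/<label>. A minimal
+ * user-space allocation sketch using the standard DMA-heap uapi
+ * (linux/dma-heap.h); the heap node name below is illustrative:
+ *
+ *	struct dma_heap_allocation_data alloc = {
+ *		.len = 4096,
+ *		.fd_flags = O_RDWR | O_CLOEXEC,
+ *	};
+ *	int heap = open("/dev/dma_heap/sram-pool", O_RDONLY);
+ *	ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc);
+ *	// alloc.fd is a DMA-BUF fd; mmap() it for CPU access
+ */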
+
+int sram_dma_heap_export(struct sram_dev *sram,
+ struct sram_reserve *block,
+ phys_addr_t start,
+ struct sram_partition *part)
+{
+ struct sram_dma_heap *sram_dma_heap;
+ struct dma_heap_export_info exp_info;
+
+ dev_info(sram->dev, "Exporting SRAM pool '%s'\n", block->label);
+
+ sram_dma_heap = kzalloc(sizeof(*sram_dma_heap), GFP_KERNEL);
+ if (!sram_dma_heap)
+ return -ENOMEM;
+ sram_dma_heap->pool = part->pool;
+
+ exp_info.name = block->label;
+ exp_info.ops = &sram_dma_heap_ops;
+ exp_info.priv = sram_dma_heap;
+ sram_dma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sram_dma_heap->heap)) {
+ int ret = PTR_ERR(sram_dma_heap->heap);
+ kfree(sram_dma_heap);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 202bf951e909..ca261b43bfa0 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -109,6 +109,15 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
if (ret)
return ret;
}
+ if (block->dma_heap_export) {
+ ret = sram_add_pool(sram, block, start, part);
+ if (ret)
+ return ret;
+
+ ret = sram_dma_heap_export(sram, block, start, part);
+ if (ret)
+ return ret;
+ }
if (block->protect_exec) {
ret = sram_check_protect_exec(sram, block, part);
if (ret)
@@ -209,8 +218,11 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
if (of_find_property(child, "protect-exec", NULL))
block->protect_exec = true;
- if ((block->export || block->pool || block->protect_exec) &&
- block->size) {
+ if (of_find_property(child, "dma-heap-export", NULL))
+ block->dma_heap_export = true;
+
+ if ((block->export || block->pool || block->protect_exec ||
+ block->dma_heap_export) && block->size) {
exports++;
label = NULL;
@@ -272,8 +284,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
goto err_chunks;
}
- if ((block->export || block->pool || block->protect_exec) &&
- block->size) {
+ if ((block->export || block->pool || block->protect_exec ||
+ block->dma_heap_export) && block->size) {
ret = sram_add_partition(sram, block,
res->start + block->start);
if (ret) {
diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h
index 9c1d21ff7347..e60ab13e8e6b 100644
--- a/drivers/misc/sram.h
+++ b/drivers/misc/sram.h
@@ -32,6 +32,7 @@ struct sram_reserve {
bool export;
bool pool;
bool protect_exec;
+ bool dma_heap_export;
const char *label;
};
@@ -52,4 +53,20 @@ static inline int sram_add_protect_exec(struct sram_partition *part)
return -ENODEV;
}
#endif /* CONFIG_SRAM_EXEC */
+
+#ifdef CONFIG_SRAM_DMA_HEAP
+int sram_dma_heap_export(struct sram_dev *sram,
+ struct sram_reserve *block,
+ phys_addr_t start,
+ struct sram_partition *part);
+#else
+static inline int sram_dma_heap_export(struct sram_dev *sram,
+ struct sram_reserve *block,
+ phys_addr_t start,
+ struct sram_partition *part)
+{
+ return 0;
+}
+#endif /* CONFIG_SRAM_DMA_HEAP */
+
#endif /* __SRAM_H */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 24cd6d3dc647..9a229b496a96 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -85,6 +85,7 @@
#define DRIVER_STRENGTH_40_OHM 0x4
#define CLOCK_TOO_SLOW_HZ 50000000
+#define SDHCI_AM654_AUTOSUSPEND_DELAY -1
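+/* a negative autosuspend delay effectively disables runtime autosuspend */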
/* Command Queue Host Controller Interface Base address */
#define SDHCI_AM654_CQE_BASE_ADDR 0x200
@@ -310,6 +311,7 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
+ u32 itap_del_sel;
u32 mask, val;
/* Setup DLL Output TAP delay */
@@ -321,7 +323,14 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
+
+ itap_del_sel = sdhci_am654->itap_del_sel[timing];
+ mask |= ITAPDLYSEL_MASK | ITAPDLYENA_MASK;
+ val |= (1 << ITAPDLYENA_SHIFT) | (itap_del_sel << ITAPDLYSEL_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -391,8 +400,18 @@ static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
- int err = sdhci_execute_tuning(mmc, opcode);
+ int err;
+ bool dcrc_was_enabled = false;
+
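+ /*
+ * Tuning deliberately provokes data CRC errors, so mask the data
+ * CRC/end-bit/timeout interrupts for the duration of tuning.
+ */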
+ if (host->ier & SDHCI_INT_DATA_CRC) {
+ host->ier &= ~(SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_TIMEOUT);
+ dcrc_was_enabled = true;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+ err = sdhci_execute_tuning(mmc, opcode);
if (err)
return err;
/*
@@ -401,6 +420,13 @@ static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
*/
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ /* Re-enable the interrupts masked before tuning */
+ if (dcrc_was_enabled) {
+ host->ier |= SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_TIMEOUT;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
return 0;
}
@@ -602,7 +628,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
return 0;
}
- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
+ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
ret = device_property_read_u32(dev, td[i].otap_binding,
&sdhci_am654->otap_del_sel[i]);
@@ -759,6 +785,18 @@ static const struct of_device_id sdhci_am654_of_match[] = {
.compatible = "ti,j721e-sdhci-4bit",
.data = &sdhci_j721e_4bit_drvdata,
},
+ {
+ .compatible = "ti,am64-sdhci-8bit",
+ .data = &sdhci_j721e_8bit_drvdata,
+ },
+ {
+ .compatible = "ti,am64-sdhci-4bit",
+ .data = &sdhci_j721e_4bit_drvdata,
+ },
+ {
+ .compatible = "ti,am62-sdhci",
+ .data = &sdhci_j721e_4bit_drvdata,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_am654_of_match);
@@ -801,18 +839,10 @@ static int sdhci_am654_probe(struct platform_device *pdev)
pltfm_host->clk = clk_xin;
- /* Clocks are enabled using pm_runtime */
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- goto pm_runtime_disable;
- }
-
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto pm_runtime_put;
+ goto err_pltfm_free;
}
sdhci_am654->base = devm_regmap_init_mmio(dev, base,
@@ -820,31 +850,47 @@ static int sdhci_am654_probe(struct platform_device *pdev)
if (IS_ERR(sdhci_am654->base)) {
dev_err(dev, "Failed to initialize regmap\n");
ret = PTR_ERR(sdhci_am654->base);
- goto pm_runtime_put;
+ goto err_pltfm_free;
}
ret = sdhci_am654_get_of_property(pdev, sdhci_am654);
if (ret)
- goto pm_runtime_put;
+ goto err_pltfm_free;
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(dev, "parsing dt failed (%d)\n", ret);
- goto pm_runtime_put;
+ goto err_pltfm_free;
}
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto pm_put;
+ pm_runtime_enable(dev);
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ goto pm_disable;
+
ret = sdhci_am654_init(host);
if (ret)
- goto pm_runtime_put;
+ goto clk_disable;
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, SDHCI_AM654_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return 0;
-pm_runtime_put:
- pm_runtime_put_sync(dev);
-pm_runtime_disable:
+clk_disable:
+ clk_disable_unprepare(pltfm_host->clk);
+pm_disable:
pm_runtime_disable(dev);
+pm_put:
+ pm_runtime_put_noidle(dev);
err_pltfm_free:
sdhci_pltfm_free(pdev);
return ret;
@@ -853,23 +899,127 @@ err_pltfm_free:
static int sdhci_am654_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
- sdhci_remove_host(host, true);
- ret = pm_runtime_put_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
+ sdhci_remove_host(host, true);
+ clk_disable_unprepare(pltfm_host->clk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
sdhci_pltfm_free(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdhci_am654_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ u32 ctl_cfg_2 = 0;
+ u32 val;
+ int ret;
+
+ if (sdhci_am654->flags & DLL_CALIB) {
+ regmap_read(sdhci_am654->base, PHY_STAT1, &val);
+ if (~val & CALDONE_MASK) {
+ /* Calibrate IO lines */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1,
+ PDB_MASK, PDB_MASK);
+ ret = regmap_read_poll_timeout(sdhci_am654->base,
+ PHY_STAT1, val,
+ val & CALDONE_MASK,
+ 1, 20);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable pins by setting IO mux to 0 */
+ if (sdhci_am654->flags & IOMUX_PRESENT)
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1,
+ IOMUX_ENABLE_MASK, 0);
+
+ /* Set slot type based on SD or eMMC */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ ctl_cfg_2 = SLOTTYPE_EMBEDDED;
+
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
+ ctl_cfg_2);
+
+ regmap_read(sdhci_am654->base, CTL_CFG_3, &val);
+ if (~val & TUNINGFORSDR50_MASK)
+ /* Enable tuning for SDR50 */
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK,
+ TUNINGFORSDR50_MASK);
return 0;
}
+static int sdhci_am654_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
+
+ /* disable the clock */
+ clk_disable_unprepare(pltfm_host->clk);
+ return 0;
+}
+
+static int sdhci_am654_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ /* Enable the clock */
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ return ret;
+
+ ret = sdhci_am654_restore(host);
+ if (ret)
+ return ret;
+
+ ret = sdhci_runtime_resume_host(host, 0);
+ if (ret)
+ return ret;
+
+ ret = cqhci_resume(host->mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_am654_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_am654_runtime_suspend,
+ sdhci_am654_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static struct platform_driver sdhci_am654_driver = {
.driver = {
.name = "sdhci-am654",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &sdhci_am654_dev_pm_ops,
.of_match_table = sdhci_am654_of_match,
},
.probe = sdhci_am654_probe,
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 6c46f25b57e2..a1e3ba6597dd 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -62,7 +62,7 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
- depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 512f60780a50..6d1dd7ef71fc 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -18,7 +18,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -28,6 +28,7 @@
#include <linux/omap-gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>
+#include <linux/sys_soc.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
@@ -164,6 +165,7 @@ struct omap_nand_info {
u_char *buf;
int buf_len;
/* Interface to GPMC */
+ void __iomem *fifo;
struct gpmc_nand_regs reg;
struct gpmc_nand_ops *ops;
bool flash_bbt;
@@ -171,6 +173,13 @@ struct omap_nand_info {
struct device *elm_dev;
/* NAND ready gpio */
struct gpio_desc *ready_gpiod;
+
+ void (*data_in)(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+ void (*data_out)(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+ bool force_32bit;
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -178,6 +187,13 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit);
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit);
+
/**
* omap_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
@@ -236,169 +252,70 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
}
/**
- * omap_hwcontrol - hardware specific access to control-lines
- * @chip: NAND chip object
- * @cmd: command to device
- * @ctrl:
- * NAND_NCE: bit 0 -> don't care
- * NAND_CLE: bit 1 -> Command Latch
- * NAND_ALE: bit 2 -> Address Latch
- *
- * NOTE: boards may use different bits for these!!
+ * omap_nand_data_in_pref - NAND data in using the Prefetch engine
*/
-static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
+static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- if (cmd != NAND_CMD_NONE) {
- if (ctrl & NAND_CLE)
- writeb(cmd, info->reg.gpmc_nand_command);
-
- else if (ctrl & NAND_ALE)
- writeb(cmd, info->reg.gpmc_nand_address);
-
- else /* NAND_NCE */
- writeb(cmd, info->reg.gpmc_nand_data);
- }
-}
-
-/**
- * omap_read_buf8 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
-}
-
-/**
- * omap_write_buf8 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u_char *p = (u_char *)buf;
- bool status;
-
- while (len--) {
- iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf16 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
-
- ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
-}
-
-/**
- * omap_write_buf16 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- u16 *p = (u16 *) buf;
- bool status;
- /* FIXME try bursts of writesw() or DMA ... */
- len >>= 1;
-
- while (len--) {
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
- /* wait until buffer is available for write */
- do {
- status = info->ops->nand_writebuffer_empty();
- } while (!status);
- }
-}
-
-/**
- * omap_read_buf_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
- */
-static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
+ unsigned int pref_len;
- /* take care of subpage reads */
- if (len % 4) {
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len % 4);
- else
- omap_read_buf8(mtd, buf, len % 4);
- p = (u32 *) (buf + len % 4);
- len -= len % 4;
+ if (force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
+ return;
}
+ /* read 32-bit words using prefetch and remaining bytes normally */
+
/* configure and start prefetch transfer */
+ pref_len = len - (len & 3);
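+ /* e.g. len = 2051: pref_len = 2048 bytes move as 32-bit words, the 3 trailing bytes use the CPU path below */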
ret = omap_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, (u_char *)p, len);
- else
- omap_read_buf8(mtd, (u_char *)p, len);
+ /* prefetch engine is busy, use CPU copy method */
+ omap_nand_data_in(chip, buf, len, false);
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
- ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
+ ioread32_rep(info->fifo, p, r_count);
p += r_count;
- len -= r_count << 2;
- } while (len);
- /* disable and stop the PFPW engine */
+ pref_len -= r_count << 2;
+ } while (pref_len);
+ /* disable and stop the Prefetch engine */
omap_prefetch_reset(info->gpmc_cs, info);
+ /* fetch any remaining bytes */
+ if (len & 3)
+ omap_nand_data_in(chip, p, len & 3, false);
}
}
/**
- * omap_write_buf_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_pref - NAND data out using the Write Posting engine
*/
-static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
u32 val;
+ if (force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
+ return;
+ }
+
/* take care of subpage writes */
if (len % 2 != 0) {
- writeb(*buf, info->nand.legacy.IO_ADDR_W);
+ writeb(*(u8 *)buf, info->fifo);
p = (u16 *)(buf + 1);
len--;
}
@@ -407,18 +324,15 @@ static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
- /* PFPW engine is busy, use cpu copy method */
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, (u_char *)p, len);
- else
- omap_write_buf8(mtd, (u_char *)p, len);
+ /* write posting engine is busy, use CPU copy method */
+ omap_nand_data_out(chip, buf, len, false);
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
- iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
+ iowrite16(*p++, info->fifo);
}
 /* wait for the data to be flushed out before resetting the prefetch engine */
tim = 0;
@@ -446,15 +360,16 @@ static void omap_nand_dma_callback(void *data)
/*
* omap_nand_dma_transfer: configure and start dma transfer
- * @mtd: MTD device structure
+ * @chip: nand chip structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
* @is_write: flag for read/write operation
*/
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
- unsigned int len, int is_write)
+static inline int omap_nand_dma_transfer(struct nand_chip *chip,
+ const void *addr, unsigned int len,
+ int is_write)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
@@ -516,49 +431,41 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
out_copy_unmap:
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
- : omap_write_buf16(mtd, (u_char *) addr, len);
- else
- is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
- : omap_write_buf8(mtd, (u_char *) addr, len);
+ is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
+ : omap_nand_data_out(chip, addr, len, false);
+
return 0;
}
/**
- * omap_read_buf_dma_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
*/
-static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len <= mtd->oobsize)
- omap_read_buf_pref(chip, buf, len);
+ omap_nand_data_in_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x0);
+ omap_nand_dma_transfer(chip, buf, len, 0x0);
}
/**
- * omap_write_buf_dma_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
*/
-static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len <= mtd->oobsize)
- omap_write_buf_pref(chip, buf, len);
+ omap_nand_data_out_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
+ omap_nand_dma_transfer(chip, buf, len, 0x1);
}
/*
@@ -582,13 +489,13 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
- iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
+ iowrite32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
- ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
+ ioread32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
@@ -608,20 +515,17 @@ done:
}
/*
- * omap_read_buf_irq_pref - read data from NAND controller into buffer
- * @chip: NAND chip object
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
*/
-static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
- int len)
+static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
- if (len <= mtd->oobsize) {
- omap_read_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
@@ -632,9 +536,11 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_in(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -647,31 +553,23 @@ static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
- else
- omap_read_buf8(mtd, buf, len);
}
/*
- * omap_write_buf_irq_pref - write buffer to NAND controller
- * @chip: NAND chip object
- * @buf: data buffer
- * @len: number of bytes to write
+ * omap_nand_data_out_irq_pref - NAND data out using write posting and IRQ
*/
-static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
- int len)
+static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
unsigned long tim, limit;
u32 val;
- if (len <= mtd->oobsize) {
- omap_write_buf_pref(chip, buf, len);
+ if (len <= mtd->oobsize || force_8bit) {
+ omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
@@ -682,9 +580,11 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* configure and start prefetch transfer : size=24 */
ret = omap_prefetch_enable(info->gpmc_cs,
(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
- if (ret)
+ if (ret) {
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ omap_nand_data_out(chip, buf, len, false);
+ return;
+ }
info->buf_len = len;
@@ -706,12 +606,6 @@ static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
-
-out_copy:
- if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
- else
- omap_write_buf8(mtd, buf, len);
}
/**
@@ -977,50 +871,6 @@ static void omap_enable_hwecc(struct nand_chip *chip, int mode)
}
/**
- * omap_wait - wait until the command is done
- * @this: NAND Chip structure
- *
- * Wait function is called during Program and erase operations and
- * the way it is called from MTD layer, we should wait till the NAND
- * chip is ready after the programming/erase operation has completed.
- *
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- */
-static int omap_wait(struct nand_chip *this)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
- unsigned long timeo = jiffies;
- int status;
-
- timeo += msecs_to_jiffies(400);
-
- writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
- while (time_before(jiffies, timeo)) {
- status = readb(info->reg.gpmc_nand_data);
- if (status & NAND_STATUS_READY)
- break;
- cond_resched();
- }
-
- status = readb(info->reg.gpmc_nand_data);
- return status;
-}
-
-/**
- * omap_dev_ready - checks the NAND Ready GPIO line
- * @mtd: MTD device structure
- *
- * Returns true if ready and false if busy.
- */
-static int omap_dev_ready(struct nand_chip *chip)
-{
- struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
-
- return gpiod_get_value(info->ready_gpiod);
-}
-
-/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @mtd: MTD device structure
* @mode: Read/Write mode
@@ -1524,6 +1374,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
int ret;
uint8_t *ecc_calc = chip->ecc.calc_buf;
@@ -1533,7 +1384,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf, mtd->writesize);
+ info->data_out(chip, buf, mtd->writesize, false);
/* Update ecc vector from GPMC result registers */
omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
@@ -1544,7 +1395,7 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
return ret;
/* Write ecc vector to OOB area */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1565,6 +1416,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
@@ -1585,7 +1437,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->legacy.write_buf(chip, buf, mtd->writesize);
+ info->data_out(chip, buf, mtd->writesize, false);
for (step = 0; step < ecc_steps; step++) {
/* mask ECC of un-touched subpages by padding 0xFF */
@@ -1610,7 +1462,7 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
return ret;
/* write OOB buffer to NAND device */
- chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
+ info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -1633,6 +1485,7 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
@@ -1644,7 +1497,7 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_READ);
/* Read data */
- chip->legacy.read_buf(chip, buf, mtd->writesize);
+ info->data_in(chip, buf, mtd->writesize, false);
/* Read oob bytes */
nand_change_read_column_op(chip,
@@ -1927,8 +1780,9 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- chip->legacy.read_buf = omap_read_buf_pref;
- chip->legacy.write_buf = omap_write_buf_pref;
+ dev_info(dev, "using prefetch polled xfer mode\n");
+ info->data_in = omap_nand_data_in_pref;
+ info->data_out = omap_nand_data_out_pref;
break;
case NAND_OMAP_POLLED:
@@ -1960,12 +1814,14 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err);
return err;
}
- chip->legacy.read_buf = omap_read_buf_dma_pref;
- chip->legacy.write_buf = omap_write_buf_dma_pref;
+
+ info->data_in = omap_nand_data_in_dma_pref;
+ info->data_out = omap_nand_data_out_dma_pref;
}
break;
case NAND_OMAP_PREFETCH_IRQ:
+ dev_info(dev, "using prefetch irq xfer mode\n");
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
if (info->gpmc_irq_fifo <= 0)
return -ENODEV;
@@ -1992,9 +1848,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return err;
}
- chip->legacy.read_buf = omap_read_buf_irq_pref;
- chip->legacy.write_buf = omap_write_buf_irq_pref;
-
+ info->data_in = omap_nand_data_in_irq_pref;
+ info->data_out = omap_nand_data_out_irq_pref;
break;
default:
@@ -2158,22 +2013,146 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static void omap_nand_data_in(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (info->force_32bit) {
+ u32 val;
+ int left;
+ u8 *ptr;
+
+ ioread32_rep(info->fifo, buf, len >> 2);
+ left = len & 0x3;
+ if (left) {
+ val = ioread32(info->fifo);
+ ptr = (u8 *)(buf + (len - left));
+ while (left--) {
+ *ptr++ = val & 0xff;
+ val >>= 8;
+ }
+ }
+
+ return;
+ }
+
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->fifo, buf, len >> 1);
+ else
+ ioread32_rep(info->fifo, buf, len >> 2);
+}
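The ((uintptr_t)buf | len) & 3 test above folds the buffer address and the length into one alignment value, so a single cascade picks the widest FIFO access both permit. A standalone sketch of the same selection (the helper name is illustrative):

	#include <stdint.h>

	/* Mirrors the access-width cascade in omap_nand_data_in()/_out(). */
	static unsigned int fifo_access_width(const void *buf, unsigned int len)
	{
		uint32_t alignment = ((uintptr_t)buf | len) & 3;

		if (alignment & 1)
			return 1;	/* odd address or odd length: byte accesses */
		if (alignment & 3)
			return 2;	/* 2-byte aligned only: halfword accesses */
		return 4;		/* fully 4-byte aligned: word accesses */
	}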
+
+static void omap_nand_data_out(struct nand_chip *chip,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->fifo, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->fifo, buf, len >> 1);
+ else
+ iowrite32_rep(info->fifo, buf, len >> 2);
+}
+
+static int omap_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
+ unsigned int i;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->reg.gpmc_nand_command);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->reg.gpmc_nand_address);
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ info->data_in(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ info->data_out(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = info->ready_gpiod ?
+ nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int omap_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = omap_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
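With ->exec_op() wired up, the raw NAND core drives the controller through generic instruction lists instead of the removed legacy cmd_ctrl/read_buf hooks. A hedged sketch of the kind of operation the core hands to omap_nand_exec_op(), built with the instruction macros from <linux/mtd/rawnand.h> (the exact sequence the core generates for READ STATUS may differ):

	u8 status;
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, &status, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	/* Dispatches both instructions through omap_nand_exec_instr(). */
	int ret = omap_nand_exec_op(chip, &op, false);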
+
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
+ .exec_op = omap_nand_exec_op,
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
static struct nand_controller omap_gpmc_controller;
static bool omap_gpmc_controller_initialized;
+static const struct of_device_id omap_nand_ids[];
+
static int omap_nand_probe(struct platform_device *pdev)
{
+ static const struct soc_device_attribute k3_soc_devices[] = {
+ { .family = "AM64X", .revision = "SR1.0" },
+ { /* sentinel */ }
+ };
+
struct omap_nand_info *info;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
struct resource *res;
struct device *dev = &pdev->dev;
+ void __iomem *vaddr;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
@@ -2182,6 +2161,12 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
+ /* Some SoCs can only read from the FIFO in 32-bit quantities */
+ if (soc_device_match(k3_soc_devices)) {
+ dev_info(&pdev->dev, "force 32-bit\n");
+ info->force_32bit = true;
+ }
+
err = omap_get_dt_info(dev, info);
if (err)
return err;
@@ -2208,10 +2193,11 @@ static int omap_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
- return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
+ vaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ info->fifo = vaddr;
info->phys_base = res->start;
if (!omap_gpmc_controller_initialized) {
@@ -2222,9 +2208,6 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->controller = &omap_gpmc_controller;
- nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
- nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
-
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
if (IS_ERR(info->ready_gpiod)) {
@@ -2232,27 +2215,16 @@ static int omap_nand_probe(struct platform_device *pdev)
return PTR_ERR(info->ready_gpiod);
}
- /*
- * If RDY/BSY line is connected to OMAP then use the omap ready
- * function and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line. Otherwise use a standard
- * chip delay which is slightly more than tR (AC Timing) of the NAND
- * device and read status register until you get a failure or success
- */
- if (info->ready_gpiod) {
- nand_chip->legacy.dev_ready = omap_dev_ready;
- nand_chip->legacy.chip_delay = 0;
- } else {
- nand_chip->legacy.waitfunc = omap_wait;
- nand_chip->legacy.chip_delay = 50;
- }
-
if (info->flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+ /* default operations */
+ info->data_in = omap_nand_data_in;
+ info->data_out = omap_nand_data_out;
+
err = nand_scan(nand_chip, 1);
if (err)
goto return_error;
@@ -2299,6 +2271,7 @@ static int omap_nand_remove(struct platform_device *pdev)
static const struct of_device_id omap_nand_ids[] = {
{ .compatible = "ti,omap2-nand", },
+ { .compatible = "ti,am64-nand", },
{},
};
MODULE_DEVICE_TABLE(of, omap_nand_ids);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 8794a1f6eacd..73b8e7748dc5 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -22,9 +22,11 @@
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
- spinand->scratchbuf);
int ret;
+ struct spi_mem_op op = spinand->ctrl_ops->ops.get_feature;
+
+ op.data.buf.in = spinand->scratchbuf;
+ memset(&op.addr.val, reg, op.addr.nbytes);
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
@@ -36,10 +38,12 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
- spinand->scratchbuf);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.set_feature;
+
+ op.data.buf.out = spinand->scratchbuf;
+ memset(&op.addr.val, reg, op.addr.nbytes);
+ memset(spinand->scratchbuf, val, op.data.nbytes);
- *spinand->scratchbuf = val;
return spi_mem_exec_op(spinand->spimem, &op);
}
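Note how the register address is memset() across op.addr.val instead of assigned: the templates carry a protocol-dependent number of address bytes, and replicating the one-byte register address into each of them (presumably what the wider Octal DTR address phase expects) keeps these call sites protocol-agnostic. A small standalone illustration, little-endian layout assumed:

	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t addr = 0;

		/* reg = 0xC0 replicated across a 2-byte address phase */
		memset(&addr, 0xC0, 2);
		return (addr & 0xFFFF) == 0xC0C0 ? 0 : 1;	/* exits 0 */
	}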
@@ -138,20 +142,12 @@ int spinand_select_target(struct spinand_device *spinand, unsigned int target)
return 0;
}
-static int spinand_init_cfg_cache(struct spinand_device *spinand)
+static int spinand_read_cfg(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct device *dev = &spinand->spimem->spi->dev;
unsigned int target;
int ret;
- spinand->cfg_cache = devm_kcalloc(dev,
- nand->memorg.ntargets,
- sizeof(*spinand->cfg_cache),
- GFP_KERNEL);
- if (!spinand->cfg_cache)
- return -ENOMEM;
-
for (target = 0; target < nand->memorg.ntargets; target++) {
ret = spinand_select_target(spinand, target);
if (ret)
@@ -170,6 +166,21 @@ static int spinand_init_cfg_cache(struct spinand_device *spinand)
return 0;
}
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct device *dev = &spinand->spimem->spi->dev;
+
+ spinand->cfg_cache = devm_kcalloc(dev,
+ nand->memorg.ntargets,
+ sizeof(*spinand->cfg_cache),
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
bool enable = false;
@@ -177,9 +188,9 @@ static int spinand_init_quad_enable(struct spinand_device *spinand)
if (!(spinand->flags & SPINAND_HAS_QE_BIT))
return 0;
- if (spinand->op_templates.read_cache->data.buswidth == 4 ||
- spinand->op_templates.write_cache->data.buswidth == 4 ||
- spinand->op_templates.update_cache->data.buswidth == 4)
+ if (spinand->data_ops.read_cache->data.buswidth == 4 ||
+ spinand->data_ops.write_cache->data.buswidth == 4 ||
+ spinand->data_ops.update_cache->data.buswidth == 4)
enable = true;
return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
@@ -193,9 +204,9 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
enable ? CFG_ECC_ENABLE : 0);
}
-static int spinand_write_enable_op(struct spinand_device *spinand)
+int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.write_enable;
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -205,7 +216,9 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.page_read;
+
+ op.addr.val = row;
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -326,7 +339,9 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.program_execute;
+
+ op.addr.val = row;
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -336,25 +351,35 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.block_erase;
+
+ op.addr.val = row;
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+static int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
- unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ struct spi_mem_op op = spinand->ctrl_ops->ops.get_feature;
u8 status;
int ret;
- do {
- ret = spinand_read_status(spinand, &status);
- if (ret)
- return ret;
+ op.data.buf.in = spinand->scratchbuf;
+ memset(&op.addr.val, REG_STATUS, op.addr.nbytes);
+
+ ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
+ initial_delay_us,
+ poll_delay_us,
+ SPINAND_WAITRDY_TIMEOUT_MS);
+ if (ret)
+ return ret;
- if (!(status & STATUS_BUSY))
- goto out;
- } while (time_before(jiffies, timeo));
+ status = *spinand->scratchbuf;
+ if (!(status & STATUS_BUSY))
+ goto out;
/*
* Extra read, just in case the STATUS_READY bit has changed
@@ -387,14 +412,17 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = spinand->ctrl_ops->ops.reset;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -442,7 +470,10 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
@@ -474,7 +505,10 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
if (!ret && (status & STATUS_PROG_FAILED))
ret = -EIO;
@@ -659,7 +693,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
@@ -707,7 +745,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
- info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl = *spinand->data_ops.update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
@@ -715,7 +753,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
spinand->dirmaps[plane].wdesc = desc;
- info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl = *spinand->data_ops.read_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
@@ -762,6 +800,16 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&winbond_spinand_manufacturer,
};
+static const struct spinand_ctrl_ops spinand_default_ctrl_ops =
+ SPINAND_CTRL_OPS(SPINAND_1S_1S_1S,
+ SPINAND_RESET_OP,
+ SPINAND_GET_FEATURE_OP(0, NULL),
+ SPINAND_SET_FEATURE_OP(0, NULL),
+ SPINAND_WR_EN_DIS_OP(true),
+ SPINAND_BLK_ERASE_OP(0),
+ SPINAND_PAGE_READ_OP(0),
+ SPINAND_PROG_EXEC_OP(0));
+
static int spinand_manufacturer_match(struct spinand_device *spinand,
enum spinand_readid_method rdid_method)
{
@@ -834,8 +882,8 @@ static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
}
static const struct spi_mem_op *
-spinand_select_op_variant(struct spinand_device *spinand,
- const struct spinand_op_variants *variants)
+spinand_select_data_op_variant(struct spinand_device *spinand,
+ const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
@@ -867,6 +915,117 @@ spinand_select_op_variant(struct spinand_device *spinand,
return NULL;
}
+static const struct spinand_ctrl_ops *
+spinand_select_ctrl_ops_variant(struct spinand_device *spinand,
+ const struct spinand_ctrl_ops_variants *variants,
+ const enum spinand_protocol protocol)
+{
+ unsigned int i;
+
+ for (i = 0; i < variants->nvariants; i++) {
+ const struct spinand_ctrl_ops *ctrl_ops =
+ &variants->ctrl_ops_list[i];
+
+ if (ctrl_ops->protocol != protocol)
+ continue;
+
+ if (!spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.reset) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.get_feature) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.set_feature) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.write_enable) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.block_erase) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.page_read) ||
+ !spi_mem_supports_op(spinand->spimem,
+ &ctrl_ops->ops.program_execute))
+ continue;
+
+ return ctrl_ops;
+ }
+
+ return NULL;
+}
+
+static bool spinand_op_is_octal_dtr(const struct spi_mem_op *op)
+{
+ return op->cmd.buswidth == 8 && op->cmd.dtr &&
+ op->addr.buswidth == 8 && op->addr.dtr &&
+ op->data.buswidth == 8 && op->data.dtr;
+}
+
+static int spinand_init_octal_dtr_enable(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ const struct spinand_ctrl_ops *octal_dtr_ctrl_ops;
+ int ret;
+
+ if (!(spinand->flags & SPINAND_HAS_OCTAL_DTR_BIT))
+ return 0;
+
+ if (!(spinand_op_is_octal_dtr(spinand->data_ops.read_cache) &&
+ spinand_op_is_octal_dtr(spinand->data_ops.write_cache) &&
+ spinand_op_is_octal_dtr(spinand->data_ops.update_cache)))
+ return 0;
+
+ octal_dtr_ctrl_ops = spinand_select_ctrl_ops_variant(spinand,
+ spinand->desc_entry->ctrl_ops_variants,
+ SPINAND_8D_8D_8D);
+
+ if (!octal_dtr_ctrl_ops)
+ return 0;
+
+ if (!spinand->manufacturer->ops->change_mode) {
+ dev_dbg(dev,
+ "Missing ->change_mode(), unable to switch mode\n");
+ return -EINVAL;
+ }
+
+ ret = spinand->manufacturer->ops->change_mode(spinand,
+ SPINAND_8D_8D_8D);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable Octal DTR SPI mode (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ spinand->protocol = SPINAND_8D_8D_8D;
+ spinand->ctrl_ops = octal_dtr_ctrl_ops;
+
+ dev_dbg(dev,
+ "%s SPI NAND switched to Octal DTR SPI (8D-8D-8D) mode\n",
+ spinand->manufacturer->name);
+ return 0;
+}
+
+static int spinand_init_octal_dtr_disable(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ int ret;
+
+ if (!spinand->manufacturer->ops->change_mode)
+ return -EINVAL;
+
+ ret = spinand->manufacturer->ops->change_mode(spinand,
+ SPINAND_1S_1S_1S);
+
+ if (ret) {
+ dev_err(dev,
+ "Failed to disable Octal DTR SPI mode (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ spinand->protocol = SPINAND_1S_1S_1S;
+ spinand->ctrl_ops = &spinand_default_ctrl_ops;
+ return 0;
+}
+
/**
* spinand_match_and_init() - Try to find a match between a device ID and an
* entry in a spinand_info table
@@ -907,24 +1066,25 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
+ spinand->desc_entry = &table[i];
- op = spinand_select_op_variant(spinand,
- info->op_variants.read_cache);
+ op = spinand_select_data_op_variant(spinand,
+ info->data_ops_variants.read_cache);
if (!op)
return -ENOTSUPP;
- spinand->op_templates.read_cache = op;
+ spinand->data_ops.read_cache = op;
- op = spinand_select_op_variant(spinand,
- info->op_variants.write_cache);
+ op = spinand_select_data_op_variant(spinand,
+ info->data_ops_variants.write_cache);
if (!op)
return -ENOTSUPP;
- spinand->op_templates.write_cache = op;
+ spinand->data_ops.write_cache = op;
- op = spinand_select_op_variant(spinand,
- info->op_variants.update_cache);
- spinand->op_templates.update_cache = op;
+ op = spinand_select_data_op_variant(spinand,
+ info->data_ops_variants.update_cache);
+ spinand->data_ops.update_cache = op;
return 0;
}
@@ -989,12 +1149,88 @@ static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
.free = spinand_noecc_ooblayout_free,
};
+static int spinand_init_flash(struct spinand_device *spinand)
+{
+ struct device *dev = &spinand->spimem->spi->dev;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret, i;
+
+ ret = spinand_read_cfg(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ return ret;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ break;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ ret = spinand_init_octal_dtr_enable(spinand);
+
+ if (ret)
+ spinand_manufacturer_cleanup(spinand);
+
+ return ret;
+}
+
+static void spinand_mtd_resume(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ spinand->protocol = SPINAND_1S_1S_1S;
+ spinand->ctrl_ops = &spinand_default_ctrl_ops;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return;
+
+ ret = spinand_init_flash(spinand);
+ if (ret)
+ return;
+
+ spinand_ecc_enable(spinand, false);
+}
+
+static int spinand_mtd_suspend(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+
+ if (spinand->ctrl_ops->protocol == SPINAND_8D_8D_8D)
+ return spinand_init_octal_dtr_disable(spinand);
+
+ return 0;
+}
+
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct nand_device *nand = mtd_to_nanddev(mtd);
- int ret, i;
+ int ret;
/*
* We need a scratch buffer because the spi_mem interface requires that
@@ -1004,6 +1240,9 @@ static int spinand_init(struct spinand_device *spinand)
if (!spinand->scratchbuf)
return -ENOMEM;
+ spinand->protocol = SPINAND_1S_1S_1S;
+ spinand->ctrl_ops = &spinand_default_ctrl_ops;
+
ret = spinand_detect(spinand);
if (ret)
goto err_free_bufs;
@@ -1027,22 +1266,10 @@ static int spinand_init(struct spinand_device *spinand)
if (ret)
goto err_free_bufs;
- ret = spinand_init_quad_enable(spinand);
- if (ret)
- goto err_free_bufs;
-
- ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ ret = spinand_init_flash(spinand);
if (ret)
goto err_free_bufs;
- ret = spinand_manufacturer_init(spinand);
- if (ret) {
- dev_err(dev,
- "Failed to initialize the SPI NAND chip (err = %d)\n",
- ret);
- goto err_free_bufs;
- }
-
ret = spinand_create_dirmaps(spinand);
if (ret) {
dev_err(dev,
@@ -1051,17 +1278,6 @@ static int spinand_init(struct spinand_device *spinand)
goto err_manuf_cleanup;
}
- /* After power up, all blocks are locked, so unlock them here. */
- for (i = 0; i < nand->memorg.ntargets; i++) {
- ret = spinand_select_target(spinand, i);
- if (ret)
- goto err_manuf_cleanup;
-
- ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
- if (ret)
- goto err_manuf_cleanup;
- }
-
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
@@ -1077,6 +1293,8 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+ mtd->_resume = spinand_mtd_resume;
+ mtd->_suspend = spinand_mtd_suspend;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 76684428354e..42fdb578f731 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -7,6 +7,7 @@
* Boris Brezillon <boris.brezillon@bootlin.com>
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
@@ -15,7 +16,17 @@
#define WINBOND_CFG_BUF_READ BIT(3)
-static SPINAND_OP_VARIANTS(read_cache_variants,
+/* Octal DTR SPI mode (8D-8D-8D) with Data Strobe output */
+#define WINBOND_VCR_IO_MODE_OCTAL_DTR 0xE7
+#define WINBOND_VCR_IO_MODE_SINGLE_STR 0xFF
+#define WINBOND_VCR_IO_MODE_ADDR 0x00
+
+/* Use 12 dummy clock cycles for Octal DTR SPI at up to 120 MHz */
+#define WINBOND_VCR_DUMMY_CLK_COUNT 12
+#define WINBOND_VCR_DUMMY_CLK_DEFAULT 0xFF
+#define WINBOND_VCR_DUMMY_CLK_ADDR 0x01
+
+static SPINAND_OP_VARIANTS(read_cache_variants_w25xxgv,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
@@ -23,14 +34,45 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
-static SPINAND_OP_VARIANTS(write_cache_variants,
+static SPINAND_OP_VARIANTS(write_cache_variants_w25xxgv,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
-static SPINAND_OP_VARIANTS(update_cache_variants,
+static SPINAND_OP_VARIANTS(update_cache_variants_w25xxgv,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
+static SPINAND_OP_VARIANTS(read_cache_variants_w35n01jw,
+ SPINAND_PAGE_READ_FROM_CACHE_OCTALIO_DTR_OP(0, 24, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants_w35n01jw,
+ SPINAND_PROG_LOAD_OCTALIO_DTR(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants_w35n01jw,
+ SPINAND_PROG_LOAD_OCTALIO_DTR(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static SPINAND_CTRL_OPS_VARIANTS(ctrl_ops_variants_w35n01jw,
+ SPINAND_CTRL_OPS(SPINAND_8D_8D_8D,
+ SPINAND_RESET_OP_OCTAL_DTR,
+ SPINAND_GET_FEATURE_OP_OCTAL_DTR(0, NULL),
+ SPINAND_SET_FEATURE_OP_OCTAL_DTR(0, NULL),
+ SPINAND_WR_EN_DIS_OP_OCTAL_DTR(true),
+ SPINAND_BLK_ERASE_OP_OCTAL_DTR(0),
+ SPINAND_PAGE_READ_OP_OCTAL_DTR(0),
+ SPINAND_PROG_EXEC_OP_OCTAL_DTR(0)),
+ SPINAND_CTRL_OPS(SPINAND_1S_1S_1S,
+ SPINAND_RESET_OP,
+ SPINAND_GET_FEATURE_OP(0, NULL),
+ SPINAND_SET_FEATURE_OP(0, NULL),
+ SPINAND_WR_EN_DIS_OP(true),
+ SPINAND_BLK_ERASE_OP(0),
+ SPINAND_PAGE_READ_OP(0),
+ SPINAND_PROG_EXEC_OP(0)));
+
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -55,11 +97,40 @@ static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 10;
+
+ return 0;
+}
+
static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
.ecc = w25m02gv_ooblayout_ecc,
.free = w25m02gv_ooblayout_free,
};
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .free = w35n01jw_ooblayout_free,
+};
+
static int w25m02gv_select_target(struct spinand_device *spinand,
unsigned int target)
{
@@ -79,9 +150,9 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_w25xxgv,
+ &write_cache_variants_w25xxgv,
+ &update_cache_variants_w25xxgv),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
@@ -89,11 +160,22 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_w25xxgv,
+ &write_cache_variants_w25xxgv,
+ &update_cache_variants_w25xxgv),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W35N01JW",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_w35n01jw,
+ &write_cache_variants_w35n01jw,
+ &update_cache_variants_w35n01jw),
+ SPINAND_HAS_OCTAL_DTR_BIT | SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL),
+ SPINAND_INFO_CTRL_OPS_VARIANTS(&ctrl_ops_variants_w35n01jw)),
+
};
static int winbond_spinand_init(struct spinand_device *spinand)
@@ -114,8 +196,150 @@ static int winbond_spinand_init(struct spinand_device *spinand)
return 0;
}
+/**
+ * winbond_write_vcr_op() - write values onto the volatile configuration
+ * registers (VCR)
+ * @spinand: the spinand device
+ * @reg: address of the register in the VCR to write to
+ * @val: value to write to that register
+ *
+ * Volatile configuration registers are a separate set of configuration
+ * registers, i.e. they differ from the status registers SR-1/2/3. A different
+ * SPI instruction is required to write to these registers. Any changes
+ * to the Volatile Configuration Register get transferred directly to
+ * the Internal Configuration Register and instantly reflect on the
+ * device operation.
+ */
+static int winbond_write_vcr_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ int ret;
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x81, 1),
+ SPI_MEM_OP_ADDR(3, reg, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, spinand->scratchbuf, 1));
+
+ *spinand->scratchbuf = val;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * The write VCR operation doesn't set the busy bit in SR, so we can't
+ * poll for status. The write needs a minimum of 50 ns to complete, so
+ * give three times the minimum required delay.
+ */
+ ndelay(150);
+ return 0;
+}
+
+static int winbond_spinand_octal_dtr_enable(struct spinand_device *spinand)
+{
+ int ret;
+ struct spi_mem_op op;
+
+ ret = winbond_write_vcr_op(spinand, WINBOND_VCR_DUMMY_CLK_ADDR,
+ WINBOND_VCR_DUMMY_CLK_COUNT);
+ if (ret)
+ return ret;
+
+ ret = winbond_write_vcr_op(spinand, WINBOND_VCR_IO_MODE_ADDR,
+ WINBOND_VCR_IO_MODE_OCTAL_DTR);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD_DTR(2, 0x9f9f, 8),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY_DTR(16, 8),
+ SPI_MEM_OP_DATA_IN_DTR(SPINAND_MAX_ID_LEN,
+ spinand->scratchbuf, 8));
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(spinand->scratchbuf, spinand->id.data, SPINAND_MAX_ID_LEN))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int winbond_spinand_octal_dtr_disable(struct spinand_device *spinand)
+{
+ int ret;
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD_DTR(2, 0x8181, 8),
+ SPI_MEM_OP_ADDR_DTR(4, WINBOND_VCR_IO_MODE_ADDR, 8),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT_DTR(2, spinand->scratchbuf, 8));
+
+ *spinand->scratchbuf = WINBOND_VCR_IO_MODE_SINGLE_STR;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = winbond_write_vcr_op(spinand, WINBOND_VCR_DUMMY_CLK_ADDR,
+ WINBOND_VCR_DUMMY_CLK_DEFAULT);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(SPINAND_MAX_ID_LEN,
+ spinand->scratchbuf, 1));
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(spinand->scratchbuf, spinand->id.data, SPINAND_MAX_ID_LEN))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int winbond_change_spi_mode(struct spinand_device *spinand,
+ const enum spinand_protocol protocol)
+{
+ if (spinand->protocol == protocol)
+ return 0;
+
+ switch (spinand->protocol) {
+ case SPINAND_1S_1S_1S:
+ if (protocol == SPINAND_8D_8D_8D)
+ return winbond_spinand_octal_dtr_enable(spinand);
+ break;
+
+ case SPINAND_8D_8D_8D:
+ if (protocol == SPINAND_1S_1S_1S)
+ return winbond_spinand_octal_dtr_disable(spinand);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
.init = winbond_spinand_init,
+ .change_mode = winbond_change_spi_mode,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 2c256d455c9f..5c935f1e4d6d 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -40,6 +40,81 @@
#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_SRST_SLEEP_MIN 200
+#define SPI_NOR_SRST_SLEEP_MAX 400
+
+/**
+ * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
+ * extension type.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose opcode
+ * extension is to be derived.
+ *
+ * Right now, only "repeat" and "invert" are supported.
+ *
+ * Return: The opcode extension.
+ */
+static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
+ const struct spi_mem_op *op)
+{
+ switch (nor->cmd_ext_type) {
+ case SPI_NOR_EXT_INVERT:
+ return ~op->cmd.opcode;
+
+ case SPI_NOR_EXT_REPEAT:
+ return op->cmd.opcode;
+
+ default:
+ dev_err(nor->dev, "Unknown command extension type\n");
+ return 0;
+ }
+}
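A quick worked example with assumed values: on an invert-style flash, a 0xEE read opcode yields the extension ~0xEE = 0x11, which spi_nor_spimem_setup_op() then appends to form the 2-byte command 0xEE11 (a repeat-style part would send 0xEEEE instead):

	u8 opcode = 0xEE;			/* illustrative opcode */
	u8 ext = ~opcode;			/* 0x11 with SPI_NOR_EXT_INVERT */
	u16 wire_cmd = (opcode << 8) | ext;	/* 0xEE11, sent with cmd.nbytes = 2 */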
+
+/**
+ * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to the 'struct spi_mem_op' whose properties
+ * need to be initialized.
+ * @proto: the protocol from which the properties need to be set.
+ */
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto)
+{
+ u8 ext;
+
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
+
+ if (op->addr.nbytes)
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->dummy.nbytes)
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
+
+ if (op->data.nbytes)
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
+
+ if (spi_nor_protocol_is_dtr(proto)) {
+ /*
+ * SPIMEM supports mixed DTR modes, but right now we can only
+ * have all phases either DTR or STR. IOW, SPIMEM can have
+ * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
+ * phases to either DTR or STR.
+ */
+ op->cmd.dtr = true;
+ op->addr.dtr = true;
+ op->dummy.dtr = true;
+ op->data.dtr = true;
+
+ /* 2 bytes per clock cycle in DTR mode. */
+ op->dummy.nbytes *= 2;
+
+ ext = spi_nor_get_cmd_ext(nor, op);
+ op->cmd.opcode = (op->cmd.opcode << 8) | ext;
+ op->cmd.nbytes = 2;
+ }
+}
+
/**
* spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
* transfer
@@ -82,6 +157,59 @@ static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
return spi_mem_exec_op(nor->spimem, op);
}
+static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->read_reg(nor, opcode, buf, len);
+}
+
+static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
+ const u8 *buf, size_t len)
+{
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->write_reg(nor, opcode, buf, len);
+}
+
+static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
+{
+ if (spi_nor_protocol_is_dtr(nor->write_proto))
+ return -EOPNOTSUPP;
+
+ return nor->controller_ops->erase(nor, offs);
+}
+
+/**
+ * spi_nor_spimem_get_read_op() - return a template for the spi_mem_op used for
+ * reading data from the flash via spi-mem.
+ * @nor: pointer to 'struct spi_nor'
+ *
+ * Return: A template of the 'struct spi_mem_op' used for reading data from
+ * the flash. The caller is expected to fill in the address, data length, and
+ * the data buffer.
+ */
+static struct spi_mem_op spi_nor_spimem_get_read_op(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
+ SPI_MEM_OP_DATA_IN(2, NULL, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
+
+ return op;
+}
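The dummy conversion is easier to see with numbers. Assuming read_dummy = 16 cycles on an 8-bit DTR bus, the recomputation above gives (16 * 8) / 8 = 16 dummy bytes, doubled to 32 because DTR transfers two bytes per clock cycle:

	unsigned int read_dummy = 16;	/* dummy clock cycles, illustrative */
	unsigned int buswidth = 8;
	unsigned int nbytes;

	nbytes = read_dummy * buswidth / 8;	/* 16 bytes */
	nbytes *= 2;				/* 32 bytes in DTR mode */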
+
/**
* spi_nor_spimem_read_data() - read data from flash's memory region via
* spi-mem
@@ -95,23 +223,14 @@ static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
size_t len, u8 *buf)
{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(len, buf, 1));
+ struct spi_mem_op op = spi_nor_spimem_get_read_op(nor);
bool usebouncebuf;
ssize_t nbytes;
int error;
- /* get transfer protocols. */
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
- /* convert the dummy cycles to the number of bytes */
- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ op.addr.val = from;
+ op.data.nbytes = len;
+ op.data.buf.in = buf;
usebouncebuf = spi_nor_spimem_bounce(nor, &op);
@@ -162,20 +281,18 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
size_t len, const u8 *buf)
{
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
int error;
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op.addr.nbytes = 0;
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
if (spi_nor_spimem_bounce(nor, &op))
memcpy(nor->bouncebuf, buf, op.data.nbytes);
@@ -222,15 +339,17 @@ int spi_nor_write_enable(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
}
if (ret)
@@ -251,15 +370,17 @@ int spi_nor_write_disable(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
}
if (ret)
@@ -272,7 +393,7 @@ int spi_nor_write_disable(struct spi_nor *nor)
* spi_nor_read_sr() - Read the Status Register.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
+ * Status Register will be written. Should be at least 2 bytes.
*
* Return: 0 on success, -errno otherwise.
*/
@@ -282,15 +403,27 @@ static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 0));
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
+ 1);
}
if (ret)
@@ -303,7 +436,8 @@ static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
* spi_nor_read_fsr() - Read the Flag Status Register.
* @nor: pointer to 'struct spi_nor'
* @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written.
+ * Flag Status Register will be written. Should be at least 2
+ * bytes.
*
* Return: 0 on success, -errno otherwise.
*/
@@ -313,15 +447,27 @@ static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
+ SPI_MEM_OP_DATA_IN(1, fsr, 0));
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.addr.nbytes = nor->params->rdsr_addr_nbytes;
+ op.dummy.nbytes = nor->params->rdsr_dummy;
+ /*
+ * We don't want to read only one byte in DTR mode. So,
+ * read 2 and then discard the second byte.
+ */
+ op.data.nbytes = 2;
+ }
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
+ 1);
}
if (ret)
@@ -345,14 +491,17 @@ static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, cr, 1));
+ SPI_MEM_OP_DATA_IN(1, cr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
+ 1);
}
if (ret)
@@ -378,17 +527,19 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
SPINOR_OP_EN4B :
SPINOR_OP_EX4B,
- 1),
+ 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor,
- enable ? SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
}
if (ret)
@@ -414,15 +565,17 @@ static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
- nor->bouncebuf, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
}
if (ret)
@@ -446,15 +599,17 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
- nor->bouncebuf, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
}
if (ret)
@@ -477,15 +632,17 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
+ SPI_MEM_OP_DATA_IN(1, sr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
- sr, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
+ 1);
}
if (ret)
@@ -522,15 +679,17 @@ static void spi_nor_clear_sr(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
}
if (ret)
@@ -586,15 +745,17 @@ static void spi_nor_clear_fsr(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
}
if (ret)
@@ -730,15 +891,17 @@ static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, sr, 1));
+ SPI_MEM_OP_DATA_OUT(len, sr, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
- sr, len);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
+ len);
}
if (ret) {
@@ -941,15 +1104,17 @@ static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+ SPI_MEM_OP_DATA_OUT(1, sr2, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
- sr2, 1);
+ ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
}
if (ret) {
@@ -975,15 +1140,17 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
+ SPI_MEM_OP_DATA_IN(1, sr2, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
- sr2, 1);
+ ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
+ 1);
}
if (ret)
@@ -1006,15 +1173,18 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
- NULL, 0);
+ ret = spi_nor_controller_ops_write_reg(nor,
+ SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
}
if (ret)
@@ -1148,14 +1318,16 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
+ spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
- return nor->controller_ops->erase(nor, addr);
+ return spi_nor_controller_ops_erase(nor, addr);
}
/*
@@ -1167,8 +1339,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
addr >>= 8;
}
- return nor->controller_ops->write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
}
/**
@@ -2093,6 +2265,82 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
return ERR_PTR(-ENODEV);
}
+/*
+ * On Octal DTR capable flashes like Micron Xcella, reads cannot start or
+ * end at an odd address in Octal DTR mode. Extra bytes need to be read
+ * at the start or end to make sure both the start address and length
+ * remain even.
+ */
+static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ u_char *tmp_buf;
+ size_t tmp_len;
+ loff_t start, end;
+ int ret, bytes_read;
+
+ if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_read_data(nor, from, len, buf);
+ else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
+ return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
+ buf);
+
+ tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ start = round_down(from, 2);
+ end = round_up(from + len, 2);
+
+ /*
+ * Avoid allocating too much memory. The requested read length might be
+ * quite large. Allocating a buffer just as large (slightly bigger, in
+ * fact) would put unnecessary memory pressure on the system.
+ *
+ * For example if the read is from 3 to 1M, then this will read from 2
+ * to 4098. The reads from 4098 to 1M will then not need a temporary
+ * buffer so they can proceed as normal.
+ */
+ tmp_len = min_t(size_t, end - start, PAGE_SIZE);
+
+ ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are read than actually requested, but that number can't be
+ * reported to the calling function or it will confuse its calculations.
+ * Calculate how many of the _requested_ bytes were read.
+ */
+ bytes_read = ret;
+
+ if (from != start)
+ ret -= from - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually read.
+ * For example, if the total length was truncated because of temporary
+ * buffer size limit then the adjustment for the extra bytes at the end
+ * is not needed.
+ */
+ if (start + bytes_read == end)
+ ret -= end - (from + len);
+
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(buf, tmp_buf + (from - start), ret);
+out:
+ kfree(tmp_buf);
+ return ret;
+}
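To make the alignment arithmetic above concrete: a read of len = 7 at from = 3 becomes a bus read of [2, 10), and 7 bytes are copied out of the bounce buffer starting at offset 1. A minimal standalone sketch of just that arithmetic (illustrative only, not part of the patch):

	#include <stdio.h>

	/* Same semantics as the kernel's round_down()/round_up() for powers of two. */
	#define round_down(x, y) ((x) & ~((long)(y) - 1))
	#define round_up(x, y)   ((((x) - 1) | ((long)(y) - 1)) + 1)

	int main(void)
	{
		long from = 3, len = 7;
		long start = round_down(from, 2);	/* 2 */
		long end = round_up(from + len, 2);	/* 10 */

		/* 8 bytes travel over the bus, but the caller only asked for 7. */
		printf("bus read [%ld, %ld), copy %ld bytes from offset %ld\n",
		       start, end, len, from - start);
		return 0;
	}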
+
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -2110,7 +2358,10 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
addr = spi_nor_convert_addr(nor, addr);
- ret = spi_nor_read_data(nor, addr, len, buf);
+ if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
+ else
+ ret = spi_nor_read_data(nor, addr, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2133,6 +2384,71 @@ read_err:
}
/*
+ * On Octal DTR capable flashes like Micron Xcella, writes cannot start or
+ * end at an odd address in Octal DTR mode. Extra 0xff bytes need to be
+ * appended or prepended to make sure the start address and end address are
+ * even. 0xff is used because on NOR flashes a program operation can only
+ * flip bits from 1 to 0, not the other way round. A 0 to 1 flip needs to
+ * happen via erases.
+ */
+static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ u8 *tmp_buf;
+ size_t bytes_written;
+ loff_t start, end;
+ int ret;
+
+ if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_write_data(nor, to, len, buf);
+
+ tmp_buf = kmalloc(nor->page_size, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ memset(tmp_buf, 0xff, nor->page_size);
+
+ start = round_down(to, 2);
+ end = round_up(to + len, 2);
+
+ memcpy(tmp_buf + (to - start), buf, len);
+
+ ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are written than actually requested, but that number can't
+ * be reported to the calling function or it will confuse its
+ * calculations. Calculate how many of the _requested_ bytes were
+ * written.
+ */
+ bytes_written = ret;
+
+ if (to != start)
+ ret -= to - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually
+ * written. For example, if for some reason the controller could only
+ * complete a partial write then the adjustment for the extra bytes at
+ * the end is not needed.
+ */
+ if (start + bytes_written == end)
+ ret -= end - (to + len);
+
+ if (ret < 0)
+ ret = -EIO;
+
+out:
+ kfree(tmp_buf);
+ return ret;
+}
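The same padding rule in isolation: a 3-byte write at address 5 becomes a 4-byte program of [4, 8), with the pad bytes set to 0xff so the neighbouring cells keep their contents. A hedged standalone helper (name and shape are illustrative, not part of the patch):

	#include <string.h>

	/* tmp must hold end - start bytes; start/end are the even-rounded bounds. */
	static void pad_octal_dtr_write(unsigned char *tmp, long start, long end,
					long to, long len, const unsigned char *buf)
	{
		memset(tmp, 0xff, end - start);	/* 0xff programs no bits on NOR */
		memcpy(tmp + (to - start), buf, len);
	}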
+
+/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
@@ -2179,7 +2495,12 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
goto write_err;
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
+ buf + i);
+ else
+ ret = spi_nor_write_data(nor, addr, page_remain,
+ buf + i);
if (ret < 0)
goto write_err;
written = ret;
@@ -2217,7 +2538,7 @@ static int spi_nor_check(struct spi_nor *nor)
return 0;
}
-static void
+void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
u8 num_mode_clocks,
u8 num_wait_states,
@@ -2266,6 +2587,7 @@ int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{ SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
{ SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
{ SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
@@ -2282,6 +2604,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{ SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
{ SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
{ SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
@@ -2294,7 +2617,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
* @nor: pointer to a 'struct spi_nor'
* @op: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_op(struct spi_nor *nor,
struct spi_mem_op *op)
@@ -2308,12 +2631,12 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
if (nor->mtd.size > SZ_16M)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
op->addr.nbytes = 3;
if (!spi_mem_supports_op(nor->spimem, op))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -2325,22 +2648,22 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
* @nor: pointer to a 'struct spi_nor'
* @read: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
const struct spi_nor_read_command *read)
{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_DUMMY(0, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1));
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
+ SPI_MEM_OP_DUMMY(1, 0),
+ SPI_MEM_OP_DATA_IN(2, NULL, 0));
+
+ spi_nor_spimem_setup_op(nor, &op, read->proto);
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
- op.dummy.buswidth / 8;
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
return spi_nor_spimem_check_op(nor, &op);
}
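Spelling out the conversion above: spi-mem expresses the dummy phase in bytes, so N dummy cycles on a w-line bus occupy N * w / 8 bytes, and DTR doubles that because every line carries two bits per cycle. For 20 cycles in 8D-8D-8D that is 20 * 8 / 8 = 20 bytes, doubled to 40. An illustrative mirror of the arithmetic (the helper name is made up):

	#include <stdbool.h>

	static unsigned int dummy_cycles_to_nbytes(unsigned int cycles,
						   unsigned int buswidth, bool dtr)
	{
		unsigned int nbytes = cycles * buswidth / 8;

		/* DTR moves two bits per cycle on every line. */
		return dtr ? nbytes * 2 : nbytes;
	}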
@@ -2351,19 +2674,17 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
* @nor: pointer to a 'struct spi_nor'
* @pp: pointer to op template to be checked
*
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
+ * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
const struct spi_nor_pp_command *pp)
{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+ SPI_MEM_OP_DATA_OUT(2, NULL, 0));
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+ spi_nor_spimem_setup_op(nor, &op, pp->proto);
return spi_nor_spimem_check_op(nor, &op);
}
@@ -2381,12 +2702,16 @@ spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
struct spi_nor_flash_parameter *params = nor->params;
unsigned int cap;
- /* DTR modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_DTR;
-
/* X-X-X modes are not supported yet, mask them all. */
*hwcaps &= ~SNOR_HWCAPS_X_X_X;
+ /*
+ * If the reset line is broken, we do not want to enter a stateful
+ * mode.
+ */
+ if (nor->flags & SNOR_F_BROKEN_RESET)
+ *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
+
for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
int rdidx, ppidx;
@@ -2641,7 +2966,7 @@ static int spi_nor_default_setup(struct spi_nor *nor,
* controller directly implements the spi_nor interface.
* Yet another reason to switch to spi-mem.
*/
- ignored_mask = SNOR_HWCAPS_X_X_X;
+ ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
if (shared_mask & ignored_mask) {
dev_dbg(nor->dev,
"SPI n-n-n protocols are not supported.\n");
@@ -2742,6 +3067,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
+ params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
params->page_size = info->page_size;
@@ -2786,11 +3112,28 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
SNOR_PROTO_1_1_8);
}
+ if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_8_8_8_DTR);
+ }
+
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
+ }
+
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
* smallest erase size starting at BIT(0).
@@ -2898,7 +3241,8 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_manufacturer_init_params(nor);
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
!(nor->info->flags & SPI_NOR_SKIP_SFDP))
spi_nor_sfdp_init_params(nor);
@@ -2909,6 +3253,38 @@ static int spi_nor_init_params(struct spi_nor *nor)
return 0;
}
+/**
+ * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (!nor->params->octal_dtr_enable)
+ return 0;
+
+ if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
+ nor->write_proto == SNOR_PROTO_8_8_8_DTR))
+ return 0;
+
+ if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
+ return 0;
+
+ ret = nor->params->octal_dtr_enable(nor, enable);
+ if (ret)
+ return ret;
+
+ if (enable)
+ nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
+ else
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+
+ return 0;
+}
+
/**
* spi_nor_quad_enable() - enable Quad I/O if needed.
* @nor: pointer to a 'struct spi_nor'
@@ -2955,6 +3331,12 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
+ err = spi_nor_octal_dtr_enable(nor, true);
+ if (err) {
+ dev_dbg(nor->dev, "octal mode not supported\n");
+ return err;
+ }
+
err = spi_nor_quad_enable(nor);
if (err) {
dev_dbg(nor->dev, "quad mode not supported\n");
@@ -2963,7 +3345,9 @@ static int spi_nor_init(struct spi_nor *nor)
spi_nor_try_unlock_all(nor);
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ if (nor->addr_width == 4 &&
+ nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
+ !(nor->flags & SNOR_F_4B_OPCODES)) {
/*
* If the RESET# pin isn't hooked up properly, or the system
* otherwise doesn't perform a reset command in the boot
@@ -2979,6 +3363,59 @@ static int spi_nor_init(struct spi_nor *nor)
return 0;
}
+static void spi_nor_soft_reset(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ int ret;
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ return;
+ }
+
+ /*
+ * Software Reset is not instant, and the delay varies from flash to
+ * flash. Looking at a few flashes, most range somewhere below 100
+ * microseconds. So, sleep for a range of 200-400 us.
+ */
+ usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
+}
+
+/* mtd suspend handler */
+static int spi_nor_suspend(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ /* Disable octal DTR mode if we enabled it. */
+ ret = spi_nor_octal_dtr_enable(nor, false);
+ if (ret)
+ dev_err(nor->dev, "suspend() failed\n");
+
+ return ret;
+}
+
/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
@@ -3029,6 +3466,9 @@ void spi_nor_restore(struct spi_nor *nor)
if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
+
+ if (nor->flags & SNOR_F_SOFT_RESET)
+ spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
@@ -3053,6 +3493,20 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
{
if (nor->addr_width) {
/* already configured from SFDP */
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd address width cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle, and so too the data
+ * phase, and the transaction would end with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * avoid this situation.
+ */
+ nor->addr_width = 4;
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else {
@@ -3193,11 +3647,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->name = dev_name(dev);
mtd->priv = nor;
mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
+ mtd->writesize = nor->params->writesize;
mtd->flags = MTD_CAP_NORFLASH;
mtd->size = nor->params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_suspend = spi_nor_suspend;
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
@@ -3250,6 +3705,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_4B_OPCODES)
nor->flags |= SNOR_F_4B_OPCODES;
+ if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+
ret = spi_nor_set_addr_width(nor);
if (ret)
return ret;
@@ -3285,23 +3743,10 @@ EXPORT_SYMBOL_GPL(spi_nor_scan);
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
- .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+ .op_tmpl = spi_nor_spimem_get_read_op(nor),
.offset = 0,
.length = nor->mtd.size,
};
- struct spi_mem_op *op = &info.op_tmpl;
-
- /* get transfer protocols. */
- op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op->dummy.buswidth = op->addr.buswidth;
- op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
- /* convert the dummy cycles to the number of bytes */
- op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
@@ -3311,24 +3756,27 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
- .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+ SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
.length = nor->mtd.size,
};
struct spi_mem_op *op = &info.op_tmpl;
- /* get transfer protocols. */
- op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op->dummy.buswidth = op->addr.buswidth;
- op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op->addr.nbytes = 0;
+ spi_nor_spimem_setup_op(nor, op, nor->write_proto);
+
+ /*
+ * Since spi_nor_spimem_setup_op() only sets buswidth when the number
+ * of data bytes is non-zero, the data buswidth won't be set here. So,
+ * do it explicitly.
+ */
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
@@ -3344,6 +3792,7 @@ static int spi_nor_probe(struct spi_mem *spimem)
* checking what's really supported using spi_mem_supports_op().
*/
const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+ struct mtd_part *part;
char *flash_name;
int ret;
@@ -3403,8 +3852,25 @@ static int spi_nor_probe(struct spi_mem *spimem)
if (ret)
return ret;
- return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
- data ? data->nr_parts : 0);
+ ret = mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(part, &nor->mtd.partitions, node) {
+ struct spi_mem_op op;
+ struct mtd_info *part_info = container_of(part,
+ struct mtd_info, part);
+
+ if (part_info->name &&
+ !strcmp(part_info->name, "ospi.phypattern")) {
+ op = spi_nor_spimem_get_read_op(nor);
+ op.addr.val = part->offset;
+ spi_mem_do_calibration(nor->spimem, &op);
+ }
+ }
+
+ return 0;
}
static int spi_nor_remove(struct spi_mem *spimem)
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 6f62ee861231..caf8b7def828 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -26,6 +26,8 @@ enum spi_nor_option_flags {
SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
SNOR_F_HAS_4BIT_BP = BIT(12),
SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+ SNOR_F_IO_MODE_EN_VOLATILE = BIT(14),
+ SNOR_F_SOFT_RESET = BIT(15),
};
struct spi_nor_read_command {
@@ -62,6 +64,7 @@ enum spi_nor_read_command_index {
SNOR_CMD_READ_1_8_8,
SNOR_CMD_READ_8_8_8,
SNOR_CMD_READ_1_8_8_DTR,
+ SNOR_CMD_READ_8_8_8_DTR,
SNOR_CMD_READ_MAX
};
@@ -78,6 +81,7 @@ enum spi_nor_pp_command_index {
SNOR_CMD_PP_1_1_8,
SNOR_CMD_PP_1_8_8,
SNOR_CMD_PP_8_8_8,
+ SNOR_CMD_PP_8_8_8_DTR,
SNOR_CMD_PP_MAX
};
@@ -189,7 +193,12 @@ struct spi_nor_locking_ops {
* Serial Flash Discoverable Parameters (SFDP) tables.
*
* @size: the flash memory density in bytes.
+ * @writesize: Minimal writable flash unit size. Defaults to 1. Set to
+ * ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @rdsr_dummy: dummy cycles needed for Read Status Register command.
+ * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
+ * command.
* @hwcaps: describes the read and page program hardware
* capabilities.
* @reads: read capabilities ordered by priority: the higher index
@@ -198,6 +207,7 @@ struct spi_nor_locking_ops {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
+ * @octal_dtr_enable: enables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
@@ -211,7 +221,10 @@ struct spi_nor_locking_ops {
*/
struct spi_nor_flash_parameter {
u64 size;
+ u32 writesize;
u32 page_size;
+ u8 rdsr_dummy;
+ u8 rdsr_addr_nbytes;
struct spi_nor_hwcaps hwcaps;
struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
@@ -219,6 +232,7 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
+ int (*octal_dtr_enable)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
@@ -311,6 +325,13 @@ struct flash_info {
* BP3 is bit 6 of status register.
* Must be used with SPI_NOR_4BIT_BP.
*/
+#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */
+#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */
+#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /*
+ * Flash enables the best
+ * available I/O mode via a
+ * volatile bit.
+ */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -399,6 +420,9 @@ extern const struct spi_nor_manufacturer spi_nor_winbond;
extern const struct spi_nor_manufacturer spi_nor_xilinx;
extern const struct spi_nor_manufacturer spi_nor_xmc;
+void spi_nor_spimem_setup_op(const struct spi_nor *nor,
+ struct spi_mem_op *op,
+ const enum spi_nor_protocol proto);
int spi_nor_write_enable(struct spi_nor *nor);
int spi_nor_write_disable(struct spi_nor *nor);
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
@@ -419,6 +443,11 @@ ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
int spi_nor_hwcaps_read2cmd(u32 hwcaps);
u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto);
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
enum spi_nor_protocol proto);
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index ef3695080710..c224e59820a1 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -8,10 +8,123 @@
#include "core.h"
+#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
+#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */
+#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */
+#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */
+#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */
+#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */
+#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */
+
+static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ if (enable) {
+ /* Use 20 dummy cycles for memory array reads. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ *buf = 20;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_MT_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+ }
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (enable)
+ *buf = SPINOR_MT_OCT_DTR;
+ else
+ *buf = SPINOR_MT_EXSPI;
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(enable ? 3 : 4,
+ SPINOR_REG_MT_CFR0V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ if (!enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(enable ? 8 : 0, 1),
+ SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2),
+ buf, 1));
+
+ if (enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mt35xu512aba_default_init(struct spi_nor *nor)
+{
+ nor->params->octal_dtr_enable = spi_nor_micron_octal_dtr_enable;
+}
+
+static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
+{
+ /* Set the Fast Read settings. */
+ nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_MT_DTR_RD,
+ SNOR_PROTO_8_8_8_DTR);
+
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ nor->params->rdsr_dummy = 8;
+ nor->params->rdsr_addr_nbytes = 0;
+
+ /*
+ * The BFPT quad enable field is set to a reserved value so the quad
+ * enable function is ignored by spi_nor_parse_bfpt(). Make sure we
+ * disable it.
+ */
+ nor->params->quad_enable = NULL;
+}
+
+static struct spi_nor_fixups mt35xu512aba_fixups = {
+ .default_init = mt35xu512aba_default_init,
+ .post_sfdp = mt35xu512aba_post_sfdp_fixup,
+};
+
static const struct flash_info micron_parts[] = {
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+ SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ |
+ SPI_NOR_OCTAL_DTR_PP |
+ SPI_NOR_IO_MODE_EN_VOLATILE)
+ .fixups = &mt35xu512aba_fixups },
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index 08de2a2b4452..5859564e63fb 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -4,6 +4,7 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/mtd/spi-nor.h>
@@ -19,6 +20,11 @@
#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */
+#define SFDP_SCCR_MAP_ID 0xff87 /*
+ * Status, Control and Configuration
+ * Register Map.
+ */
#define SFDP_SIGNATURE 0x50444653U
@@ -602,10 +608,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
break;
}
+ /* Soft Reset support. */
+ if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
+ nor->flags |= SNOR_F_SOFT_RESET;
+
/* Stop here if not JESD216 rev C or later. */
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
params);
+ /* 8D-8D-8D command extension. */
+ switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
+ case BFPT_DWORD18_CMD_EXT_REP:
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_INV:
+ nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_RES:
+ dev_dbg(nor->dev, "Reserved command extension used\n");
+ break;
+
+ case BFPT_DWORD18_CMD_EXT_16B:
+ dev_dbg(nor->dev, "16-bit opcodes not supported\n");
+ return -EOPNOTSUPP;
+ }
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
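For context on the extension types parsed here: in 8D-8D-8D mode each command is sent as a two-byte opcode, and BFPT DWORD18 says how the second byte is derived from the first. A sketch of that derivation, assuming the enum spi_nor_cmd_ext values referenced above (the helper name is illustrative):

	static u8 spi_nor_cmd_ext_byte(enum spi_nor_cmd_ext ext_type, u8 opcode)
	{
		switch (ext_type) {
		case SPI_NOR_EXT_INVERT:
			return ~opcode;	/* e.g. Fast Read 0x0b is sent as 0x0b 0xf4 */
		case SPI_NOR_EXT_REPEAT:
		default:
			return opcode;	/* e.g. 0x0b is sent as 0x0b 0x0b */
		}
	}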
@@ -1046,9 +1074,16 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
}
/* 4BAIT is the only SFDP table that indicates page program support. */
- if (pp_hwcaps & SNOR_HWCAPS_PP)
+ if (pp_hwcaps & SNOR_HWCAPS_PP) {
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+ }
if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
@@ -1082,6 +1117,131 @@ out:
return ret;
}
+#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29)
+#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28)
+#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8)
+#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7)
+#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27)
+#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17)
+#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7)
+
+/**
+ * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
+ * @nor: pointer to a 'struct spi_nor'
+ * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Profile 1.0 Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_profile1(struct spi_nor *nor,
+ const struct sfdp_parameter_header *profile1_header,
+ struct spi_nor_flash_parameter *params)
+{
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+ u8 dummy, opcode;
+
+ len = profile1_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(profile1_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, profile1_header->length);
+
+ /* Get 8D-8D-8D fast read opcode and dummy cycles. */
+ opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);
+
+ /* Set the Read Status Register dummy cycles and dummy address bytes. */
+ if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
+ params->rdsr_dummy = 8;
+ else
+ params->rdsr_dummy = 4;
+
+ if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
+ params->rdsr_addr_nbytes = 4;
+ else
+ params->rdsr_addr_nbytes = 0;
+
+ /*
+ * We don't know what speed the controller is running at. Find the
+ * dummy cycles for the fastest frequency the flash can run at to be
+ * sure we are never short of dummy cycles. A value of 0 means the
+ * frequency is not supported.
+ *
+ * If we don't find anything, just log it and let flashes set the correct
+ * value if needed in their fixup hooks.
+ */
+ dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
+ if (!dummy)
+ dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
+ if (!dummy)
+ dev_dbg(nor->dev,
+ "Can't find dummy cycles from Profile 1.0 table\n");
+
+ /* Round up to an even value to avoid tripping controllers up. */
+ dummy = round_up(dummy, 2);
+
+ /* Update the fast read settings. */
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, dummy, opcode,
+ SNOR_PROTO_8_8_8_DTR);
+
+out:
+ kfree(dwords);
+ return ret;
+}
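A quick worked example of the extraction above, for readers new to <linux/bitfield.h>: FIELD_GET() masks out the named bits and shifts them down. With the defines used here (the dword value is made up):

	u32 dword1 = 0x0000ee00;
	u8 opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dword1);	/* 0xee */
	u8 dummy = round_up(19, 2);					/* 19 cycles -> 20 */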
+
+#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31)
+
+/**
+ * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
+ * Map.
+ * @nor: pointer to a 'struct spi_nor'
+ * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the SCCR Map table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sccr(struct spi_nor *nor,
+ const struct sfdp_parameter_header *sccr_header,
+ struct spi_nor_flash_parameter *params)
+{
+ u32 *dwords, addr;
+ size_t len;
+ int ret;
+
+ len = sccr_header->length * sizeof(*dwords);
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(sccr_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ le32_to_cpu_array(dwords, sccr_header->length);
+
+ /* DWORD n of the table sits at array index n - 1. */
+ if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[21]))
+ nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+
+out:
+ kfree(dwords);
+ return ret;
+}
+
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
@@ -1183,6 +1343,14 @@ int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_parse_4bait(nor, param_header, params);
break;
+ case SFDP_PROFILE1_ID:
+ err = spi_nor_parse_profile1(nor, param_header, params);
+ break;
+
+ case SFDP_SCCR_MAP_ID:
+ err = spi_nor_parse_sccr(nor, param_header, params);
+ break;
+
default:
break;
}
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index 7f9846b3a1ad..89152ae1cf3e 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -90,6 +90,14 @@ struct sfdp_bfpt {
#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+
+#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
+#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
+#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
+#define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */
+#define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */
+
struct sfdp_parameter_header {
u8 id_lsb;
u8 minor;
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 8429b4af999a..89f220814c48 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -8,6 +8,312 @@
#include "core.h"
+#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
+#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR1V 0x00800002
+#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */
+#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
+#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
+#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
+#define SPINOR_REG_CYPRESS_CFR3V_PGSZ BIT(4) /* Page size. */
+#define SPINOR_REG_CYPRESS_CFR5V 0x00800006
+#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN 0x3
+#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS 0
+#define SPINOR_OP_CYPRESS_RD_FAST 0xee
+
+/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * It is recommended to update volatile registers in the field application,
+ * because a power interruption risks corrupting the non-volatile registers.
+ * This function sets the Quad Enable bit in the CFR1 volatile register. If
+ * users set the Quad Enable bit in the CFR1 non-volatile register in advance
+ * (typically with a Flash programmer before mounting the Flash on the PCB),
+ * the Quad Enable bit in the CFR1 volatile register is also set during Flash
+ * power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->rdsr_addr_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ /* cypress_nor_set_page_size(nor) is currently a TODO in ti-linux-5.10.y. */
+ return 0;
+}
+
+static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_erase_type *erase_type =
+ nor->params->erase_map.erase_type;
+ struct spi_nor_flash_parameter *params = nor->params;
+ unsigned int i;
+
+ /*
+ * In some parts, 3-byte erase opcodes are advertised by 4BAIT.
+ * Convert them to 4-byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+
+ /* The writesize should be ECC data unit size */
+ params->writesize = 16;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+};
+
+/**
+ * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * @nor: pointer to a 'struct spi_nor'
+ * @enable: whether to enable or disable Octal DTR
+ *
+ * This also sets the memory access latency cycles to 24 to allow the flash to
+ * run at up to 200MHz.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+ int ret;
+
+ if (enable) {
+ /* Use 24 dummy cycles for memory array reads. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ *buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR2V,
+ 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, buf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->read_dummy = 24;
+ }
+
+ /* Set/unset the octal and DTR enable bits. */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (enable) {
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
+ } else {
+ /*
+ * The register is 1-byte wide, but 1-byte transactions are not
+ * allowed in 8D-8D-8D mode. Since there is no register at the
+ * next location, just initialize the value to 0 and let the
+ * transaction go on.
+ */
+ buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS;
+ buf[1] = 0;
+ }
+
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(enable ? 3 : 4,
+ SPINOR_REG_CYPRESS_CFR5V,
+ 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(enable ? 1 : 2, buf, 1));
+
+ if (!enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ /* Read flash ID to make sure the switch was successful. */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_ADDR(enable ? 4 : 0, 0, 1),
+ SPI_MEM_OP_DUMMY(enable ? 3 : 0, 1),
+ SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2),
+ buf, 1));
+
+ if (enable)
+ spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (memcmp(buf, nor->info->id, nor->info->id_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void s28hs512t_default_init(struct spi_nor *nor)
+{
+ nor->params->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable;
+ nor->params->writesize = 16;
+}
+
+static void s28hs512t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ /*
+ * On older versions of the flash the xSPI Profile 1.0 table lists the
+ * 8D-8D-8D Fast Read opcode as 0x00, but it should actually be 0xEE.
+ */
+ if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ SPINOR_OP_CYPRESS_RD_FAST;
+
+ /* This flash is also missing the 4-byte Page Program opcode bit. */
+ spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+
+ /*
+ * The xSPI Profile 1.0 table advertises the number of additional
+ * address bytes needed for the Read Status Register command as 0, but
+ * the actual value is 4.
+ */
+ nor->params->rdsr_addr_nbytes = 4;
+}
+
+static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * The BFPT table advertises a 512B page size but the page size is
+ * actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ */
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR3V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ int ret;
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ params->page_size = 512;
+ else
+ params->page_size = 256;
+
+ return 0;
+}
+
+static struct spi_nor_fixups s28hs512t_fixups = {
+ .default_init = s28hs512t_default_init,
+ .post_sfdp = s28hs512t_post_sfdp_fixup,
+ .post_bfpt = s28hs512t_post_bfpt_fixup,
+};
+
static int
s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -102,8 +408,29 @@ static const struct flash_info spansion_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR)
+ .fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
SPI_NOR_NO_ERASE) },
+ { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256,
+ SECT_4K | SPI_NOR_OCTAL_DTR_READ |
+ SPI_NOR_OCTAL_DTR_PP)
+ .fixups = &s28hs512t_fixups,
+ },
};
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 4153e0d15c5f..ee3fe709686c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -624,10 +624,8 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
}
- if (ubi->mtd->type == MTD_NORFLASH) {
- ubi_assert(ubi->mtd->writesize == 1);
+ if (ubi->mtd->type == MTD_NORFLASH)
ubi->nor_flash = 1;
- }
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 14d890b00d2c..2f3312c31e51 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -535,7 +535,14 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
return -EROFS;
}
- if (ubi->nor_flash) {
+ /*
+ * If the flash is ECC-ed then we have to erase the ECC block before we
+ * can write to it. But the write is in preparation for an erase in the
+ * first place. This means we cannot zero out EC and VID before the
+ * erase and we just have to hope the flash starts erasing from the
+ * start of the page.
+ */
+ if (ubi->nor_flash && ubi->mtd->writesize == 1) {
err = nor_erase_prepare(ubi, pnum);
if (err)
return err;
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 1fb22388e7e0..22997fde1cdd 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "mux-core: " fmt
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -28,6 +29,20 @@
*/
#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS
+/**
+ * struct mux_state - Represents a mux controller state specific to a given
+ * consumer.
+ * @mux: Pointer to a mux controller.
+ * @state: State of the mux to be selected.
+ *
+ * This structure is specific to the consumer that acquires it and has
+ * information specific to that consumer.
+ */
+struct mux_state {
+ struct mux_control *mux;
+ unsigned int state;
+};
+
static struct class mux_class = {
.name = "mux",
.owner = THIS_MODULE,
@@ -116,6 +131,7 @@ struct mux_chip *mux_chip_alloc(struct device *dev,
sema_init(&mux->lock, 1);
mux->cached_state = MUX_CACHE_UNKNOWN;
mux->idle_state = MUX_IDLE_AS_IS;
+ mux->last_change = ktime_get();
}
device_initialize(&mux_chip->dev);
@@ -129,6 +145,8 @@ static int mux_control_set(struct mux_control *mux, int state)
int ret = mux->chip->ops->set(mux, state);
mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state;
+ if (ret >= 0)
+ mux->last_change = ktime_get();
return ret;
}
@@ -314,15 +332,31 @@ static int __mux_control_select(struct mux_control *mux, int state)
return ret;
}
+static void mux_control_delay(struct mux_control *mux, unsigned int delay_us)
+{
+ ktime_t delayend;
+ s64 remaining;
+
+ if (!delay_us)
+ return;
+
+ delayend = ktime_add_us(mux->last_change, delay_us);
+ remaining = ktime_us_delta(delayend, ktime_get());
+ if (remaining > 0)
+ fsleep(remaining);
+}
+
/**
- * mux_control_select() - Select the given multiplexer state.
+ * mux_control_select_delay() - Select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
* there is a call to mux_control_deselect(). If the mux-control is already
* selected when mux_control_select() is called, the caller will be blocked
- * until mux_control_deselect() is called (by someone else).
+ * until mux_control_deselect() or mux_state_deselect() is called (by someone
+ * else).
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
@@ -331,7 +365,8 @@ static int __mux_control_select(struct mux_control *mux, int state)
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error.
*/
-int mux_control_select(struct mux_control *mux, unsigned int state)
+int mux_control_select_delay(struct mux_control *mux, unsigned int state,
+ unsigned int delay_us)
{
int ret;
@@ -340,21 +375,48 @@ int mux_control_select(struct mux_control *mux, unsigned int state)
return ret;
ret = __mux_control_select(mux, state);
+ if (ret >= 0)
+ mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(mux_control_select);
+EXPORT_SYMBOL_GPL(mux_control_select_delay);
+
+/**
+ * mux_state_select_delay() - Select the given multiplexer state.
+ * @mstate: The mux-state to select.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
+ *
+ * On successfully selecting the mux-state, its mux-control will be locked
+ * until there is a call to mux_state_deselect(). If the mux-control is already
+ * selected when mux_state_select() is called, the caller will be blocked
+ * until mux_state_deselect() or mux_control_deselect() is called (by someone
+ * else).
+ *
+ * Therefore, make sure to call mux_state_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_state_deselect() if mux_state_select() fails.
+ *
+ * Return: 0 when the mux-state has been selected or a negative
+ * errno on error.
+ */
+int mux_state_select_delay(struct mux_state *mstate, unsigned int delay_us)
+{
+ return mux_control_select_delay(mstate->mux, mstate->state, delay_us);
+}
+EXPORT_SYMBOL_GPL(mux_state_select_delay);
/**
- * mux_control_try_select() - Try to select the given multiplexer state.
+ * mux_control_try_select_delay() - Try to select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
- * mux_control_deselect() called.
+ * mux_control_deselect() is called.
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
@@ -363,7 +425,8 @@ EXPORT_SYMBOL_GPL(mux_control_select);
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error. Specifically -EBUSY if the mux-control is contended.
*/
-int mux_control_try_select(struct mux_control *mux, unsigned int state)
+int mux_control_try_select_delay(struct mux_control *mux, unsigned int state,
+ unsigned int delay_us)
{
int ret;
@@ -371,13 +434,36 @@ int mux_control_try_select(struct mux_control *mux, unsigned int state)
return -EBUSY;
ret = __mux_control_select(mux, state);
+ if (ret >= 0)
+ mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
-EXPORT_SYMBOL_GPL(mux_control_try_select);
+EXPORT_SYMBOL_GPL(mux_control_try_select_delay);
+
+/**
+ * mux_state_try_select_delay() - Try to select the given multiplexer state.
+ * @mstate: The mux-state to select.
+ * @delay_us: The time to delay (in microseconds) if the mux state is changed.
+ *
+ * On successfully selecting the mux-state, its mux-control will be locked
+ * until mux_state_deselect() is called.
+ *
+ * Therefore, make sure to call mux_state_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_state_deselect() if mux_state_try_select() fails.
+ *
+ * Return: 0 when the mux-state has been selected or a negative errno on
+ * error. Specifically -EBUSY if the mux-control is contended.
+ */
+int mux_state_try_select_delay(struct mux_state *mstate, unsigned int delay_us)
+{
+ return mux_control_try_select_delay(mstate->mux, mstate->state, delay_us);
+}
+EXPORT_SYMBOL_GPL(mux_state_try_select_delay);
/**
* mux_control_deselect() - Deselect the previously selected multiplexer state.
@@ -405,6 +491,24 @@ int mux_control_deselect(struct mux_control *mux)
}
EXPORT_SYMBOL_GPL(mux_control_deselect);
+/**
+ * mux_state_deselect() - Deselect the previously selected multiplexer state.
+ * @mstate: The mux-state to deselect.
+ *
+ * It is required that a single call is made to mux_state_deselect() for
+ * each and every successful call made to either of mux_state_select() or
+ * mux_state_try_select().
+ *
+ * Return: 0 on success and a negative errno on error. An error can only
+ * occur if the mux has an idle state. Note that even if an error occurs, the
+ * mux-control is unlocked and is thus free for the next access.
+ */
+int mux_state_deselect(struct mux_state *mstate)
+{
+ return mux_control_deselect(mstate->mux);
+}
+EXPORT_SYMBOL_GPL(mux_state_deselect);
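From a consumer's point of view, the new delay-aware API reads like this hedged sketch (device, mux name, state and delay are all made up; assumes <linux/mux/consumer.h>):

	static int example_use_mux(struct device *dev)
	{
		struct mux_control *mux;
		int ret;

		mux = devm_mux_control_get(dev, "example-mux");
		if (IS_ERR(mux))
			return PTR_ERR(mux);

		/* Blocks until the mux is free; sleeps 10 us if the state changed. */
		ret = mux_control_select_delay(mux, 1, 10);
		if (ret < 0)
			return ret;

		/* ... access whatever sits behind state 1 ... */

		return mux_control_deselect(mux);
	}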
+
/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
@@ -415,14 +519,17 @@ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
return dev ? to_mux_chip(dev) : NULL;
}
-/**
- * mux_control_get() - Get the mux-control for a device.
+/*
+ * mux_get() - Get the mux-control for a device.
* @dev: The device that needs a mux-control.
* @mux_name: The name identifying the mux-control.
+ * @state: Pointer to where the requested state is returned, or NULL when
+ * the required multiplexer states are handled by other means.
*
* Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
*/
-struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+static struct mux_control *mux_get(struct device *dev, const char *mux_name,
+ unsigned int *state)
{
struct device_node *np = dev->of_node;
struct of_phandle_args args;
@@ -432,8 +539,12 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
int ret;
if (mux_name) {
- index = of_property_match_string(np, "mux-control-names",
- mux_name);
+ if (state)
+ index = of_property_match_string(np, "mux-state-names",
+ mux_name);
+ else
+ index = of_property_match_string(np, "mux-control-names",
+ mux_name);
if (index < 0) {
dev_err(dev, "mux controller '%s' not found\n",
mux_name);
@@ -441,12 +552,17 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
}
}
- ret = of_parse_phandle_with_args(np,
- "mux-controls", "#mux-control-cells",
- index, &args);
+ if (state)
+ ret = of_parse_phandle_with_args(np,
+ "mux-states", "#mux-state-cells",
+ index, &args);
+ else
+ ret = of_parse_phandle_with_args(np,
+ "mux-controls", "#mux-control-cells",
+ index, &args);
if (ret) {
- dev_err(dev, "%pOF: failed to get mux-control %s(%i)\n",
- np, mux_name ?: "", index);
+ dev_err(dev, "%pOF: failed to get mux-%s %s(%i)\n",
+ np, state ? "state" : "control", mux_name ?: "", index);
return ERR_PTR(ret);
}
@@ -455,17 +571,35 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (!mux_chip)
return ERR_PTR(-EPROBE_DEFER);
- if (args.args_count > 1 ||
- (!args.args_count && (mux_chip->controllers > 1))) {
- dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
- np, args.np);
- put_device(&mux_chip->dev);
- return ERR_PTR(-EINVAL);
- }
-
controller = 0;
- if (args.args_count)
- controller = args.args[0];
+ if (state) {
+ if (args.args_count > 2 || args.args_count == 0 ||
+ (args.args_count < 2 && mux_chip->controllers > 1)) {
+ dev_err(dev, "%pOF: wrong #mux-state-cells for %pOF\n",
+ np, args.np);
+ put_device(&mux_chip->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args.args_count == 2) {
+ controller = args.args[0];
+ *state = args.args[1];
+ } else {
+ *state = args.args[0];
+ }
+
+ } else {
+ if (args.args_count > 1 ||
+ (!args.args_count && mux_chip->controllers > 1)) {
+ dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
+ np, args.np);
+ put_device(&mux_chip->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args.args_count)
+ controller = args.args[0];
+ }
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
@@ -476,6 +610,18 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
return &mux_chip->mux[controller];
}
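/*
 * Editor's illustration (assumed devicetree fragments, not from this
 * patch): with a single controller, #mux-state-cells = <1> carries only
 * the state; with several controllers, #mux-state-cells = <2> adds the
 * controller index as the first cell, matching the parsing above.
 *
 *	mux: mux-controller {
 *		compatible = "mmio-mux";
 *		#mux-state-cells = <1>;
 *	};
 *
 *	consumer {
 *		mux-states = <&mux 0x1>;
 *		mux-state-names = "serdes";
 *	};
 */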
+
+/**
+ * mux_control_get() - Get the mux-control for a device.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+{
+ return mux_get(dev, mux_name, NULL);
+}
EXPORT_SYMBOL_GPL(mux_control_get);
/**
@@ -528,6 +674,82 @@ struct mux_control *devm_mux_control_get(struct device *dev,
EXPORT_SYMBOL_GPL(devm_mux_control_get);
/*
+ * mux_state_get() - Get the mux-state for a device.
+ * @dev: The device that needs a mux-state.
+ * @mux_name: The name identifying the mux-state.
+ *
+ * Return: A pointer to the mux-state, or an ERR_PTR with a negative errno.
+ */
+static struct mux_state *mux_state_get(struct device *dev, const char *mux_name)
+{
+ struct mux_state *mstate;
+
+ mstate = kzalloc(sizeof(*mstate), GFP_KERNEL);
+ if (!mstate)
+ return ERR_PTR(-ENOMEM);
+
+ mstate->mux = mux_get(dev, mux_name, &mstate->state);
+ if (IS_ERR(mstate->mux)) {
+ int err = PTR_ERR(mstate->mux);
+
+ kfree(mstate);
+ return ERR_PTR(err);
+ }
+
+ return mstate;
+}
+
+/**
+ * mux_state_put() - Put away the mux-state for good.
+ * @mstate: The mux-state to put away.
+ *
+ * mux_state_put() reverses the effects of mux_state_get().
+ */
+void mux_state_put(struct mux_state *mstate)
+{
+ mux_control_put(mstate->mux);
+ kfree(mstate);
+}
+EXPORT_SYMBOL_GPL(mux_state_put);
+
+static void devm_mux_state_release(struct device *dev, void *res)
+{
+ struct mux_state *mstate = *(struct mux_state **)res;
+
+ mux_state_put(mstate);
+}
+
+/**
+ * devm_mux_state_get() - Get the mux-state for a device, with resource
+ * management.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: Pointer to the mux-state, or an ERR_PTR with a negative errno.
+ */
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name)
+{
+ struct mux_state **ptr, *mstate;
+
+ ptr = devres_alloc(devm_mux_state_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ mstate = mux_state_get(dev, mux_name);
+ if (IS_ERR(mstate)) {
+ devres_free(ptr);
+ return mstate;
+ }
+
+ *ptr = mstate;
+ devres_add(dev, ptr);
+
+ return mstate;
+}
+EXPORT_SYMBOL_GPL(devm_mux_state_get);
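/*
 * Editor's sketch (hypothetical consumer, not part of this patch):
 * probe-time use of the devres-managed getter; no mux_state_put() is
 * needed since devres releases the state on driver detach. The name
 * "serdes" is an assumption matching the fragment sketched earlier.
 */
static int example_probe(struct device *dev)
{
	struct mux_state *mstate;

	mstate = devm_mux_state_get(dev, "serdes");
	if (IS_ERR(mstate))
		return PTR_ERR(mstate);

	/* see the earlier example_mux_state_access() sketch */
	return example_mux_state_access(mstate);
}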
+
+/*
* Using subsys_initcall instead of module_init here to try to ensure - for
* the non-modular case - that the subsystem is initialized when mux consumers
* and mux controllers start to use it.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 19a19a7b7deb..41422f8f7930 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -1419,6 +1420,7 @@ static void m_can_stop(struct net_device *dev)
static int m_can_close(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int err;
netif_stop_queue(dev);
@@ -1438,6 +1440,10 @@ static int m_can_close(struct net_device *dev)
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
+ err = phy_power_off(cdev->transceiver);
+ if (err)
+ return err;
+
return 0;
}
@@ -1624,6 +1630,10 @@ static int m_can_open(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
+ err = phy_power_on(cdev->transceiver);
+ if (err)
+ return err;
+
err = m_can_clk_start(cdev);
if (err)
return err;
@@ -1679,6 +1689,7 @@ out_wq_fail:
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
+ phy_power_off(cdev->transceiver);
return err;
}
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index b2699a7c9997..8cad1235afa0 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -27,6 +27,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -80,6 +81,7 @@ struct m_can_classdev {
struct workqueue_struct *tx_wq;
struct work_struct tx_work;
struct sk_buff *tx_skb;
+ struct phy *transceiver;
struct can_bittiming_const *bit_timing;
struct can_bittiming_const *data_timing;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 161cb9be018c..45910fb5145c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -6,6 +6,7 @@
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -60,6 +61,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
void __iomem *mram_addr;
+ struct phy *transceiver;
int irq, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev);
@@ -99,6 +101,16 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
}
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver)) {
+ ret = PTR_ERR(transceiver);
+ dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
+ return ret;
+ }
+
+ if (transceiver)
+ mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate;
+
priv->base = addr;
priv->mram_base = mram_addr;
@@ -106,6 +118,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->pm_clock_support = 1;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
+ mcan_class->transceiver = transceiver;
mcan_class->ops = &m_can_plat_ops;
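/*
 * Editor's illustration (assumed devicetree fragment, not from this
 * patch): the optional CAN transceiver is modelled as a generic PHY;
 * its max_link_rate attribute then caps can.bitrate_max above, and the
 * open/close paths power it on and off.
 *
 *	transceiver0: can-phy {
 *		compatible = "ti,tcan1042";
 *		#phy-cells = <0>;
 *		max-bitrate = <5000000>;
 *	};
 *
 *	&main_mcan0 {
 *		phys = <&transceiver0>;
 *	};
 */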
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index abfc4c435d59..853f29836812 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -33,6 +33,7 @@ config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
+ select MDIO_BITBANG
help
This driver supports TI's DaVinci MDIO module.
@@ -92,7 +93,9 @@ config TI_CPTS
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select NET_DEVLINK
select TI_DAVINCI_MDIO
+ select PHYLINK
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
@@ -105,6 +108,15 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPSW_SWITCHDEV
+ bool "TI K3 AM654x/J721E CPSW Switch mode support"
+ depends on TI_K3_AM65_CPSW_NUSS
+ depends on NET_SWITCHDEV
+ help
+	  This enables switchdev support for the TI K3 CPSWxG Ethernet
+	  Switch. Enable this option to add hardware switch support to the
+	  AM65 CPSW NUSS driver.
+
config TI_K3_AM65_CPTS
tristate "TI K3 AM65x CPTS"
depends on ARCH_K3 && OF
@@ -171,4 +183,46 @@ config CPMAC
help
TI AR7 CPMAC Ethernet support
+config TI_RDEV_ETH_SWITCH_VIRT_EMAC
+ tristate "TI Virtual Eth MAC driver"
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+	  Support for a single-port virtual Ethernet MAC driver over the
+	  remotedev R5F Ethernet Switch firmware RPMSG protocol.
+	  This is available starting with the J721E platform.
+
+config TI_PRUETH
+ tristate "TI PRU Ethernet EMAC driver"
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ select TI_ICSS_IEP
+ imply PTP_1588_CLOCK
+ help
+	  Some TI SoCs have Programmable Real-Time Unit (PRU) cores which
+	  can support single or dual Ethernet ports with the help of
+	  firmware code running on the PRU cores. This driver uses
+	  remoteproc-based communication with the PRU firmware to expose
+	  Ethernet interfaces to Linux.
+
+config TI_ICSS_IEP
+ tristate "TI PRU ICSS IEP driver"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ This enables support for the PRU-ICSS Industrial Ethernet Peripheral
+ within a PRU-ICSS subsystem present on various TI SoCs.
+
+config TI_ICSSG_PRUETH
+ tristate "TI Gigabit PRU Ethernet driver"
+ select TI_DAVINCI_MDIO
+ select NET_PTP_CLASSIFY
+ select TI_ICSS_IEP
+ select PAGE_POOL
+ imply PTP_1588_CLOCK
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+	  Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
+	  This subsystem is available starting with the AM65 platform.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 6e779292545d..24cce9c34a04 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -26,4 +26,17 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-$(CONFIG_DEBUG_FS) += am65-debugfs.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
+
+
+obj-$(CONFIG_TI_RDEV_ETH_SWITCH_VIRT_EMAC) += ti-j721e-cpsw-virt-mac.o
+ti-j721e-cpsw-virt-mac-y := j721e-cpsw-virt-mac.o k3-cppi-desc-pool.o
+
+obj-$(CONFIG_TI_PRUETH) += prueth.o
+prueth-y := prueth_core.o prueth_qos.o prueth_switch.o prueth_lre.o
+obj-$(CONFIG_TI_ICSS_IEP) += icss_iep.o
+
+obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
+icssg-prueth-y := icssg_prueth.o icssg_classifier.o icssg_ethtool.o icssg_queues.o icssg_config.o k3-cppi-desc-pool.o icssg_mii_cfg.o icssg_switchdev.o icssg_qos.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 6e4d4f9e32e0..2033ff7e7e81 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -6,7 +6,7 @@
*/
#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -372,7 +372,15 @@ static const struct am65_cpsw_ethtool_stat am65_slave_stats[] = {
/* Ethtool priv_flags */
static const char am65_cpsw_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
#define AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN BIT(0)
+/* common flags */
"p0-rx-ptype-rrobin",
+/* port specific flags */
+#define AM65_CPSW_PRIV_IET_FRAME_PREEMPTION BIT(1)
+ "iet-frame-preemption",
+#define AM65_CPSW_PRIV_IET_MAC_VERIFY BIT(2)
+ "iet-mac-verify",
+#define AM65_CPSW_PRIV_CUT_THRU BIT(3)
+ "cut-thru",
};
static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
@@ -468,9 +476,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = salve->rx_pause ? true : false;
- pause->tx_pause = salve->tx_pause ? true : false;
+ phylink_ethtool_get_pauseparam(salve->phylink, pause);
}
static int am65_cpsw_set_pauseparam(struct net_device *ndev,
@@ -478,18 +484,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EINVAL;
-
- if (!phy_validate_pause(salve->phy, pause))
- return -EINVAL;
-
- salve->rx_pause = pause->rx_pause ? true : false;
- salve->tx_pause = pause->tx_pause ? true : false;
-
- phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(salve->phylink, pause);
}
static void am65_cpsw_get_wol(struct net_device *ndev,
@@ -497,11 +492,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (salve->phy)
- phy_ethtool_get_wol(salve->phy, wol);
+ phylink_ethtool_get_wol(salve->phylink, wol);
}
static int am65_cpsw_set_wol(struct net_device *ndev,
@@ -509,10 +500,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_wol(salve->phy, wol);
+ return phylink_ethtool_set_wol(salve->phylink, wol);
}
static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
@@ -520,11 +508,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(salve->phy, ecmd);
- return 0;
+ return phylink_ethtool_ksettings_get(salve->phylink, ecmd);
}
static int
@@ -533,40 +517,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_ksettings_set(salve->phy, ecmd);
+ return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_get_eee(salve->phy, edata);
+ return phylink_ethtool_get_eee(salve->phylink, edata);
}
static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_eee(salve->phy, edata);
+ return phylink_ethtool_set_eee(salve->phylink, edata);
}
static int am65_cpsw_nway_reset(struct net_device *ndev)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_restart_aneg(salve->phy);
+ return phylink_ethtool_nway_reset(salve->phylink);
}
static int am65_cpsw_get_regs_len(struct net_device *ndev)
@@ -721,10 +693,19 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_iet *iet = &port->qos.iet;
u32 priv_flags = 0;
if (common->pf_p0_rx_ptype_rrobin)
priv_flags |= AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN;
+ /* Port specific flags */
+ if (iet->fpe_configured)
+ priv_flags |= AM65_CPSW_PRIV_IET_FRAME_PREEMPTION;
+ if (iet->mac_verify_configured)
+ priv_flags |= AM65_CPSW_PRIV_IET_MAC_VERIFY;
+ if (port->qos.cut_thru.enable)
+ priv_flags |= AM65_CPSW_PRIV_CUT_THRU;
return priv_flags;
}
@@ -732,20 +713,115 @@ static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- int rrobin;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+ int rrobin, iet_fpe, mac_verify, cut_thru;
rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ iet_fpe = !!(flags & AM65_CPSW_PRIV_IET_FRAME_PREEMPTION);
+ mac_verify = !!(flags & AM65_CPSW_PRIV_IET_MAC_VERIFY);
+ cut_thru = !!(flags & AM65_CPSW_PRIV_CUT_THRU);
if (common->usage_count)
return -EBUSY;
- if (common->est_enabled && rrobin) {
+ if ((common->est_enabled || common->iet_enabled || iet_fpe) && rrobin) {
netdev_err(ndev,
"p0-rx-ptype-rrobin flag conflicts with QOS\n");
return -EINVAL;
}
+ if (common->tx_ch_num < 2 && iet_fpe) {
+ netdev_err(ndev, "IET fpe needs at least 2 h/w queues\n");
+ return -EINVAL;
+ }
+
+ if (mac_verify && (!iet->fpe_configured && !iet_fpe)) {
+ netdev_err(ndev, "Enable IET FPE for IET MAC verify\n");
+ return -EINVAL;
+ }
+
+ if (cut_thru && !(common->pdata.quirks & AM64_CPSW_QUIRK_CUT_THRU)) {
+ netdev_err(ndev, "Cut-Thru not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cut_thru && common->is_emac_mode) {
+ netdev_err(ndev, "Enable switch mode for cut-thru\n");
+ return -EINVAL;
+ }
+
common->pf_p0_rx_ptype_rrobin = rrobin;
+ iet->fpe_configured = iet_fpe;
+ iet->mac_verify_configured = mac_verify;
+ port->qos.cut_thru.enable = cut_thru;
+
+ return 0;
+}
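/*
 * Editor's note (interface name assumed): the new port flags are driven
 * from userspace with the standard ethtool private-flag commands, e.g.
 *
 *	# ethtool --show-priv-flags eth0
 *	# ethtool --set-priv-flags eth0 iet-frame-preemption on
 *	# ethtool --set-priv-flags eth0 cut-thru on
 *
 * All of them are rejected with -EBUSY while any port of the switch is
 * open, per the usage_count check above.
 */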
+
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ coal->rx_coalesce_usecs = 20;
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ coal->tx_coalesce_usecs = 20;
+
+ common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ coal->tx_coalesce_usecs = 20;
+
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
return 0;
}
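/*
 * Editor's note (interface name assumed): the pacing timeouts map onto
 * the standard coalesce knobs; values under 20us are rounded up by the
 * handlers above.
 *
 *	# ethtool -C eth0 rx-usecs 50 tx-usecs 50
 *	# ethtool --per-queue eth0 queue_mask 0x2 --coalesce tx-usecs 100
 */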
@@ -767,6 +843,11 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS,
+ .get_coalesce = am65_cpsw_get_coalesce,
+ .set_coalesce = am65_cpsw_set_coalesce,
+ .get_per_queue_coalesce = am65_cpsw_get_per_queue_coalesce,
+ .set_per_queue_coalesce = am65_cpsw_set_per_queue_coalesce,
.get_link = ethtool_op_get_link,
.get_link_ksettings = am65_cpsw_get_link_ksettings,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 059d68d48f1e..f56eb438c1ca 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
@@ -18,19 +19,22 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <linux/net_switch_config.h>
#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"
@@ -71,6 +75,17 @@
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
+#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
+#define AM65_CPSW_SGMII_CONTROL_MASTER_MODE BIT(5)
+
+#define MAC2MAC_MR_ADV_ABILITY_BASE (BIT(15) | BIT(0))
+#define MAC2MAC_MR_ADV_ABILITY_FULLDUPLEX BIT(12)
+#define MAC2MAC_MR_ADV_ABILITY_1G BIT(11)
+#define MAC2MAC_MR_ADV_ABILITY_100M BIT(10)
+#define MAC2PHY_MR_ADV_ABILITY BIT(0)
+
#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
@@ -78,6 +93,7 @@
/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
+#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN BIT(16)
/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
@@ -157,69 +173,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
common->pdata.quirks);
}
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct phy_device *phy = port->slave.phy;
- u32 mac_control = 0;
-
- if (!phy)
- return;
-
- if (phy->link) {
- mac_control = CPSW_SL_CTL_GMII_EN;
-
- if (phy->speed == 1000)
- mac_control |= CPSW_SL_CTL_GIG;
- if (phy->speed == 10 && phy_interface_is_rgmii(phy))
- /* Can be used with in band mode only */
- mac_control |= CPSW_SL_CTL_EXT_EN;
- if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
- mac_control |= CPSW_SL_CTL_IFCTL_A;
- if (phy->duplex)
- mac_control |= CPSW_SL_CTL_FULLDUPLEX;
-
- /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/
-
- /* rx_pause/tx_pause */
- if (port->slave.rx_pause)
- mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
-
- if (port->slave.tx_pause)
- mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
-
- cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
-
- /* enable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
-
- am65_cpsw_qos_link_up(ndev, phy->speed);
- netif_tx_wake_all_queues(ndev);
- } else {
- int tmo;
-
- /* disable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
-
- cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
-
- tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
- dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
- cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
- tmo);
-
- cpsw_sl_ctl_reset(port->slave.mac_sl);
-
- am65_cpsw_qos_link_down(ndev);
- netif_tx_stop_all_queues(ndev);
- }
-
- phy_print_status(phy);
-}
-
static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
@@ -228,6 +181,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
u32 port_mask, unreg_mcast = 0;
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -241,8 +197,8 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!vid)
unreg_mcast = port_mask;
dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
- ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
- unreg_mcast, port_mask, 0);
+ ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
pm_runtime_put(common->dev);
return ret;
@@ -252,8 +208,12 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -264,17 +224,23 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
}
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
- ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+ ret = cpsw_ale_del_vlan(common->ale, vid,
+ BIT(port->port_id) | ALE_PORT_HOST);
pm_runtime_put(common->dev);
return ret;
}
-static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
- bool promisc)
+static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
+ bool promisc)
{
struct am65_cpsw_common *common = port->common;
+ if (promisc && !common->is_emac_mode) {
+ dev_dbg(common->dev, "promisc mode requested in switch mode");
+ return;
+ }
+
if (promisc) {
/* Enable promiscuous mode */
cpsw_ale_control_set(common->ale, port->port_id,
@@ -296,7 +262,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
bool promisc;
promisc = !!(ndev->flags & IFF_PROMISC);
- am65_cpsw_slave_set_promisc_2g(port, promisc);
+ am65_cpsw_slave_set_promisc(port, promisc);
if (promisc)
return;
@@ -364,8 +330,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_err(dev, "Failed to map rx skb buffer\n");
return -EINVAL;
@@ -373,7 +340,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -404,6 +372,11 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
netdev_features_t features)
{
@@ -425,10 +398,9 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
/* set base flow_id */
writel(common->rx_flow_id_base,
host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
- /* en tx crc offload */
- if (features & NETIF_F_HW_CSUM)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN |
+ AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
am65_cpsw_nuss_set_p0_ptype(common);
@@ -452,9 +424,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
- cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
- ALE_PORT_NOLEARN, 1);
/* switch to vlan unaware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
@@ -468,6 +437,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
port_mask, port_mask,
port_mask & ~ALE_PORT_HOST);
+ if (common->is_emac_mode)
+ am65_cpsw_init_host_port_emac(common);
+ else
+ am65_cpsw_init_host_port_switch(common);
+
+ am65_cpsw_qos_tx_p0_rate_init(common);
+
for (i = 0; i < common->rx_chns.descs_num; i++) {
skb = __netdev_alloc_skb_ip_align(NULL,
AM65_CPSW_MAX_PACKET_SIZE,
@@ -497,6 +473,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
}
napi_enable(&common->napi_rx);
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
@@ -528,8 +508,10 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++)
+ for (i = 0; i < common->tx_ch_num; i++) {
napi_disable(&common->tx_chns[i].napi_tx);
+ hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ }
for (i = 0; i < common->tx_ch_num; i++) {
k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
@@ -538,8 +520,17 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
}
+ reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
napi_disable(&common->napi_rx);
+ hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
@@ -563,15 +554,15 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
- if (port->slave.phy)
- phy_stop(port->slave.phy);
+ phylink_stop(port->slave.phylink);
netif_tx_stop_all_queues(ndev);
- if (port->slave.phy) {
- phy_disconnect(port->slave.phy);
- port->slave.phy = NULL;
- }
+ phylink_disconnect_phy(port->slave.phylink);
+
+ /* Clean up IET */
+ am65_cpsw_qos_iet_cleanup(ndev);
+ am65_cpsw_qos_cut_thru_cleanup(port);
ret = am65_cpsw_nuss_common_stop(common);
if (ret)
@@ -596,8 +587,8 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- u32 port_mask;
int ret, i;
+ u32 reg;
ret = pm_runtime_get_sync(common->dev);
if (ret < 0) {
@@ -605,77 +596,80 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
return ret;
}
+ /* Idle MAC port */
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+ cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ /* soft reset MAC */
+ cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
+ mdelay(1);
+ reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
+ if (reg) {
+ dev_err(common->dev, "soft RESET didn't complete\n");
+ ret = -ETIMEDOUT;
+ goto runtime_put;
+ }
+
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
if (ret) {
dev_err(common->dev, "cannot set real number of tx queues\n");
- return ret;
+ goto runtime_put;
}
ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
if (ret) {
dev_err(common->dev, "cannot set real number of rx queues\n");
- return ret;
+ goto runtime_put;
}
- for (i = 0; i < common->tx_ch_num; i++)
- netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
+
+ netdev_tx_reset_queue(txq);
+ txq->tx_maxrate = common->tx_chns[i].rate_mbps;
+ }
ret = am65_cpsw_nuss_common_open(common, ndev->features);
if (ret)
- return ret;
+ goto runtime_put;
common->usage_count++;
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
- if (port->slave.mac_only)
- /* enable mac-only mode on port */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_MACONLY, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_NOLEARN, 1);
-
- port_mask = BIT(port->port_id) | ALE_PORT_HOST;
- cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
- HOST_PORT_NUM, ALE_SECURE, 0);
- cpsw_ale_add_mcast(common->ale, ndev->broadcast,
- port_mask, 0, 0, ALE_MCAST_FWD_2);
+ if (common->is_emac_mode)
+ am65_cpsw_init_port_emac_ale(port);
+ else
+ am65_cpsw_init_port_switch_ale(port);
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
- port->slave.phy_if);
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
if (ret)
goto error_cleanup;
- if (port->slave.phy_node) {
- port->slave.phy = of_phy_connect(ndev,
- port->slave.phy_node,
- &am65_cpsw_nuss_adjust_link,
- 0, port->slave.phy_if);
- if (!port->slave.phy) {
- dev_err(common->dev, "phy %pOF not found on slave %d\n",
- port->slave.phy_node,
- port->port_id);
- ret = -ENODEV;
- goto error_cleanup;
- }
- }
-
/* restore vlan configurations */
vlan_for_each(ndev, cpsw_restore_vlans, port);
- phy_attached_info(port->slave.phy);
- phy_start(port->slave.phy);
+ /* Initialize IET */
+ am65_cpsw_qos_iet_init(ndev);
+ am65_cpsw_qos_cut_thru_init(port);
+ am65_cpsw_qos_mqprio_init(port);
+
+ phylink_start(port->slave.phylink);
return 0;
error_cleanup:
am65_cpsw_nuss_ndo_slave_stop(ndev);
return ret;
+
+runtime_put:
+ pm_runtime_put(common->dev);
+ return ret;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
@@ -691,8 +685,9 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_kfree_skb_any(skb);
@@ -767,8 +762,10 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
+ complete(&common->tdown_complete);
return 0;
}
@@ -779,6 +776,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
@@ -793,18 +791,19 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
- dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
if (new_skb) {
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
napi_gro_receive(&common->napi_rx, skb);
- ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -833,6 +832,15 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
+static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_common *common =
+ container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+
+ enable_irq(common->rx_chns.irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
@@ -857,14 +865,23 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
- enable_irq(common->rx_chns.irq);
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ if (unlikely(common->rx_pace_timeout)) {
+ hrtimer_start(&common->rx_hrtimer,
+ ns_to_ktime(common->rx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(common->rx_chns.irq);
+ }
+ }
+ }
return num_rx;
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct device *dev,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
@@ -875,20 +892,23 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
next_desc = first_desc;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_single(dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
while (next_desc_dma) {
next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
next_desc_dma);
cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_page(dev, buf_dma, buf_dma_len,
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
@@ -906,15 +926,62 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
dev_kfree_skb_any(skb);
}
-static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
- int chn, unsigned int budget)
+static struct sk_buff *
+am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_tx;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ ndev = skb->dev;
+
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return skb;
+}
+
+static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
+ struct netdev_queue *netif_txq)
+{
+ if (netif_tx_queue_stopped(netif_txq)) {
+		/* Check whether the queue is stopped due to stalled
+		 * TX DMA; if so, wake it now that free TX descriptors
+		 * are available.
+		 */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+}
+
+static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
+ int chn, unsigned int budget, bool *tdown)
+{
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
@@ -923,41 +990,70 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &common->tx_chns[chn];
while (true) {
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
-
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ spin_unlock(&tx_chn->lock);
if (res == -ENODATA)
break;
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
+ *tdown = true;
break;
}
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
-
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+ total_bytes = skb->len;
ndev = skb->dev;
+ napi_consume_skb(skb, budget);
+ num_tx++;
- am65_cpts_tx_timestamp(common->cpts, skb);
+ netif_txq = netdev_get_tx_queue(ndev, chn);
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
+ int chn, unsigned int budget, bool *tdown)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ *tdown = true;
+ break;
+ }
+
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+ ndev = skb->dev;
total_bytes += skb->len;
napi_consume_skb(skb, budget);
num_tx++;
@@ -970,44 +1066,56 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
- if (netif_tx_queue_stopped(netif_txq)) {
- /* Check whether the queue is stopped due to stalled tx dma,
- * if the queue is stopped then wake the queue as
- * we have free desc for tx
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
- __netif_tx_unlock(netif_txq);
- }
dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
return num_tx;
}
+static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_tx_chn *tx_chns =
+ container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
+
+ enable_irq(tx_chns->irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ bool tdown = false;
int num_tx;
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
- budget);
- num_tx = min(num_tx, budget);
- if (num_tx < budget) {
- napi_complete(napi_tx);
- enable_irq(tx_chn->irq);
+ if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
+ budget, &tdown);
+ else
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
+
+ if (num_tx >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx)) {
+ if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(tx_chn->irq);
+ }
}
- return num_tx;
+ return 0;
}
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
struct am65_cpsw_common *common = dev_id;
+ common->rx_irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&common->napi_rx);
@@ -1053,9 +1161,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
netif_txq = netdev_get_tx_queue(ndev, q_idx);
/* Map the linear buffer */
- buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb buffer\n");
ndev->stats.tx_errors++;
goto err_free_skb;
@@ -1064,7 +1172,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
dev_dbg(dev, "Failed to allocate descriptor\n");
- dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
goto busy_stop_q;
}
@@ -1074,6 +1183,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
cppi5_hdesc_set_pkttype(first_desc, 0x7);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
*(swdata) = skb;
@@ -1109,9 +1219,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
- buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ndev->stats.tx_errors++;
@@ -1119,11 +1229,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
}
cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(next_desc,
buf_dma, frag_size, buf_dma, frag_size);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
pkt_len += frag_size;
@@ -1139,7 +1251,13 @@ done_tx:
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ spin_unlock_bh(&tx_chn->lock);
+ }
if (ret) {
dev_err(dev, "can't push desc %d\n", ret);
/* inform bql */
@@ -1165,14 +1283,14 @@ done_tx:
return NETDEV_TX_OK;
err_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
busy_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
@@ -1314,6 +1432,43 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
+static int am65_cpsw_switch_config_ioctl(struct net_device *ndev,
+ struct ifreq *ifrq, int cmd)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct net_switch_config config;
+ int ret = -EINVAL;
+
+	/* The only ioctl cmd routed here is SIOCSWITCHCONFIG, so no
+	 * dispatch on cmd is needed; the requested operation is selected
+	 * by the switch statement on config.cmd below.
+	 */
+
+ if (copy_from_user(&config, ifrq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.cmd) {
+ case SWITCH_RATELIMIT:
+ {
+ ret = cpsw_ale_rx_ratelimit_mc(common->ale, port->port_id, config.mcast_rate_limit);
+ if (ret)
+ dev_err(common->dev, "CPSW_ALE set MC ratelimit failed");
+
+ ret = cpsw_ale_rx_ratelimit_bc(common->ale, port->port_id, config.bcast_rate_limit);
+ if (ret)
+ dev_err(common->dev, "CPSW_ALE set BC ratelimit failed");
+
+ break;
+ }
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
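/*
 * Editor's sketch (userspace side, hedged): exercising the TI-specific
 * SIOCSWITCHCONFIG ABI from <linux/net_switch_config.h>. The struct
 * fields and SWITCH_RATELIMIT mirror the handler above; the rate values
 * and helper name are illustrative assumptions.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_switch_config.h>

static int example_set_rate_limits(const char *ifname)
{
	struct net_switch_config cfg = {
		.cmd = SWITCH_RATELIMIT,
		.mcast_rate_limit = 1000,
		.bcast_rate_limit = 1000,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return fd;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSWITCHCONFIG, &ifr);
	close(fd);
	return ret;
}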
+
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1327,12 +1482,11 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
return am65_cpsw_nuss_hwtstamp_set(ndev, req);
case SIOCGHWTSTAMP:
return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ case SIOCSWITCHCONFIG:
+ return am65_cpsw_switch_config_ioctl(ndev, req, cmd);
}
- if (!port->slave.phy)
- return -EOPNOTSUPP;
-
- return phy_mii_ioctl(port->slave.phy, req, cmd);
+ return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
@@ -1369,32 +1523,14 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
- netdev_features_t features)
+static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- netdev_features_t changes = features ^ ndev->features;
- struct am65_cpsw_host *host_p;
-
- host_p = am65_common_get_host(common);
-
- if (changes & NETIF_F_HW_CSUM) {
- bool enable = !!(features & NETIF_F_HW_CSUM);
-
- dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
- enable ? "ON" : "OFF");
- if (enable)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- else
- writel(0,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- }
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- return 0;
+ return &port->devlink_port;
}
-static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
.ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
@@ -1406,8 +1542,196 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
- .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
+ .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
+ .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
+};
+
+static void am65_cpsw_disable_phy(struct phy *phy)
+{
+ phy_power_off(phy);
+ phy_exit(phy);
+}
+
+static int am65_cpsw_enable_phy(struct phy *phy)
+{
+ int ret;
+
+ ret = phy_init(phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(phy);
+ if (ret < 0) {
+ phy_exit(phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_init_phy(struct device *dev, struct device_node *port_np)
+{
+ const char *name = "serdes-phy";
+ struct phy *phy;
+ int ret;
+
+ phy = devm_of_phy_get(dev, port_np, name);
+ if (PTR_ERR(phy) == -ENODEV)
+ return 0;
+
+ ret = am65_cpsw_enable_phy(phy);
+ if (ret < 0)
+ goto err_phy;
+
+ return 0;
+
+err_phy:
+ devm_phy_put(dev, phy);
+ return ret;
+}
+
+static void am65_cpsw_nuss_mac_control(struct am65_cpsw_port *port, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (speed == SPEED_1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+ /* Can be used with in band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ mac_control |= CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN;
+ if (duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* rx_pause/tx_pause */
+ if (rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+}
+
+static void am65_cpsw_nuss_mac_enable_link(struct am65_cpsw_port *port, int speed, int duplex)
+{
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ /* enable phy */
+ am65_cpsw_enable_phy(port->slave.ifphy);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ am65_cpsw_qos_link_up(ndev, speed, duplex);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ u32 mr_adv_ability = MAC2MAC_MR_ADV_ABILITY_BASE;
+ struct am65_cpsw_common *common = port->common;
+ struct fwnode_handle *fwnode;
+ bool fixed_link = false;
+
+ if (common->pdata.extra_modes & BIT(state->interface))
+ writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
+
+ /* Detecting fixed-link */
+ fwnode = of_node_to_fwnode(port->slave.phy_node);
+ if (fwnode)
+ fixed_link = !!fwnode_get_named_child_node(fwnode, "fixed-link");
+
+ if (fixed_link) {
+ /* In fixed-link mode, mac_link_up is not invoked.
+ * Therefore, the relevant mac_link_up operations
+ * have to be moved to mac_config.
+ */
+ am65_cpsw_nuss_mac_control(port, state->interface, state->speed,
+ state->duplex, state->pause & MLO_PAUSE_TX,
+ state->pause & MLO_PAUSE_RX);
+
+ if (state->speed == SPEED_1000)
+ mr_adv_ability |= MAC2MAC_MR_ADV_ABILITY_1G;
+ if (state->speed == SPEED_100)
+ mr_adv_ability |= MAC2MAC_MR_ADV_ABILITY_100M;
+ if (state->duplex)
+ mr_adv_ability |= MAC2MAC_MR_ADV_ABILITY_FULLDUPLEX;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (common->pdata.extra_modes & BIT(state->interface))) {
+ writel(mr_adv_ability,
+ port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
+ writel((AM65_CPSW_SGMII_CONTROL_MASTER_MODE |
+ AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE),
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
+ }
+
+ am65_cpsw_nuss_mac_enable_link(port, state->speed, state->duplex);
+ } else {
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (common->pdata.extra_modes & BIT(state->interface)))
+ writel(MAC2PHY_MR_ADV_ABILITY,
+ port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
+ }
+}
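/*
 * Editor's illustration (assumed devicetree fragment): the fixed-link
 * path above triggers when the port node carries a fixed-link child,
 * in which case phylink never invokes mac_link_up and the MAC is
 * programmed directly from mac_config.
 *
 *	cpsw_port2: port@2 {
 *		phy-mode = "rgmii-rxid";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */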
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ int tmo;
+
+ /* disable phy */
+ am65_cpsw_disable_phy(port->slave.ifphy);
+
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ am65_cpsw_qos_link_down(ndev);
+ netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+
+ am65_cpsw_nuss_mac_control(port, interface, speed, duplex, tx_pause, rx_pause);
+
+ am65_cpsw_nuss_mac_enable_link(port, speed, duplex);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = am65_cpsw_nuss_mac_config,
+ .mac_link_down = am65_cpsw_nuss_mac_link_down,
+ .mac_link_up = am65_cpsw_nuss_mac_link_up,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1417,7 +1741,6 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
if (!port->disabled)
return;
- common->disabled_ports_mask |= BIT(port->port_id);
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -1433,6 +1756,8 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ irq_set_affinity_hint(tx_chn->irq, NULL);
+
if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
@@ -1450,11 +1775,14 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+ common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq) {
+ irq_set_affinity_hint(tx_chn->irq, NULL);
devm_free_irq(dev, tx_chn->irq, tx_chn);
+ }
netif_napi_del(&tx_chn->napi_tx);
@@ -1468,6 +1796,35 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
}
}
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i, ret = 0;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+ hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+
+ ret = devm_request_irq(dev, tx_chn->irq,
+ am65_cpsw_nuss_tx_irq,
+ IRQF_TRIGGER_HIGH,
+ tx_chn->tx_chn_name, tx_chn);
+ if (ret) {
+ dev_err(dev, "failure requesting tx%u irq %u, %d\n",
+ tx_chn->id, tx_chn->irq, ret);
+ goto err;
+ }
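+ /* Distribute TX completion IRQs round-robin across the
+ * online CPUs as an initial affinity hint (channel i ->
+ * CPU i % num_online_cpus()).
+ */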
+ irq_set_affinity_hint(tx_chn->irq, get_cpu_mask(i % num_online_cpus()));
+ }
+
+err:
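+ /* reached on success as well: ret is 0 unless a request above failed */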
+ return ret;
+}
+
static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
@@ -1496,28 +1853,29 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
snprintf(tx_chn->tx_chn_name,
sizeof(tx_chn->tx_chn_name), "tx%d", i);
+ spin_lock_init(&tx_chn->lock);
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->tx_chn_name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- dev_err(dev, "Failed to create poll %d\n", ret);
- goto err;
- }
tx_chn->tx_chn =
k3_udma_glue_request_tx_chn(dev,
tx_chn->tx_chn_name,
&tx_cfg);
if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- dev_err(dev, "Failed to request tx dma channel %d\n",
- ret);
+ ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
+ "Failed to request tx dma channel\n");
+ goto err;
+ }
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
goto err;
}
@@ -1533,6 +1891,12 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
dev_name(dev), tx_chn->id);
}
+ ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
+ if (ret) {
+ dev_err(dev, "Failed to add tx NAPI %d\n", ret);
+ goto err;
+ }
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
if (i) {
@@ -1550,6 +1914,8 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
rx_chn = &common->rx_chns;
+ irq_set_affinity_hint(rx_chn->irq, NULL);
+
if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -1557,6 +1923,31 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
+static void am65_cpsw_nuss_remove_rx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ struct am65_cpsw_rx_chn *rx_chn;
+ struct device *dev = common->dev;
+
+ rx_chn = &common->rx_chns;
+ devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+
+ if (rx_chn->irq >= 0) {
+ irq_set_affinity_hint(rx_chn->irq, NULL);
+ devm_free_irq(dev, rx_chn->irq, common);
+ }
+
+ netif_napi_del(&common->napi_rx);
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+
+ common->rx_flow_id_base = -1;
+}
+
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
@@ -1577,7 +1968,16 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
/* init all flows */
rx_chn->dev = dev;
rx_chn->descs_num = max_desc_num;
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
+ "Failed to request rx dma channel\n");
+ goto err;
+ }
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
rx_chn->descs_num,
hdesc_size, "rx");
if (IS_ERR(rx_chn->desc_pool)) {
@@ -1586,13 +1986,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
goto err;
}
- rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
- if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- dev_err(dev, "Failed to request rx dma channel %d\n", ret);
- goto err;
- }
-
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -1606,7 +1999,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
};
struct k3_ring_cfg fdqring_cfg = {
.elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_MESSAGE,
.flags = K3_RINGACC_RING_SHARED,
};
struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
@@ -1620,6 +2012,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
i, &rx_flow_cfg);
@@ -1642,6 +2035,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
}
}
+ netif_napi_add(common->dma_ndev, &common->napi_rx,
+ am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+ hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+
+ ret = devm_request_irq(dev, rx_chn->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ rx_chn->irq, ret);
+ goto err;
+ }
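+ /* Pin the single RX completion IRQ to the first present CPU
+ * as the default affinity hint.
+ */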
+ irq_set_affinity_hint(rx_chn->irq, get_cpu_mask(cpumask_first(cpu_present_mask)));
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
@@ -1717,15 +2125,17 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
int ret = PTR_ERR(cpts);
of_node_put(node);
- if (ret == -EOPNOTSUPP) {
- dev_info(dev, "cpts disabled\n");
- return 0;
- }
-
dev_err(dev, "cpts create err %d\n", ret);
return ret;
}
common->cpts = cpts;
+ /* Forbid PM runtime if CPTS is running.
+ * K3 CPSWxG modules may completely lose context during ON->OFF
+ * transitions depending on integration:
+ * AM65x/J721E MCU CPSW2G: context preserved (false)
+ * J721E MAIN_CPSW9G: context lost (true)
+ */
+ pm_runtime_forbid(dev);
return 0;
}
@@ -1767,20 +2177,25 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->common = common;
port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ if (common->pdata.extra_modes)
+ port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
port->fetch_ram_base =
common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
+ port->qos.iet.addfragsize = 1;
port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
if (IS_ERR(port->slave.mac_sl))
return PTR_ERR(port->slave.mac_sl);
port->disabled = !of_device_is_available(port_np);
- if (port->disabled)
+ if (port->disabled) {
+ common->disabled_ports_mask |= BIT(port->port_id);
continue;
+ }
port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
if (IS_ERR(port->slave.ifphy)) {
@@ -1790,30 +2205,16 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return ret;
}
+ /* Initialize the phy for the port */
+ ret = am65_cpsw_init_phy(dev, port_np);
+ if (ret)
+ return ret;
+
port->slave.mac_only =
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- if (of_phy_is_fixed_link(port_np)) {
- ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
- return ret;
- }
- port->slave.phy_node = of_node_get(port_np);
- } else {
- port->slave.phy_node =
- of_parse_phandle(port_np, "phy-handle", 0);
- }
-
- if (!port->slave.phy_node) {
- dev_err(dev,
- "slave[%d] no phy found\n", port_id);
- return -ENODEV;
- }
-
+ port->slave.phy_node = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -1821,6 +2222,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return ret;
}
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF error setting phy mode %d\n", port_np, ret);
+ return ret;
+ }
+
mac_addr = of_get_mac_address(port_np);
if (!IS_ERR(mac_addr)) {
ether_addr_copy(port->slave.mac_addr, mac_addr);
@@ -1834,6 +2241,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
of_node_put(node);
+ /* check that at least one external port is enabled */
+ if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
+ dev_err(dev, "No external ports are available\n");
+ return -ENODEV;
+ }
+
return 0;
}
@@ -1844,14 +2257,31 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
-static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
+}
+
+static int
+am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
+ struct phylink *phylink;
int ret;
- port = am65_common_get_port(common, 1);
+ port = &common->ports[port_idx];
+
+ if (port->disabled)
+ return 0;
/* alloc netdev */
port->ndev = devm_alloc_etherdev_mqs(common->dev,
@@ -1880,9 +2310,54 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
port->ndev->vlan_features |= NETIF_F_SG;
- port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+ /* Configuring Phylink */
+ port->slave.phylink_config.dev = &port->ndev->dev;
+ port->slave.phylink_config.type = PHYLINK_NETDEV;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD | MAC_5000FD;
+
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ __set_bit(port->slave.phy_if,
+ port->slave.phylink_config.supported_interfaces);
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(port->slave.phy_if,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ phylink = phylink_create(&port->slave.phylink_config,
+ of_node_to_fwnode(port->slave.phy_node),
+ port->slave.phy_if,
+ &am65_cpsw_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ port->slave.phylink = phylink;
+
/* Disable TX checksum offload by default due to HW bug */
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
@@ -1893,74 +2368,538 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
ndev_priv->stats);
+ if (ret)
+ dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+
+ if (!common->dma_ndev)
+ common->dma_ndev = port->ndev;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ ret = am65_cpsw_nuss_init_port_ndev(common, i);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev)
+ unregister_netdev(port->ndev);
+ }
+}
+
+static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
+{
+ int set_val = 0;
+ int i;
+
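+ /* Offload forwarding only once every enabled external port has
+ * joined the same HW bridge; e.g. with ports 1 and 2 enabled,
+ * br_members must equal BIT(1) | BIT(2).
+ */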
+ if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
+ set_val = 1;
+
+ dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 1; i <= common->port_num; i++) {
+ struct am65_cpsw_port *port = am65_common_get_port(common, i);
+ struct am65_cpsw_ndev_priv *priv;
+
+ if (!port->ndev)
+ continue;
+
+ priv = am65_ndev_to_priv(port->ndev);
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+bool am65_cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ return !common->is_emac_mode;
+ }
+
+ return false;
+}
+
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ if (!common->br_members) {
+ common->hw_bridge_dev = br_ndev;
+ } else {
+ /* Adding the port to a second bridge is
+ * not supported.
+ */
+ if (common->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ common->br_members |= BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ return NOTIFY_DONE;
+}
+
+static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ common->br_members &= ~BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ if (!common->br_members)
+ common->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int am65_cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
+ else
+ am65_cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return 0;
+
+ cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
+ ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
if (ret) {
- dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
return ret;
}
- netif_napi_add(port->ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+ ret = am65_cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
return ret;
}
-static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return;
+
+ am65_cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops am65_cpsw_devlink_ops = {};
+
+static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
+{
+ cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ am65_cpsw_init_stp_ale_entry(common);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
+
+ /* learning makes no sense in multi-mac mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *common = dl_priv->common;
+
+ dev_dbg(common->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !common->is_emac_mode;
+
+ return 0;
+}
+
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *common = port->common;
+ u32 port_mask;
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ if (slave->mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
+}
+
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *cpsw = port->common;
+ u32 port_mask;
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_NOLEARN, 0);
+
+ cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
+ slave->port_vlan);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_MACONLY, 0);
+}
+
+static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *cpsw = dl_priv->common;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->is_emac_mode)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ cpsw->is_emac_mode = !switch_en;
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+
+ if (!sl_ndev)
+ continue;
+
+ slave = am65_ndev_to_slave(sl_ndev);
+ if (switch_en)
+ slave->port_vlan = cpsw->default_vlan;
+ else
+ slave->port_vlan = 0;
+ }
+
+ goto exit;
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ am65_cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ slave = am65_ndev_to_slave(sl_ndev);
+ slave->port_vlan = cpsw->default_vlan;
+
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_switch_ale(port);
+ }
+
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ am65_cpsw_init_host_port_emac(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ port->slave.port_vlan = 0;
+ if (netif_running(sl_ndev)) {
+ am65_cpsw_init_port_emac_ale(port);
+ am65_cpsw_qos_cut_thru_cleanup(port);
+ }
+ }
+ }
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static const struct devlink_param am65_cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ am65_cpsw_dl_switch_mode_get,
+ am65_cpsw_dl_switch_mode_set, NULL),
+};
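+
+/* A runtime toggle sketch using the devlink CLI (the platform device
+ * name below is illustrative only):
+ *   devlink dev param set platform/c000000.ethernet \
+ *           name switch_mode value true cmode runtime
+ */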
+
+static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
+{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
+ }
+}
+
+static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
+ struct devlink_port_attrs attrs = {};
+ struct am65_cpsw_devlink *dl_priv;
struct device *dev = common->dev;
+ struct devlink_port *dl_port;
struct am65_cpsw_port *port;
- int i, ret = 0;
+ int ret = 0;
+ int i;
- port = am65_common_get_port(common, 1);
+ common->devlink =
+ devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!common->devlink)
+ return -ENOMEM;
- for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ dl_priv = devlink_priv(common->devlink);
+ dl_priv->common = common;
- netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+ ret = devlink_register(common->devlink, dev);
+ if (ret) {
+ dev_err(dev, "devlink reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
- ret = devm_request_irq(dev, tx_chn->irq,
- am65_cpsw_nuss_tx_irq,
- IRQF_TRIGGER_HIGH,
- tx_chn->tx_chn_name, tx_chn);
+ /* Provide a devlink hook to switch mode when multiple external ports
+ * are present and the NUSS switchdev driver is enabled.
+ */
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ ret = devlink_params_register(common->devlink,
+ am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
if (ret) {
- dev_err(dev, "failure requesting tx%u irq %u, %d\n",
- tx_chn->id, tx_chn->irq, ret);
- goto err;
+ dev_err(dev, "devlink params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+ devlink_params_publish(common->devlink);
+ }
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ if (port->ndev)
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ else
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
+ attrs.phys.port_number = port->port_id;
+ attrs.switch_id.id_len = sizeof(resource_size_t);
+ memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
+ devlink_port_attrs_set(dl_port, &attrs);
+
+ ret = devlink_port_register(common->devlink, dl_port, port->port_id);
+ if (ret) {
+ dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
+ port->port_id, ret);
+ goto dl_port_unreg;
}
}
-err:
return ret;
+
+dl_port_unreg:
+ am65_cpsw_unregister_devlink_ports(common);
+dl_unreg:
+ devlink_unregister(common->devlink);
+dl_free:
+ devlink_free(common->devlink);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+{
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ devlink_params_unpublish(common->devlink);
+ devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ }
+
+ am65_cpsw_unregister_devlink_ports(common);
+ devlink_unregister(common->devlink);
+ devlink_free(common->devlink);
}
-static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct devlink_port *dl_port;
struct am65_cpsw_port *port;
- int ret = 0;
+ int ret = 0, i;
- port = am65_common_get_port(common, 1);
- ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+ /* init tx channels */
+ ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
- goto err;
+ return ret;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ return ret;
- ret = devm_request_irq(dev, common->rx_chns.irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- common->rx_chns.irq, ret);
- goto err;
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+
+ ret = am65_cpsw_nuss_register_port_debugfs(port);
+ if (ret)
+ goto err_cleanup_ndev;
+
+ if (!port->ndev)
+ continue;
+
+ ret = register_netdev(port->ndev);
+ if (ret) {
+ dev_err(dev, "error registering slave net device%i %d\n",
+ i, ret);
+ goto err_cleanup_ndev;
+ }
+
+ dl_port = &port->devlink_port;
+ devlink_port_type_eth_set(dl_port, port->ndev);
}
- ret = register_netdev(port->ndev);
+ ret = am65_cpsw_register_notifiers(common);
if (ret)
- dev_err(dev, "error registering slave net device %d\n", ret);
+ goto err_cleanup_ndev;
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
-err:
+
+ return 0;
+
+err_cleanup_ndev:
+ am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
+
return ret;
}
@@ -1970,22 +2909,8 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
common->tx_ch_num = num_tx;
ret = am65_cpsw_nuss_init_tx_chns(common);
- if (ret)
- return ret;
-
- return am65_cpsw_nuss_ndev_add_napi_2g(common);
-}
-
-static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
-{
- struct am65_cpsw_port *port;
- int i;
- for (i = 0; i < common->port_num; i++) {
- port = &common->ports[i];
- if (port->ndev)
- unregister_netdev(port->ndev);
- }
+ return ret;
}
struct am65_cpsw_soc_pdata {
@@ -2006,15 +2931,50 @@ static const struct soc_device_attribute am65_cpsw_socinfo[] = {
static const struct am65_cpsw_pdata am65x_sr1_0 = {
.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
static const struct am65_cpsw_pdata j721e_pdata = {
.quirks = 0,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = AM64_CPSW_QUIRK_CUT_THRU | AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
+static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+};
+
+static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+};
+
+static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
+ { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
+ { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
+ { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2041,7 +3001,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ u64 id_temp;
int ret, i;
+ int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
@@ -2060,6 +3022,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (IS_ERR(common->ss_base))
return PTR_ERR(common->ss_base);
common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+ /* Use device's physical base address as switch id */
+ id_temp = cpu_to_be64(res->start);
+ memcpy(common->switch_id, &id_temp, sizeof(res->start));
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
if (!node)
@@ -2069,19 +3034,11 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
- if (common->port_num != 1)
- return -EOPNOTSUPP;
-
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
-
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "error setting dma mask: %d\n", ret);
- return ret;
- }
+ common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
@@ -2090,13 +3047,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return -ENOMEM;
clk = devm_clk_get(dev, "fck");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "error getting fck clock %d\n", ret);
- return ret;
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
@@ -2125,14 +3077,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
am65_cpsw_nuss_get_ver(common);
- /* init tx channels */
- ret = am65_cpsw_nuss_init_tx_chns(common);
- if (ret)
- goto err_of_clear;
- ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
- goto err_of_clear;
-
ret = am65_cpsw_nuss_init_host_p(common);
if (ret)
goto err_of_clear;
@@ -2146,7 +3090,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
ale_params.ale_ports = common->port_num + 1;
ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
- ale_params.dev_id = "am65x-cpsw2g";
+ ale_params.dev_id = common->pdata.ale_dev_id;
ale_params.bus_freq = common->bus_freq;
common->ale = cpsw_ale_create(&ale_params);
@@ -2156,6 +3100,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
+ ale_entries = common->ale->params.ale_entries;
+ common->ale_context = devm_kzalloc(dev,
+ ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
+ GFP_KERNEL);
ret = am65_cpsw_init_cpts(common);
if (ret)
goto err_of_clear;
@@ -2166,17 +3114,28 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
- ret = am65_cpsw_nuss_init_ndev_2g(common);
+ common->is_emac_mode = true;
+
+ ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
- ret = am65_cpsw_nuss_ndev_reg_2g(common);
+ ret = am65_cpsw_nuss_register_debugfs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
+
+ ret = am65_cpsw_nuss_register_ndevs(common);
+ if (ret) {
+ am65_cpsw_nuss_unregister_debugfs(common);
+ goto err_free_phylink;
+ }
pm_runtime_put(dev);
return 0;
+err_free_phylink:
+ am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2199,6 +3158,12 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_unregister_debugfs(common);
+ am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
+ am65_cpsw_unregister_devlink(common);
+ am65_cpsw_unregister_notifiers(common);
+
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
@@ -2211,10 +3176,106 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int am65_cpsw_nuss_suspend(struct device *dev)
+{
+ struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ int i, ret;
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+
+ cpsw_ale_dump(common->ale, common->ale_context);
+ host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ ndev = port->ndev;
+
+ if (!ndev)
+ continue;
+
+ port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ netif_device_detach(ndev);
+ if (netif_running(ndev)) {
+ rtnl_lock();
+ ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
+ rtnl_unlock();
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ am65_cpts_suspend(common->cpts);
+
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_resume(struct device *dev)
+{
+ struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ int i, ret;
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ bool need_ale_restore = false;
+
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ return ret;
+
+ /* If RX IRQ was disabled before suspend, keep it disabled */
+ if (common->rx_irq_disabled)
+ disable_irq(common->rx_chns.irq);
+
+ am65_cpts_resume(common->cpts);
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ ndev = port->ndev;
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ need_ale_restore = true;
+ rtnl_lock();
+ ret = am65_cpsw_nuss_ndo_slave_open(ndev);
+ rtnl_unlock();
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ }
+
+ netif_device_attach(ndev);
+ writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ }
+
+ writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ if (need_ale_restore)
+ cpsw_ale_restore(common->ale, common->ale_context);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
+};
+
static struct platform_driver am65_cpsw_nuss_driver = {
.driver = {
.name = AM65_CPSW_DRV_NAME,
.of_match_table = am65_cpsw_nuss_of_mtable,
+ .pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
.remove = am65_cpsw_nuss_remove,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 993e1d4d3222..7ac10e5bae21 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -6,11 +6,15 @@
#ifndef AM65_CPSW_NUSS_H_
#define AM65_CPSW_NUSS_H_
+#include <linux/debugfs.h>
+#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <net/devlink.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -21,16 +25,20 @@ struct am65_cpts;
#define AM65_CPSW_MAX_RX_QUEUES 1
#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
+
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
struct device_node *phy_node;
- struct phy_device *phy;
phy_interface_t phy_if;
struct phy *ifphy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
+ int port_vlan;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
struct am65_cpsw_port {
@@ -39,6 +47,7 @@ struct am65_cpsw_port {
const char *name;
u32 port_id;
void __iomem *port_base;
+ void __iomem *sgmii_base;
void __iomem *stat_base;
void __iomem *fetch_ram_base;
bool disabled;
@@ -46,27 +55,39 @@ struct am65_cpsw_port {
bool tx_ts_enabled;
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
+ struct devlink_port devlink_port;
+ struct dentry *debugfs_port;
+ /* Only for suspend/resume context */
+ u32 vid_context;
};
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
void __iomem *stat_base;
+ /* Only for suspend/resume context */
+ u32 vid_context;
};
struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
struct napi_struct napi_tx;
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_tx_channel *tx_chn;
+ spinlock_t lock; /* protect TX rings in multi-port mode */
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout;
int irq;
u32 id;
u32 descs_num;
char tx_chn_name[128];
+ u32 rate_mbps;
};
struct am65_cpsw_rx_chn {
struct device *dev;
+ struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
@@ -74,9 +95,23 @@ struct am65_cpsw_rx_chn {
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+#define AM64_CPSW_QUIRK_CUT_THRU BIT(1)
+#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(2)
struct am65_cpsw_pdata {
u32 quirks;
+ u64 extra_modes;
+ enum k3_ring_mode fdqring_mode;
+ const char *ale_dev_id;
+};
+
+enum cpsw_devlink_param_id {
+ AM65_CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ AM65_CPSW_DL_PARAM_SWITCH_MODE,
+};
+
+struct am65_cpsw_devlink {
+ struct am65_cpsw_common *common;
};
struct am65_cpsw_common {
@@ -91,10 +126,12 @@ struct am65_cpsw_common {
struct am65_cpsw_host host;
struct am65_cpsw_port *ports;
u32 disabled_ports_mask;
+ struct net_device *dma_ndev;
int usage_count; /* number of opened ports */
struct cpsw_ale *ale;
int tx_ch_num;
+ u32 tx_ch_rate_msk;
u32 rx_flow_id_base;
struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
@@ -103,6 +140,9 @@ struct am65_cpsw_common {
struct am65_cpsw_rx_chn rx_chns;
struct napi_struct napi_rx;
+ bool rx_irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -110,6 +150,20 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
+ int iet_enabled;
+ unsigned int cut_thru_enabled;
+
+ bool is_emac_mode;
+ u16 br_members;
+ int default_vlan;
+ struct devlink *devlink;
+ struct net_device *hw_bridge_dev;
+ struct notifier_block am65_cpsw_netdevice_nb;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+
+ struct dentry *debugfs_root;
+ /* only for suspend/resume context restore */
+ u32 *ale_context;
};
struct am65_cpsw_ndev_stats {
@@ -124,6 +178,7 @@ struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
+ bool offload_fwd_mark;
};
#define am65_ndev_to_priv(ndev) \
@@ -151,4 +206,26 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+bool am65_cpsw_port_dev_check(const struct net_device *dev);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+int am65_cpsw_nuss_register_port_debugfs(struct am65_cpsw_port *port);
+int am65_cpsw_nuss_register_debugfs(struct am65_cpsw_common *common);
+void am65_cpsw_nuss_unregister_debugfs(struct am65_cpsw_common *common);
+#else
+static inline int am65_cpsw_nuss_register_port_debugfs(struct am65_cpsw_port *port)
+{
+ return 0;
+}
+
+static inline int am65_cpsw_nuss_register_debugfs(struct am65_cpsw_common *common)
+{
+ return 0;
+}
+
+static inline void am65_cpsw_nuss_unregister_debugfs(struct am65_cpsw_common *common)
+{
+}
+#endif
+
#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 3bdd4dbcd2ff..bacb332ff431 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -4,24 +4,42 @@
*
* quality of service module includes:
* Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
+ * Interspersing Express Traffic (IET - P802.3br/D2.0)
*/
+#include <linux/bitfield.h>
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#include <linux/math64.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_REG_FREQ 0x05c
#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_MAX_BLKS 0x008
+#define AM65_CPSW_PN_REG_TX_PRI_MAP 0x018
+#define AM65_CPSW_PN_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PN_REG_IET_CTRL 0x040
+#define AM65_CPSW_PN_REG_IET_STATUS 0x044
+#define AM65_CPSW_PN_REG_IET_VERIFY 0x048
#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
#define AM65_CPSW_PN_REG_EST_CTL 0x060
+#define AM65_CPSW_PN_REG_PRI_CIR(pri) (0x140 + 4 * (pri))
+#define AM65_CPSW_PN_REG_PRI_EIR(pri) (0x160 + 4 * (pri))
+
+#define AM64_CPSW_PN_CUT_THRU 0x3C0
+#define AM64_CPSW_PN_SPEED 0x3C4
/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_IET_EN BIT(17)
#define AM65_CPSW_CTL_EST_EN BIT(18)
+#define AM64_CPSW_CTL_CUT_THRU_EN BIT(19)
/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_IET_PORT_EN BIT(16)
#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
/* AM65_CPSW_PN_REG_EST_CTL register fields */
@@ -32,6 +50,27 @@
#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+/* AM65_CPSW_PN_REG_IET_CTRL register fields */
+#define AM65_CPSW_PN_IET_MAC_PENABLE BIT(0)
+#define AM65_CPSW_PN_IET_MAC_DISABLEVERIFY BIT(2)
+#define AM65_CPSW_PN_IET_MAC_LINKFAIL BIT(3)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK GENMASK(10, 8)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET 8
+#define AM65_CPSW_PN_IET_PREMPT_MASK GENMASK(23, 16)
+#define AM65_CPSW_PN_IET_PREMPT_OFFSET 16
+
+/* AM65_CPSW_PN_REG_IET_STATUS register fields */
+#define AM65_CPSW_PN_MAC_VERIFIED BIT(0)
+#define AM65_CPSW_PN_MAC_VERIFY_FAIL BIT(1)
+#define AM65_CPSW_PN_MAC_RESPOND_ERR BIT(2)
+#define AM65_CPSW_PN_MAC_VERIFY_ERR BIT(3)
+
+/* AM65_CPSW_PN_REG_IET_VERIFY register fields */
+/* 10 msec converted to NSEC */
+#define AM65_CPSW_IET_VERIFY_CNT_MS (10)
+#define AM65_CPSW_IET_VERIFY_CNT_NS (AM65_CPSW_IET_VERIFY_CNT_MS * \
+ NSEC_PER_MSEC)
+
/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
@@ -47,12 +86,258 @@
#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+/* Cut-Thru AM64_CPSW_PN_CUT_THRU */
+#define AM64_PN_CUT_THRU_TX_PRI GENMASK(7, 0)
+#define AM64_PN_CUT_THRU_RX_PRI GENMASK(15, 8)
+
+/* Cut-Thru AM64_CPSW_PN_SPEED */
+#define AM64_PN_SPEED_VAL GENMASK(3, 0)
+#define AM64_PN_SPEED_AUTO_EN BIT(8)
+#define AM64_PN_AUTO_SPEED GENMASK(15, 12)
+
+/* AM65_CPSW_PN_REG_MAX_BLKS fields for the IET and non-IET cases */
+/* 7 blocks for pn_rx_max_blks, 13 for pn_tx_max_blks */
+#define AM65_CPSW_PN_TX_RX_MAX_BLKS_IET 0xD07
+#define AM65_CPSW_PN_TX_RX_MAX_BLKS_DEFAULT 0x1004
+
enum timer_act {
TACT_PROG, /* need program timer */
TACT_NEED_STOP, /* need stop first */
TACT_SKIP_PROG, /* just buffer can be updated */
};
+/* number of traffic classes (fifos) per port */
+#define AM65_CPSW_PN_TC_NUM 8
+#define AM65_CPSW_PN_TX_PRI_MAP_DEF 0x76543210
+
+static int am65_cpsw_mqprio_setup(struct net_device *ndev, void *type_data);
+
+/* The fetch command count is a number of bytes in Gigabit mode or nibbles
+ * in 10/100Mb mode. So, given the link speed and a time in ns, convert ns
+ * to the number of bytes/nibbles that can be sent while transmitting at
+ * the given speed.
+ */
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
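+
+/* Worked example: at SPEED_100 a window of 10 msec gives
+ * temp = 10000000 * 100 * 2 = 2000000000 and
+ * DIV_ROUND_UP(temp, 8000) = 250000 nibbles (125000 bytes),
+ * i.e. 100 Mbit/s * 10 msec = 1 Mbit on the wire.
+ */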
+
+/* IET */
+
+static void am65_cpsw_iet_enable(struct am65_cpsw_common *common)
+{
+ int common_enable = 0;
+ u32 val;
+ int i;
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= !!common->ports[i].qos.iet.mask;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (common_enable)
+ val |= AM65_CPSW_CTL_IET_EN;
+ else
+ val &= ~AM65_CPSW_CTL_IET_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->iet_enabled = common_enable;
+}
+
+static void am65_cpsw_port_iet_enable(struct am65_cpsw_port *port,
+ u32 mask)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (mask)
+ val |= AM65_CPSW_PN_CTL_IET_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_IET_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+ port->qos.iet.mask = mask;
+}
+
+static int am65_cpsw_iet_verify(struct am65_cpsw_port *port)
+{
+ int try;
+ u32 val;
+
+ netdev_info(port->ndev, "Starting IET/FPE MAC Verify\n");
+ /* Set verify timeout depending on link speed. It is 10 msec
+ * in wireside clocks
+ */
+ val = am65_est_cmd_ns_to_cnt(AM65_CPSW_IET_VERIFY_CNT_NS,
+ port->qos.link_speed);
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);
+
+ /* By experiment, keep this at about 20 * 50 msec = 1000 msec.
+ * Verification usually succeeds in one try, but at times it
+ * takes more attempts, especially at initial boot. Try up to
+ * 20 times before giving up.
+ */
+ try = 20;
+ do {
+ /* Enable IET Preemption for the port and
+ * reset LINKFAIL bit to start Verify.
+ */
+ writel(AM65_CPSW_PN_IET_MAC_PENABLE,
+ port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ /* This takes 10 msec to complete in h/w, assuming the other
+ * side is already ready. However, since both sides may take
+ * a variable setup/config time, wait for additional time;
+ * 50 msec was chosen through trials.
+ */
+ msleep(50);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
+ if (val & AM65_CPSW_PN_MAC_VERIFIED)
+ break;
+
+ if (val & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
+ netdev_dbg(port->ndev,
+ "IET MAC verify failed, trying again");
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ writel(AM65_CPSW_PN_IET_MAC_LINKFAIL,
+ port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ }
+
+ if (val & AM65_CPSW_PN_MAC_RESPOND_ERR) {
+ netdev_err(port->ndev, "IET MAC respond error");
+ return -ENODEV;
+ }
+
+ if (val & AM65_CPSW_PN_MAC_VERIFY_ERR) {
+ netdev_err(port->ndev, "IET MAC verify error");
+ return -ENODEV;
+ }
+
+ } while (try-- > 0);
+
+ if (try <= 0) {
+ netdev_err(port->ndev, "IET MAC Verify/Response timeout");
+ return -ENODEV;
+ }
+
+ netdev_info(port->ndev, "IET/FPE MAC Verify Success\n");
+ return 0;
+}
+
+static void am65_cpsw_iet_config_mac_preempt(struct am65_cpsw_port *port,
+ bool enable, bool force)
+{
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+ u32 val;
+
+ /* Enable pre-emption queues and force mode if no mac verify */
+ val = 0;
+ if (enable) {
+ if (!force) {
+ /* AM65_CPSW_PN_IET_MAC_PENABLE is already
+ * set as part of MAC Verify, so do a
+ * read-modify-write.
+ */
+ val = readl(port->port_base +
+ AM65_CPSW_PN_REG_IET_CTRL);
+ } else {
+ val |= AM65_CPSW_PN_IET_MAC_PENABLE;
+ val |= AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
+ }
+ val |= ((iet->fpe_mask_configured <<
+ AM65_CPSW_PN_IET_PREMPT_OFFSET) &
+ AM65_CPSW_PN_IET_PREMPT_MASK);
+ val |= ((iet->addfragsize <<
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET) &
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK);
+ }
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ iet->fpe_enabled = enable;
+}
+
+static void am65_cpsw_iet_set(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+
+ /* For IET, Change MAX_BLKS */
+ writel(AM65_CPSW_PN_TX_RX_MAX_BLKS_IET,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+
+ am65_cpsw_port_iet_enable(port, iet->fpe_mask_configured);
+ am65_cpsw_iet_enable(common);
+}
+
+static int am65_cpsw_iet_fpe_enable(struct am65_cpsw_port *port, bool verify)
+{
+ int ret;
+
+ if (verify) {
+ ret = am65_cpsw_iet_verify(port);
+ if (ret)
+ return ret;
+ }
+
+ am65_cpsw_iet_config_mac_preempt(port, true, !verify);
+
+ return 0;
+}
+
+void am65_cpsw_qos_iet_init(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+
+ /* Enable IET FPE only if the user has enabled the priv flag for
+ * IET frame preemption.
+ */
+ if (!iet->fpe_configured) {
+ iet->fpe_mask_configured = 0;
+ return;
+ }
+ /* Use highest priority queue as express queue and others
+ * as preemptible queues.
+ */
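+ /* E.g. with tx_ch_num = 8 this evaluates to GENMASK(6, 0) = 0x7f:
+ * queues 0-6 are preemptible, queue 7 remains express.
+ */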
+ iet->fpe_mask_configured = GENMASK(common->tx_ch_num - 2, 0);
+
+ /* Record the ndev used for the IET MAC verify process */
+ iet->ndev = ndev;
+
+ am65_cpsw_iet_set(ndev);
+}
+
+static void am65_cpsw_iet_fpe_disable(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+
+ am65_cpsw_iet_config_mac_preempt(port, false,
+ !iet->mac_verify_configured);
+}
+
+void am65_cpsw_qos_iet_cleanup(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ /* restore MAX_BLKS to default */
+ writel(AM65_CPSW_PN_TX_RX_MAX_BLKS_DEFAULT,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+
+ am65_cpsw_iet_fpe_disable(port);
+ am65_cpsw_port_iet_enable(port, 0);
+ am65_cpsw_iet_enable(common);
+}
+
static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
return port->qos.est_oper || port->qos.est_admin;
@@ -223,21 +508,6 @@ static void am65_cpsw_est_update_state(struct net_device *ndev)
am65_cpsw_admin_to_oper(ndev);
}
-/* Fetch command count it's number of bytes in Gigabit mode or nibbles in
- * 10/100Mb mode. So, having speed and time in ns, recalculate ns to number of
- * bytes/nibbles that can be sent while transmission on given speed.
- */
-static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
-{
- u64 temp;
-
- temp = ns * link_speed;
- if (link_speed < SPEED_1000)
- temp <<= 1;
-
- return DIV_ROUND_UP(temp, 8 * 1000);
-}
-
static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
int fetch_cnt,
int fetch_allow)
@@ -356,7 +626,7 @@ static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
-/**
+/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
@@ -448,6 +718,7 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpts *cpts = common->cpts;
int ret = 0, tact = TACT_PROG;
+ u64 cur_time, n;
am65_cpsw_est_update_state(ndev);
@@ -470,13 +741,21 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev,
if (tact == TACT_PROG)
am65_cpsw_timer_stop(ndev);
- if (!est_new->taprio.base_time)
- est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
-
am65_cpsw_port_est_get_buf_num(ndev, est_new);
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+ /* If the base-time is in the past, start schedule from the time:
+ * base_time + (N*cycle_time)
+ * where N is the smallest possible integer such that the above
+ * time is in the future.
+ */
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (est_new->taprio.base_time < cur_time) {
+ n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
+ est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
+ }
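+ /* E.g. base_time = 200, cycle_time = 300, cur_time = 1000:
+ * n = (1000 - 200) / 300 = 2, so the schedule starts at
+ * 200 + 3 * 300 = 1100, the first cycle boundary after cur_time.
+ */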
+
am65_cpsw_est_set(ndev, est_new->taprio.enable);
if (tact == TACT_PROG) {
@@ -594,18 +873,46 @@ int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return am65_cpsw_mqprio_setup(ndev, type_data);
default:
return -EOPNOTSUPP;
}
}
-void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+static void am65_cpsw_iet_link_up(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_iet *iet = &port->qos.iet;
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ if (!iet->fpe_configured)
return;
+ /* Run MAC Verify and enable IET FPE if configured */
+ if (iet->mac_verify_configured) {
+ am65_cpsw_iet_fpe_enable(port, true);
+ } else {
+ /* Force IET FPE here */
+ netdev_info(ndev, "IET Enable Force mode\n");
+ am65_cpsw_iet_fpe_enable(port, false);
+ }
+}
+
+static void am65_cpsw_cut_thru_link_up(struct am65_cpsw_port *port);
+static void am65_cpsw_tx_pn_shaper_link_up(struct am65_cpsw_port *port);
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed, int duplex)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ port->qos.link_speed = link_speed;
+ port->qos.duplex = duplex;
+ am65_cpsw_iet_link_up(ndev);
+ am65_cpsw_cut_thru_link_up(port);
+ am65_cpsw_tx_pn_shaper_link_up(port);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
am65_cpsw_est_link_up(ndev, link_speed);
port->qos.link_down_time = 0;
}
@@ -614,6 +921,8 @@ void am65_cpsw_qos_link_down(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ am65_cpsw_iet_fpe_disable(port);
+
if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
return;
@@ -622,3 +931,536 @@ void am65_cpsw_qos_link_down(struct net_device *ndev)
port->qos.link_speed = SPEED_UNKNOWN;
}
+
+static void am65_cpsw_cut_thru_dump(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *common = port->common;
+ u32 control, cut_thru, speed;
+
+ control = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ cut_thru = readl(port->port_base + AM64_CPSW_PN_CUT_THRU);
+ speed = readl(port->port_base + AM64_CPSW_PN_SPEED);
+ dev_dbg(common->dev, "Port%u: cut_thru dump control:%08x cut_thru:%08x hwspeed:%08x\n",
+ port->port_id, control, cut_thru, speed);
+}
+
+static void am65_cpsw_cut_thru_enable(struct am65_cpsw_common *common)
+{
+ u32 val;
+
+ if (common->cut_thru_enabled) {
+ common->cut_thru_enabled++;
+ return;
+ }
+
+ /* Populate CPSW VBUS freq for auto speed detection */
+ writel(common->bus_freq / 1000000,
+ common->cpsw_base + AM65_CPSW_REG_FREQ);
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ val |= AM64_CPSW_CTL_CUT_THRU_EN;
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->cut_thru_enabled++;
+}
+
+void am65_cpsw_qos_cut_thru_init(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_cut_thru *cut_thru = &port->qos.cut_thru;
+ struct am65_cpsw_common *common = port->common;
+
+ /* Enable cut-thru only if the user has enabled the priv flag */
+ if (!cut_thru->enable)
+ return;
+
+ if (common->is_emac_mode) {
+ cut_thru->enable = false;
+ dev_info(common->dev, "Disable cut-thru, need Switch mode\n");
+ return;
+ }
+
+ am65_cpsw_cut_thru_enable(common);
+
+ /* enable auto speed detection */
+ writel(AM64_PN_SPEED_AUTO_EN, port->port_base + AM64_CPSW_PN_SPEED);
+ dev_info(common->dev, "Init cut_thru\n");
+ am65_cpsw_cut_thru_dump(port);
+}
+
+static void am65_cpsw_cut_thru_disable(struct am65_cpsw_common *common)
+{
+ u32 val;
+
+ if (--common->cut_thru_enabled)
+ return;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ val &= ~AM64_CPSW_CTL_CUT_THRU_EN;
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+}
+
+void am65_cpsw_qos_cut_thru_cleanup(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_cut_thru *cut_thru = &port->qos.cut_thru;
+ struct am65_cpsw_common *common = port->common;
+
+ if (!cut_thru->enable)
+ return;
+
+ writel(0, port->port_base + AM64_CPSW_PN_CUT_THRU);
+ writel(0, port->port_base + AM64_CPSW_PN_SPEED);
+
+ am65_cpsw_cut_thru_disable(common);
+ dev_info(common->dev, "Cleanup cut_thru\n");
+ am65_cpsw_cut_thru_dump(port);
+}
+
+static u32 am65_cpsw_cut_thru_speed2hw(int link_speed)
+{
+ switch (link_speed) {
+ case SPEED_10:
+ return 1;
+ case SPEED_100:
+ return 2;
+ case SPEED_1000:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
+static void am65_cpsw_cut_thru_link_up(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_cut_thru *cut_thru = &port->qos.cut_thru;
+ struct am65_cpsw_common *common = port->common;
+ u32 val, speed;
+
+ if (!cut_thru->enable)
+ return;
+
+ writel(AM64_PN_SPEED_AUTO_EN, port->port_base + AM64_CPSW_PN_SPEED);
+ /* barrier */
+ readl(port->port_base + AM64_CPSW_PN_SPEED);
+ /* HW needs 15us in 10/100 mode and 3us in 1G mode for auto speed
+ * detection; add a delay with some margin
+ */
+ usleep_range(40, 50);
+ val = readl(port->port_base + AM64_CPSW_PN_SPEED);
+ speed = FIELD_GET(AM64_PN_AUTO_SPEED, val);
+ if (!speed) {
+ dev_warn(common->dev,
+ "Port%u: cut_thru no speed auto detected switch to manual\n",
+ port->port_id);
+ speed = am65_cpsw_cut_thru_speed2hw(port->qos.link_speed);
+ if (!speed) {
+ dev_err(common->dev,
+ "Port%u: cut_thru speed configuration failed\n",
+ port->port_id);
+ return;
+ }
+ val = FIELD_PREP(AM64_PN_SPEED_VAL, speed);
+ writel(val, port->port_base + AM64_CPSW_PN_SPEED);
+ }
+
+ val = FIELD_PREP(AM64_PN_CUT_THRU_TX_PRI, cut_thru->tx_pri_mask) |
+ FIELD_PREP(AM64_PN_CUT_THRU_RX_PRI, cut_thru->rx_pri_mask);
+
+ if (port->qos.duplex) {
+ writel(val, port->port_base + AM64_CPSW_PN_CUT_THRU);
+ dev_info(common->dev, "Port%u: Enable cut_thru rx:%08x tx:%08x hwspeed:%u (%08x)\n",
+ port->port_id,
+ cut_thru->rx_pri_mask, cut_thru->tx_pri_mask,
+ speed, val);
+ } else {
+ writel(0, port->port_base + AM64_CPSW_PN_CUT_THRU);
+ dev_info(common->dev, "Port%u: Disable cut_thru duplex=%d\n",
+ port->port_id, port->qos.duplex);
+ }
+ am65_cpsw_cut_thru_dump(port);
+}
+
+static u32
+am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
+{
+ u32 ir;
+
+ bus_freq /= 1000000;
+ ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
+ return ir;
+}
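
The CIR conversion above maps Mbps to the shaper's fixed-point rate units; a worked instance (sketch, helper name ours): 1000 Mbps on a 500 MHz VBUS yields DIV_ROUND_UP(1000 * 32768, 500) = 65536.

    static u32 cpsw_cir_from_mbps(u32 rate_mbps, unsigned long bus_freq_hz)
    {
            /* same math as am65_cpsw_qos_tx_rate_calc() above */
            return DIV_ROUND_UP((u64)rate_mbps * 32768, bus_freq_hz / 1000000);
    }
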
+
+static void
+am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
+ int tx_ch, u32 rate_mbps)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+ u32 ch_cir;
+ int i;
+
+ ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
+ writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
+
+ /* update rates for every port tx queues */
+ for (i = 0; i < common->port_num; i++) {
+ struct net_device *ndev = common->ports[i].ndev;
+
+ if (!ndev)
+ continue;
+ netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
+ }
+}
+
+int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
+ int queue, u32 rate_mbps)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpsw_tx_chn *tx_chn;
+ u32 ch_rate, tx_ch_rate_msk_new;
+ u32 ch_msk = 0;
+ int ret;
+
+ dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
+ queue, rate_mbps, common->tx_ch_rate_msk);
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
+ return -EINVAL;
+ }
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate_mbps)
+ return 0;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+ ret = 0;
+
+ tx_ch_rate_msk_new = common->tx_ch_rate_msk;
+ if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
+ tx_ch_rate_msk_new |= BIT(queue);
+ ch_msk = GENMASK(common->tx_ch_num - 1, queue);
+ ch_msk = tx_ch_rate_msk_new ^ ch_msk;
+ } else if (!rate_mbps) {
+ tx_ch_rate_msk_new &= ~BIT(queue);
+ ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
+ ch_msk = tx_ch_rate_msk_new & ch_msk;
+ }
+
+ if (ch_msk) {
+ dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
+ common->tx_ch_rate_msk, tx_ch_rate_msk_new);
+ ret = -EINVAL;
+ goto exit_put;
+ }
+
+ tx_chn = &common->tx_chns[queue];
+ tx_chn->rate_mbps = rate_mbps;
+ common->tx_ch_rate_msk = tx_ch_rate_msk_new;
+
+ if (!common->usage_count)
+ /* will be applied on next netif up */
+ goto exit_put;
+
+ am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);
+
+exit_put:
+ pm_runtime_put(common->dev);
+ return ret;
+}
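
The two mask computations above encode one rule: per-channel TX rate limiting may only ever be active on a contiguous run of the highest-numbered channels. Restated as a predicate (a sketch; helper name ours, GENMASK() from <linux/bits.h>):

    static bool tx_rate_msk_ok(u32 msk_new, int queue, int tx_ch_num,
                               bool enable)
    {
            if (enable) /* channels above 'queue' must already be limited */
                    return msk_new == GENMASK(tx_ch_num - 1, queue);

            /* disabling: no channel below 'queue' may still be limited */
            return !queue || !(msk_new & GENMASK(queue - 1, 0));
    }
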
+
+void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+ int tx_ch;
+
+ for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
+ u32 ch_cir;
+
+ if (!tx_chn->rate_mbps)
+ continue;
+
+ ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
+ common->bus_freq);
+ writel(ch_cir,
+ host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
+ }
+}
+
+static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct am65_cpsw_common *common = port->common;
+ struct tc_mqprio_qopt_offload *mqprio;
+ bool shaper_en;
+ u32 rate_mbps;
+ int i;
+
+ mqprio = &p_mqprio->mqprio_hw;
+ shaper_en = p_mqprio->shaper_en && !p_mqprio->shaper_susp;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ rate_mbps = 0;
+ if (shaper_en) {
+ rate_mbps = mqprio->min_rate[i] * 8 / 1000000;
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ }
+
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_CIR(i));
+ }
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ rate_mbps = 0;
+ if (shaper_en && mqprio->max_rate[i]) {
+ rate_mbps = mqprio->max_rate[i] - mqprio->min_rate[i];
+ rate_mbps = rate_mbps * 8 / 1000000;
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ }
+
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_EIR(i));
+ }
+}
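
The two loops above split each traffic class's budget between the committed (CIR) and excess (EIR) rate registers; the conversion from mqprio's bytes-per-second values, in isolation (a sketch; names ours, div_u64() from <linux/math64.h>):

    static void shaper_split_mbps(u64 min_bps, u64 max_bps,
                                  u32 *cir_mbps, u32 *eir_mbps)
    {
            /* CIR carries the guaranteed rate, EIR the excess above it */
            *cir_mbps = (u32)div_u64(min_bps * 8, 1000000);
            *eir_mbps = max_bps ?
                    (u32)div_u64((max_bps - min_bps) * 8, 1000000) : 0;
    }
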
+
+static void am65_cpsw_tx_pn_shaper_link_up(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct am65_cpsw_common *common = port->common;
+ bool shaper_susp = false;
+
+ if (!p_mqprio->enable || !p_mqprio->shaper_en)
+ return;
+
+ if (p_mqprio->max_rate_total > port->qos.link_speed)
+ shaper_susp = true;
+
+ if (p_mqprio->shaper_susp == shaper_susp)
+ return;
+
+ if (shaper_susp)
+ dev_info(common->dev,
+ "Port%u: total shaper tx rate > link speed - suspend shaper\n",
+ port->port_id);
+ else
+ dev_info(common->dev,
+ "Port%u: link recover - resume shaper\n",
+ port->port_id);
+
+ p_mqprio->shaper_susp = shaper_susp;
+ am65_cpsw_tx_pn_shaper_apply(port);
+}
+
+void am65_cpsw_qos_mqprio_init(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(port->common);
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct tc_mqprio_qopt_offload *mqprio = &p_mqprio->mqprio_hw;
+ int i, fifo, rx_prio_map;
+
+ rx_prio_map = readl(host->port_base + AM65_CPSW_PN_REG_RX_PRI_MAP);
+
+ if (p_mqprio->enable) {
+ for (i = 0; i < AM65_CPSW_PN_TC_NUM; i++) {
+ fifo = mqprio->qopt.prio_tc_map[i];
+ p_mqprio->tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(port->ndev, mqprio->qopt.num_tc);
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ netdev_set_tc_queue(port->ndev, i,
+ mqprio->qopt.count[i],
+ mqprio->qopt.offset[i]);
+ if (!i) {
+ p_mqprio->tc0_q = mqprio->qopt.offset[i];
+ rx_prio_map &= ~(0x7 << (4 * p_mqprio->tc0_q));
+ }
+ }
+ } else {
+ /* restore default configuration */
+ netdev_reset_tc(port->ndev);
+ p_mqprio->tx_prio_map = AM65_CPSW_PN_TX_PRI_MAP_DEF;
+ rx_prio_map |= p_mqprio->tc0_q << (4 * p_mqprio->tc0_q);
+ p_mqprio->tc0_q = 0;
+ }
+
+ writel(p_mqprio->tx_prio_map,
+ port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+ writel(rx_prio_map,
+ host->port_base + AM65_CPSW_PN_REG_RX_PRI_MAP);
+
+ am65_cpsw_tx_pn_shaper_apply(port);
+}
+
+static int am65_cpsw_mqprio_verify(struct am65_cpsw_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ int i;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ unsigned int last = mqprio->qopt.offset[i] +
+ mqprio->qopt.count[i];
+
+ if (mqprio->qopt.offset[i] >= port->ndev->real_num_tx_queues ||
+ !mqprio->qopt.count[i] ||
+ last > port->ndev->real_num_tx_queues)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
+ struct tc_mqprio_qopt_offload *mqprio,
+ u64 *max_rate)
+{
+ struct am65_cpsw_common *common = port->common;
+ bool has_min_rate, has_max_rate;
+ u64 min_rate_total = 0, max_rate_total = 0;
+ u32 min_rate_msk = 0, max_rate_msk = 0;
+ int num_tc, i;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ if (!has_min_rate && has_max_rate)
+ return -EOPNOTSUPP;
+
+ if (!has_min_rate)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
+
+ for (i = num_tc - 1; i >= 0; i--) {
+ u32 ch_msk;
+
+ if (mqprio->min_rate[i])
+ min_rate_msk |= BIT(i);
+ min_rate_total += mqprio->min_rate[i];
+
+ if (has_max_rate) {
+ if (mqprio->max_rate[i])
+ max_rate_msk |= BIT(i);
+ max_rate_total += mqprio->max_rate[i];
+
+ if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
+ dev_err(common->dev, "TX tc%d rate max>0 but min=0\n",
+ i);
+ return -EINVAL;
+ }
+
+ if (mqprio->max_rate[i] &&
+ mqprio->max_rate[i] < mqprio->min_rate[i]) {
+ dev_err(common->dev, "TX tc%d rate min(%llu)>max(%llu)\n",
+ i, mqprio->min_rate[i],
+ mqprio->max_rate[i]);
+ return -EINVAL;
+ }
+ }
+
+ ch_msk = GENMASK(num_tc - 1, i);
+ if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
+ dev_err(common->dev, "TX Min rate limiting has to be enabled sequentially hi->lo tx_rate_msk%x\n",
+ min_rate_msk);
+ return -EINVAL;
+ }
+
+ if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
+ dev_err(common->dev, "TX max rate limiting has to be enabled sequentially hi->lo tx_rate_msk%x\n",
+ max_rate_msk);
+ return -EINVAL;
+ }
+ }
+ min_rate_total *= 8;
+ min_rate_total /= 1000 * 1000;
+ max_rate_total *= 8;
+ max_rate_total /= 1000 * 1000;
+
+ if (port->qos.link_speed != SPEED_UNKNOWN) {
+ if (min_rate_total > port->qos.link_speed) {
+ dev_err(common->dev, "TX rate min exceed %llu link speed %d\n",
+ min_rate_total, port->qos.link_speed);
+ return -EINVAL;
+ }
+
+ if (max_rate_total > port->qos.link_speed) {
+ dev_err(common->dev, "TX rate max exceed %llu link speed %d\n",
+ max_rate_total, port->qos.link_speed);
+ return -EINVAL;
+ }
+ }
+
+ *max_rate = max_t(u64, min_rate_total, max_rate_total);
+
+ return 0;
+}
+
+static int am65_cpsw_mqprio_setup(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ bool has_min_rate;
+ int num_tc, ret;
+ u64 max_rate;
+
+ if (!mqprio->qopt.hw)
+ goto skip_check;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL)
+ return -EOPNOTSUPP;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > AM65_CPSW_PN_TC_NUM)
+ return -ERANGE;
+
+ if ((mqprio->flags & TC_MQPRIO_F_SHAPER) &&
+ mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+ return -EOPNOTSUPP;
+
+ ret = am65_cpsw_mqprio_verify(port, mqprio);
+ if (ret)
+ return ret;
+
+ ret = am65_cpsw_mqprio_verify_shaper(port, mqprio, &max_rate);
+ if (ret)
+ return ret;
+
+skip_check:
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ if (mqprio->qopt.hw) {
+ memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ p_mqprio->enable = 1;
+ p_mqprio->shaper_en = has_min_rate;
+ p_mqprio->shaper_susp = !has_min_rate;
+ p_mqprio->max_rate_total = max_rate;
+ p_mqprio->tx_prio_map = 0;
+ } else {
+ unsigned int tc0_q = p_mqprio->tc0_q;
+
+ memset(p_mqprio, 0, sizeof(*p_mqprio));
+ p_mqprio->mqprio_hw.qopt.num_tc = AM65_CPSW_PN_TC_NUM;
+ p_mqprio->tc0_q = tc0_q;
+ }
+
+ if (!netif_running(ndev))
+ goto exit_put;
+
+ am65_cpsw_qos_mqprio_init(port);
+
+exit_put:
+ pm_runtime_put(common->dev);
+ return 0;
+}
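
A configuration exercising this path might look as follows (assuming standard iproute2 mqprio channel-mode syntax; device name and rates are illustrative):

    tc qdisc replace dev eth0 root handle 100: mqprio num_tc 2 \
            map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
            shaper bw_rlimit min_rate 100Mbit 200Mbit \
            max_rate 500Mbit 500Mbit
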
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index e8f1b6b59e93..a3ac4c3ac484 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -7,6 +7,10 @@
#include <linux/netdevice.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+struct am65_cpsw_port;
+struct am65_cpsw_common;
struct am65_cpsw_est {
int buf;
@@ -14,16 +18,59 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_iet {
+ struct net_device *ndev;
+ /* Set through priv flags */
+ bool fpe_configured;
+ bool mac_verify_configured;
+ /* frame preemption enabled */
+ bool fpe_enabled;
+ /* configured mask */
+ u32 fpe_mask_configured;
+ /* current mask */
+ u32 mask;
+ u32 addfragsize;
+};
+
+struct am65_cpsw_mqprio {
+ struct tc_mqprio_qopt_offload mqprio_hw;
+ u64 max_rate_total;
+ u32 tx_prio_map;
+
+ unsigned enable:1;
+ unsigned shaper_en:1;
+ unsigned shaper_susp:1;
+ unsigned tc0_q:3;
+};
+
+struct am65_cpsw_cut_thru {
+ unsigned int rx_pri_mask;
+ unsigned int tx_pri_mask;
+ bool enable;
+};
+
struct am65_cpsw_qos {
struct am65_cpsw_est *est_admin;
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+ int duplex;
+ struct am65_cpsw_iet iet;
+ struct am65_cpsw_mqprio mqprio;
+ struct am65_cpsw_cut_thru cut_thru;
};
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
-void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed, int duplex);
void am65_cpsw_qos_link_down(struct net_device *ndev);
+void am65_cpsw_qos_iet_init(struct net_device *ndev);
+void am65_cpsw_qos_iet_cleanup(struct net_device *ndev);
+void am65_cpsw_qos_cut_thru_init(struct am65_cpsw_port *port);
+void am65_cpsw_qos_cut_thru_cleanup(struct am65_cpsw_port *port);
+int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
+ int queue, u32 rate_mbps);
+void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common);
+void am65_cpsw_qos_mqprio_init(struct am65_cpsw_port *port);
#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
new file mode 100644
index 000000000000..b9ab087b63ea
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
+#include "cpsw_ale.h"
+
+struct am65_cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct am65_cpsw_port *port;
+ unsigned long event;
+};
+
+static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port,
+ struct switchdev_trans *trans, u8 state)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ u8 cpsw_state;
+ int ret = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_STATE, cpsw_state);
+ netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ unsigned long brport_flags)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ bool unreg_mcast_add = false;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (brport_flags & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+ netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, port->port_id);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
+ unreg_mcast_add);
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = am65_cpsw_port_stp_state_set(port, trans, attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_set(port, trans, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
+ if (port->port_id)
+ pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (port->port_id)
+ writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+}
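
Both helpers above agree on the port VLAN register layout; restated (a sketch derived from the code itself, helper name ours):

    /* AM65_CPSW_PORT_VLAN_REG: [11:0] VID, [12] CFI, [15:13] COS */
    static u32 am65_pvid_encode(u16 vid, bool cfi, u32 cos)
    {
            return (vid & 0xfff) | (cfi ? BIT(12) : 0) | ((cos & 0x7) << 13);
    }
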
+
+static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(port->port_id);
+ flags = port->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ netdev_err(port->ndev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
+ if (!pvid)
+ return ret;
+
+ am65_cpsw_set_pvid(port, vid, 0, 0);
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is returned
+ * only if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == am65_cpsw_get_pvid(port))
+ am65_cpsw_set_pvid(port, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is returned
+ * only if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
+ ALE_VLAN, vid);
+ netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
+ port->ndev->name, vlan->vid_begin, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid_begin, orig_dev);
+}
+
+static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ return am65_cpsw_port_vlan_del(port, vlan->vid_begin, vlan->obj.orig_dev);
+}
+
+static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int del_mask;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(port->port_id);
+
+ /* Ignore error as error code is returned only when entry is already removed */
+ cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return 0;
+}
+
+static int am65_cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_add(port, vlan, trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_add(port, mdb, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int am65_cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_del(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_del(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void am65_cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct am65_cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct am65_cpsw_switchdev_event_work, work);
+ struct am65_cpsw_port *port = switchdev_work->port;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_id = port->port_id;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ am65_cpsw_fdb_offload_notify(port->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int am65_cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct am65_cpsw_switchdev_event_work *switchdev_work;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
+ switchdev_work->port = port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = am65_cpsw_switchdev_event,
+};
+
+static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = am65_cpsw_switchdev_blocking_event,
+};
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.h b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
new file mode 100644
index 000000000000..a67a7606bc80
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+ skb->offload_fwd_mark = val;
+}
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw);
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw);
+#else
+static inline int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+}
+
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+}
+
+#endif
+
+#endif /* DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 5dc60ecabe56..2470c93b0fb6 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -175,7 +175,22 @@ struct am65_cpts {
u64 timestamp;
u32 genf_enable;
u32 hw_ts_enable;
+ u32 estf_enable;
struct sk_buff_head txq;
+ bool pps_enabled;
+ bool pps_present;
+ u32 pps_hw_ts_idx;
+ u32 pps_genf_idx;
+ /* context save/restore */
+ u64 sr_cpts_ns;
+ u64 sr_ktime_ns;
+ u32 sr_control;
+ u32 sr_int_enable;
+ u32 sr_rftclk_sel;
+ u32 sr_ts_ppm_hi;
+ u32 sr_ts_ppm_low;
+ struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};
struct am65_cpts_skb_cb_data {
@@ -309,8 +324,17 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
case AM65_CPTS_EV_HW:
pevent.index = am65_cpts_event_get_port(event) - 1;
pevent.timestamp = event->timestamp;
- pevent.type = PTP_CLOCK_EXTTS;
- dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ if (cpts->pps_enabled &&
+ pevent.index == cpts->pps_hw_ts_idx) {
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.pps_times.ts_real =
+ ns_to_timespec64(pevent.timestamp);
+ } else {
+ pevent.type = PTP_CLOCK_EXTTS;
+ }
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
+ pevent.type == PTP_CLOCK_EXTTS ?
+ "extts" : "pps",
pevent.index, event->timestamp);
ptp_clock_event(cpts->ptp_clock, &pevent);
@@ -384,9 +408,12 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
- int neg_adj = 0;
- u64 adj_period;
- u32 val;
+ u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0;
+ int pps_index = cpts->pps_genf_idx;
+ u64 adj_period, pps_adj_period;
+ u32 ctrl_val, ppm_hi, ppm_low;
+ unsigned long flags;
+ int neg_adj = 0, i;
if (ppb < 0) {
neg_adj = 1;
@@ -406,17 +433,60 @@ static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
+ ctrl_val = am65_cpts_read32(cpts, control);
if (neg_adj)
- val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
else
- val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
- am65_cpts_write32(cpts, val, control);
+ ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+
+ ppm_hi = upper_32_bits(adj_period) & 0x3FF;
+ ppm_low = lower_32_bits(adj_period);
+
+ if (cpts->pps_enabled) {
+ estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
+ if (neg_adj)
+ estf_ctrl_val &= ~BIT(1);
+ else
+ estf_ctrl_val |= BIT(1);
+
+ /* GenF PPM does its correction in cpts refclk ticks, each of which
+ * is (cpts->ts_add_val + 1) ns, so the GenF length PPM adj period
+ * needs to be scaled accordingly.
+ */
+ pps_adj_period = adj_period * (cpts->ts_add_val + 1);
+ estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
+ estf_ppm_low = lower_32_bits(pps_adj_period);
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
- val = upper_32_bits(adj_period) & 0x3FF;
- am65_cpts_write32(cpts, val, ts_ppm_hi);
- val = lower_32_bits(adj_period);
- am65_cpts_write32(cpts, val, ts_ppm_low);
+ /* All writes below must be done as fast as possible:
+ * - a delay between the PPM direction and PPM value updates briefly
+ * applies the old PPM correction in the wrong direction
+ * - a delay between the CPTS-clock PPM cfg and the GenF PPM cfg
+ * briefly runs the CPTS clock with the new cfg while the GenF PPM
+ * still uses the old one
+ */
+
+ am65_cpts_write32(cpts, ctrl_val, control);
+ am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
+ am65_cpts_write32(cpts, ppm_low, ts_ppm_low);
+
+ if (cpts->pps_enabled) {
+ am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control);
+ am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi);
+ am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low);
+ }
+
+ for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
+ if (cpts->estf_enable & BIT(i)) {
+ am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control);
+ am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi);
+ am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low);
+ }
+ }
+ /* All GenF/EstF can be updated here the same way */
+ spin_unlock_irqrestore(&cpts->lock, flags);
mutex_unlock(&cpts->ptp_clk_lock);
@@ -496,6 +566,10 @@ static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
+
+ if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
+ return -EINVAL;
+
if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
return 0;
@@ -529,6 +603,11 @@ int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
am65_cpts_write32(cpts, val, estf[idx].comp_lo);
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, estf[idx].length);
+ am65_cpts_write32(cpts, 0, estf[idx].control);
+ am65_cpts_write32(cpts, 0, estf[idx].ppm_hi);
+ am65_cpts_write32(cpts, 0, estf[idx].ppm_low);
+
+ cpts->estf_enable |= BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
@@ -539,6 +618,7 @@ EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
am65_cpts_write32(cpts, 0, estf[idx].length);
+ cpts->estf_enable &= ~BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
@@ -569,6 +649,10 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, genf[req->index].length);
+ am65_cpts_write32(cpts, 0, genf[req->index].control);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
+
cpts->genf_enable |= BIT(req->index);
} else {
am65_cpts_write32(cpts, 0, genf[req->index].length);
@@ -580,6 +664,9 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
struct ptp_perout_request *req, int on)
{
+ if (cpts->pps_present && req->index == cpts->pps_genf_idx)
+ return -EINVAL;
+
if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
return 0;
@@ -593,6 +680,48 @@ static int am65_cpts_perout_enable(struct am65_cpts *cpts,
return 0;
}
+static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
+{
+ struct ptp_clock_request rq;
+ struct timespec64 ts;
+ int ret = 0;
+ u64 ns;
+
+ if (!cpts->pps_present)
+ return -EINVAL;
+
+ if (cpts->pps_enabled == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ if (on) {
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+
+ ns = am65_cpts_gettime(cpts, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ rq.perout.index = cpts->pps_genf_idx;
+
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ cpts->pps_enabled = true;
+ } else {
+ rq.perout.index = cpts->pps_genf_idx;
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+ cpts->pps_enabled = false;
+ }
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: pps: %s\n",
+ __func__, on ? "enabled" : "disabled");
+ return ret;
+}
+
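
The PPS path above reuses one GenF as a 1 Hz periodic output whose first edge lands on a whole-second boundary two seconds out; that start-time choice, in isolation (sketch, helper name ours):

    static u64 pps_start_sec(u64 now_ns)
    {
            /* next-plus-one whole second after 'now' */
            return div_u64(now_ns, NSEC_PER_SEC) + 2;
    }
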
static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -603,6 +732,8 @@ static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
return am65_cpts_extts_enable(cpts, rq->extts.index, on);
case PTP_CLK_REQ_PEROUT:
return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return am65_cpts_pps_enable(cpts, on);
default:
break;
}
@@ -727,7 +858,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
/**
* am65_cpts_rx_enable - enable rx timestamping
* @cpts: cpts handle
- * @skb: packet
+ * @en: enable
*
* This functions enables rx packets timestamping. The CPTS can timestamp all
* rx packets.
@@ -915,17 +1046,22 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
cpts->genf_num = prop[0];
+ if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
+ cpts->pps_present = true;
+ cpts->pps_hw_ts_idx = prop[0];
+ cpts->pps_genf_idx = prop[1];
+ }
+
return cpts_of_mux_clk_setup(cpts, node);
}
-static void am65_cpts_release(void *data)
+void am65_cpts_release(struct am65_cpts *cpts)
{
- struct am65_cpts *cpts = data;
-
ptp_clock_unregister(cpts->ptp_clock);
am65_cpts_disable(cpts);
clk_disable_unprepare(cpts->refclk);
}
+EXPORT_SYMBOL_GPL(am65_cpts_release);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node)
@@ -985,6 +1121,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
if (cpts->genf_num)
cpts->ptp_info.n_per_out = cpts->genf_num;
+ if (cpts->pps_present)
+ cpts->ptp_info.pps = 1;
am65_cpts_set_add_val(cpts);
@@ -1006,32 +1144,94 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
- ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
- if (ret) {
- dev_err(dev, "failed to add ptpclk reset action %d", ret);
- return ERR_PTR(ret);
- }
-
ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
am65_cpts_interrupt,
IRQF_ONESHOT, dev_name(dev), cpts);
if (ret < 0) {
dev_err(cpts->dev, "error attaching irq %d\n", ret);
- return ERR_PTR(ret);
+ goto reset_ptpclk;
}
- dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
am65_cpts_read32(cpts, idver),
- cpts->refclk_freq, cpts->ts_add_val);
+ cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);
return cpts;
+reset_ptpclk:
+ am65_cpts_release(cpts);
refclk_disable:
clk_disable_unprepare(cpts->refclk);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
+void am65_cpts_suspend(struct am65_cpts *cpts)
+{
+ /* save state and disable CPTS */
+ cpts->sr_control = am65_cpts_read32(cpts, control);
+ cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
+ cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
+ cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
+ cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
+ cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
+ cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
+ am65_cpts_disable(cpts);
+ clk_disable(cpts->refclk);
+
+ /* Save GENF state */
+ memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));
+
+ /* Save ESTF state */
+ memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
+}
+EXPORT_SYMBOL_GPL(am65_cpts_suspend);
+
+void am65_cpts_resume(struct am65_cpts *cpts)
+{
+ int i;
+ s64 ktime_ns;
+
+ /* restore state and enable CPTS */
+ clk_enable(cpts->refclk);
+ am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
+ am65_cpts_set_add_val(cpts);
+ am65_cpts_write32(cpts, cpts->sr_control, control);
+ am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);
+
+ /* Restore time to saved CPTS time + time in suspend/resume */
+ ktime_ns = ktime_to_ns(ktime_get_real());
+ ktime_ns -= cpts->sr_ktime_ns;
+ am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);
+
+ /* Restore compensation (PPM) */
+ am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);
+
+ /* Restore GENF state */
+ for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
+ am65_cpts_write32(cpts, 0, genf[i].length); /* TRM sequence */
+ am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
+ }
+
+ /* Restore ESTF state */
+ for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
+ am65_cpts_write32(cpts, 0, estf[i].length); /* TRM sequence */
+ am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
+ am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
+ }
+}
+EXPORT_SYMBOL_GPL(am65_cpts_resume);
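
The settime step in the resume path admits a one-line statement (a sketch; the field names mirror the save/restore members above):

    static u64 phc_time_after_resume(u64 saved_phc_ns, s64 saved_wall_ns,
                                     s64 now_wall_ns)
    {
            /* saved PHC time plus wall-clock time spent suspended */
            return saved_phc_ns + (now_wall_ns - saved_wall_ns);
    }
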
+
static int am65_cpts_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index cf9fbc28fd03..6e14df0be113 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
};
#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
@@ -28,7 +29,13 @@ u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+void am65_cpts_suspend(struct am65_cpts *cpts);
+void am65_cpts_resume(struct am65_cpts *cpts);
#else
+static inline void am65_cpts_release(struct am65_cpts *cpts)
+{
+}
+
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
struct device_node *node)
@@ -69,6 +76,14 @@ static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
}
+
+static inline void am65_cpts_suspend(struct am65_cpts *cpts)
+{
+}
+
+static inline void am65_cpts_resume(struct am65_cpts *cpts)
+{
+}
#endif
#endif /* K3_CPTS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-debugfs.c b/drivers/net/ethernet/ti/am65-debugfs.c
new file mode 100644
index 000000000000..33bc6a24a328
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-debugfs.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet debugfs submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+
+#include "am65-cpsw-nuss.h"
+
+int am65_cpsw_nuss_register_debugfs(struct am65_cpsw_common *common)
+{
+ common->debugfs_root = debugfs_create_dir(dev_name(common->dev), NULL);
+ if (IS_ERR(common->debugfs_root))
+ return PTR_ERR(common->debugfs_root);
+
+ return 0;
+}
+
+void am65_cpsw_nuss_unregister_debugfs(struct am65_cpsw_common *common)
+{
+ debugfs_remove_recursive(common->debugfs_root);
+}
+
+static int
+cut_thru_tx_pri_mask_get(void *data, u64 *val)
+{
+ struct am65_cpsw_port *port = data;
+ struct am65_cpsw_cut_thru *cut_thru;
+ int ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ cut_thru = &port->qos.cut_thru;
+ if (port->ndev->reg_state == NETREG_REGISTERED) {
+ *val = cut_thru->tx_pri_mask;
+ ret = 0;
+ }
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+static int
+cut_thru_tx_pri_mask_set(void *data, u64 val)
+{
+ struct am65_cpsw_cut_thru *cut_thru;
+ struct am65_cpsw_port *port = data;
+ struct am65_cpsw_common *common;
+ int ret = 0;
+
+ if (val & ~GENMASK(7, 0))
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ common = port->common;
+ cut_thru = &port->qos.cut_thru;
+
+ if (cut_thru->enable) {
+ dev_err(common->dev, "Port%u: can't set cut-thru tx_pri_mask while cut-thru enabled\n",
+ port->port_id);
+ ret = -EINVAL;
+ goto err;
+ }
+ cut_thru->tx_pri_mask = val;
+
+err:
+ rtnl_unlock();
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_cut_thru_tx_pri_mask, cut_thru_tx_pri_mask_get,
+ cut_thru_tx_pri_mask_set, "%llx\n");
+
+static int
+cut_thru_rx_pri_mask_get(void *data, u64 *val)
+{
+ struct am65_cpsw_port *port = data;
+ struct am65_cpsw_cut_thru *cut_thru;
+ int ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ cut_thru = &port->qos.cut_thru;
+ if (port->ndev->reg_state == NETREG_REGISTERED) {
+ *val = cut_thru->rx_pri_mask;
+ ret = 0;
+ }
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+static int
+cut_thru_rx_pri_mask_set(void *data, u64 val)
+{
+ struct am65_cpsw_cut_thru *cut_thru;
+ struct am65_cpsw_port *port = data;
+ struct am65_cpsw_common *common;
+ int ret = 0;
+
+ if (val & ~GENMASK(7, 0))
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ common = port->common;
+ cut_thru = &port->qos.cut_thru;
+
+ if (cut_thru->enable) {
+ dev_err(common->dev, "Port%u: can't set cut-thru rx_pri_mask while cut-thru enabled\n",
+ port->port_id);
+ ret = -EINVAL;
+ goto err;
+ }
+ cut_thru->rx_pri_mask = val;
+
+err:
+ rtnl_unlock();
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_cut_thru_rx_pri_mask, cut_thru_rx_pri_mask_get,
+ cut_thru_rx_pri_mask_set, "%llx\n");
+
+static int
+iet_addfragsize_get(void *data, u64 *val)
+{
+ struct am65_cpsw_port *port = data;
+ struct am65_cpsw_iet *iet;
+ int ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ iet = &port->qos.iet;
+ if (port->ndev->reg_state == NETREG_REGISTERED) {
+ *val = (iet->addfragsize + 1) << 6;
+ ret = 0;
+ }
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+static int
+iet_addfragsize_set(void *data, u64 val)
+{
+ struct am65_cpsw_iet *iet;
+ struct am65_cpsw_port *port = data;
+ int ret = 0;
+
+ if (val > 512)
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ iet = &port->qos.iet;
+
+ /* hw addfragsize is in 64-octet units; val is rounded down */
+ iet->addfragsize = (val >> 6) - 1;
+
+ rtnl_unlock();
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iet_addfragsize, iet_addfragsize_get, iet_addfragsize_set, "%llu\n");
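
The get/set pair above round-trips addFragSize through its hardware encoding; spelled out (sketch, helper names ours):

    /* field = bytes/64 - 1; bytes = (field + 1) * 64. Inputs below 64
     * would underflow the subtraction, so callers are expected to pass
     * multiples of 64 in [64, 512].
     */
    static u32 addfragsize_field(u32 bytes) { return (bytes >> 6) - 1; }
    static u32 addfragsize_bytes(u32 field) { return (field + 1) << 6; }
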
+
+int am65_cpsw_nuss_register_port_debugfs(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *common = port->common;
+ char dirn[32];
+
+ scnprintf(dirn, sizeof(dirn), "Port%x", port->port_id);
+ port->debugfs_port = debugfs_create_dir(dirn, common->debugfs_root);
+ if (IS_ERR(port->debugfs_port))
+ return PTR_ERR(port->debugfs_port);
+
+ debugfs_create_bool("disabled", 0400,
+ port->debugfs_port, &port->disabled);
+ if (port->disabled)
+ return 0;
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_CUT_THRU) {
+ debugfs_create_file("cut_thru_tx_pri_mask", 0600,
+ port->debugfs_port,
+ port, &fops_cut_thru_tx_pri_mask);
+ debugfs_create_file("cut_thru_rx_pri_mask", 0600,
+ port->debugfs_port,
+ port, &fops_cut_thru_rx_pri_mask);
+ }
+
+ debugfs_create_file("iet_addfragsize", 0600,
+ port->debugfs_port,
+ port, &fops_iet_addfragsize);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5af0f9f8c097..6e32521a0658 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,6 +34,8 @@
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/net_switch_config.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
@@ -59,6 +61,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
+static int tx_packet_min = CPSW_MIN_PACKET_SIZE;
+module_param(tx_packet_min, int, 0444);
+MODULE_PARM_DESC(tx_packet_min, "minimum tx packet size (bytes)");
+
static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
@@ -409,12 +415,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.frame_sz = PAGE_SIZE;
port = priv->emac_port + cpsw->data.dual_emac;
- ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
@@ -498,7 +502,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* enable forwarding */
cpsw_ale_control_set(cpsw->ale, slave_port,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ ALE_PORT_STATE,
+ priv->port_state[slave_port]);
*link = true;
@@ -611,6 +616,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->mac_control = 0; /* no link yet */
slave_port = cpsw_get_slave_port(slave->slave_num);
+ priv->port_state[slave_port] = ALE_PORT_STATE_FORWARD;
if (cpsw->data.dual_emac)
cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
@@ -913,14 +919,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpts *cpts = cpsw->cpts;
struct netdev_queue *txq;
struct cpdma_chan *txch;
+ unsigned int len;
int ret, q_idx;
- if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ if (skb_padto(skb, tx_packet_min)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
return NET_XMIT_DROP;
}
+ len = skb->len < tx_packet_min ? tx_packet_min : skb->len;
+
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -1140,7 +1149,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
for (i = 0; i < n; i++) {
xdpf = frames[i];
- if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
+ if (xdpf->len < tx_packet_min) {
xdp_return_frame_rx_napi(xdpf);
drops++;
continue;
@@ -1166,12 +1175,37 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+#include "cpsw_switch_ioctl.c"
+
+static int cpsw_ndo_ioctl_legacy(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ case SIOCSWITCHCONFIG:
+ return cpsw_switch_config_ioctl(dev, req, cmd);
+ }
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_do_ioctl = cpsw_ndo_ioctl_legacy,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index a6a455c32628..6d60373d15e0 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -50,6 +50,8 @@
/* ALE_AGING_TIMER */
#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
/**
* struct ale_entry_fld - The ALE tbl entry field description
* @start_bit: field start bit
@@ -634,8 +636,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
return 0;
}
-static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
- u16 vid, int port_mask)
+static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
{
int reg_mcast, unreg_mcast;
int members, untag;
@@ -644,6 +646,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST);
members &= ~port_mask;
if (!members) {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
return;
}
@@ -673,7 +676,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST, members);
}
-int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
@@ -684,11 +687,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask) {
- cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
- } else {
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+ cpsw_ale_write(ale, idx, ale_entry);
+
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int members, idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+	/* If !port_mask, force-remove the VLAN (legacy behaviour).
+	 * Otherwise check whether other ports are still members of the VLAN:
+	 * if none remain, remove the VLAN entry;
+	 * if some remain, the same VLAN was added to more than one port in
+	 * multi-port mode, so remove only the port_mask ports from the VLAN
+	 * ALE entry, excluding the Host port.
+	 */
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST);
+ members &= ~port_mask;
+
+ if (!port_mask || !members) {
+ /* last port or force remove - remove VLAN */
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ } else {
+ port_mask &= ~ALE_PORT_HOST;
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
}
cpsw_ale_write(ale, idx, ale_entry);
@@ -1107,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
return tmp & BITMASK(info->bits);
}
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE MC port:%d ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
static void cpsw_ale_timer(struct timer_list *t)
{
struct cpsw_ale *ale = from_timer(ale, t, timer);
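The two helpers above quantize the requested rate in ALE_RATE_LIMIT_MIN_PPS (1000 pps) steps: the 8-bit port.MCAST/BCAST_LIMIT field counts packets per prescale interval, and with the 1 ms interval programmed in cpsw_ale_start() below, each register unit equals 1000 pps. A minimal sketch of the mapping, with illustrative values:

/* Sketch: requested pps -> 8-bit limit field, assuming the 1 ms ALE
 * prescale interval set up in cpsw_ale_start(). Values are illustrative.
 */
unsigned int requested = 2500;			/* pps asked for */
unsigned int val = requested / 1000;		/* ALE_RATE_LIMIT_MIN_PPS -> 2 */
unsigned int effective = val * 1000;		/* 2 units -> 2000 pps applied */
/* the 500 pps remainder is dropped and the effective rate is logged */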
@@ -1170,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ unsigned long ale_prescale;
+
+	/* Configure Broadcast and Multicast Rate Limit:
+	 * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+	 * ALE_PRESCALE is 19 bits wide with a min value of 0x10;
+	 * port.BCAST/MCAST_LIMIT is 8 bits wide.
+	 *
+	 * To support multi-port configurations, ALE_PRESCALE is set to a 1ms
+	 * interval, which allows port.BCAST/MCAST_LIMIT to be configured per
+	 * port and achieves:
+	 * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+	 * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+	 */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+	/* Allow MC/BC rate limiting globally.
+	 * The actual rate limit is enabled per port via port.BCAST/MCAST_LIMIT.
+	 */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
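To illustrate the prescale arithmetic in the comment above, a sketch assuming a hypothetical 350 MHz ALE bus clock:

/* Sketch, assuming bus_freq = 350 MHz (hypothetical) */
unsigned long bus_freq = 350000000UL;
unsigned long ale_prescale = bus_freq / 1000;	/* ALE_RATE_LIMIT_MIN_PPS */
/* ale_prescale == 350000 ticks == 1 ms, which fits the 19-bit field
 * (max 0x7FFFF == 524287); then
 * number_of_packets = (bus_freq / ale_prescale) * LIMIT = 1000 * LIMIT pps
 */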
@@ -1227,6 +1322,13 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.major_ver_mask = 0x7,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
{ },
};
@@ -1352,6 +1454,18 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
}
}
+void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data)
+{
+ int i, type;
+
+ for (i = 0; i < ale->params.ale_entries; i++) {
+ type = cpsw_ale_get_entry_type(data);
+ if (type != ALE_TYPE_FREE)
+ cpsw_ale_write(ale, i, data);
+ data += ALE_ENTRY_WORDS;
+ }
+}
+
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
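cpsw_ale_restore() is the counterpart of cpsw_ale_dump(): it walks a previously dumped table image and writes back only the non-free entries. A hedged sketch of the pairing; the context-loss scenario and caller are assumptions:

/* Sketch: saving/restoring the ALE table around a context-loss event */
static int ale_save_restore_example(struct cpsw_ale *ale)
{
	u32 n = cpsw_ale_get_num_entries(ale);
	u32 *tbl = kcalloc(n * ALE_ENTRY_WORDS, sizeof(u32), GFP_KERNEL);

	if (!tbl)
		return -ENOMEM;

	cpsw_ale_dump(ale, tbl);	/* snapshot before state is lost */
	/* ... power transition; cpsw_ale_start() re-enables the ALE ... */
	cpsw_ale_restore(ale, tbl);	/* write back non-free entries only */

	kfree(tbl);
	return 0;
}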
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 5e4a69662c5f..6779ee111d57 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -120,11 +120,14 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data);
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
@@ -134,6 +137,7 @@ static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int untag_mask, int reg_mcast, int unreg_mcast);
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a1ee205d6a88..c7deae9f9d72 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -46,6 +46,8 @@ static int debug_level;
static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
struct cpsw_devlink {
struct cpsw_common *cpsw;
@@ -351,12 +353,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.rxq = &priv->xdp_rxq[ch];
xdp.frame_sz = PAGE_SIZE;
- ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index e74f2e95a46e..9503a2dcf621 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ageout = ale_ageout;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -612,7 +613,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -676,7 +677,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
@@ -693,16 +694,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
-#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
-
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
- return -EOPNOTSUPP;
-}
#endif /*CONFIG_TI_CPTS*/
int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1323,7 +1314,7 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
}
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port)
+ struct page *page, int port, int *len)
{
struct cpsw_common *cpsw = priv->cpsw;
struct net_device *ndev = priv->ndev;
@@ -1341,10 +1332,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
}
act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
switch (act) {
case XDP_PASS:
ret = CPSW_XDP_PASS;
- break;
+ goto out;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -1370,8 +1364,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
trace_xdp_exception(ndev, prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
goto drop;
}
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
out:
rcu_read_unlock();
return ret;
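With this change cpsw_run_xdp() reports the post-program frame length back through the new *len parameter: an XDP program may grow or shrink the packet via bpf_xdp_adjust_head()/bpf_xdp_adjust_tail(), so the length has to be re-read after bpf_prog_run_xdp() instead of being taken from the DMA completion. A sketch of the caller contract (variable names mirror cpsw_rx_handler()):

/* Sketch of the updated caller contract */
int len = pkt_len;	/* hypothetical: length reported by CPDMA */

ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
/* on return, len == xdp.data_end - xdp.data (post-program length),
 * used both for skb sizing on XDP_PASS and for the rx byte counters
 * on the XDP_TX/XDP_REDIRECT/XDP_DROP paths above
 */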
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index a100c93edee8..5d408828e83a 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -381,6 +381,7 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
int offload_fwd_mark;
+ u8 port_state[3];
u32 tx_packet_min;
};
@@ -440,7 +441,7 @@ int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
struct page *page, int port);
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port);
+ struct page *page, int port, int *len);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
@@ -495,4 +496,19 @@ int cpsw_set_channels_common(struct net_device *ndev,
cpdma_handler_fn rx_handler);
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+#if IS_ENABLED(CONFIG_TI_CPTS)
+int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+#else
+static inline int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /*CONFIG_TI_CPTS*/
+
#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw_switch_ioctl.c b/drivers/net/ethernet/ti/cpsw_switch_ioctl.c
new file mode 100644
index 000000000000..05a43fb41508
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switch_ioctl.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/* CPSW switch configuration via the non-standard private ioctl SIOCSWITCHCONFIG
+ * Grygorii Strashko <grygorii.strashko@ti.com>:
+ * moved code into a separate file to minimize merge conflicts with LKML
+ */
+
+static int cpsw_set_port_state(struct cpsw_priv *priv, int port,
+ int port_state)
+{
+ switch (port_state) {
+ case PORT_STATE_DISABLED:
+ priv->port_state[port] = ALE_PORT_STATE_DISABLE;
+ break;
+ case PORT_STATE_BLOCKED:
+ priv->port_state[port] = ALE_PORT_STATE_BLOCK;
+ break;
+ case PORT_STATE_LEARN:
+ priv->port_state[port] = ALE_PORT_STATE_LEARN;
+ break;
+ case PORT_STATE_FORWARD:
+ priv->port_state[port] = ALE_PORT_STATE_FORWARD;
+ break;
+ default:
+ dev_err(priv->dev, "Switch config: Invalid port state\n");
+ return -EINVAL;
+ }
+ return cpsw_ale_control_set(priv->cpsw->ale, port, ALE_PORT_STATE,
+ priv->port_state[port]);
+}
+
+static int cpsw_switch_config_ioctl(struct net_device *ndev,
+ struct ifreq *ifrq, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_switch_config config;
+ int ret = -EINVAL;
+
+ if (cpsw->data.dual_emac) {
+ dev_err(priv->dev, "CPSW not in switch mode\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* Only SIOCSWITCHCONFIG is ever passed as the cmd argument, so no
+	 * switch statement on cmd is needed here; dispatch is based on
+	 * switch_config.cmd instead.
+	 */
+
+ if (copy_from_user(&config, ifrq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.vid > 4095) {
+ dev_err(priv->dev, "Invalid VLAN id Arguments for cmd %d\n",
+ config.cmd);
+ return ret;
+ }
+
+ switch (config.cmd) {
+ case SWITCH_ADD_MULTICAST:
+ if (config.port > 0 && config.port <= 7 &&
+ is_multicast_ether_addr(config.addr)) {
+ ret = cpsw_ale_add_mcast(cpsw->ale, config.addr,
+ config.port, ALE_VLAN,
+ config.vid, 0);
+ } else {
+ dev_err(priv->dev, "Invalid Arguments for cmd %d\n",
+ config.cmd);
+ }
+ break;
+ case SWITCH_DEL_MULTICAST:
+ if (is_multicast_ether_addr(config.addr)) {
+ ret = cpsw_ale_del_mcast(cpsw->ale, config.addr,
+ 0, ALE_VLAN, config.vid);
+ } else {
+ dev_err(priv->dev, "Invalid Arguments for cmd %d\n",
+ config.cmd);
+ }
+ break;
+ case SWITCH_ADD_VLAN:
+ if (config.port > 0 && config.port <= 7) {
+ ret = cpsw_ale_add_vlan(cpsw->ale, config.vid,
+ config.port,
+ config.untag_port,
+ config.reg_multi,
+ config.unreg_multi);
+ } else {
+ dev_err(priv->dev, "Invalid Arguments for cmd %d\n",
+ config.cmd);
+ }
+ break;
+ case SWITCH_DEL_VLAN:
+ ret = cpsw_ale_del_vlan(cpsw->ale, config.vid, 0);
+ break;
+ case SWITCH_SET_PORT_CONFIG:
+ {
+ struct phy_device *phy = NULL;
+ struct ethtool_link_ksettings cmd;
+
+ if (config.port == 1 || config.port == 2)
+ phy = cpsw->slaves[config.port - 1].phy;
+
+ if (!phy) {
+ dev_err(priv->dev, "Phy not Found\n");
+ break;
+ }
+
+ convert_legacy_settings_to_link_ksettings(&cmd, &config.ecmd);
+ cmd.base.phy_address = phy->mdio.addr;
+ ret = phy_ethtool_ksettings_set(phy, &cmd);
+ break;
+ }
+ case SWITCH_GET_PORT_CONFIG:
+ {
+ struct phy_device *phy = NULL;
+ struct ethtool_link_ksettings cmd;
+
+ if (config.port == 1 || config.port == 2)
+ phy = cpsw->slaves[config.port - 1].phy;
+
+ if (!phy) {
+ dev_err(priv->dev, "Phy not Found\n");
+ break;
+ }
+
+ cmd.base.phy_address = phy->mdio.addr;
+ phy_ethtool_ksettings_get(phy, &cmd);
+ convert_link_ksettings_to_legacy_settings(&config.ecmd, &cmd);
+
+ ret = copy_to_user(ifrq->ifr_data, &config, sizeof(config));
+ break;
+ }
+ case SWITCH_ADD_UNKNOWN_VLAN_INFO:
+ if (config.unknown_vlan_member <= 7 &&
+ config.unknown_vlan_untag <= 7 &&
+ config.unknown_vlan_unreg_multi <= 7 &&
+ config.unknown_vlan_reg_multi <= 7) {
+ cpsw_ale_control_set(cpsw->ale, 0,
+ ALE_PORT_UNTAGGED_EGRESS,
+ config.unknown_vlan_untag);
+ cpsw_ale_control_set(cpsw->ale, 0,
+ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+ config.unknown_vlan_reg_multi);
+ cpsw_ale_control_set(cpsw->ale, 0,
+ ALE_PORT_UNKNOWN_MCAST_FLOOD,
+ config.unknown_vlan_unreg_multi);
+ cpsw_ale_control_set(cpsw->ale, 0,
+ ALE_PORT_UNKNOWN_VLAN_MEMBER,
+ config.unknown_vlan_member);
+ ret = 0;
+ } else {
+ dev_err(priv->dev, "Invalid Unknown VLAN Arguments\n");
+ }
+ break;
+ case SWITCH_GET_PORT_STATE:
+ if (config.port == 1 || config.port == 2) {
+ config.port_state = priv->port_state[config.port];
+ ret = copy_to_user(ifrq->ifr_data, &config,
+ sizeof(config));
+ } else {
+ dev_err(priv->dev, "Invalid Port number\n");
+ }
+ break;
+ case SWITCH_SET_PORT_STATE:
+ if (config.port == 1 || config.port == 2) {
+ ret = cpsw_set_port_state(priv, config.port,
+ config.port_state);
+ } else {
+ dev_err(priv->dev, "Invalid Port number\n");
+ }
+ break;
+ case SWITCH_GET_PORT_VLAN_CONFIG:
+ {
+ u32 __iomem *port_vlan_reg;
+ u32 port_vlan;
+
+ switch (config.port) {
+ case 0:
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ port_vlan = readl(port_vlan_reg);
+ ret = 0;
+
+ break;
+ case 1:
+ case 2:
+ {
+ int slave = config.port - 1;
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+
+ port_vlan = slave_read(cpsw->slaves + slave, reg);
+ ret = 0;
+
+ break;
+ }
+ default:
+ dev_err(priv->dev, "Invalid Port number\n");
+ break;
+ }
+
+ if (!ret) {
+ config.vid = port_vlan & 0xfff;
+ config.vlan_cfi = port_vlan & BIT(12) ? true : false;
+ config.prio = (port_vlan >> 13) & 0x7;
+ ret = copy_to_user(ifrq->ifr_data, &config,
+ sizeof(config));
+ }
+ break;
+ }
+ case SWITCH_SET_PORT_VLAN_CONFIG:
+ {
+ void __iomem *port_vlan_reg;
+ u32 port_vlan;
+
+ port_vlan = config.vid;
+ port_vlan |= config.vlan_cfi ? BIT(12) : 0;
+ port_vlan |= (config.prio & 0x7) << 13;
+
+ switch (config.port) {
+ case 0:
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ writel(port_vlan, port_vlan_reg);
+ ret = 0;
+
+ break;
+ case 1:
+ case 2:
+ {
+ int slave = config.port - 1;
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+
+ slave_write(cpsw->slaves + slave, port_vlan, reg);
+ ret = 0;
+
+ break;
+ }
+ default:
+ dev_err(priv->dev, "Invalid Port number\n");
+ break;
+ }
+
+ break;
+ }
+ case SWITCH_RATELIMIT:
+ {
+ if (config.port > 2) {
+ dev_err(priv->dev, "Invalid Port number\n");
+ break;
+ }
+
+		ret = cpsw_ale_rx_ratelimit_mc(cpsw->ale, config.port, config.mcast_rate_limit);
+		if (ret)
+			dev_err(priv->dev, "CPSW_ALE set MC ratelimit failed\n");
+
+		ret = cpsw_ale_rx_ratelimit_bc(cpsw->ale, config.port, config.bcast_rate_limit);
+		if (ret)
+			dev_err(priv->dev, "CPSW_ALE set BC ratelimit failed\n");
+
+ break;
+ }
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
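For context, a hedged sketch of how user space would drive this handler; struct net_switch_config, SIOCSWITCHCONFIG and the SWITCH_ and PORT_STATE_ values are assumed to come from a TI-specific UAPI header not shown in this diff:

/* Userspace sketch (assumes a TI-provided header defining
 * SIOCSWITCHCONFIG, struct net_switch_config and the command values).
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int set_port_state(const char *ifname, int port, int state)
{
	struct net_switch_config config;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&config, 0, sizeof(config));
	config.cmd = SWITCH_SET_PORT_STATE;	/* dispatched on switch_config.cmd */
	config.port = port;			/* 1 or 2 */
	config.port_state = state;		/* e.g. PORT_STATE_FORWARD */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&config;

	ret = ioctl(fd, SIOCSWITCHCONFIG, &ifr);
	close(fd);
	return ret;
}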
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 985a929bb957..29747da5c514 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -227,7 +227,7 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
else
port_mask = BIT(priv->emac_port);
- ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask);
if (ret != 0)
return ret;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 702fdc393da0..383e2ba16b50 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -26,6 +26,8 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/sys_soc.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
@@ -41,6 +43,7 @@
struct davinci_mdio_of_param {
int autosuspend_delay_ms;
+ bool manual_mode;
};
struct davinci_mdio_regs {
@@ -49,6 +52,15 @@ struct davinci_mdio_regs {
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xffff)
+#define CONTROL_CLKDIV GENMASK(15, 0)
+
+#define MDIO_MAN_MDCLK_O BIT(2)
+#define MDIO_MAN_OE BIT(1)
+#define MDIO_MAN_PIN BIT(0)
+#define MDIO_MANUALMODE BIT(31)
+
+#define MDIO_PIN 0
+
u32 alive;
u32 link;
@@ -59,7 +71,9 @@ struct davinci_mdio_regs {
u32 userintmasked;
u32 userintmaskset;
u32 userintmaskclr;
- u32 __reserved_1[20];
+ u32 manualif;
+ u32 poll;
+ u32 __reserved_1[18];
struct {
u32 access;
@@ -79,6 +93,7 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
+ struct mdiobb_ctrl bb_ctrl;
struct davinci_mdio_regs __iomem *regs;
struct clk *clk;
struct device *dev;
@@ -90,6 +105,7 @@ struct davinci_mdio_data {
*/
bool skip_scan;
u32 clk_div;
+ bool manual_mode;
};
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
@@ -128,9 +144,126 @@ static void davinci_mdio_enable(struct davinci_mdio_data *data)
writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
-static int davinci_mdio_reset(struct mii_bus *bus)
+static void davinci_mdio_disable(struct davinci_mdio_data *data)
+{
+ u32 reg;
+
+ /* Disable MDIO state machine */
+ reg = readl(&data->regs->control);
+
+ reg &= ~CONTROL_CLKDIV;
+ reg |= data->clk_div;
+
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &data->regs->control);
+}
+
+static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
+{
+ u32 reg;
+ /* set manual mode */
+ reg = readl(&data->regs->poll);
+ reg |= MDIO_MANUALMODE;
+ writel(reg, &data->regs->poll);
+}
+
+static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (level)
+ reg |= MDIO_MAN_MDCLK_O;
+ else
+ reg &= ~MDIO_MAN_MDCLK_O;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (output)
+ reg |= MDIO_MAN_OE;
+ else
+ reg &= ~MDIO_MAN_OE;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (value)
+ reg |= MDIO_MAN_PIN;
+ else
+ reg &= ~MDIO_MAN_PIN;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct davinci_mdio_data *data;
+ unsigned long reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+ return test_bit(MDIO_PIN, &reg);
+}
+
+static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(bus->parent);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bus->parent);
+ return ret;
+ }
+
+ ret = mdiobb_read(bus, phy, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(bus->parent);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bus->parent);
+ return ret;
+ }
+
+ ret = mdiobb_write(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
- struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
int ret;
@@ -140,6 +273,11 @@ static int davinci_mdio_reset(struct mii_bus *bus)
return ret;
}
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ }
+
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -173,6 +311,23 @@ done:
return 0;
}
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+
+ return davinci_mdio_common_reset(data);
+}
+
+static int davinci_mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ struct davinci_mdio_data *data;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+
+ return davinci_mdio_common_reset(data);
+}
+
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
@@ -324,6 +479,28 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return 0;
}
+struct k3_mdio_soc_data {
+ bool manual_mode;
+};
+
+static const struct k3_mdio_soc_data am65_mdio_soc_data = {
+ .manual_mode = true,
+};
+
+static const struct soc_device_attribute k3_mdio_socinfo[] = {
+ { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { /* sentinel */ },
+};
+
#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
.autosuspend_delay_ms = 100,
@@ -337,6 +514,14 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
+static const struct mdiobb_ops davinci_mdiobb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = davinci_set_mdc,
+ .set_mdio_dir = davinci_set_mdio_dir,
+ .set_mdio_data = davinci_set_mdio_data,
+ .get_mdio_data = davinci_get_mdio_data,
+};
+
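These pin-level ops plug into the generic mdio-bitbang framework: alloc_mdio_bitbang() returns a mii_bus whose accessors clock whole MDIO frames by toggling the ops above against the manualif register, while the davinci_mdiobb_read/write wrappers add runtime-PM handling around them. A condensed sketch of the wiring done in probe below:

/* Sketch: wiring the bitbang ops (condensed from davinci_mdio_probe()) */
data->bb_ctrl.ops = &davinci_mdiobb_ops;
data->bus = alloc_mdio_bitbang(&data->bb_ctrl);	/* bus->priv = &bb_ctrl */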
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -351,27 +536,42 @@ static int davinci_mdio_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->bus = devm_mdiobus_alloc(dev);
+ data->manual_mode = false;
+ data->bb_ctrl.ops = &davinci_mdiobb_ops;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct soc_device_attribute *soc_match_data;
+
+ soc_match_data = soc_device_match(k3_mdio_socinfo);
+ if (soc_match_data && soc_match_data->data) {
+ const struct k3_mdio_soc_data *socdata =
+ soc_match_data->data;
+
+ data->manual_mode = socdata->manual_mode;
+ }
+ }
+
+ if (data->manual_mode)
+ data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
+ else
+ data->bus = devm_mdiobus_alloc(dev);
+
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
return -ENOMEM;
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- const struct of_device_id *of_id;
+ const struct davinci_mdio_of_param *of_mdio_data;
ret = davinci_mdio_probe_dt(&data->pdata, pdev);
if (ret)
return ret;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
- of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
- if (of_id) {
- const struct davinci_mdio_of_param *of_mdio_data;
-
- of_mdio_data = of_id->data;
- if (of_mdio_data)
- autosuspend_delay_ms =
+ of_mdio_data = of_device_get_match_data(&pdev->dev);
+ if (of_mdio_data) {
+ autosuspend_delay_ms =
of_mdio_data->autosuspend_delay_ms;
}
} else {
@@ -381,11 +581,20 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read,
- data->bus->write = davinci_mdio_write,
- data->bus->reset = davinci_mdio_reset,
+
+ if (data->manual_mode) {
+ data->bus->read = davinci_mdiobb_read;
+ data->bus->write = davinci_mdiobb_write;
+ data->bus->reset = davinci_mdiobb_reset;
+
+ dev_info(dev, "Configuring MDIO in manual mode\n");
+ } else {
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->priv = data;
+ }
data->bus->parent = dev;
- data->bus->priv = data;
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
@@ -443,9 +652,13 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus)
+ if (data->bus) {
mdiobus_unregister(data->bus);
+ if (data->manual_mode)
+ free_mdio_bitbang(data->bus);
+ }
+
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -462,7 +675,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
writel(ctrl, &data->regs->control);
- wait_for_idle(data);
+
+ if (!data->manual_mode)
+ wait_for_idle(data);
return 0;
}
@@ -471,7 +686,12 @@ static int davinci_mdio_runtime_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- davinci_mdio_enable(data);
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ } else {
+ davinci_mdio_enable(data);
+ }
return 0;
}
#endif
diff --git a/drivers/net/ethernet/ti/icss_iep.c b/drivers/net/ethernet/ti/icss_iep.c
new file mode 100644
index 000000000000..4552fc07553d
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_iep.c
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/timekeeping.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "icss_iep.h"
+
+#define IEP_MAX_DEF_INC 0xf
+#define IEP_MAX_COMPEN_INC 0xfff
+#define IEP_MAX_COMPEN_COUNT 0xffffff
+
+#define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4
+#define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8)
+#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8
+
+#define IEP_GLOBAL_STATUS_CNT_OVF BIT(0)
+
+#define CMP_INDEX(sync) ((sync) + 1)
+#define IEP_CMP_CFG_SHADOW_EN BIT(17)
+#define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0)
+#define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1)))
+
+#define IEP_CMP_STATUS(cmp) (1 << (cmp))
+
+#define IEP_SYNC_CTRL_SYNC_EN BIT(0)
+#define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n)))
+
+#define IEP_MIN_CMP 0
+#define IEP_MAX_CMP 15
+
+#define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0)
+#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1)
+#define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2)
+
+#define LATCH_INDEX(ts_index) ((ts_index) + 6)
+#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
+#define IEP_CAP_CFG_CAPNF_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n) + 1)
+#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
+
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+ struct hrtimer sync_timer;
+};
+
+static u32 icss_iep_readl(struct icss_iep *iep, int reg)
+{
+ return readl(iep->base + iep->plat_data->reg_offs[reg]);
+}
+
+static void icss_iep_writel(struct icss_iep *iep, int reg, u32 val)
+{
+ return writel(val, iep->base + iep->plat_data->reg_offs[reg]);
+}
+
+/**
+ * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: upper 32 bit IEP counter
+ */
+int icss_iep_get_count_hi(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
+ val = icss_iep_readl(iep, ICSS_IEP_COUNT_REG1);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
+
+/**
+ * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: lower 32 bit IEP counter
+ */
+int icss_iep_get_count_low(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep)
+ val = icss_iep_readl(iep, ICSS_IEP_COUNT_REG0);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
+
+/**
+ * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: PTP clock index, -1 if not registered
+ */
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
+{
+ if (!iep || !iep->ptp_clock)
+ return -1;
+ return ptp_clock_index(iep->ptp_clock);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
+
+static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+{
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ icss_iep_writel(iep, ICSS_IEP_COUNT_REG1, upper_32_bits(ns));
+ icss_iep_writel(iep, ICSS_IEP_COUNT_REG0, lower_32_bits(ns));
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+
+static void icss_iep_settime(struct icss_iep *iep, u64 ns)
+{
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->settime) {
+ iep->ops->settime(iep->clockops_data, ns);
+ return;
+ }
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ if (iep->pps_enabled || iep->perout_enabled)
+ icss_iep_writel(iep, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+ icss_iep_set_counter(iep, ns);
+
+ if (iep->pps_enabled || iep->perout_enabled) {
+ icss_iep_update_to_next_boundary(iep, ns);
+ icss_iep_writel(iep, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ }
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+}
+
+static u64 icss_iep_gettime(struct icss_iep *iep,
+ struct ptp_system_timestamp *sts)
+{
+ u32 ts_hi = 0, ts_lo;
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->gettime)
+ return iep->ops->gettime(iep->clockops_data, sts);
+
+ /* use local_irq_x() to make it work for both RT/non-RT */
+ local_irq_save(flags);
+
+ /* no need to play with hi-lo, hi is latched when lo is read */
+ ptp_read_system_prets(sts);
+ ts_lo = icss_iep_readl(iep, ICSS_IEP_COUNT_REG0);
+ ptp_read_system_postts(sts);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ ts_hi = icss_iep_readl(iep, ICSS_IEP_COUNT_REG1);
+
+ local_irq_restore(flags);
+
+ return (u64)ts_lo | (u64)ts_hi << 32;
+}
+
+static void icss_iep_enable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ IEP_GLOBAL_CFG_CNT_ENABLE);
+}
+
+static void icss_iep_disable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ 0);
+}
+
+static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+{
+ u32 cycle_time;
+ int cmp;
+
+ /* FIXME: check why we need to decrement by def_inc */
+ cycle_time = iep->cycle_time_ns - iep->def_inc;
+
+ icss_iep_disable(iep);
+
+ /* disable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, 0);
+
+ /* enable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);
+
+ /* clear counters */
+ icss_iep_set_counter(iep, 0);
+
+ /* clear overflow status */
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
+ IEP_GLOBAL_STATUS_CNT_OVF,
+ IEP_GLOBAL_STATUS_CNT_OVF);
+
+ /* clear compare status */
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+ }
+
+ /* enable reset counter on CMP0 event */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN);
+ /* enable compare */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(0),
+ IEP_CMP_CFG_CMP_EN(0));
+
+ /* set CMP0 value to cycle time */
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);
+
+ icss_iep_set_counter(iep, 0);
+ icss_iep_enable(iep);
+}
+
+static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
+ def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_inc > IEP_MAX_COMPEN_INC) {
+ dev_err(dev, "%s: too high compensation inc %d\n",
+ __func__, compen_inc);
+ compen_inc = IEP_MAX_COMPEN_INC;
+ }
+
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_COMPEN_INC_MASK,
+ compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_count > IEP_MAX_COMPEN_COUNT) {
+ dev_err(dev, "%s: too high compensation count %d\n",
+ __func__, compen_count);
+ compen_count = IEP_MAX_COMPEN_COUNT;
+ }
+
+ regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
+}
+
+static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
+}
+
+/* PTP PHC operations */
+static int icss_iep_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u32 cyc_count;
+ u16 cmp_inc;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+	/* ppb is the amount of frequency we want to adjust, in parts per
+	 * billion (relative to 1 GHz).
+	 * e.g. 100ppb means we need to speed up the clock by 100Hz,
+	 * i.e. at the end of 1 second (1 billion ns) of clock time, we
+	 * should be counting 100 more ns.
+	 * We use IEP slow compensation to achieve continuous freq. adjustment.
+	 * There are 2 parts: cycle time and adjustment per cycle.
+	 * The simplest case would be a 1 sec cycle time; then the adjustment
+	 * per cycle would be (def_inc + ppb).
+	 * The cycle time has to be chosen based on how large the ppb is:
+	 * the smaller the ppb, the larger the cycle time must be.
+	 * The minimum adjustment we can do is +-1ns per cycle, so let's
+	 * reduce the cycle time to get a 1ns per cycle adjustment.
+	 * 1ppb = 1sec cycle time & 1ns adjust
+	 * 1000ppb = 1/1000 cycle time & 1ns adjust per cycle
+	 */
+
+ if (iep->cycle_time_ns)
+ iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */
+ else
+ iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */
+
+ if (ppb < 0) {
+ iep->slow_cmp_inc = -iep->slow_cmp_inc;
+ ppb = -ppb;
+ }
+
+ cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */
+ cyc_count /= ppb; /* cycle time per ppb */
+
+ /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
+ if (!iep->cycle_time_ns)
+ cyc_count /= iep->clk_tick_time;
+ iep->slow_cmp_count = cyc_count;
+
+ /* iep->clk_tick_time is def_inc */
+ cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
+ icss_iep_set_compensation_inc(iep, cmp_inc);
+ icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
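A numeric walk-through of the slow-compensation scheme above, for the free-running case (cycle_time_ns == 0) with a hypothetical 4 ns tick:

/* Sketch: ppb = 100, def_inc = clk_tick_time = 4 (ns per tick)
 * slow_cmp_inc = 1;                  +-1 ns per compensation event
 * cyc_count    = 1000000000 / 100;   == 10,000,000 ns
 * cyc_count   /= 4;                  == 2,500,000 clock ticks
 * cmp_inc      = 4 + 1;              one tick counts 5 ns instead of 4
 * => one extra ns every 10 ms == 100 ns per second == 100 ppb, as requested
 */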
+
+static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ if (iep->ops && iep->ops->adjtime) {
+ iep->ops->adjtime(iep->clockops_data, delta);
+ } else {
+ ns = icss_iep_gettime(iep, NULL);
+ ns += delta;
+ icss_iep_settime(iep, ns);
+ }
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = icss_iep_gettime(iep, sts);
+ *ts = ns_to_timespec64(ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = timespec64_to_ns(ts);
+ icss_iep_settime(iep, ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
+{
+ u64 ns, p_ns;
+ u32 offset;
+
+ ns = icss_iep_gettime(iep, NULL);
+ if (start_ns < ns)
+ start_ns = ns;
+ p_ns = iep->period;
+ /* Round up to next period boundary */
+ start_ns += p_ns - 1;
+ offset = do_div(start_ns, p_ns);
+ start_ns = start_ns * p_ns;
+ /* If it is too close to update, shift to next boundary */
+ if (p_ns - offset < 10)
+ start_ns += p_ns;
+
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
+}
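A numeric walk-through of the boundary rounding above (illustrative values, p_ns = 1,000,000,000):

/* ns (now) = 2,300,000,005; requested start_ns = 1,500,000,000
 * start_ns = max(start_ns, ns)    -> 2,300,000,005
 * start_ns += p_ns - 1            -> 3,300,000,004
 * offset = do_div(start_ns, p_ns) -> quotient 3, offset 300,000,004
 * start_ns *= p_ns                -> 3,000,000,000 (next boundary)
 * p_ns - offset = 699,999,996 >= 10, so no extra period is added
 */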
+
+static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ int ret;
+ u64 cmp;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
+ if (ret)
+ return ret;
+
+ if (on) {
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000); /* 1ms pulse width */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+ }
+ } else {
+ if (on) {
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int icss_iep_perout_enable(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ if (iep->cap_cmp_irq)
+ hrtimer_cancel(&iep->sync_timer);
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static irqreturn_t icss_iep_cap_cmp_handler(int irq, void *dev_id)
+{
+ struct icss_iep *iep = (struct icss_iep *)dev_id;
+ unsigned int val, index = 0, i, sts;
+ struct ptp_clock_event pevent;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+ u64 ns, ns_next;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+
+ val = icss_iep_readl(iep, ICSS_IEP_CMP_STAT_REG);
+ if (val & BIT(CMP_INDEX(index))) {
+ icss_iep_writel(iep, ICSS_IEP_CMP_STAT_REG,
+ BIT(CMP_INDEX(index)));
+
+ if (!iep->pps_enabled && !iep->perout_enabled)
+ goto do_latch;
+
+ ns = icss_iep_readl(iep, ICSS_IEP_CMP1_REG0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+ val = icss_iep_readl(iep, ICSS_IEP_CMP1_REG1);
+ ns |= (u64)val << 32;
+ }
+ /* set next event */
+ ns_next = ns + iep->period;
+ icss_iep_writel(iep, ICSS_IEP_CMP1_REG0,
+ lower_32_bits(ns_next));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ icss_iep_writel(iep, ICSS_IEP_CMP1_REG1,
+ upper_32_bits(ns_next));
+
+ pevent.pps_times.ts_real = ns_to_timespec64(ns);
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.index = index;
+ ptp_clock_event(iep->ptp_clock, &pevent);
+ dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
+
+ hrtimer_start(&iep->sync_timer, ms_to_ktime(110), /* 100ms + buffer */
+ HRTIMER_MODE_REL);
+
+ ret = IRQ_HANDLED;
+ }
+
+do_latch:
+ sts = icss_iep_readl(iep, ICSS_IEP_CAPTURE_STAT_REG);
+ if (!sts)
+ goto cap_cmp_exit;
+
+ for (i = 0; i < iep->ptp_info.n_ext_ts; i++) {
+ if (sts & IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(i * 2)) {
+ ns = icss_iep_readl(iep,
+ ICSS_IEP_CAP6_RISE_REG0 + (i * 2));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+ val = icss_iep_readl(iep,
+ ICSS_IEP_CAP6_RISE_REG0 + (i * 2) + 1);
+ ns |= (u64)val << 32;
+ }
+ pevent.timestamp = ns;
+ pevent.type = PTP_CLOCK_EXTTS;
+ pevent.index = i;
+ ptp_clock_event(iep->ptp_clock, &pevent);
+ dev_dbg(iep->dev, "IEP:extts index=%d ts: %llu\n", i, ns);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+cap_cmp_exit:
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+ return ret;
+}
+
+static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+{
+ int ret = 0;
+ struct timespec64 ts;
+ struct ptp_clock_request rq;
+ unsigned long flags;
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->perout_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->pps_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+
+ rq.perout.index = 0;
+ if (on) {
+ ns = icss_iep_gettime(iep, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ } else {
+ if (iep->cap_cmp_irq)
+ hrtimer_cancel(&iep->sync_timer);
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ }
+
+ if (!ret)
+ iep->pps_enabled = !!on;
+
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
+{
+ u32 val, cap, ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->ops && iep->ops->extts_enable) {
+ ret = iep->ops->extts_enable(iep->clockops_data, index, on);
+ goto exit;
+ }
+
+ if (!!(iep->latch_enable & BIT(index)) == !!on)
+ goto exit;
+
+ regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
+ cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
+ if (on) {
+ val |= cap;
+ iep->latch_enable |= BIT(index);
+ } else {
+ val &= ~cap;
+ iep->latch_enable &= ~BIT(index);
+ }
+ regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return icss_iep_perout_enable(iep, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return icss_iep_pps_enable(iep, on);
+ case PTP_CLK_REQ_EXTTS:
+ return icss_iep_extts_enable(iep, rq->extts.index, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info icss_iep_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ICSS IEP timer",
+ .max_adj = 10000000,
+ .adjfreq = icss_iep_ptp_adjfreq,
+ .adjtime = icss_iep_ptp_adjtime,
+ .gettimex64 = icss_iep_ptp_gettimeex,
+ .settime64 = icss_iep_ptp_settime,
+ .enable = icss_iep_ptp_enable,
+};
+
+static enum hrtimer_restart icss_iep_sync0_work(struct hrtimer *timer)
+{
+ struct icss_iep *iep = container_of(timer, struct icss_iep, sync_timer);
+
+ icss_iep_writel(iep, ICSS_IEP_SYNC_CTRL_REG, 0);
+ icss_iep_writel(iep, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ icss_iep_writel(iep, ICSS_IEP_SYNC0_STAT_REG, 1);
+
+ return HRTIMER_NORESTART;
+}
+
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+{
+ struct platform_device *pdev;
+ struct device_node *iep_np;
+ struct icss_iep *iep;
+
+ iep_np = of_parse_phandle(np, "iep", idx);
+ if (!iep_np || !of_device_is_available(iep_np))
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(iep_np);
+ of_node_put(iep_np);
+
+ if (!pdev)
+ /* probably IEP not yet probed */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ iep = platform_get_drvdata(pdev);
+ if (!iep)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ device_lock(iep->dev);
+ if (iep->client_np) {
+ device_unlock(iep->dev);
+ dev_err(iep->dev, "IEP is already acquired by %s",
+ iep->client_np->name);
+ return ERR_PTR(-EBUSY);
+ }
+ iep->client_np = np;
+ device_unlock(iep->dev);
+ get_device(iep->dev);
+
+ return iep;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_idx);
+
+struct icss_iep *icss_iep_get(struct device_node *np)
+{
+ return icss_iep_get_idx(np, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get);
+
+void icss_iep_put(struct icss_iep *iep)
+{
+ device_lock(iep->dev);
+ iep->client_np = NULL;
+ device_unlock(iep->dev);
+ put_device(iep->dev);
+ if (iep->cap_cmp_irq)
+ hrtimer_cancel(&iep->sync_timer);
+}
+EXPORT_SYMBOL_GPL(icss_iep_put);
+
+void icss_iep_init_fw(struct icss_iep *iep)
+{
+ /* start IEP for FW use in raw 64bit mode, no PTP support */
+ iep->clk_tick_time = iep->def_inc;
+ iep->cycle_time_ns = 0;
+ iep->ops = NULL;
+ iep->clockops_data = NULL;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_init_fw);
+
+void icss_iep_exit_fw(struct icss_iep *iep)
+{
+ icss_iep_disable(iep);
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
+
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns)
+{
+ int ret = 0;
+
+ iep->cycle_time_ns = cycle_time_ns;
+ iep->clk_tick_time = iep->def_inc;
+ iep->ops = clkops;
+ iep->clockops_data = clockops_data;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
+ !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
+ goto skip_perout;
+
+ if (iep->cap_cmp_irq || (iep->ops && iep->ops->perout_enable)) {
+ iep->ptp_info.n_per_out = 1;
+ iep->ptp_info.pps = 1;
+ }
+
+ if (iep->cap_cmp_irq || (iep->ops && iep->ops->extts_enable))
+ iep->ptp_info.n_ext_ts = 2;
+
+skip_perout:
+ if (cycle_time_ns)
+ icss_iep_enable_shadow_mode(iep);
+ else
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, ktime_get_real_ns());
+
+ iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
+ if (IS_ERR(iep->ptp_clock)) {
+ ret = PTR_ERR(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icss_iep_init);
+
+int icss_iep_exit(struct icss_iep *iep)
+{
+ if (iep->ptp_clock) {
+ ptp_clock_unregister(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ }
+ icss_iep_disable(iep);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit);
+
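Taken together, the exported entry points form the client API: a consumer (e.g. the ICSSG Ethernet driver) acquires an IEP instance via a DT phandle, initializes it with optional clock ops, and releases it on teardown. A hedged sketch; the client's "iep" phandle, the NULL clkops and the teardown ordering are assumptions:

/* Sketch of a client acquiring and initializing an IEP instance */
static int client_iep_attach(struct device_node *client_np)
{
	struct icss_iep *iep;
	int ret;

	iep = icss_iep_get(client_np);		/* first "iep" phandle */
	if (IS_ERR(iep))
		return PTR_ERR(iep);

	ret = icss_iep_init(iep, NULL, NULL, 0);	/* free-running, no clkops */
	if (ret) {
		icss_iep_put(iep);
		return ret;
	}

	/* icss_iep_get_ptp_clock_idx(iep) now yields the PHC index */
	return 0;
}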
+static const struct of_device_id icss_iep_of_match[];
+
+static int icss_iep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct icss_iep *iep;
+ struct clk *iep_clk;
+ int ret;
+
+ iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
+ if (!iep)
+ return -ENOMEM;
+
+ iep->dev = dev;
+ iep->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iep->base))
+ return -ENODEV;
+
+ iep->cap_cmp_irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
+ if (iep->cap_cmp_irq < 0) {
+ if (iep->cap_cmp_irq == -EPROBE_DEFER)
+ return iep->cap_cmp_irq;
+ iep->cap_cmp_irq = 0;
+ } else {
+ ret = devm_request_irq(dev, iep->cap_cmp_irq,
+ icss_iep_cap_cmp_handler, IRQF_TRIGGER_HIGH,
+ "iep_cap_cmp", iep);
+ if (ret) {
+ dev_err(iep->dev, "Request irq failed for cap_cmp %d\n", ret);
+ return ret;
+ }
+ hrtimer_init(&iep->sync_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ iep->sync_timer.function = icss_iep_sync0_work;
+ }
+
+ iep_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(iep_clk))
+ return PTR_ERR(iep_clk);
+
+ iep->refclk_freq = clk_get_rate(iep_clk);
+
+ iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */
+ if (iep->def_inc > IEP_MAX_DEF_INC) {
+ dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
+ iep->def_inc);
+ return -EINVAL;
+ }
+
+ iep->plat_data = of_device_get_match_data(dev);
+ if (!iep->plat_data)
+ return -EINVAL;
+
+ iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
+ if (IS_ERR(iep->map)) {
+ dev_err(dev, "Failed to create regmap for IEP %ld\n",
+ PTR_ERR(iep->map));
+ return PTR_ERR(iep->map);
+ }
+
+ iep->ptp_info = icss_iep_ptp_info;
+ mutex_init(&iep->ptp_clk_mutex);
+ spin_lock_init(&iep->irq_lock);
+ dev_set_drvdata(dev, iep);
+ icss_iep_disable(iep);
+
+ return 0;
+}
+
+static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static int icss_iep_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct icss_iep *iep = context;
+
+ writel(val, iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static int icss_iep_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct icss_iep *iep = context;
+
+ *val = readl(iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static struct regmap_config am654_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am654_icss_iep_valid_reg,
+ .readable_reg = am654_icss_iep_valid_reg,
+ .fast_io = 1,
+};
+
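Note that this regmap is indexed by the ICSS_IEP_*_REG enum values rather than by MMIO offsets: the reg_read/reg_write callbacks translate the enum index through plat_data->reg_offs, so the same regmap call works across the different register layouts in the tables below. For instance:

/* Sketch: one virtual register index, two physical offsets */
u32 val;

regmap_read(iep->map, ICSS_IEP_COUNT_REG0, &val);
/* -> readl(base + 0x10) on AM654/AM57xx, readl(base + 0x0C) on AM335x */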
+static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
+ ICSS_IEP_SHADOW_MODE_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG:
+ case ICSS_IEP_CAP6_RISE_REG0:
+ case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0:
+ case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config am335x_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am335x_icss_iep_valid_reg,
+ .readable_reg = am335x_icss_iep_valid_reg,
+};
+
+static const struct icss_iep_plat_data am335x_icss_iep_plat_data = {
+ .flags = 0,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_COUNT_REG0] = 0x0C,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x10,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x14,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x30,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x38,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x40,
+ [ICSS_IEP_CMP_STAT_REG] = 0x44,
+ [ICSS_IEP_CMP0_REG0] = 0x48,
+
+ [ICSS_IEP_CMP8_REG0] = 0x88,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x100,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x108,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x10C,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x118,
+ [ICSS_IEP_SYNC_START_REG] = 0x11C,
+ },
+ .config = &am335x_icss_iep_regmap_config,
+};
+
+static const struct of_device_id icss_iep_of_match[] = {
+ {
+ .compatible = "ti,am654-icss-iep",
+ .data = &am654_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am5728-icss-iep",
+ .data = &am57xx_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am3356-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, icss_iep_of_match);
+
+static struct platform_driver icss_iep_driver = {
+ .driver = {
+ .name = "icss-iep",
+ .of_match_table = of_match_ptr(icss_iep_of_match),
+ },
+ .probe = icss_iep_probe,
+};
+module_platform_driver(icss_iep_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI ICSS IEP driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
diff --git a/drivers/net/ethernet/ti/icss_iep.h b/drivers/net/ethernet/ti/icss_iep.h
new file mode 100644
index 000000000000..22bee0ad4565
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_iep.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSS_IEP_H
+#define __NET_TI_ICSS_IEP_H
+
+#include <linux/mutex.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/regmap.h>
+
+struct icss_iep;
+
+/* Firmware specific clock operations */
+struct icss_iep_clockops {
+ void (*settime)(void *clockops_data, u64 ns);
+ void (*adjtime)(void *clockops_data, s64 delta);
+ u64 (*gettime)(void *clockops_data, struct ptp_system_timestamp *sts);
+ int (*perout_enable)(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp);
+ int (*extts_enable)(void *clockops_data, u32 index, int on);
+};
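+
+/* Illustrative usage sketch; "my_clockops" and "my_priv" are hypothetical
+ * consumer-side names, not part of this patch. A PRU Ethernet driver would
+ * look up the IEP from a device tree phandle and register its firmware
+ * clock operations:
+ *
+ *	struct icss_iep *iep = icss_iep_get(np);
+ *	int ret = icss_iep_init(iep, &my_clockops, my_priv, cycle_time_ns);
+ */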
+
+struct icss_iep *icss_iep_get(struct device_node *np);
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx);
+void icss_iep_put(struct icss_iep *iep);
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns);
+int icss_iep_exit(struct icss_iep *iep);
+int icss_iep_get_count_low(struct icss_iep *iep);
+int icss_iep_get_count_hi(struct icss_iep *iep);
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep);
+void icss_iep_init_fw(struct icss_iep *iep);
+void icss_iep_exit_fw(struct icss_iep *iep);
+
+#endif /* __NET_TI_ICSS_IEP_H */
diff --git a/drivers/net/ethernet/ti/icss_lre_firmware.h b/drivers/net/ethernet/ti/icss_lre_firmware.h
new file mode 100644
index 000000000000..fd2dc1b12cc9
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_lre_firmware.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#ifndef __ICSS_LRE_FIRMWARE_H
+#define __ICSS_LRE_FIRMWARE_H
+
+#define ICSS_LRE_TAG_RCT_SIZE 6 /* HSR tag or PRP RCT size */
+
+#define ICSS_LRE_HSR_MODE 0x1E76
+#define ICSS_LRE_MODEH 0x01
+
+/* PRU0 DMEM */
+#define ICSS_LRE_DBG_START 0x1E00
+
+#define ICSS_LRE_DUPLICATE_HOST_TABLE 0x0200
+
+/* PRU1 DMEM */
+#define ICSS_LRE_DUPLICATE_PORT_TABLE_PRU0 0x0200
+#define ICSS_LRE_DUPLICATE_PORT_TABLE_PRU1 0x0E00
+
+/* Size and setup (N and M) of duplicate host table */
+#define ICSS_LRE_DUPLICATE_HOST_TABLE_SIZE 0x1C08
+/* Size and setup (N and M) of duplicate port table (HSR Only) */
+#define ICSS_LRE_DUPLICATE_PORT_TABLE_SIZE 0x1C1C
+/* Time after which an entry is removed from the dup table (10ms resolution) */
+#define ICSS_LRE_DUPLI_FORGET_TIME 0x1C24
+/* Time interval to check the port duplicate table */
+#define ICSS_LRE_DUPLI_PORT_CHECK_RESO 0x1C2C
+/* Time interval to check the host duplicate table */
+#define ICSS_LRE_DUPLI_HOST_CHECK_RESO 0x1C30
+/* NodeTable | Host | Port */
+#define ICSS_LRE_HOST_TIMER_CHECK_FLAGS 0x1C38
+/* Arbitration flag for the host duplicate table */
+#define ICSS_LRE_HOST_DUPLICATE_ARBITRATION 0x1C3C
+/* Supervision address in LRE */
+#define ICSS_LRE_SUP_ADDR 0x1C4C
+#define ICSS_LRE_SUP_ADDR_LOW 0x1C50
+
+/* Time in TimeTicks (1/100s) */
+#define ICSS_LRE_DUPLICATE_FORGET_TIME_400_MS 40
+/* Time in TimeTicks (1/100s) */
+#define ICSS_LRE_NODE_FORGET_TIME_60000_MS 6000
+#define ICSS_LRE_MAX_FORGET_TIME 0xffdf
+
+#define ICSS_LRE_DUPLICATE_PORT_TABLE_DMEM_SIZE 0x0C00
+#define ICSS_LRE_DUPLICATE_HOST_TABLE_DMEM_SIZE 0x1800
+#define ICSS_LRE_STATS_DMEM_SIZE 0x0088
+#define ICSS_LRE_DEBUG_COUNTER_DMEM_SIZE 0x0050
+
+#define ICSS_LRE_DUPLICATE_HOST_TABLE_SIZE_INIT 0x800004 /* N = 128, M = 4 */
+#define ICSS_LRE_DUPLICATE_PORT_TABLE_SIZE_INIT 0x400004 /* N = 64, M = 4 */
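+/* Both initializers above are consistent with N packed into the upper
+ * 16 bits and M into the low byte: (128 << 16) | 4 = 0x800004 and
+ * (64 << 16) | 4 = 0x400004.
+ */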
+#define ICSS_LRE_MASTER_SLAVE_BUSY_BITS_CLEAR 0x0
+#define ICSS_LRE_TABLE_CHECK_RESOLUTION_10_MS 0xA
+#define ICSS_LRE_SUP_ADDRESS_INIT_OCTETS_HIGH 0x4E1501 /* 01-15-4E-00- */
+#define ICSS_LRE_SUP_ADDRESS_INIT_OCTETS_LOW 0x1 /* -01-00 */
+
+/* SHARED RAM */
+
+/* 8 bytes of VLAN PCP to RX QUEUE MAPPING */
+#define ICSS_LRE_QUEUE_2_PCP_MAP_OFFSET 0x120
+#define ICSS_LRE_START 0x140
+
+/* Number of frames successfully sent over port A/B that are HSR/PRP tagged */
+
+#define ICSS_LRE_CNT_TX_A (ICSS_LRE_START + 4)
+#define ICSS_LRE_DUPLICATE_DISCARD (ICSS_LRE_START + 104)
+#define ICSS_LRE_TRANSPARENT_RECEPTION (ICSS_LRE_START + 108)
+#define ICSS_LRE_CNT_NODES (ICSS_LRE_START + 52)
+
+/* SRAM */
+
+#define ICSS_LRE_IEC62439_CONST_DUPLICATE_ACCEPT 0x01
+#define ICSS_LRE_IEC62439_CONST_DUPLICATE_DISCARD 0x02
+#define ICSS_LRE_IEC62439_CONST_TRANSP_RECEPTION_REMOVE_RCT 0x01
+#define ICSS_LRE_IEC62439_CONST_TRANSP_RECEPTION_PASS_RCT 0x02
+
+/* Enable/disable interrupts for high/low priority instead of per port.
+ * 0 = disabled (default) 1 = enabled
+ */
+#define ICSS_LRE_PRIORITY_INTRS_STATUS_OFFSET 0x1FAA
+/* Enable/disable timestamping of packets. 0 = disabled (default) 1 = enabled */
+#define ICSS_LRE_TIMESTAMP_PKTS_STATUS_OFFSET 0x1FAB
+#define ICSS_LRE_TIMESTAMP_ARRAY_OFFSET 0xC200
+
+/* HOST_TIMER_CHECK_FLAGS bits */
+#define ICSS_LRE_HOST_TIMER_NODE_TABLE_CHECK_BIT BIT(0)
+#define ICSS_LRE_HOST_TIMER_NODE_TABLE_CLEAR_BIT BIT(4)
+#define ICSS_LRE_HOST_TIMER_HOST_TABLE_CHECK_BIT BIT(8)
+#define ICSS_LRE_HOST_TIMER_P1_TABLE_CHECK_BIT BIT(16)
+#define ICSS_LRE_HOST_TIMER_P2_TABLE_CHECK_BIT BIT(24)
+#define ICSS_LRE_HOST_TIMER_PORT_TABLE_CHECK_BITS \
+ (ICSS_LRE_HOST_TIMER_P1_TABLE_CHECK_BIT | \
+ ICSS_LRE_HOST_TIMER_P2_TABLE_CHECK_BIT)
+
+/* PRU1 DMEM */
+#define ICSS_LRE_V2_1_HASH_MASK 0xFF
+#define ICSS_LRE_V2_1_INDEX_ARRAY_NT 0x3000
+#define ICSS_LRE_V2_1_BIN_ARRAY \
+ (ICSS_LRE_V2_1_INDEX_ARRAY_NT + \
+ (ICSS_LRE_V2_1_INDEX_TBL_MAX_ENTRIES * 6))
+#define ICSS_LRE_V2_1_NODE_TABLE_NEW \
+ (ICSS_LRE_V2_1_BIN_ARRAY + \
+ (ICSS_LRE_V2_1_BIN_TBL_MAX_ENTRIES * 8))
+#define ICSS_LRE_V2_1_INDEX_ARRAY_LOC PRUETH_MEM_SHARED_RAM
+#define ICSS_LRE_V2_1_BIN_ARRAY_LOC PRUETH_MEM_SHARED_RAM
+#define ICSS_LRE_V2_1_NODE_TABLE_LOC PRUETH_MEM_SHARED_RAM
+#define ICSS_LRE_V2_1_INDEX_TBL_MAX_ENTRIES 256
+#define ICSS_LRE_V2_1_BIN_TBL_MAX_ENTRIES 256
+#define ICSS_LRE_V2_1_NODE_TBL_MAX_ENTRIES 256
+
+#define ICSS_LRE_NODE_FREE 0x10
+#define ICSS_LRE_NODE_TAKEN 0x01
+#define ICSS_LRE_NT_REM_NODE_TYPE_MASK 0x1F
+#define ICSS_LRE_NT_REM_NODE_TYPE_SHIFT 0x00
+
+#define ICSS_LRE_NT_REM_NODE_TYPE_SANA 0x01
+#define ICSS_LRE_NT_REM_NODE_TYPE_SANB 0x02
+#define ICSS_LRE_NT_REM_NODE_TYPE_SANAB 0x03
+#define ICSS_LRE_NT_REM_NODE_TYPE_DAN 0x04
+#define ICSS_LRE_NT_REM_NODE_TYPE_REDBOX 0x08
+#define ICSS_LRE_NT_REM_NODE_TYPE_VDAN 0x10
+
+#define ICSS_LRE_NT_REM_NODE_HSR_BIT 0x20 /* if set node is HSR */
+
+#define ICSS_LRE_NT_REM_NODE_DUP_MASK 0xC0
+#define ICSS_LRE_NT_REM_NODE_DUP_SHIFT 0x06
+
+/* Node ent duplicate type: DupAccept */
+#define ICSS_LRE_NT_REM_NODE_DUP_ACCEPT 0x40
+/* Node ent duplicate type: DupDiscard */
+#define ICSS_LRE_NT_REM_NODE_DUP_DISCARD 0x80
+
+#endif /* __ICSS_LRE_FIRMWARE_H */
diff --git a/drivers/net/ethernet/ti/icss_mii_rt.h b/drivers/net/ethernet/ti/icss_mii_rt.h
new file mode 100644
index 000000000000..1184a5289ebf
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_mii_rt.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PRU-ICSS MII_RT register definitions
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_PRUSS_MII_RT_H__
+#define __NET_PRUSS_MII_RT_H__
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+#include "icss_lre_firmware.h"
+
+/* PRUSS_MII_RT Registers */
+#define PRUSS_MII_RT_RXCFG0 0x0
+#define PRUSS_MII_RT_RXCFG1 0x4
+#define PRUSS_MII_RT_TXCFG0 0x10
+#define PRUSS_MII_RT_TXCFG1 0x14
+#define PRUSS_MII_RT_TX_CRC0 0x20
+#define PRUSS_MII_RT_TX_CRC1 0x24
+#define PRUSS_MII_RT_TX_IPG0 0x30
+#define PRUSS_MII_RT_TX_IPG1 0x34
+#define PRUSS_MII_RT_PRS0 0x38
+#define PRUSS_MII_RT_PRS1 0x3c
+#define PRUSS_MII_RT_RX_FRMS0 0x40
+#define PRUSS_MII_RT_RX_FRMS1 0x44
+#define PRUSS_MII_RT_RX_PCNT0 0x48
+#define PRUSS_MII_RT_RX_PCNT1 0x4c
+#define PRUSS_MII_RT_RX_ERR0 0x50
+#define PRUSS_MII_RT_RX_ERR1 0x54
+
+/* PRUSS_MII_RT_RXCFG0/1 bits */
+#define PRUSS_MII_RT_RXCFG_RX_ENABLE BIT(0)
+#define PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS BIT(1)
+#define PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE BIT(2)
+#define PRUSS_MII_RT_RXCFG_RX_MUX_SEL BIT(3)
+#define PRUSS_MII_RT_RXCFG_RX_L2_EN BIT(4)
+#define PRUSS_MII_RT_RXCFG_RX_BYTE_SWAP BIT(5)
+#define PRUSS_MII_RT_RXCFG_RX_AUTO_FWD_PRE BIT(6)
+#define PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS BIT(9)
+
+/* PRUSS_MII_RT_TXCFG0/1 bits */
+#define PRUSS_MII_RT_TXCFG_TX_ENABLE BIT(0)
+#define PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE BIT(1)
+#define PRUSS_MII_RT_TXCFG_TX_EN_MODE BIT(2)
+#define PRUSS_MII_RT_TXCFG_TX_BYTE_SWAP BIT(3)
+#define PRUSS_MII_RT_TXCFG_TX_MUX_SEL BIT(8)
+#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_SEQUENCE BIT(9)
+#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_ESC_ERR BIT(10)
+#define PRUSS_MII_RT_TXCFG_TX_32_MODE_EN BIT(11)
+#define PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN BIT(12) /* SR2.0 onwards */
+
+#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT 16
+#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_MASK GENMASK(25, 16)
+
+#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT 28
+#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK GENMASK(30, 28)
+
+/* PRUSS_MII_RT_TX_IPG0/1 bits */
+#define PRUSS_MII_RT_TX_IPG_IPG_SHIFT 0
+#define PRUSS_MII_RT_TX_IPG_IPG_MASK GENMASK(9, 0)
+
+/* PRUSS_MII_RT_PRS0/1 bits */
+#define PRUSS_MII_RT_PRS_COL BIT(0)
+#define PRUSS_MII_RT_PRS_CRS BIT(1)
+
+/* PRUSS_MII_RT_RX_FRMS0/1 bits */
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT 0
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK GENMASK(15, 0)
+
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT 16
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK GENMASK(31, 16)
+
+/* Min/Max in MII_RT_RX_FRMS */
+/* For EMAC and Switch */
+#define PRUSS_MII_RT_RX_FRMS_MAX (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM (64)
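+/* i.e. PRUSS_MII_RT_RX_FRMS_MAX works out to 1518 + 4 = 1522 bytes */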
+
+/* for HSR and PRP */
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE (PRUSS_MII_RT_RX_FRMS_MAX + \
+ ICSS_LRE_TAG_RCT_SIZE)
+/* PRUSS_MII_RT_RX_PCNT0/1 bits */
+#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_SHIFT 0
+#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_MASK GENMASK(3, 0)
+
+#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_SHIFT 4
+#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_MASK GENMASK(7, 4)
+
+/* PRUSS_MII_RT_RX_ERR0/1 bits */
+#define PRUSS_MII_RT_RX_ERR_MIN_PCNT_ERR BIT(0)
+#define PRUSS_MII_RT_RX_ERR_MAX_PCNT_ERR BIT(1)
+#define PRUSS_MII_RT_RX_ERR_MIN_FRM_ERR BIT(2)
+#define PRUSS_MII_RT_RX_ERR_MAX_FRM_ERR BIT(3)
+
+#define ICSSG_CFG_OFFSET 0
+#define RGMII_CFG_OFFSET 4
+
+/* Constant to choose between MII0 and MII1 */
+#define ICSS_MII0 0
+#define ICSS_MII1 1
+
+/* ICSSG_CFG Register bits */
+#define ICSSG_CFG_SGMII_MODE BIT(16)
+#define ICSSG_CFG_TX_PRU_EN BIT(11)
+#define ICSSG_CFG_RX_SFD_TX_SOF_EN BIT(10)
+#define ICSSG_CFG_RTU_PRU_PSI_SHARE_EN BIT(9)
+#define ICSSG_CFG_IEP1_TX_EN BIT(8)
+#define ICSSG_CFG_MII1_MODE GENMASK(6, 5)
+#define ICSSG_CFG_MII1_MODE_SHIFT 5
+#define ICSSG_CFG_MII0_MODE GENMASK(4, 3)
+#define ICSSG_CFG_MII0_MODE_SHIFT 3
+#define ICSSG_CFG_RX_L2_G_EN BIT(2)
+#define ICSSG_CFG_TX_L2_EN BIT(1)
+#define ICSSG_CFG_TX_L1_EN BIT(0)
+
+enum mii_mode {
+ MII_MODE_MII = 0,
+ MII_MODE_RGMII,
+ MII_MODE_SGMII
+};
+
+/* RGMII CFG Register bits */
+#define RGMII_CFG_INBAND_EN_MII0 BIT(16)
+#define RGMII_CFG_GIG_EN_MII0 BIT(17)
+#define RGMII_CFG_INBAND_EN_MII1 BIT(20)
+#define RGMII_CFG_GIG_EN_MII1 BIT(21)
+#define RGMII_CFG_FULL_DUPLEX_MII0 BIT(18)
+#define RGMII_CFG_FULL_DUPLEX_MII1 BIT(22)
+#define RGMII_CFG_SPEED_MII0 GENMASK(2, 1)
+#define RGMII_CFG_SPEED_MII1 GENMASK(6, 5)
+#define RGMII_CFG_SPEED_MII0_SHIFT 1
+#define RGMII_CFG_SPEED_MII1_SHIFT 5
+#define RGMII_CFG_FULLDUPLEX_MII0 BIT(3)
+#define RGMII_CFG_FULLDUPLEX_MII1 BIT(7)
+#define RGMII_CFG_FULLDUPLEX_MII0_SHIFT 3
+#define RGMII_CFG_FULLDUPLEX_MII1_SHIFT 7
+#define RGMII_CFG_SPEED_10M 0
+#define RGMII_CFG_SPEED_100M 1
+#define RGMII_CFG_SPEED_1G 2
+
+struct regmap;
+struct prueth_emac;
+
+void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg);
+void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu);
+void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac);
+u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift);
+u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii);
+u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii);
+void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if);
+
+#endif /* __NET_PRUSS_MII_RT_H__ */
diff --git a/drivers/net/ethernet/ti/icss_switch.h b/drivers/net/ethernet/ti/icss_switch.h
new file mode 100644
index 000000000000..7aa760e3702d
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_switch.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __ICSS_SWITCH_H
+#define __ICSS_SWITCH_H
+
+/* Basic Switch Parameters
+ * Used to auto compute offset addresses on L3 OCMC RAM. Do not modify these
+ * without changing firmware accordingly
+ */
+#define SWITCH_BUFFER_SIZE (64 * 1024) /* L3 buffer */
+#define ICSS_BLOCK_SIZE 32 /* data bytes per BD */
+#define BD_SIZE 4 /* byte buffer descriptor */
+#define NUM_QUEUES 4 /* Queues on Port 0/1/2 */
+
+#define PORT_LINK_MASK 0x1
+#define PORT_IS_HD_MASK 0x2
+
+/* Physical Port queue size (number of BDs). Same for both ports */
+#define QUEUE_1_SIZE 97 /* Network Management high */
+#define QUEUE_2_SIZE 97 /* Network Management low */
+#define QUEUE_3_SIZE 97 /* Protocol specific */
+#define QUEUE_4_SIZE 97 /* NRT (IP,ARP, ICMP) */
+
+/* Host queue size (number of BDs). Each BD points to data buffer of 32 bytes.
+ * HOST PORT QUEUES can buffer up to 4 full sized frames per queue
+ */
+#define HOST_QUEUE_1_SIZE 194 /* Protocol and VLAN priority 7 & 6 */
+#define HOST_QUEUE_2_SIZE 194 /* Protocol mid */
+#define HOST_QUEUE_3_SIZE 194 /* Protocol low */
+#define HOST_QUEUE_4_SIZE 194 /* NRT (IP, ARP, ICMP) */
+
+#define COL_QUEUE_SIZE 0
+
+/* NRT Buffer descriptor definition
+ * Each buffer descriptor points to a block of at most 32 bytes and is 32 bits
+ * in size so that it can be operated on atomically.
+ * The PRU can address memory bytewise.
+ * Definition of 32 bit descriptor is as follows
+ *
+ * Bits Name Meaning
+ * =============================================================================
+ * 0..7 Index points to index in buffer queue, max 256 x 32
+ * byte blocks can be addressed
+ * 6 LookupSuccess For switch, FDB lookup was successful (source
+ * MAC address found in FDB).
+ * For RED, NodeTable lookup was successful.
+ * 7 Flood Packet should be flooded (destination MAC
+ * address not found in FDB). For switch only.
+ * 8..12 Block_length number of valid bytes in this specific block.
+ * Will be <=32 bytes on last block of packet
+ * 13 More "More" bit indicating that there are more blocks
+ * 14 Shadow indicates that "index" is pointing into shadow
+ * buffer
+ * 15 TimeStamp indicates that this packet has time stamp in
+ * separate buffer - only needed if PTCP runs on
+ * host
+ * 16..17 Port different meaning for ingress and egress,
+ * Ingress: Port = 0 indicates phy port 1 and
+ * Port = 1 indicates phy port 2.
+ * Egress: 0 sends on phy port 1 and 1 sends on
+ * phy port 2. Port = 2 goes over MAC table
+ * look-up
+ * 18..28 Length 11 bit of total packet length which is put into
+ * first BD only so that host access only one BD
+ * 29 VlanTag indicates that packet has Length/Type field of
+ * 0x8100 with VLAN tag in following byte
+ * 30 Broadcast indicates that packet goes out on both physical
+ * ports, there will be two bd but only one buffer
+ * 31 Error indicates there was an error in the packet
+ */
+#define PRUETH_BD_START_FLAG_MASK BIT(0)
+#define PRUETH_BD_START_FLAG_SHIFT 0
+
+#define PRUETH_BD_HSR_FRAME_MASK BIT(4)
+#define PRUETH_BD_HSR_FRAME_SHIFT 4
+
+#define PRUETH_BD_SUP_HSR_FRAME_MASK BIT(5)
+#define PRUETH_BD_SUP_HSR_FRAME_SHIFT 5
+
+#define PRUETH_BD_LOOKUP_SUCCESS_MASK BIT(6)
+#define PRUETH_BD_LOOKUP_SUCCESS_SHIFT 6
+
+#define PRUETH_BD_SW_FLOOD_MASK BIT(7)
+#define PRUETH_BD_SW_FLOOD_SHIFT 7
+
+#define PRUETH_BD_SHADOW_MASK BIT(14)
+#define PRUETH_BD_SHADOW_SHIFT 14
+
+#define PRUETH_BD_TIMESTAMP_MASK BIT(15)
+#define PRUETH_BD_TIMESTAMP_SHIFT 15
+
+#define PRUETH_BD_PORT_MASK GENMASK(17, 16)
+#define PRUETH_BD_PORT_SHIFT 16
+
+#define PRUETH_BD_LENGTH_MASK GENMASK(28, 18)
+#define PRUETH_BD_LENGTH_SHIFT 18
+
+#define PRUETH_BD_BROADCAST_MASK BIT(30)
+#define PRUETH_BD_BROADCAST_SHIFT 30
+
+#define PRUETH_BD_ERROR_MASK BIT(31)
+#define PRUETH_BD_ERROR_SHIFT 31
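+
+/* Illustrative sketch of unpacking a BD word with the masks above; a
+ * hypothetical helper, not part of the firmware interface:
+ *
+ *	u32 bd = readl(bd_ptr);
+ *	u16 len = (bd & PRUETH_BD_LENGTH_MASK) >> PRUETH_BD_LENGTH_SHIFT;
+ *	u8 port = (bd & PRUETH_BD_PORT_MASK) >> PRUETH_BD_PORT_SHIFT;
+ */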
+
+/* The following offsets indicate which sections of the memory are used
+ * for EMAC internal tasks
+ */
+#define DRAM_START_OFFSET 0x1e98
+#define SRAM_START_OFFSET 0x400
+
+/* General Purpose Statistics
+ * These are present on both PRU0 and PRU1 DRAM
+ */
+/* base statistics offset */
+#define STATISTICS_OFFSET 0x1f00
+#define STAT_SIZE 0x98
+
+/* The following offsets indicate which sections of the memory are used
+ * for switch internal tasks
+ */
+#define SWITCH_SPECIFIC_DRAM0_START_SIZE 0x100
+#define SWITCH_SPECIFIC_DRAM0_START_OFFSET 0x1F00
+
+#define SWITCH_SPECIFIC_DRAM1_START_SIZE 0x300
+#define SWITCH_SPECIFIC_DRAM1_START_OFFSET 0x1D00
+
+/* Offset for storing
+ * 1. Storm Prevention Params
+ * 2. PHY Speed Offset
+ * 3. Port Status Offset
+ * These are present on both PRU0 and PRU1
+ */
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_BC (STATISTICS_OFFSET + STAT_SIZE)
+/* 4 bytes */
+#define PHY_SPEED_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 4)
+/* 1 byte */
+#define PORT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 8)
+/* 1 byte */
+#define COLLISION_COUNTER (STATISTICS_OFFSET + STAT_SIZE + 9)
+/* 4 bytes */
+#define RX_PKT_SIZE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 10)
+/* 4 bytes */
+#define PORT_CONTROL_ADDR (STATISTICS_OFFSET + STAT_SIZE + 14)
+/* 6 bytes */
+#define PORT_MAC_ADDR (STATISTICS_OFFSET + STAT_SIZE + 18)
+/* 1 byte */
+#define RX_INT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 24)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_MC (STATISTICS_OFFSET + STAT_SIZE + 25)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_UC (STATISTICS_OFFSET + STAT_SIZE + 29)
+/* 4 bytes ? */
+#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+
+/* DRAM1 Offsets for Switch */
+/* 4 queue descriptors for port 0 (host receive) */
+#define P0_QUEUE_DESC_OFFSET 0x1E7C
+#define P1_QUEUE_DESC_OFFSET 0x1E9C
+#define P2_QUEUE_DESC_OFFSET 0x1EBC
+/* collision descriptor of port 0 */
+#define P0_COL_QUEUE_DESC_OFFSET 0x1E64
+#define P1_COL_QUEUE_DESC_OFFSET 0x1E6C
+#define P2_COL_QUEUE_DESC_OFFSET 0x1E74
+/* Collision Status Register
+ * P0: bit 0 is pending flag, bits 1..2 indicate which queue,
+ * P1: bit 8 is pending flag, 9..10 is queue number
+ * P2: bit 16 is pending flag, 17..18 is queue number, remaining bits are 0.
+ */
+#define COLLISION_STATUS_ADDR 0x1E60
+
+#define INTERFACE_MAC_ADDR 0x1E58
+#define P2_MAC_ADDR 0x1E50
+#define P1_MAC_ADDR 0x1E48
+
+#define QUEUE_SIZE_ADDR 0x1E30
+#define QUEUE_OFFSET_ADDR 0x1E18
+#define QUEUE_DESCRIPTOR_OFFSET_ADDR 0x1E00
+
+#define COL_RX_CONTEXT_P2_OFFSET_ADDR (COL_RX_CONTEXT_P1_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P1_OFFSET_ADDR (COL_RX_CONTEXT_P0_OFFSET_ADDR + 12)
+#define COL_RX_CONTEXT_P0_OFFSET_ADDR (P2_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 2 Rx Context */
+#define P2_Q4_RX_CONTEXT_OFFSET (P2_Q3_RX_CONTEXT_OFFSET + 8)
+#define P2_Q3_RX_CONTEXT_OFFSET (P2_Q2_RX_CONTEXT_OFFSET + 8)
+#define P2_Q2_RX_CONTEXT_OFFSET (P2_Q1_RX_CONTEXT_OFFSET + 8)
+#define P2_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Port 1 Rx Context */
+#define P1_Q4_RX_CONTEXT_OFFSET (P1_Q3_RX_CONTEXT_OFFSET + 8)
+#define P1_Q3_RX_CONTEXT_OFFSET (P1_Q2_RX_CONTEXT_OFFSET + 8)
+#define P1_Q2_RX_CONTEXT_OFFSET (P1_Q1_RX_CONTEXT_OFFSET + 8)
+#define P1_Q1_RX_CONTEXT_OFFSET (RX_CONTEXT_P1_Q1_OFFSET_ADDR)
+#define RX_CONTEXT_P1_Q1_OFFSET_ADDR (P0_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define P0_Q4_RX_CONTEXT_OFFSET (P0_Q3_RX_CONTEXT_OFFSET + 8)
+#define P0_Q3_RX_CONTEXT_OFFSET (P0_Q2_RX_CONTEXT_OFFSET + 8)
+#define P0_Q2_RX_CONTEXT_OFFSET (P0_Q1_RX_CONTEXT_OFFSET + 8)
+#define P0_Q1_RX_CONTEXT_OFFSET RX_CONTEXT_P0_Q1_OFFSET_ADDR
+#define RX_CONTEXT_P0_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR + 8)
+
+/* Port 2 Tx Collision Context */
+#define COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR (COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR + 8)
+/* Port 1 Tx Collision Context */
+#define COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR (P2_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 2 */
+#define P2_Q4_TX_CONTEXT_OFFSET (P2_Q3_TX_CONTEXT_OFFSET + 8)
+#define P2_Q3_TX_CONTEXT_OFFSET (P2_Q2_TX_CONTEXT_OFFSET + 8)
+#define P2_Q2_TX_CONTEXT_OFFSET (P2_Q1_TX_CONTEXT_OFFSET + 8)
+#define P2_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P2_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P2_Q1_OFFSET_ADDR (P1_Q4_TX_CONTEXT_OFFSET + 8)
+
+/* Port 1 */
+#define P1_Q4_TX_CONTEXT_OFFSET (P1_Q3_TX_CONTEXT_OFFSET + 8)
+#define P1_Q3_TX_CONTEXT_OFFSET (P1_Q2_TX_CONTEXT_OFFSET + 8)
+#define P1_Q2_TX_CONTEXT_OFFSET (P1_Q1_TX_CONTEXT_OFFSET + 8)
+#define P1_Q1_TX_CONTEXT_OFFSET TX_CONTEXT_P1_Q1_OFFSET_ADDR
+#define TX_CONTEXT_P1_Q1_OFFSET_ADDR SWITCH_SPECIFIC_DRAM1_START_OFFSET
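+
+/* The context offsets above are chained: each block is defined relative to
+ * the previous one, so the whole layout bottoms out at
+ * SWITCH_SPECIFIC_DRAM1_START_OFFSET and is resolved at compile time.
+ */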
+
+/* Shared RAM Offsets for Switch */
+/* NSP (Network Storm Prevention) timer re-uses NT timer */
+#define PRUETH_NSP_CREDIT_SHIFT 8
+#define PRUETH_NSP_ENABLE BIT(0)
+
+/* DRAM Offsets for EMAC
+ * Present on Both DRAM0 and DRAM1
+ */
+
+/* 4 queue descriptors for port tx = 32 bytes */
+#define TX_CONTEXT_Q1_OFFSET_ADDR (PORT_QUEUE_DESC_OFFSET + 32)
+#define PORT_QUEUE_DESC_OFFSET (ICSS_EMAC_TTS_CYC_TX_SOF + 8)
+
+/* EMAC Time Triggered Send Offsets */
+#define ICSS_EMAC_TTS_CYC_TX_SOF (ICSS_EMAC_TTS_PREV_TX_SOF + 8)
+#define ICSS_EMAC_TTS_PREV_TX_SOF (ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET + 4)
+#define ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET (ICSS_EMAC_TTS_STATUS_OFFSET + 4)
+#define ICSS_EMAC_TTS_STATUS_OFFSET (ICSS_EMAC_TTS_CFG_TIME_OFFSET + 4)
+#define ICSS_EMAC_TTS_CFG_TIME_OFFSET (ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET + 4)
+#define ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET (ICSS_EMAC_TTS_CYCLE_START_OFFSET + 8)
+#define ICSS_EMAC_TTS_CYCLE_START_OFFSET ICSS_EMAC_TTS_BASE_OFFSET
+#define ICSS_EMAC_TTS_BASE_OFFSET DRAM_START_OFFSET
+
+/* Shared RAM offsets for EMAC */
+
+/* Queue Descriptors */
+
+/* 4 queue descriptors for port 0 (host receive). 32 bytes */
+#define HOST_QUEUE_DESC_OFFSET (HOST_QUEUE_SIZE_ADDR + 16)
+
+/* table offset for queue size:
+ * 3 ports * 4 Queues * 1 byte offset = 12 bytes
+ */
+#define HOST_QUEUE_SIZE_ADDR (HOST_QUEUE_OFFSET_ADDR + 8)
+/* table offset for queue:
+ * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_OFFSET_ADDR (HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR + 8)
+/* table offset for Host queue descriptors:
+ * 1 ports * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR (HOST_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define HOST_Q4_RX_CONTEXT_OFFSET (HOST_Q3_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q3_RX_CONTEXT_OFFSET (HOST_Q2_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q2_RX_CONTEXT_OFFSET (HOST_Q1_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q1_RX_CONTEXT_OFFSET (EMAC_PROMISCUOUS_MODE_OFFSET + 4)
+
+/* Promiscuous mode control */
+#define EMAC_P1_PROMISCUOUS_BIT BIT(0)
+#define EMAC_P2_PROMISCUOUS_BIT BIT(1)
+#define EMAC_PROMISCUOUS_MODE_OFFSET (EMAC_RESERVED + 4)
+#define EMAC_RESERVED EOF_48K_BUFFER_BD
+
+/* allow for max 48k buffer which spans the descriptors up to 0x1800 (6kB) */
+#define EOF_48K_BUFFER_BD (P0_BUFFER_DESC_OFFSET + HOST_BD_SIZE + PORT_BD_SIZE)
+
+#define HOST_BD_SIZE ((HOST_QUEUE_1_SIZE + HOST_QUEUE_2_SIZE + HOST_QUEUE_3_SIZE + HOST_QUEUE_4_SIZE) * BD_SIZE)
+#define PORT_BD_SIZE ((QUEUE_1_SIZE + QUEUE_2_SIZE + QUEUE_3_SIZE + QUEUE_4_SIZE) * 2 * BD_SIZE)
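+
+/* With the queue sizes above, HOST_BD_SIZE = (4 * 194) * 4 = 3104 bytes and
+ * PORT_BD_SIZE = (4 * 97) * 2 * 4 = 3104 bytes, i.e. 776 BDs each.
+ */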
+
+#define END_OF_BD_POOL (P2_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P2_Q4_BD_OFFSET (P2_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P2_Q3_BD_OFFSET (P2_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P2_Q2_BD_OFFSET (P2_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P2_Q1_BD_OFFSET (P1_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P1_Q4_BD_OFFSET (P1_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P1_Q3_BD_OFFSET (P1_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P1_Q2_BD_OFFSET (P1_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P1_Q1_BD_OFFSET (P0_Q4_BD_OFFSET + HOST_QUEUE_4_SIZE * BD_SIZE)
+#define P0_Q4_BD_OFFSET (P0_Q3_BD_OFFSET + HOST_QUEUE_3_SIZE * BD_SIZE)
+#define P0_Q3_BD_OFFSET (P0_Q2_BD_OFFSET + HOST_QUEUE_2_SIZE * BD_SIZE)
+#define P0_Q2_BD_OFFSET (P0_Q1_BD_OFFSET + HOST_QUEUE_1_SIZE * BD_SIZE)
+#define P0_Q1_BD_OFFSET P0_BUFFER_DESC_OFFSET
+#define P0_BUFFER_DESC_OFFSET SRAM_START_OFFSET
+
+/* Memory Usage of L3 OCMC RAM */
+
+/* L3 64KB Memory - mainly buffer Pool */
+#define END_OF_BUFFER_POOL (P2_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * ICSS_BLOCK_SIZE)
+#define P2_Q4_BUFFER_OFFSET (P2_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * ICSS_BLOCK_SIZE)
+#define P2_Q3_BUFFER_OFFSET (P2_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * ICSS_BLOCK_SIZE)
+#define P2_Q2_BUFFER_OFFSET (P2_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * ICSS_BLOCK_SIZE)
+#define P2_Q1_BUFFER_OFFSET (P1_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * ICSS_BLOCK_SIZE)
+#define P1_Q4_BUFFER_OFFSET (P1_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * ICSS_BLOCK_SIZE)
+#define P1_Q3_BUFFER_OFFSET (P1_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * ICSS_BLOCK_SIZE)
+#define P1_Q2_BUFFER_OFFSET (P1_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * ICSS_BLOCK_SIZE)
+#define P1_Q1_BUFFER_OFFSET (P0_Q4_BUFFER_OFFSET + HOST_QUEUE_4_SIZE * ICSS_BLOCK_SIZE)
+#define P0_Q4_BUFFER_OFFSET (P0_Q3_BUFFER_OFFSET + HOST_QUEUE_3_SIZE * ICSS_BLOCK_SIZE)
+#define P0_Q3_BUFFER_OFFSET (P0_Q2_BUFFER_OFFSET + HOST_QUEUE_2_SIZE * ICSS_BLOCK_SIZE)
+#define P0_Q2_BUFFER_OFFSET (P0_Q1_BUFFER_OFFSET + HOST_QUEUE_1_SIZE * ICSS_BLOCK_SIZE)
+#define P0_COL_BUFFER_OFFSET 0xEE00
+#define P0_Q1_BUFFER_OFFSET 0x0000
+
+/* The below bit will be set in BD for EMAC mode in the egress
+ * direction and reset for PRP mode
+ */
+#define PRUETH_TX_PRP_EMAC_MODE BIT(0)
+
+/* 1 byte | 0 : Interrupt Pacing disabled | 1 : Interrupt Pacing enabled */
+#define INTR_PAC_STATUS_OFFSET_PRU1 0x1FAE
+/* 1 byte | 0 : Interrupt Pacing disabled | 1 : Interrupt Pacing enabled */
+#define INTR_PAC_STATUS_OFFSET_PRU0 0x1FAF
+
+#define V2_1_FDB_TBL_LOC PRUETH_MEM_SHARED_RAM
+#define V2_1_FDB_TBL_OFFSET 0x2000
+
+#define FDB_INDEX_TBL_MAX_ENTRIES 256
+#define FDB_MAC_TBL_MAX_ENTRIES 256
+
+#endif /* __ICSS_SWITCH_H */
diff --git a/drivers/net/ethernet/ti/icss_vlan_mcast_filter_mmap.h b/drivers/net/ethernet/ti/icss_vlan_mcast_filter_mmap.h
new file mode 100644
index 000000000000..a1a1a1da47e7
--- /dev/null
+++ b/drivers/net/ethernet/ti/icss_vlan_mcast_filter_mmap.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * This file contains the VLAN/Multicast filtering feature memory map
+ *
+ */
+
+#ifndef ICSS_VLAN_MULTICAST_FILTER_MM_H
+#define ICSS_VLAN_MULTICAST_FILTER_MM_H
+
+/* VLAN/Multicast filter defines & offsets, present on both PRU0 and PRU1 DRAM */
+
+/* Feature enable/disable values for multicast filtering */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED 0x01
+
+/* Feature enable/disable values for VLAN filtering */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_DISABLED 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLED 0x01
+
+/* Add/remove multicast mac id for filtering bin */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED 0x01
+#define ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED 0x00
+
+/* Default HASH value for the multicast filtering Mask */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_INIT_VAL 0xFF
+
+/* Size requirements for Multicast filtering feature */
+#define ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES 256
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES 6
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES 1
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Size requirements for VLAN filtering feature : 4096 bits = 512 bytes */
+#define ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES 512
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES 1
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_SIZE_BYTES 4
+
+/* Mask override set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_SET 1
+/* Mask override not set status */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_NOT_SET 0
+/* 6 bytes HASH Mask for the MAC */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET 0xF4
+/* 0 -> multicast filtering disabled | 1 -> multicast filtering enabled */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET (ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET + ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES)
+/* Status indicating if the HASH override is done or not: 0: no, 1: yes */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS (ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET + ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_SIZE_BYTES)
+/* Multicast drop statistics */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET (ICSS_EMAC_FW_MULTICAST_FILTER_OVERRIDE_STATUS + ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OVERRIDE_STATUS_SIZE_BYTES)
+/* Multicast table */
+#define ICSS_EMAC_FW_MULTICAST_FILTER_TABLE (ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET + ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_SIZE_BYTES)
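+/* For reference, the chain above resolves to CTRL = 0xFA,
+ * OVERRIDE_STATUS = 0xFB, DROP_CNT = 0xFC and TABLE = 0x100.
+ */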
+
+/* Multicast filter defines & offsets for LRE
+ */
+#define ICSS_LRE_FW_MULTICAST_TABLE_SEARCH_OP_CONTROL_BIT 0xE0
+/* one byte field :
+ * 0 -> multicast filtering disabled
+ * 1 -> multicast filtering enabled
+ */
+#define ICSS_LRE_FW_MULTICAST_FILTER_MASK 0xE4
+#define ICSS_LRE_FW_MULTICAST_FILTER_TABLE 0x100
+
+/* VLAN table Offsets */
+#define ICSS_EMAC_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET 0xEF
+#define ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_OFFSET (ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET + ICSS_EMAC_FW_VLAN_FILTER_CTRL_SIZE_BYTES)
+
+/* VLAN filter Control Bit maps */
+/* one bit field, bit 0: | 0 : VLAN filter disabled (default), 1: VLAN filter enabled */
+#define ICSS_EMAC_FW_VLAN_FILTER_CTRL_ENABLE_BIT 0
+/* one bit field, bit 1: | 0 : untagged host rcv allowed (default), 1: untagged host rcv not allowed */
+#define ICSS_EMAC_FW_VLAN_FILTER_UNTAG_HOST_RCV_ALLOW_CTRL_BIT 1
+/* one bit field, bit 2: | 0 : priotag host rcv allowed (default), 1: priotag host rcv not allowed */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_HOST_RCV_ALLOW_CTRL_BIT 2
+/* one bit field, bit 3: | 0 : skip sv vlan flow, 1 : take sv vlan flow (not applicable for dual emac) */
+#define ICSS_EMAC_FW_VLAN_FILTER_SV_VLAN_FLOW_HOST_RCV_ALLOW_CTRL_BIT 3
+
+/* VLAN IDs */
+#define ICSS_EMAC_FW_VLAN_FILTER_PRIOTAG_VID 0
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MIN 0x0000
+#define ICSS_EMAC_FW_VLAN_FILTER_VID_MAX 0x0FFF
+
+/* VLAN Filtering Commands */
+#define ICSS_EMAC_FW_VLAN_FILTER_ADD_VLAN_VID_CMD 0x00
+#define ICSS_EMAC_FW_VLAN_FILTER_REMOVE_VLAN_VID_CMD 0x01
+
+/* Switch defines for VLAN/MC filtering */
+/* SRAM
+ * VLAN filter defines & offsets
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_CTRL_BYTE 0x1FE
+/* one bit field | 0 : VLAN filter disabled
+ * | 1 : VLAN filter enabled
+ */
+#define ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR 0x200
+
+#endif /* ICSS_VLAN_MULTICAST_FILTER_MM_H */
diff --git a/drivers/net/ethernet/ti/icssg_classifier.c b/drivers/net/ethernet/ti/icssg_classifier.c
new file mode 100644
index 000000000000..47b13672d723
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_classifier.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#include "icssg_prueth.h"
+
+#define ICSSG_NUM_CLASSIFIERS 16
+#define ICSSG_NUM_FT1_SLOTS 8
+#define ICSSG_NUM_FT3_SLOTS 16
+
+#define ICSSG_NUM_CLASSIFIERS_IN_USE 5
+
+/* Filter 1 - FT1 */
+#define FT1_NUM_SLOTS 8
+#define FT1_SLOT_SIZE 0x10 /* bytes */
+
+/* offsets from FT1 slot base i.e. slot 1 start */
+#define FT1_DA0 0x0
+#define FT1_DA1 0x4
+#define FT1_DA0_MASK 0x8
+#define FT1_DA1_MASK 0xc
+
+#define FT1_N_REG(slice, n, reg) (offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg))
+
+#define FT1_LEN_MASK GENMASK(19, 16)
+#define FT1_LEN_SHIFT 16
+#define FT1_LEN(len) (((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK)
+
+#define FT1_START_MASK GENMASK(14, 0)
+#define FT1_START(start) ((start) & FT1_START_MASK)
+
+#define FT1_MATCH_SLOT(n) (GENMASK(23, 16) & (BIT(n) << 16))
+
+enum ft1_cfg_type {
+ FT1_CFG_TYPE_DISABLED = 0,
+ FT1_CFG_TYPE_EQ,
+ FT1_CFG_TYPE_GT,
+ FT1_CFG_TYPE_LT,
+};
+
+#define FT1_CFG_SHIFT(n) (2 * (n))
+#define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n)))
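+/* Each FT1 slot has a 2-bit type field in the config register, e.g. setting
+ * slot 3 to FT1_CFG_TYPE_EQ updates bits 7..6 (FT1_CFG_MASK(3)).
+ */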
+
+/* Filter 3 - FT3 */
+#define FT3_NUM_SLOTS 16
+#define FT3_SLOT_SIZE 0x20 /* bytes */
+
+/* offsets from FT3 slot n's base */
+#define FT3_START 0
+#define FT3_START_AUTO 0x4
+#define FT3_START_OFFSET 0x8
+#define FT3_JUMP_OFFSET 0xc
+#define FT3_LEN 0x10
+#define FT3_CFG 0x14
+#define FT3_T 0x18
+#define FT3_T_MASK 0x1c
+
+#define FT3_N_REG(slice, n, reg) (offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg))
+
+/* offsets from rx_class n's base */
+#define RX_CLASS_AND_EN 0
+#define RX_CLASS_OR_EN 0x4
+
+#define RX_CLASS_NUM_SLOTS 16
+#define RX_CLASS_EN_SIZE 0x8 /* bytes */
+
+#define RX_CLASS_N_REG(slice, n, reg) (offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg))
+
+/* RX Class Gates */
+#define RX_CLASS_GATES_SIZE 0x4 /* bytes */
+
+#define RX_CLASS_GATES_N_REG(slice, n) \
+ (offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n))
+
+#define RX_CLASS_GATES_ALLOW_MASK BIT(6)
+#define RX_CLASS_GATES_RAW_MASK BIT(5)
+#define RX_CLASS_GATES_PHASE_MASK BIT(4)
+
+/* RX Class traffic data matching bits */
+#define RX_CLASS_FT_UC BIT(31)
+#define RX_CLASS_FT_MC BIT(30)
+#define RX_CLASS_FT_BC BIT(29)
+#define RX_CLASS_FT_FW BIT(28)
+#define RX_CLASS_FT_RCV BIT(27)
+#define RX_CLASS_FT_VLAN BIT(26)
+#define RX_CLASS_FT_DA_P BIT(25)
+#define RX_CLASS_FT_DA_I BIT(24)
+#define RX_CLASS_FT_FT1_MATCH_MASK GENMASK(23, 16)
+#define RX_CLASS_FT_FT1_MATCH_SHIFT 16
+#define RX_CLASS_FT_FT3_MATCH_MASK GENMASK(15, 0)
+#define RX_CLASS_FT_FT3_MATCH_SHIFT 0
+
+#define RX_CLASS_FT_FT1_MATCH(slot) \
+ ((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & RX_CLASS_FT_FT1_MATCH_MASK)
+
+enum rx_class_sel_type {
+ RX_CLASS_SEL_TYPE_OR = 0,
+ RX_CLASS_SEL_TYPE_AND = 1,
+ RX_CLASS_SEL_TYPE_OR_AND_AND = 2,
+ RX_CLASS_SEL_TYPE_OR_OR_AND = 3,
+};
+
+#define RX_CLASS_SEL_SHIFT(n) (2 * (n))
+#define RX_CLASS_SEL_MASK(n) (0x3 << RX_CLASS_SEL_SHIFT((n)))
+
+#define ICSSG_CFG_OFFSET 0
+#define MAC_INTERFACE_0 0x18
+#define MAC_INTERFACE_1 0x1c
+
+#define ICSSG_CFG_RX_L2_G_EN BIT(2)
+
+/* these are register offsets per PRU */
+struct miig_rt_offsets {
+ u32 mac0;
+ u32 mac1;
+ u32 ft1_start_len;
+ u32 ft1_cfg;
+ u32 ft1_slot_base;
+ u32 ft3_slot_base;
+ u32 ft3_p_base;
+ u32 ft_rx_ptr;
+ u32 rx_class_base;
+ u32 rx_class_cfg1;
+ u32 rx_class_cfg2;
+ u32 rx_class_gates_base;
+ u32 rx_green;
+ u32 rx_rate_cfg_base;
+ u32 rx_rate_src_sel0;
+ u32 rx_rate_src_sel1;
+ u32 tx_rate_cfg_base;
+ u32 stat_base;
+ u32 tx_hsr_tag;
+ u32 tx_hsr_seq;
+ u32 tx_vlan_type;
+ u32 tx_vlan_ins;
+};
+
+static struct miig_rt_offsets offs[] = {
+ /* PRU0 */
+ {
+ 0x8,
+ 0xc,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x108,
+ 0x308,
+ 0x408,
+ 0x40c,
+ 0x48c,
+ 0x490,
+ 0x494,
+ 0x4d4,
+ 0x4e4,
+ 0x504,
+ 0x508,
+ 0x50c,
+ 0x54c,
+ 0x63c,
+ 0x640,
+ 0x644,
+ 0x648,
+ },
+ /* PRU1 */
+ {
+ 0x10,
+ 0x14,
+ 0x64c,
+ 0x650,
+ 0x654,
+ 0x6d4,
+ 0x8d4,
+ 0x9d4,
+ 0x9d8,
+ 0xa58,
+ 0xa5c,
+ 0xa60,
+ 0xaa0,
+ 0xab0,
+ 0xad0,
+ 0xad4,
+ 0xad8,
+ 0xb18,
+ 0xc08,
+ 0xc0c,
+ 0xc10,
+ 0xc14,
+ },
+};
+
+static inline u32 addr_to_da0(const u8 *addr)
+{
+ return (u32)(addr[0] | addr[1] << 8 |
+ addr[2] << 16 | addr[3] << 24);
+};
+
+static inline u32 addr_to_da1(const u8 *addr)
+{
+ return (u32)(addr[4] | addr[5] << 8);
+};
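+
+/* The helpers above pack a MAC address little-endian into the DA registers,
+ * e.g. for 01:02:03:04:05:06 addr_to_da0() returns 0x04030201 and
+ * addr_to_da1() returns 0x0605.
+ */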
+
+static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice,
+ u16 start, u8 len)
+{
+ u32 offset, val;
+
+ offset = offs[slice].ft1_start_len;
+ val = FT1_LEN(len) | FT1_START(start);
+ regmap_write(miig_rt, offset, val);
+}
+
+static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice,
+ int n, const u8 *addr)
+{
+ u32 offset;
+
+ offset = FT1_N_REG(slice, n, FT1_DA0);
+ regmap_write(miig_rt, offset, addr_to_da0(addr));
+ offset = FT1_N_REG(slice, n, FT1_DA1);
+ regmap_write(miig_rt, offset, addr_to_da1(addr));
+}
+
+static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice,
+ int n, const u8 *addr)
+{
+ u32 offset;
+
+ offset = FT1_N_REG(slice, n, FT1_DA0_MASK);
+ regmap_write(miig_rt, offset, addr_to_da0(addr));
+ offset = FT1_N_REG(slice, n, FT1_DA1_MASK);
+ regmap_write(miig_rt, offset, addr_to_da1(addr));
+}
+
+static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n,
+ enum ft1_cfg_type type)
+{
+ u32 offset;
+
+ offset = offs[slice].ft1_cfg;
+ regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n),
+ type << FT1_CFG_SHIFT(n));
+}
+
+static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n,
+ enum rx_class_sel_type type)
+{
+ u32 offset;
+
+ offset = offs[slice].rx_class_cfg1;
+ regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n),
+ type << RX_CLASS_SEL_SHIFT(n));
+}
+
+static void rx_class_set_and(struct regmap *miig_rt, int slice, int n,
+ u32 data)
+{
+ u32 offset;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN);
+ regmap_write(miig_rt, offset, data);
+}
+
+static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
+ u32 data)
+{
+ u32 offset;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_write(miig_rt, offset, data);
+}
+
+static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
+{
+ u32 offset, val;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_read(miig_rt, offset, &val);
+
+ return val;
+}
+
+void icssg_class_set_host_mac_addr(struct regmap *miig_rt, u8 *mac)
+{
+ regmap_write(miig_rt, MAC_INTERFACE_0, addr_to_da0(mac));
+ regmap_write(miig_rt, MAC_INTERFACE_1, addr_to_da1(mac));
+}
+
+void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
+{
+ regmap_write(miig_rt, offs[slice].mac0, addr_to_da0(mac));
+ regmap_write(miig_rt, offs[slice].mac1, addr_to_da1(mac));
+}
+
+static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
+ int slot, const u8 *addr, const u8 *mask)
+{
+ int i;
+ u32 val;
+
+ WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
+
+ rx_class_ft1_set_da(miig_rt, slice, slot, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
+
+ /* Enable the FT1 slot in OR enable for all classifiers */
+ for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
+ val = rx_class_get_or(miig_rt, slice, i);
+ val |= RX_CLASS_FT_FT1_MATCH(slot);
+ rx_class_set_or(miig_rt, slice, i, val);
+ }
+}
+
+/* disable all RX traffic */
+void icssg_class_disable(struct regmap *miig_rt, int slice)
+{
+ u32 data, offset;
+ int n;
+
+ /* Enable RX_L2_G */
+ regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN,
+ ICSSG_CFG_RX_L2_G_EN);
+
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) {
+ /* AND_EN = 0 */
+ rx_class_set_and(miig_rt, slice, n, 0);
+ /* OR_EN = 0 */
+ rx_class_set_or(miig_rt, slice, n, 0);
+
+ /* set CFG1 to OR */
+ rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR);
+
+ /* configure gate */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ /* clear class_raw so we go through filters */
+ data &= ~RX_CLASS_GATES_RAW_MASK;
+ /* set allow and phase mask */
+ data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+
+ /* FT1 Disabled */
+ for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) {
+ u8 addr[] = { 0, 0, 0, 0, 0, 0, };
+
+ rx_class_ft1_cfg_set_type(miig_rt, slice, n,
+ FT1_CFG_TYPE_DISABLED);
+ rx_class_ft1_set_da(miig_rt, slice, n, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, n, addr);
+ }
+
+ /* clear CFG2 */
+ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
+}
+
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1)
+{
+ u32 data;
+ int n;
+ int classifiers_in_use = ICSSG_NUM_CLASSIFIERS_IN_USE;
+
+ if (!is_sr1)
+ classifiers_in_use = 1;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ for (n = 0; n < classifiers_in_use; n++) {
+ /* match on Broadcast or MAC_PRU address */
+ data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
+
+ /* multicast? */
+ if (allmulti)
+ data |= RX_CLASS_FT_MC;
+
+ rx_class_set_or(miig_rt, slice, n, data);
+
+ /* set CFG1 for OR_OR_AND for classifier */
+ rx_class_sel_set_type(miig_rt, slice, n,
+ RX_CLASS_SEL_TYPE_OR_OR_AND);
+ }
+
+ /* clear CFG2 */
+ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
+}
+
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
+{
+ u32 data;
+ u32 offset;
+ int n;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
+ /* set RAW_MASK to bypass filters */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ data |= RX_CLASS_GATES_RAW_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+}
+
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev)
+{
+ int slot;
+ struct netdev_hw_addr *ha;
+ u8 sr_addr[] = { 0x01, 0x80, 0xC2, 0, 0, 0, };
+ u8 cb_addr[] = { 0x01, 0x00, 0x5e, 0, 0, 0, };
+ u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ /* reserve first 2 slots for
+ * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
+ * 2) 01-00-5e-00-00-XX Local Network Control Block
+ * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
+ */
+ mask_addr[5] = 0xff;
+ icssg_class_ft1_add_mcast(miig_rt, slice, 0, sr_addr, mask_addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, 1, cb_addr, mask_addr);
+ mask_addr[5] = 0;
+ slot = 2;
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* skip addresses matching reserved slots */
+ if (!memcmp(sr_addr, ha->addr, 5) ||
+ !memcmp(cb_addr, ha->addr, 5)) {
+ netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
+ continue;
+ }
+
+ if (slot >= FT1_NUM_SLOTS) {
+ netdev_dbg(ndev,
+ "can't add more than %d MC addresses, enabling allmulti\n",
+ FT1_NUM_SLOTS);
+ icssg_class_default(miig_rt, slice, 1, 1);
+ break;
+ }
+
+ netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, slot,
+ ha->addr, mask_addr);
+ slot++;
+ }
+}
+
+/* required for SR2 for SAV check */
+void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
+{
+ u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
+}
diff --git a/drivers/net/ethernet/ti/icssg_config.c b/drivers/net/ethernet/ti/icssg_config.c
new file mode 100644
index 000000000000..564bafcd6fc3
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_config.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ICSSG Ethernet driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/regmap.h>
+#include <uapi/linux/if_ether.h>
+#include "icssg_config.h"
+#include "icssg_prueth.h"
+#include "icssg_switch_map.h"
+#include "icss_mii_rt.h"
+
+/* TX IPG values to be set for 100M and 1G link speeds. These values are
+ * in ocp_clk cycles, so they need to change if ocp_clk is changed for a
+ * specific h/w design.
+ */
+
+/* IPG is in core_clk cycles */
+#define MII_RT_TX_IPG_100M_SR1 0x166
+#define MII_RT_TX_IPG_1G_SR1 0x1a
+#define MII_RT_TX_IPG_100M 0x17
+#define MII_RT_TX_IPG_1G 0xb
+
+#define ICSSG_QUEUES_MAX 64
+#define ICSSG_QUEUE_OFFSET 0xd00
+#define ICSSG_QUEUE_PEEK_OFFSET 0xe00
+#define ICSSG_QUEUE_CNT_OFFSET 0xe40
+#define ICSSG_QUEUE_RESET_OFFSET 0xf40
+
+#define ICSSG_NUM_TX_QUEUES 8
+
+#define RECYCLE_Q_SLICE0 16
+#define RECYCLE_Q_SLICE1 17
+
+#define ICSSG_NUM_OTHER_QUEUES 5 /* port, host and special queues */
+
+#define PORT_HI_Q_SLICE0 32
+#define PORT_LO_Q_SLICE0 33
+#define HOST_HI_Q_SLICE0 34
+#define HOST_LO_Q_SLICE0 35
+#define HOST_SPL_Q_SLICE0 40 /* Special Queue */
+
+#define PORT_HI_Q_SLICE1 36
+#define PORT_LO_Q_SLICE1 37
+#define HOST_HI_Q_SLICE1 38
+#define HOST_LO_Q_SLICE1 39
+#define HOST_SPL_Q_SLICE1 41 /* Special Queue */
+
+#define MII_RXCFG_DEFAULT (PRUSS_MII_RT_RXCFG_RX_ENABLE | \
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
+ PRUSS_MII_RT_RXCFG_RX_L2_EN | \
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)
+
+#define MII_TXCFG_DEFAULT (PRUSS_MII_RT_TXCFG_TX_ENABLE | \
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
+ PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)
+
+#define ICSSG_CFG_DEFAULT (ICSSG_CFG_TX_L1_EN | \
+ ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
+ ICSSG_CFG_TX_PRU_EN | /* SR2.0 only */ \
+ ICSSG_CFG_SGMII_MODE)
+
+#define FDB_GEN_CFG1 0x60
+#define SMEM_VLAN_OFFSET 8
+#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+
+#define FDB_GEN_CFG2 0x64
+#define FDB_VLAN_EN BIT(6)
+#define FDB_HOST_EN BIT(2)
+#define FDB_PRU1_EN BIT(1)
+#define FDB_PRU0_EN BIT(0)
+#define FDB_EN_ALL (FDB_PRU0_EN | FDB_PRU1_EN | \
+ FDB_HOST_EN | FDB_VLAN_EN)
+
+struct map {
+ int queue;
+ u32 pd_addr_start;
+ u32 flags;
+ bool special;
+};
+
+static struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
+ {
+ { PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
+ { PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
+ { HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
+ { HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
+ { HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
+ },
+ {
+ { PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
+ { PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
+ { HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
+ { HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
+ { HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
+ },
+};
+
+static void icssg_config_mii_init_switch(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct regmap *mii_rt = prueth->mii_rt;
+ int mii = prueth_emac_slice(emac);
+ u32 rxcfg_reg, txcfg_reg, pcnt_reg;
+ u32 rxcfg, txcfg;
+
+ rxcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
+ PRUSS_MII_RT_RXCFG1;
+ txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ rxcfg = PRUSS_MII_RT_RXCFG_RX_ENABLE |
+ PRUSS_MII_RT_RXCFG_RX_L2_EN |
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS;
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;
+
+ if (mii == ICSS_MII1)
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
+static void icssg_config_mii_init(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct regmap *mii_rt = prueth->mii_rt;
+ int slice = prueth_emac_slice(emac);
+ u32 rxcfg_reg, txcfg_reg, pcnt_reg;
+ u32 rxcfg, txcfg;
+
+ rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
+ PRUSS_MII_RT_RXCFG1;
+ txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ rxcfg = MII_RXCFG_DEFAULT;
+ txcfg = MII_TXCFG_DEFAULT;
+
+ if (slice == ICSS_MII1)
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+	/* In MII mode the TX lines are swapped inside the ICSSG, so the
+	 * TX_MUX_SEL config needs to be swapped as well compared to RGMII
+	 * mode. TODO: errata?
+	 */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
+static void icssg_miig_queues_init(struct prueth *prueth, int slice)
+{
+ struct regmap *miig_rt = prueth->miig_rt;
+ void __iomem *smem = prueth->shram.va;
+ u8 pd[ICSSG_SPECIAL_PD_SIZE];
+ int queue = 0, i, j;
+ u32 *pdword;
+
+ /* reset hwqueues */
+ if (slice)
+ queue = ICSSG_NUM_TX_QUEUES;
+
+ for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
+ queue++;
+ }
+
+ queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
+
+ for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
+ hwq_map[slice][i].queue);
+ }
+
+ /* initialize packet descriptors in SMEM */
+ /* push pakcet descriptors to hwqueues */
+	/* push packet descriptors to hwqueues */
+ pdword = (u32 *)pd;
+ for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
+ struct map *mp;
+ int pd_size, num_pds;
+ u32 pdaddr;
+
+ mp = &hwq_map[slice][j];
+ if (mp->special) {
+ pd_size = ICSSG_SPECIAL_PD_SIZE;
+ num_pds = ICSSG_NUM_SPECIAL_PDS;
+ } else {
+ pd_size = ICSSG_NORMAL_PD_SIZE;
+ num_pds = ICSSG_NUM_NORMAL_PDS;
+ }
+
+ for (i = 0; i < num_pds; i++) {
+ memset(pd, 0, pd_size);
+
+ pdword[0] &= cpu_to_le32(ICSSG_FLAG_MASK);
+ pdword[0] |= cpu_to_le32(mp->flags);
+ pdaddr = mp->pd_addr_start + i * pd_size;
+
+ memcpy_toio(smem + pdaddr, pd, pd_size);
+ queue = mp->queue;
+ regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
+ pdaddr);
+ }
+ }
+}
+
+void icssg_config_ipg(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ switch (emac->speed) {
+ case SPEED_1000:
+ icssg_mii_update_ipg(prueth->mii_rt, slice, prueth->is_sr1 ?
+ MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G);
+ break;
+ case SPEED_100:
+ icssg_mii_update_ipg(prueth->mii_rt, slice, prueth->is_sr1 ?
+ MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M);
+ break;
+ case SPEED_10:
+ /* Firmware hardcodes IPG for PG1. PG2 same as 100M */
+ if (!prueth->is_sr1)
+ icssg_mii_update_ipg(prueth->mii_rt, slice,
+ MII_RT_TX_IPG_100M);
+ break;
+ default:
+		/* Other link speeds not supported */
+ pr_err("Unsupported link speed\n");
+ return;
+ }
+}
+
+/* SR1: Set buffer sizes for the pools. There are 8 internal queues
+ * implemented in firmware, but only 4 TX channels/threads in the egress
+ * direction to firmware. A high priority queue is needed for management
+ * messages since they shouldn't be blocked even in high traffic
+ * situations, so use Q0-Q2 as data queues and Q3 as the management queue
+ * in the max case. However, for ease of configuration, use the max
+ * data queue + 1 for management messages when not using the max case.
+ *
+ * Allocate 4 MTU buffers per data queue. Firmware requires
+ * pool sizes to be set for internal queues. Set the upper 5 queue
+ * pool sizes to the minimum of 128 bytes since there are only 3 TX
+ * data channels and the management queue requires only a minimum buffer,
+ * i.e. the lower queues are used by the driver and the highest priority
+ * queue among those is used for management messages.
+ */
+
+static int emac_egress_buf_pool_size[] = {
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1};
+
+void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
+ int slice)
+{
+ void __iomem *va;
+ struct icssg_config_sr1 *config;
+ int i, index;
+
+ va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
+ config = &prueth->config[slice];
+ memset(config, 0, sizeof(*config));
+ config->addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
+ config->addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
+ config->num_tx_threads = 0;
+ config->rx_flow_id = emac->rx_flow_id_base; /* flow id for host port */
+ config->rx_mgr_flow_id = emac->rx_mgm_flow_id_base; /* for mgm ch */
+ config->rand_seed = get_random_int();
+
+ for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1;
+ i++) {
+ index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
+ config->tx_buf_sz[i] =
+ cpu_to_le32(emac_egress_buf_pool_size[index]);
+ }
+
+ memcpy_toio(va, &prueth->config[slice], sizeof(prueth->config[slice]));
+}
+
+static void emac_r30_cmd_init(struct prueth_emac *emac)
+{
+ int i;
+ struct icssg_r30_cmd *p;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ for (i = 0; i < 4; i++)
+ writel(EMAC_NONE, &p->cmd[i]);
+}
+
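+/* Note on the R30 command protocol (as implied by the poll below):
+ * firmware consumes each of the four command words and writes EMAC_NONE
+ * back into the slot, so a command is considered complete once all four
+ * slots read as EMAC_NONE again.
+ */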
+static int emac_r30_is_done(struct prueth_emac *emac)
+{
+ const struct icssg_r30_cmd *p;
+ int i;
+ u32 cmd;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ for (i = 0; i < 4; i++) {
+ cmd = readl(&p->cmd[i]);
+ if (cmd != EMAC_NONE)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int prueth_switch_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg *bpool_cfg;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct icssg_rxq_ctx *rxq_ctx;
+ u32 addr;
+ int i;
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice)
+ addr += PRUETH_NUM_BUF_POOLS_SR2 * PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ /* workaround for f/w bug. bpool 0 needs to be initialized */
+ for (i = 0; i < PRUETH_NUM_BUF_POOLS_SR2; i++) {
+ bpool_cfg[i].addr = cpu_to_le32(addr);
+ bpool_cfg[i].len = cpu_to_le32(PRUETH_EMAC_BUF_POOL_SIZE_SR2);
+ addr += PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+ }
+
+ if (!slice)
+ addr += PRUETH_NUM_BUF_POOLS_SR2 * PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+ else
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST_SR2 * PRUETH_SW_BUF_POOL_SIZE_HOST_SR2;
+
+ for (i = PRUETH_NUM_BUF_POOLS_SR2;
+ i < PRUETH_SW_NUM_BUF_POOLS_HOST_SR2 + PRUETH_NUM_BUF_POOLS_SR2;
+ i++) {
+ bpool_cfg[i].addr = cpu_to_le32(addr);
+ bpool_cfg[i].len = cpu_to_le32(PRUETH_SW_BUF_POOL_SIZE_HOST_SR2);
+ addr += PRUETH_SW_BUF_POOL_SIZE_HOST_SR2;
+ }
+
+ if (!slice)
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST_SR2 * PRUETH_SW_BUF_POOL_SIZE_HOST_SR2;
+ else
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+
+ /* Pre-emptible RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ rxq_ctx->start[i] = cpu_to_le32(addr);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ rxq_ctx->end = cpu_to_le32(addr);
+
+ /* Express RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ rxq_ctx->start[i] = cpu_to_le32(addr);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ rxq_ctx->end = cpu_to_le32(addr);
+
+ return 0;
+}
+
+static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg *bpool_cfg;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct icssg_rxq_ctx *rxq_ctx;
+ u32 addr;
+ int i;
+
+ /* Layout to have 64KB aligned buffer pool
+ * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
+ */
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice)
+ addr += PRUETH_NUM_BUF_POOLS_SR2 * PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ /* workaround for f/w bug. bpool 0 needs to be initialized */
+ bpool_cfg[0].addr = cpu_to_le32(addr);
+ bpool_cfg[0].len = 0;
+
+ for (i = PRUETH_EMAC_BUF_POOL_START_SR2;
+ i < (PRUETH_EMAC_BUF_POOL_START_SR2 + PRUETH_NUM_BUF_POOLS_SR2);
+ i++) {
+ bpool_cfg[i].addr = cpu_to_le32(addr);
+ bpool_cfg[i].len = cpu_to_le32(PRUETH_EMAC_BUF_POOL_SIZE_SR2);
+ addr += PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+ }
+
+ if (!slice)
+ addr += PRUETH_NUM_BUF_POOLS_SR2 * PRUETH_EMAC_BUF_POOL_SIZE_SR2;
+ else
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+
+ /* Pre-emptible RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ rxq_ctx->start[i] = cpu_to_le32(addr);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ rxq_ctx->end = cpu_to_le32(addr);
+
+ /* Express RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ rxq_ctx->start[i] = cpu_to_le32(addr);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ rxq_ctx->end = cpu_to_le32(addr);
+
+ return 0;
+}
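+
+/* Worked layout example, assuming the SR2 defaults from icssg_config.h
+ * (8 pools x 8KB per slice, 2 x 16KB RX contexts per slice):
+ *
+ *   slice 0 pools:  [pa, pa + 64K)
+ *   slice 1 pools:  [pa + 64K, pa + 128K)
+ *   slice 0 RX ctx: [pa + 128K, pa + 160K)
+ *   slice 1 RX ctx: [pa + 160K, pa + 192K)
+ *
+ * which adds up to MSMC_RAM_SIZE_SR2 and explains the 64KB alignment
+ * check above.
+ */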
+
+static void icssg_init_emac_mode(struct prueth *prueth)
+{
+ u8 mac[ETH_ALEN] = { 0 };
+
+ if (prueth->emacs_initialized)
+ return;
+
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, 0);
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0);
+ /* Clear host MAC address */
+ icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
+}
+
+static void icssg_init_switch_mode(struct prueth *prueth)
+{
+ int i;
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+
+ if (prueth->emacs_initialized)
+ return;
+
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ /* Set enable VLAN aware mode, and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
+ prueth->vlan_tbl = prueth->shram.va + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
+
+ icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+}
+
+int icssg_config_sr2(struct prueth *prueth, struct prueth_emac *emac, int slice)
+{
+ void *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
+ u8 *cfg_byte_ptr = config;
+ struct icssg_flow_cfg *flow_cfg;
+ u32 mask;
+ int ret;
+
+ if (prueth->is_switch_mode)
+ icssg_init_switch_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
+
+ memset_io(config, 0, TAS_GATE_MASK_LIST0);
+ icssg_miig_queues_init(prueth, slice);
+
+ emac->speed = SPEED_1000;
+ emac->duplex = DUPLEX_FULL;
+ if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
+ emac->speed = SPEED_100;
+ emac->duplex = DUPLEX_FULL;
+ }
+ regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
+ icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
+ if (prueth->is_switch_mode)
+ icssg_config_mii_init_switch(emac);
+ else
+ icssg_config_mii_init(emac);
+ icssg_config_ipg(emac);
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* set GPI mode */
+ pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
+ PRUSS_GPI_MODE_MII);
+
+ /* enable XFR shift for PRU and RTU */
+ mask = PRUSS_SPP_XFER_SHIFT_EN | PRUSS_SPP_RTU_XFR_SHIFT_EN;
+ pruss_cfg_update(prueth->pruss, PRUSS_CFG_SPP, mask, mask);
+
+ /* set C28 to 0x100 */
+ pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
+ pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
+ pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);
+
+ flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ flow_cfg->rx_base_flow = cpu_to_le32(emac->rx_flow_id_base);
+ flow_cfg->mgm_base_flow = 0;
+ *(cfg_byte_ptr + SPL_PKT_DEFAULT_PRIORITY) = 0;
+ *(cfg_byte_ptr + QUEUE_NUM_UNTAGGED) = 0x0;
+
+ if (prueth->is_switch_mode)
+ ret = prueth_switch_buffer_setup(emac);
+ else
+ ret = prueth_emac_buffer_setup(emac);
+ if (ret)
+ return ret;
+
+ emac_r30_cmd_init(emac);
+
+ return 0;
+}
+
+/* commands to program ICSSG R30 registers */
+/* FIXME: replace hex magic numbers with macros */
+static struct icssg_r30_cmd emac_r32_bitmask[] = {
+ {{0xffff0004, 0xffff0100, 0xffff0100, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
+ {{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */
+ {{0xffbb0000, 0xfcff0000, 0xdcff0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
+ {{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */
+ {{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */
+ {{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */
+ {{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT UNTAGGED and PRIO */
+ {{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}}, /* TAS Trigger List change */
+ {{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}}, /* TAS set state ENABLE */
+ {{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}}, /* TAS set state RESET */
+ {{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}}, /* TAS set state DISABLE */
+ {{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}}, /* UC flooding ENABLE */
+ {{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}}, /* UC flooding DISABLE */
+ {{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}}, /* MC flooding ENABLE */
+ {{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}}, /* MC flooding DISABLE */
+ {{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE */
+ {{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE */
+ {{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE */
+ {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNAWARE */
+};
+
+int emac_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd cmd)
+{
+ struct icssg_r30_cmd *p;
+ int ret = -ETIMEDOUT;
+ int timeout = 10;
+ int i;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
+ netdev_err(emac->ndev, "invalid port command\n");
+ return -EINVAL;
+ }
+
+ /* only one command at a time allowed to firmware */
+ mutex_lock(&emac->cmd_lock);
+
+ for (i = 0; i < 4; i++)
+ writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);
+
+ /* wait for done */
+ while (timeout) {
+ if (emac_r30_is_done(emac)) {
+ ret = 0;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+
+ if (ret == -ETIMEDOUT)
+ netdev_err(emac->ndev, "timeout waiting for command done\n");
+
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+}
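+
+/* Usage sketch (illustrative only, not part of the driver flow): a
+ * caller bringing a port up might request the forwarding state like
+ * this:
+ *
+ *	ret = emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ *	if (ret)
+ *		netdev_err(emac->ndev, "failed to set port state\n");
+ */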
+
+void icssg_config_set_speed(struct prueth_emac *emac)
+{
+ u8 fw_speed;
+
+ if (emac->is_sr1)
+ return;
+
+ switch (emac->speed) {
+ case SPEED_1000:
+ fw_speed = FW_LINK_SPEED_1G;
+ break;
+ case SPEED_100:
+ fw_speed = FW_LINK_SPEED_100M;
+ break;
+ case SPEED_10:
+ fw_speed = FW_LINK_SPEED_10M;
+ break;
+ default:
+ /* Other link speeds are not supported */
+ pr_err("Unsupported link speed\n");
+ return;
+ }
+
+ if (emac->duplex == DUPLEX_HALF)
+ fw_speed |= FW_LINK_SPEED_HD;
+
+ writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
+}
+
+static void icssg_config_half_duplex_sr1(struct prueth_emac *emac)
+{
+ int slice = prueth_emac_slice(emac);
+ struct icssg_config_sr1 *config;
+ u32 val = get_random_int();
+ void __iomem *va;
+
+ va = emac->prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
+ config = (struct icssg_config_sr1 *)va;
+
+ writel(val, &config->rand_seed);
+}
+
+void icssg_config_half_duplex(struct prueth_emac *emac)
+{
+ u32 val;
+
+ if (emac->is_sr1)
+ icssg_config_half_duplex_sr1(emac);
+
+ val = get_random_int();
+ writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
+}
+
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr;
+ int i = 10000;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ /* First 4 bytes have FW owned buffer linking info which should
+ * not be touched
+ */
+ memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
+ while (i--) {
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
+ if (addr < 0) {
+ usleep_range(1000, 2000);
+ continue;
+ }
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* Return buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);
+ break;
+ }
+ if (addr < 0) {
+ netdev_err(emac->ndev, "Timed out sending HWQ message\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int slice = prueth_emac_slice(emac);
+ u8 mac_fid[ETH_ALEN + 2];
+ u8 fid = vid;
+ int ret, i;
+ u16 fdb_slot;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_fid[i] = addr[i];
+
+ /* 1-1 VID-FID mapping is already setup */
+ mac_fid[ETH_ALEN] = fid;
+ mac_fid[ETH_ALEN + 1] = 0;
+
+ fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ if (add)
+ fdb_cmd.param = ICSS_CMD_ADD_FDB;
+ else
+ fdb_cmd.param = ICSS_CMD_DEL_FDB;
+
+ fdb_cmd.param |= (slice << 4);
+
+ fid_c2 |= ICSSG_FDB_ENTRY_VALID;
+ memcpy(&fdb_cmd.cmd_args[0], addr, 4);
+ memcpy(&fdb_cmd.cmd_args[1], &addr[4], 2);
+ fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));
+ fdb_cmd.cmd_args[2] = fdb_slot;
+
+ netdev_dbg(emac->ndev, "MAC %pM slot %X vlan %X FID %X\n",
+ addr, fdb_slot, vid, fid);
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ if (fdb_cmd_rsp.status == 1)
+ return 0;
+
+ return -EINVAL;
+}
+
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct prueth_fdb_slot *slot;
+ u8 mac_fid[ETH_ALEN + 2];
+ u8 fid = vid;
+ int ret, i;
+ u16 fdb_slot;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_fid[i] = addr[i];
+
+ /* 1-1 VID-FID mapping is already setup */
+ mac_fid[ETH_ALEN] = fid;
+ mac_fid[ETH_ALEN + 1] = 0;
+
+ fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = ICSS_CMD_GET_FDB_SLOT;
+
+ fdb_cmd.param |= (slice << 4);
+
+ memcpy(&fdb_cmd.cmd_args[0], addr, 4);
+ memcpy(&fdb_cmd.cmd_args[1], &addr[4], 2);
+ fdb_cmd.cmd_args[1] |= fid << 16;
+ fdb_cmd.cmd_args[2] = fdb_slot;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+
+ slot = emac->dram.va + FDB_CMD_BUFFER;
+ for (i = 0; i < 4; i++) {
+ if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
+ return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
+ slot++;
+ }
+
+ return 0;
+}
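+
+/* Note: as implied by the four-entry scan above, the FDB appears to be
+ * organized as hash buckets of four slots, indexed by the bit-reversed
+ * CRC32 over {MAC, FID, 0}; firmware returns the whole bucket at
+ * FDB_CMD_BUFFER for the driver to match against.
+ */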
+
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add)
+{
+ struct prueth *prueth = emac->prueth;
+ struct prueth_vlan_tbl *tbl = prueth->vlan_tbl;
+ u8 fid_c1 = tbl[vid].fid_c1;
+
+ /* FID_C1: bit0..2 port membership mask,
+ * bit3..5 tagging mask for each port
+ * bit6 Stream VID (not handled currently)
+ * bit7 MC flood (not handled currently)
+ */
+ if (add) {
+ fid_c1 |= (port_mask | port_mask << 3);
+ fid_c1 &= ~(untag_mask << 3);
+ } else {
+ fid_c1 &= ~(port_mask | port_mask << 3);
+ }
+
+ tbl[vid].fid_c1 = fid_c1;
+}
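+
+/* Worked example (illustrative): adding port 1 (port_mask = BIT(0)) to
+ * a VID with untagged egress (untag_mask = BIT(0)) sets bit 0
+ * (membership) and clears bit 3 (tagging), assuming a set tagging bit
+ * means tagged egress. Adding the same port tagged (untag_mask = 0)
+ * leaves both bit 0 and bit 3 set.
+ */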
+
+u16 icssg_get_pvid(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 pvid;
+
+ if (emac->port_id == PRUETH_PORT_MII0)
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+
+ pvid = pvid >> 24;
+
+ return pvid;
+}
+
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+{
+ u32 pvid;
+
+ /* only 256 VLANs are supported */
+ pvid = cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));
+
+ if (port == PRUETH_PORT_MII0)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else if (port == PRUETH_PORT_MII1)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+ else
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+}
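+
+/* Example (illustrative): for vid 5 the stored big-endian word is
+ * 0x81000005 (TPID 0x8100 in the upper half), which a little-endian
+ * readl() sees as 0x05000081; this is why icssg_get_pvid() shifts the
+ * value right by 24.
+ */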
diff --git a/drivers/net/ethernet/ti/icssg_config.h b/drivers/net/ethernet/ti/icssg_config.h
new file mode 100644
index 000000000000..75d1c2deeace
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_config.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_CONFIG_H
+#define __NET_TI_ICSSG_CONFIG_H
+
+struct icssg_buffer_pool_cfg {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct icssg_flow_cfg {
+ __le16 rx_base_flow;
+ __le16 mgm_base_flow;
+} __packed;
+
+/*------------------------ SR1.0 related --------------------------*/
+
+/* Port queue size in MSMC from firmware
+ * PORTQSZ_HP .set (0x1800)
+ * PORTQSZ_HP2 .set (PORTQSZ_HP+128) ;include barrier area
+ * 0x1880 x 8 bytes per slice (port)
+ */
+
+#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */
+
+#define PRUETH_MAX_RX_MGM_DESC 8
+#define PRUETH_MAX_RX_FLOWS_SR1 4 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA_SR1 3 /* highest priority flow */
+#define PRUETH_MAX_RX_MGM_FLOWS 2 /* excluding default flow */
+#define PRUETH_RX_MGM_FLOW_RESPONSE 0
+#define PRUETH_RX_MGM_FLOW_TIMESTAMP 1
+#define PRUETH_RX_MGM_FLOW_OTHER 2
+
+#define PRUETH_NUM_BUF_POOLS_SR1 16
+#define PRUETH_EMAC_BUF_POOL_START_SR1 8
+#define PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 128
+#define PRUETH_EMAC_BUF_SIZE_SR1 1536
+#define PRUETH_EMAC_NUM_BUF_SR1 4
+#define PRUETH_EMAC_BUF_POOL_SIZE_SR1 (PRUETH_EMAC_NUM_BUF_SR1 * \
+ PRUETH_EMAC_BUF_SIZE_SR1)
+/* Config area lies in shared RAM */
+#define ICSSG_CONFIG_OFFSET_SLICE0 0
+#define ICSSG_CONFIG_OFFSET_SLICE1 0x8000
+
+struct icssg_config_sr1 {
+ __le32 status; /* Firmware status */
+ __le32 addr_lo; /* MSMC Buffer pool base address low. */
+ __le32 addr_hi; /* MSMC Buffer pool base address high. Must be 0 */
+ __le32 tx_buf_sz[16]; /* Array of buffer pool sizes */
+ __le32 num_tx_threads; /* Number of active egress threads, 1 to 4 */
+ __le32 tx_rate_lim_en; /* Bitmask: Egress rate limit en per thread */
+ __le32 rx_flow_id; /* RX flow id for first rx ring */
+ __le32 rx_mgr_flow_id; /* RX flow id for the first management ring */
+ __le32 flags; /* TBD */
+ __le32 n_burst; /* for debug */
+ __le32 rtu_status; /* RTU status */
+ __le32 info; /* reserved */
+ __le32 reserve;
+ __le32 rand_seed; /* Used for the random number generation at fw */
+} __packed;
+
+/* Shutdown command to stop processing at firmware.
+ * Command format : 0x8101ss00. ss - sequence number. Currently not used
+ * by driver.
+ */
+#define ICSSG_SHUTDOWN_CMD 0x81010000
+
+/* pstate speed/duplex command to set speed and duplex settings
+ * in firmware.
+ * Command format : 0x8102ssPN. ss - sequence number: currently not
+ * used by driver, P - port number: For switch, N - Speed/Duplex state
+ * - Possible values of N:
+ * 0x0 - 10Mbps/Half duplex ;
+ * 0x8 - 10Mbps/Full duplex ;
+ * 0x2 - 100Mbps/Half duplex;
+ * 0xa - 100Mbps/Full duplex;
+ * 0xc - 1Gbps/Full duplex;
+ * NOTE: The above are the same as bits [3..1] (slice 0) or bits [8..6]
+ * (slice 1) of the RGMII CFG register, so it is suggested to read that
+ * register to populate the command bits.
+ */
+#define ICSSG_PSTATE_SPEED_DUPLEX_CMD 0x81020000
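+/* e.g. a command word of 0x8102000a (illustrative) would request
+ * 100Mbps/Full duplex on port 0 with sequence number 0.
+ */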
+
+/*------------------------ SR2.0 related --------------------------*/
+
+#define PRUETH_PKT_TYPE_CMD 0x10
+#define PRUETH_NAV_PS_DATA_SIZE 16 /* Protocol specific data size */
+#define PRUETH_NAV_SW_DATA_SIZE 48 /* SW related data size */
+#define PRUETH_MAX_TX_DESC 512
+#define PRUETH_MAX_RX_DESC 512
+#define PRUETH_MAX_RX_FLOWS_SR2 1 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA_SR2 0 /* FIXME: change to highest priority flow once f/w bug is fixed */
+
+#define PRUETH_EMAC_BUF_POOL_SIZE_SR2 SZ_8K
+#define PRUETH_EMAC_POOLS_PER_SLICE 24
+#define PRUETH_EMAC_BUF_POOL_START_SR2 8
+#define PRUETH_NUM_BUF_POOLS_SR2 8
+#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
+#define MSMC_RAM_SIZE_SR2 \
+ (2 * (PRUETH_EMAC_BUF_POOL_SIZE_SR2 * PRUETH_NUM_BUF_POOLS_SR2 + \
+ PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+
+#define PRUETH_SW_BUF_POOL_SIZE_HOST_SR2 SZ_2K
+#define PRUETH_SW_NUM_BUF_POOLS_HOST_SR2 16
+#define MSMC_RAM_SIZE_SR2_SWITCH_MODE \
+ (MSMC_RAM_SIZE_SR2 + \
+ (2 * PRUETH_SW_BUF_POOL_SIZE_HOST_SR2 * PRUETH_SW_NUM_BUF_POOLS_HOST_SR2))
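+
+/* With the defaults above these work out to 192KB of MSMC RAM in EMAC
+ * mode and 256KB in switch mode (2 x 16 x 2KB = 64KB of extra host
+ * pools).
+ */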
+
+#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
+
+struct icssg_rxq_ctx {
+ __le32 start[3];
+ __le32 end;
+} __packed;
+
+/* Load time Firmware Configuration */
+
+#define ICSSG_FW_MGMT_CMD_HEADER 0x81
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
+#define ICSSG_FW_MGMT_CMD_TYPE 0x04
+#define ICSSG_FW_MGMT_PKT 0x80000000
+
+struct icssg_r30_cmd {
+ u32 cmd[4];
+} __packed;
+
+enum icssg_port_state_cmd {
+ ICSSG_EMAC_PORT_DISABLE = 0,
+ ICSSG_EMAC_PORT_BLOCK,
+ ICSSG_EMAC_PORT_FORWARD,
+ ICSSG_EMAC_PORT_FORWARD_WO_LEARNING,
+ ICSSG_EMAC_PORT_ACCEPT_ALL,
+ ICSSG_EMAC_PORT_ACCEPT_TAGGED,
+ ICSSG_EMAC_PORT_ACCEPT_UNTAGGED_N_PRIO,
+ ICSSG_EMAC_PORT_TAS_TRIGGER,
+ ICSSG_EMAC_PORT_TAS_ENABLE,
+ ICSSG_EMAC_PORT_TAS_RESET,
+ ICSSG_EMAC_PORT_TAS_DISABLE,
+ ICSSG_EMAC_PORT_UC_FLOODING_ENABLE,
+ ICSSG_EMAC_PORT_UC_FLOODING_DISABLE,
+ ICSSG_EMAC_PORT_MC_FLOODING_ENABLE,
+ ICSSG_EMAC_PORT_MC_FLOODING_DISABLE,
+ ICSSG_EMAC_PORT_PREMPT_TX_ENABLE,
+ ICSSG_EMAC_PORT_PREMPT_TX_DISABLE,
+ ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE,
+ ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE,
+ ICSSG_EMAC_PORT_MAX_COMMANDS
+};
+
+#define EMAC_NONE 0xffff0000
+#define EMAC_PRU0_P_DI 0xffff0004
+#define EMAC_PRU1_P_DI 0xffff0040
+#define EMAC_TX_P_DI 0xffff0100
+
+#define EMAC_PRU0_P_EN 0xfffb0000
+#define EMAC_PRU1_P_EN 0xffbf0000
+#define EMAC_TX_P_EN 0xfeff0000
+
+#define EMAC_P_BLOCK 0xffff0040
+#define EMAC_TX_P_BLOCK 0xffff0200
+#define EMAC_P_UNBLOCK 0xffbf0000
+#define EMAC_TX_P_UNBLOCK 0xfdff0000
+#define EMAC_LEAN_EN 0xfff70000
+#define EMAC_LEAN_DI 0xffff0008
+
+#define EMAC_ACCEPT_ALL 0xffff0001
+#define EMAC_ACCEPT_TAG 0xfffe0002
+#define EMAC_ACCEPT_PRIOR 0xfffc0000
+
+/* Config area lies in DRAM */
+#define ICSSG_CONFIG_OFFSET 0x0
+
+#define ICSSG_NUM_NORMAL_PDS 64
+#define ICSSG_NUM_SPECIAL_PDS 16
+
+#define ICSSG_NORMAL_PD_SIZE 8
+#define ICSSG_SPECIAL_PD_SIZE 20
+
+#define ICSSG_FLAG_MASK 0xff00ffff
+
+struct icssg_setclock_desc {
+ u8 request;
+ u8 restore;
+ u8 acknowledgment;
+ u8 cmp_status;
+ u32 margin;
+ u32 cyclecounter0_set;
+ u32 cyclecounter1_set;
+ u32 iepcount_set;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 CMP0_current;
+ u32 iepcount_current;
+ u32 difference;
+ u32 cyclecounter0_new;
+ u32 cyclecounter1_new;
+ u32 CMP0_new;
+} __packed;
+
+#define ICSSG_CMD_POP_SLICE0 56
+#define ICSSG_CMD_POP_SLICE1 60
+
+#define ICSSG_CMD_PUSH_SLICE0 57
+#define ICSSG_CMD_PUSH_SLICE1 61
+
+#define ICSSG_RSP_POP_SLICE0 58
+#define ICSSG_RSP_POP_SLICE1 62
+
+#define ICSSG_RSP_PUSH_SLICE0 56
+#define ICSSG_RSP_PUSH_SLICE1 60
+
+#define ICSSG_TS_POP_SLICE0 59
+#define ICSSG_TS_POP_SLICE1 63
+
+#define ICSSG_TS_PUSH_SLICE0 40
+#define ICSSG_TS_PUSH_SLICE1 41
+
+struct mgmt_cmd {
+ u8 param;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+} __packed;
+
+struct mgmt_cmd_rsp {
+ u32 reserved;
+ u8 status;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+} __packed;
+
+/* FDB FID_C2 flag definitions */
+/* Indicates host port membership.*/
+#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0)
+/* Indicates that MAC ID is connected to physical port 1 */
+#define ICSSG_FDB_ENTRY_P1_MEMBERSHIP BIT(1)
+/* Indicates that MAC ID is connected to physical port 2 */
+#define ICSSG_FDB_ENTRY_P2_MEMBERSHIP BIT(2)
+/* Ageable bit is set for learned entries and cleared for static entries */
+#define ICSSG_FDB_ENTRY_AGEABLE BIT(3)
+/* If set for DA then packet is determined to be a special packet */
+#define ICSSG_FDB_ENTRY_BLOCK BIT(4)
+/* If set for DA then the SA from the packet is not learned */
+#define ICSSG_FDB_ENTRY_SECURE BIT(5)
+/* If set, it means packet has been seen recently with source address + FID
+ * matching MAC address/FID of entry
+ */
+#define ICSSG_FDB_ENTRY_TOUCHED BIT(6)
+/* Set if entry is valid */
+#define ICSSG_FDB_ENTRY_VALID BIT(7)
+
+/**
+ * struct prueth_vlan_tbl - VLAN table entries struct in ICSSG SMEM
+ * @fid_c1: membership and forwarding rule flags for this entry. See
+ * the FID_C1 bit layout described in icssg_vtbl_modify()
+ * @fid: FDB index for this VID (there is 1-1 mapping b/w VID and FID)
+ */
+struct prueth_vlan_tbl {
+ u8 fid_c1;
+ u8 fid;
+} __packed;
+
+/**
+ * struct prueth_fdb_slot - Result of FDB slot lookup
+ * @mac: MAC address
+ * @fid: fid to be associated with MAC
+ * @fid_c2: FID_C2 entry for this MAC
+ */
+struct prueth_fdb_slot {
+ u8 mac[ETH_ALEN];
+ u8 fid;
+ u8 fid_c2;
+} __packed;
+
+enum icssg_ietfpe_verify_states {
+ ICSSG_IETFPE_STATE_UNKNOWN = 0,
+ ICSSG_IETFPE_STATE_INITIAL,
+ ICSSG_IETFPE_STATE_VERIFYING,
+ ICSSG_IETFPE_STATE_SUCCEEDED,
+ ICSSG_IETFPE_STATE_FAILED,
+ ICSSG_IETFPE_STATE_DISABLED
+};
+#endif /* __NET_TI_ICSSG_CONFIG_H */
diff --git a/drivers/net/ethernet/ti/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg_ethtool.c
new file mode 100644
index 000000000000..bb6f4010af22
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_ethtool.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include "icssg_prueth.h"
+#include <linux/regmap.h>
+
+#define STATS_TIME_LIMIT_MS 25000000
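+/* Divided by the link speed in Mbps to get the poll period in ms (25s
+ * at 1G, see emac_stats_work_handler()), presumably so the 32-bit
+ * hardware counters are harvested before they can wrap.
+ */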
+
+static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
+ 0xb18, /* Slice 1 stats start */
+};
+
+struct miig_stats_regs {
+ /* Rx */
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_crc_error_frames;
+ u32 rx_mii_error_frames;
+ u32 rx_odd_nibble_frames;
+ u32 rx_frame_max_size;
+ u32 rx_max_size_error_frames;
+ u32 rx_frame_min_size;
+ u32 rx_min_size_error_frames;
+ u32 rx_overrun_frames;
+ u32 rx_class0_hits;
+ u32 rx_class1_hits;
+ u32 rx_class2_hits;
+ u32 rx_class3_hits;
+ u32 rx_class4_hits;
+ u32 rx_class5_hits;
+ u32 rx_class6_hits;
+ u32 rx_class7_hits;
+ u32 rx_class8_hits;
+ u32 rx_class9_hits;
+ u32 rx_class10_hits;
+ u32 rx_class11_hits;
+ u32 rx_class12_hits;
+ u32 rx_class13_hits;
+ u32 rx_class14_hits;
+ u32 rx_class15_hits;
+ u32 rx_smd_frags;
+ u32 rx_bucket1_size;
+ u32 rx_bucket2_size;
+ u32 rx_bucket3_size;
+ u32 rx_bucket4_size;
+ u32 rx_64B_frames;
+ u32 rx_bucket1_frames;
+ u32 rx_bucket2_frames;
+ u32 rx_bucket3_frames;
+ u32 rx_bucket4_frames;
+ u32 rx_bucket5_frames;
+ u32 rx_total_bytes;
+ u32 rx_tx_total_bytes;
+ /* Tx */
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_odd_nibble_frames;
+ u32 tx_underflow_errors;
+ u32 tx_frame_max_size;
+ u32 tx_max_size_error_frames;
+ u32 tx_frame_min_size;
+ u32 tx_min_size_error_frames;
+ u32 tx_bucket1_size;
+ u32 tx_bucket2_size;
+ u32 tx_bucket3_size;
+ u32 tx_bucket4_size;
+ u32 tx_64B_frames;
+ u32 tx_bucket1_frames;
+ u32 tx_bucket2_frames;
+ u32 tx_bucket3_frames;
+ u32 tx_bucket4_frames;
+ u32 tx_bucket5_frames;
+ u32 tx_total_bytes;
+};
+
+#define ICSSG_STATS(field) \
+{ \
+ #field, \
+ offsetof(struct miig_stats_regs, field), \
+}
+
+struct icssg_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 offset;
+};
+
+static const struct icssg_stats icssg_ethtool_stats[] = {
+ /* Rx */
+ ICSSG_STATS(rx_good_frames),
+ ICSSG_STATS(rx_broadcast_frames),
+ ICSSG_STATS(rx_multicast_frames),
+ ICSSG_STATS(rx_crc_error_frames),
+ ICSSG_STATS(rx_mii_error_frames),
+ ICSSG_STATS(rx_odd_nibble_frames),
+ ICSSG_STATS(rx_frame_max_size),
+ ICSSG_STATS(rx_max_size_error_frames),
+ ICSSG_STATS(rx_frame_min_size),
+ ICSSG_STATS(rx_min_size_error_frames),
+ ICSSG_STATS(rx_overrun_frames),
+ ICSSG_STATS(rx_class0_hits),
+ ICSSG_STATS(rx_class1_hits),
+ ICSSG_STATS(rx_class2_hits),
+ ICSSG_STATS(rx_class3_hits),
+ ICSSG_STATS(rx_class4_hits),
+ ICSSG_STATS(rx_class5_hits),
+ ICSSG_STATS(rx_class6_hits),
+ ICSSG_STATS(rx_class7_hits),
+ ICSSG_STATS(rx_class8_hits),
+ ICSSG_STATS(rx_class9_hits),
+ ICSSG_STATS(rx_class10_hits),
+ ICSSG_STATS(rx_class11_hits),
+ ICSSG_STATS(rx_class12_hits),
+ ICSSG_STATS(rx_class13_hits),
+ ICSSG_STATS(rx_class14_hits),
+ ICSSG_STATS(rx_class15_hits),
+ ICSSG_STATS(rx_smd_frags),
+ ICSSG_STATS(rx_bucket1_size),
+ ICSSG_STATS(rx_bucket2_size),
+ ICSSG_STATS(rx_bucket3_size),
+ ICSSG_STATS(rx_bucket4_size),
+ ICSSG_STATS(rx_64B_frames),
+ ICSSG_STATS(rx_bucket1_frames),
+ ICSSG_STATS(rx_bucket2_frames),
+ ICSSG_STATS(rx_bucket3_frames),
+ ICSSG_STATS(rx_bucket4_frames),
+ ICSSG_STATS(rx_bucket5_frames),
+ ICSSG_STATS(rx_total_bytes),
+ ICSSG_STATS(rx_tx_total_bytes),
+ /* Tx */
+ ICSSG_STATS(tx_good_frames),
+ ICSSG_STATS(tx_broadcast_frames),
+ ICSSG_STATS(tx_multicast_frames),
+ ICSSG_STATS(tx_odd_nibble_frames),
+ ICSSG_STATS(tx_underflow_errors),
+ ICSSG_STATS(tx_frame_max_size),
+ ICSSG_STATS(tx_max_size_error_frames),
+ ICSSG_STATS(tx_frame_min_size),
+ ICSSG_STATS(tx_min_size_error_frames),
+ ICSSG_STATS(tx_bucket1_size),
+ ICSSG_STATS(tx_bucket2_size),
+ ICSSG_STATS(tx_bucket3_size),
+ ICSSG_STATS(tx_bucket4_size),
+ ICSSG_STATS(tx_64B_frames),
+ ICSSG_STATS(tx_bucket1_frames),
+ ICSSG_STATS(tx_bucket2_frames),
+ ICSSG_STATS(tx_bucket3_frames),
+ ICSSG_STATS(tx_bucket4_frames),
+ ICSSG_STATS(tx_bucket5_frames),
+ ICSSG_STATS(tx_total_bytes),
+};
+
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ strlcpy(info->driver, dev_driver_string(prueth->dev),
+ sizeof(info->driver));
+ /* TODO: info->fw_version */
+ strlcpy(info->bus_info, dev_name(prueth->dev), sizeof(info->bus_info));
+}
+
+static u32 emac_get_msglevel(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return emac->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac->msg_enable = value;
+}
+
+static int emac_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(emac->phydev, ecmd);
+ return 0;
+}
+
+static int emac_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev || phy_is_pseudo_fixed_link(emac->phydev))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(emac->phydev, ecmd);
+}
+
+static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev || phy_is_pseudo_fixed_link(emac->phydev))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_get_eee(emac->phydev, edata);
+}
+
+static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev || phy_is_pseudo_fixed_link(emac->phydev))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_eee(emac->phydev, edata);
+}
+
+static int emac_nway_reset(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev || phy_is_pseudo_fixed_link(emac->phydev))
+ return -EOPNOTSUPP;
+
+ return genphy_restart_aneg(emac->phydev);
+}
+
+/* Ethtool priv_flags for IET/Frame Preemption configuration.
+ * TODO: This is a temporary solution until upstream interface
+ * is available.
+ */
+static const char emac_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
+#define EMAC_PRIV_IET_FRAME_PREEMPTION BIT(0)
+ "iet-frame-preemption",
+#define EMAC_PRIV_IET_MAC_VERIFY BIT(1)
+ "iet-mac-verify",
+};
+
+static int emac_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(icssg_ethtool_stats);
+ case ETH_SS_PRIV_FLAGS:
+ if (!prueth->is_sr1)
+ return ARRAY_SIZE(emac_ethtool_priv_flags);
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(icssg_ethtool_stats); i++) {
+ memcpy(p, icssg_ethtool_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ if (prueth->is_sr1)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_priv_flags); i++) {
+ memcpy(p, emac_ethtool_priv_flags[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void emac_update_hardware_stats(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 base = stats_base[slice];
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(icssg_ethtool_stats); i++) {
+ regmap_read(prueth->miig_rt,
+ base + icssg_ethtool_stats[i].offset,
+ &val);
+ regmap_write(prueth->miig_rt,
+ base + icssg_ethtool_stats[i].offset,
+ val);
+
+ emac->stats[i] += val;
+ }
+}
+
+void emac_stats_work_handler(struct work_struct *work)
+{
+ struct prueth_emac *emac = container_of(work, struct prueth_emac,
+ stats_work.work);
+ emac_update_hardware_stats(emac);
+
+ queue_delayed_work(system_long_wq, &emac->stats_work,
+ msecs_to_jiffies(STATS_TIME_LIMIT_MS / emac->speed));
+}
+
+void emac_ethtool_stats_init(struct prueth_emac *emac)
+{
+ if (!emac->stats) {
+ struct device *dev = emac->prueth->dev;
+
+ emac->stats = devm_kcalloc(dev, ARRAY_SIZE(icssg_ethtool_stats),
+ sizeof(*emac->stats), GFP_KERNEL);
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int i;
+
+ emac_update_hardware_stats(emac);
+
+ for (i = 0; i < ARRAY_SIZE(icssg_ethtool_stats); i++)
+ data[i] = emac->stats[i];
+}
+
+static int emac_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void emac_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ ch->max_rx = 1;
+ /* SR1 uses a high priority channel for management messages */
+ ch->max_tx = emac->is_sr1 ? PRUETH_MAX_TX_QUEUES - 1 :
+ PRUETH_MAX_TX_QUEUES;
+ ch->rx_count = 1;
+ ch->tx_count = emac->is_sr1 ? emac->tx_ch_num - 1 :
+ emac->tx_ch_num;
+}
+
+static int emac_set_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ /* verify we have at least one channel in each direction */
+ /* TODO: remove below check before sending to LKML */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ /* The number of queues can only be changed while the
+ * interface is down.
+ */
+ if (netif_running(emac->ndev))
+ return -EBUSY;
+
+ emac->tx_ch_num = ch->tx_count;
+ /* reserve the highest channel number for management messaging on SR1 */
+ if (emac->is_sr1)
+ emac->tx_ch_num++;
+
+ return 0;
+}
+
+/* TODO: This is temporary until a formal ethtool interface to configure
+ * IET FPE becomes available upstream.
+ */
+static u32 emac_get_ethtool_priv_flags(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+ u32 priv_flags = 0;
+
+ if (emac->is_sr1)
+ return priv_flags;
+
+ /* Port specific flags */
+ if (iet->fpe_configured)
+ priv_flags |= EMAC_PRIV_IET_FRAME_PREEMPTION;
+ if (iet->mac_verify_configured)
+ priv_flags |= EMAC_PRIV_IET_MAC_VERIFY;
+
+ return priv_flags;
+}
+
+static int emac_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+ int iet_fpe, mac_verify;
+
+ if (emac->is_sr1)
+ return -EOPNOTSUPP;
+
+ iet_fpe = !!(flags & EMAC_PRIV_IET_FRAME_PREEMPTION);
+ mac_verify = !!(flags & EMAC_PRIV_IET_MAC_VERIFY);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (emac->tx_ch_num < 2 && iet_fpe) {
+ netdev_err(ndev, "IET fpe needs at least 2 h/w queues\n");
+ return -EINVAL;
+ }
+
+ if (mac_verify && (!iet->fpe_configured && !iet_fpe)) {
+ netdev_err(ndev, "Enable IET FPE for IET MAC verify\n");
+ return -EINVAL;
+ }
+
+ iet->fpe_configured = iet_fpe;
+ iet->mac_verify_configured = mac_verify;
+
+ return 0;
+}
+
+const struct ethtool_ops icssg_ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_msglevel = emac_get_msglevel,
+ .set_msglevel = emac_set_msglevel,
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+ .get_ts_info = emac_get_ts_info,
+ .get_priv_flags = emac_get_ethtool_priv_flags,
+ .set_priv_flags = emac_set_ethtool_priv_flags,
+
+ .get_channels = emac_get_channels,
+ .set_channels = emac_set_channels,
+ .get_link_ksettings = emac_get_link_ksettings,
+ .set_link_ksettings = emac_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_eee = emac_get_eee,
+ .set_eee = emac_set_eee,
+ .nway_reset = emac_nway_reset,
+};
diff --git a/drivers/net/ethernet/ti/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg_mii_cfg.c
new file mode 100644
index 000000000000..8687fa3b5163
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_mii_cfg.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include "icss_mii_rt.h"
+#include "icssg_prueth.h"
+
+void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg)
+{
+ u32 val;
+
+ if (mii == ICSS_MII0) {
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, ipg);
+ } else {
+ /* Errata workaround: IEP1 is not read by h/w unless IEP0 is written */
+ regmap_read(mii_rt, PRUSS_MII_RT_TX_IPG0, &val);
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG1, ipg);
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, val);
+ }
+}
+
+void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu)
+{
+ mtu += (ETH_HLEN + ETH_FCS_LEN);
+ if (mii == ICSS_MII0) {
+ regmap_update_bits(mii_rt,
+ PRUSS_MII_RT_RX_FRMS0,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ } else {
+ regmap_update_bits(mii_rt,
+ PRUSS_MII_RT_RX_FRMS1,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ }
+}
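+
+/* e.g. an MTU of 1500 becomes a max frame size of 1518 once ETH_HLEN
+ * and ETH_FCS_LEN are added, so 1517 is programmed (the field holds
+ * max frame size minus one).
+ */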
+
+void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
+{
+ u32 gig_en_mask, gig_val = 0, full_duplex_mask, full_duplex_val = 0;
+ int slice = prueth_emac_slice(emac);
+ u32 inband_en_mask, inband_val = 0;
+
+ gig_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_GIG_EN_MII0 :
+ RGMII_CFG_GIG_EN_MII1;
+ if (emac->speed == SPEED_1000)
+ gig_val = gig_en_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, gig_en_mask, gig_val);
+
+ inband_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_INBAND_EN_MII0 :
+ RGMII_CFG_INBAND_EN_MII1;
+ if (emac->speed == SPEED_10 && phy_interface_mode_is_rgmii(emac->phy_if))
+ inband_val = inband_en_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, inband_en_mask, inband_val);
+
+ full_duplex_mask = (slice == ICSS_MII0) ? RGMII_CFG_FULL_DUPLEX_MII0 :
+ RGMII_CFG_FULL_DUPLEX_MII1;
+ if (emac->duplex == DUPLEX_FULL)
+ full_duplex_val = full_duplex_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask,
+ full_duplex_val);
+}
+
+void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if)
+{
+ u32 val, mask, shift;
+
+ mask = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE : ICSSG_CFG_MII1_MODE;
+ shift = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE_SHIFT : ICSSG_CFG_MII1_MODE_SHIFT;
+
+ val = MII_MODE_RGMII;
+ if (phy_if == PHY_INTERFACE_MODE_MII)
+ val = MII_MODE_MII;
+
+ val <<= shift;
+ regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, mask, val);
+ regmap_read(miig_rt, ICSSG_CFG_OFFSET, &val);
+}
+
+u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift)
+{
+ u32 val;
+
+ regmap_read(miig_rt, RGMII_CFG_OFFSET, &val);
+ val &= mask;
+ val >>= shift;
+
+ return val;
+}
+
+u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii)
+{
+ u32 shift = RGMII_CFG_SPEED_MII0_SHIFT, mask = RGMII_CFG_SPEED_MII0;
+
+ if (mii == ICSS_MII1) {
+ shift = RGMII_CFG_SPEED_MII1_SHIFT;
+ mask = RGMII_CFG_SPEED_MII1;
+ }
+
+ return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
+}
+
+u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
+{
+ u32 shift = RGMII_CFG_FULLDUPLEX_MII0_SHIFT;
+ u32 mask = RGMII_CFG_FULLDUPLEX_MII0;
+
+ if (mii == ICSS_MII1) {
+ shift = RGMII_CFG_FULLDUPLEX_MII1_SHIFT;
+ mask = RGMII_CFG_FULLDUPLEX_MII1;
+ }
+
+ return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
+}
diff --git a/drivers/net/ethernet/ti/icssg_prueth.c b/drivers/net/ethernet/ti/icssg_prueth.c
new file mode 100644
index 000000000000..b4beb2239a69
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_prueth.c
@@ -0,0 +1,3638 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/genalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "icssg_prueth.h"
+#include "icssg_switchdev.h"
+#include "icss_mii_rt.h"
+#include "k3-cppi-desc-pool.h"
+
+#define PRUETH_MODULE_VERSION "0.1"
+#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
+
+/* MAX MTU set to match MII_G_RT_RX_STAT_MAX_SIZE_PRU0/1,
+ * MII_G_RT_TX_STAT_MAX_SIZE_PORT0/1 defaults
+ */
+#define PRUETH_MAX_MTU (2000 - ETH_HLEN - ETH_FCS_LEN)
+#define PRUETH_MIN_PKT_SIZE (VLAN_ETH_ZLEN)
+#define PRUETH_MAX_PKT_SIZE (PRUETH_MAX_MTU + ETH_HLEN + ETH_FCS_LEN)
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi)
+
+/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
+#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
+static int debug_level = -1;
+module_param(debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "PRUETH debug level (NETIF_MSG bits)");
+
+static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows)
+{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
+ if (rx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (rx_chn->rx_chn)
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+
+static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (tx_chn->tx_chn)
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ /* Assume prueth_cleanup_tx_chns() is called at the
+ * end after all channel resources are freed
+ */
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->irq)
+ free_irq(tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+}
+
+static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ swdata = cppi5_hdesc_get_swdata(desc);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page_pool_recycle_direct(swdata->rx_chn->pg_pool,
+ swdata->data.page);
+ goto free_desc;
+ }
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+free_desc:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_tx;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ unsigned int total_bytes = 0;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &emac->tx_chns[chn];
+
+ while (budget--) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ /* teardown completion */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&emac->tdown_cnt))
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ /* was this command's TX complete? */
+ if (emac->is_sr1 && swdata->type == PRUETH_SWDATA_CMD) {
+ prueth_xmit_free(tx_chn, desc_tx);
+ budget++; /* not a data packet */
+ continue;
+ }
+
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ ndev->stats.tx_bytes += xdpf->len;
+ ndev->stats.tx_packets++;
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
+ prueth_xmit_free(tx_chn, desc_tx);
+ budget++;
+ continue;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the TX queue was stopped, wake it now
+ * if we have enough room.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+ __netif_tx_unlock(netif_txq);
+ }
+
+ return num_tx;
+}
+
+static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
+ struct prueth_emac *emac = tx_chn->emac;
+ int num_tx_packets;
+
+ num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);
+
+ if (num_tx_packets < budget) {
+ napi_complete(napi_tx);
+ enable_irq(tx_chn->irq);
+ }
+
+ return num_tx_packets;
+}
+
+static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
+{
+ struct prueth_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int i, ret;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ netif_tx_napi_add(emac->ndev, &tx_chn->napi_tx,
+ emac_napi_tx_poll, NAPI_POLL_WEIGHT);
+ ret = request_irq(tx_chn->irq, prueth_tx_irq,
+ IRQF_TRIGGER_HIGH, tx_chn->name,
+ tx_chn);
+ if (ret) {
+ netif_napi_del(&tx_chn->napi_tx);
+ dev_err(prueth->dev, "unable to request TX IRQ %d\n",
+ tx_chn->irq);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ prueth_ndev_del_tx_napi(emac, i);
+ return ret;
+}
+
+static int prueth_init_tx_chns(struct prueth_emac *emac)
+{
+ struct net_device *ndev = emac->ndev;
+ struct device *dev = emac->prueth->dev;
+ struct k3_udma_glue_tx_channel_cfg tx_cfg;
+ static const struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ .size = PRUETH_MAX_TX_DESC,
+ };
+ int ret, slice, i;
+ u32 hdesc_size;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ init_completion(&emac->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&tx_cfg, 0, sizeof(tx_cfg));
+ tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(tx_chn->name, sizeof(tx_chn->name),
+ "tx%d-%d", slice, i);
+
+ tx_chn->emac = emac;
+ tx_chn->id = i;
+ tx_chn->descs_num = PRUETH_MAX_TX_DESC;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev, tx_chn->name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ tx_chn->tx_chn = NULL;
+ netdev_err(ndev,
+ "Failed to request tx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ tx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ ret = -EINVAL;
+ netdev_err(ndev, "failed to get tx irq\n");
+ goto fail;
+ }
+
+ snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_tx_chns(emac);
+ return ret;
+}
+
+static int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num)
+{
+ struct net_device *ndev = emac->ndev;
+ struct device *dev = emac->prueth->dev;
+ struct k3_udma_glue_rx_channel_cfg rx_cfg;
+ u32 fdqring_id;
+ u32 hdesc_size;
+ int i, ret = 0, slice;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&rx_cfg, 0, sizeof(rx_cfg));
+ rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = max_rflows;
+ rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
+ &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ rx_chn->rx_chn = NULL;
+ netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size,
+ rx_chn->name);
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ rx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
+ goto fail;
+ }
+
+ if (!strncmp(name, "rxmgm", 5)) {
+ emac->rx_mgm_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ netdev_dbg(ndev, "mgm flow id base = %d\n",
+ emac->rx_mgm_flow_id_base);
+ } else {
+ emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ netdev_dbg(ndev, "flow id base = %d\n",
+ emac->rx_flow_id_base);
+ }
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ netdev_err(ndev, "Failed to init rx flow%d %d\n",
+ i, ret);
+ goto fail;
+ }
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+ rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (rx_chn->irq[i] <= 0) {
+ ret = -EINVAL;
+ netdev_err(ndev, "Failed to get rx dma irq\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+ return ret;
+}
+
+static int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+ swdata->rx_chn = rx_chn;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ desc_rx, desc_dma);
+}
+
+static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+
+static void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (emac->is_sr1) {
+ ns = (u64)psdata[1] << 32 | psdata[0];
+ } else {
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+static unsigned int prueth_rxbuf_total_len(unsigned int len)
+{
+ len += PRUETH_HEADROOM;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
+static int emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page);
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, int *xdp_state)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ u32 *psdata;
+ void *pa;
+ int ret;
+
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ return 0;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ return 0;
+ }
+ page = swdata->data.page;
+
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ if (!netif_running(ndev)) {
+ page_pool_recycle_direct(pool, page);
+ return 0;
+ }
+
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with old page to prevent a stall
+ */
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (emac->xdp_prog) {
+ /* xdp_init_buff(&xdp, PAGE_SIZE, rx_chn->xdp_rxq); */
+ xdp.frame_sz = PAGE_SIZE;
+ xdp.rxq = &rx_chn->xdp_rxq;
+
+ /* xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); */
+ xdp.data_hard_start = pa;
+ xdp.data = pa + PRUETH_HEADROOM;
+ xdp.data_end = xdp.data + pkt_len;
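+ /* data_meta > data marks metadata as unsupported for this buffer */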
+ xdp.data_meta = xdp.data + 1;
+
+ *xdp_state = emac_run_xdp(emac, &xdp, page);
+ if (*xdp_state != ICSSG_XDP_PASS)
+ goto requeue;
+ }
+
+ /* prepare skb and send to n/w stack */
+ skb = build_skb(pa, prueth_rxbuf_total_len(pkt_len));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb_reserve(skb, PRUETH_HEADROOM);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* unmap page as no recycling of netstack skb page */
+ page_pool_release_page(pool, page);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ /* queue another RX DMA */
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
+ if (WARN_ON(ret < 0)) {
+ page_pool_recycle_direct(pool, new_page);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct page *page;
+
+ pool = rx_chn->pg_pool;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+}
+
+static int emac_get_tx_ts(struct prueth_emac *emac,
+ struct emac_tx_ts_response *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* return buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
+
+ return 0;
+}
+
+/* TODO: Convert this to use worker/workqueue mechanism to serialize the
+ * request to firmware
+ */
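+/* The command word is pushed on the highest priority TX channel; the
+ * firmware's reply arrives on the management RX flow and completes
+ * @cmd_complete from prueth_rx_mgm_rsp_thread().
+ */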
+static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
+{
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_tx_chn *tx_chn;
+ struct cppi5_host_desc_t *first_desc;
+ u32 pkt_len = sizeof(emac->cmd_data);
+ struct prueth_swdata *swdata;
+ u32 *data = emac->cmd_data;
+ int ret = 0;
+ u32 *epib;
+
+ netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
+
+ /* only one command at a time allowed to firmware */
+ mutex_lock(&emac->cmd_lock);
+ data[0] = cpu_to_le32(cmd);
+
+ /* highest priority channel for management messages */
+ tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = cmd;
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ /* send command */
+ reinit_completion(&emac->cmd_complete);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
+ goto free_desc;
+ }
+ ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
+ if (!ret) {
+ netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+free_desc:
+ prueth_xmit_free(tx_chn, first_desc);
+err_unlock:
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+}
+
+static void emac_change_port_speed_duplex(struct prueth_emac *emac)
+{
+ u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD, val;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ /* only full duplex supported for now */
+ if (emac->duplex != DUPLEX_FULL)
+ return;
+
+ if (!emac->is_sr1)
+ return;
+
+ val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
+ /* firmware expects speed settings in bits 2-1 */
+ val <<= 1;
+ cmd |= val;
+
+ val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
+ /* firmware expects full duplex settings in bit 3 */
+ val <<= 3;
+ cmd |= val;
+
+ emac_send_command_sr1(emac, cmd);
+}
+
+static int emac_shutdown(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ /* FIXME for SR2.0 */
+ if (!emac->is_sr1)
+ return 0;
+
+ return emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD);
+}
+
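+/* TX timestamp flow: emac_ndo_start_xmit() reserves a slot in
+ * tx_ts_skb[] via prueth_tx_ts_cookie_get() and passes the slot index
+ * (the cookie) to the firmware in the descriptor EPIB. The firmware
+ * echoes the cookie back with the timestamp, which is matched to the
+ * saved skb below.
+ */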
+static void tx_ts_work(struct prueth_emac *emac)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ int ret = 0;
+ struct emac_tx_ts_response tsr;
+ u32 hi_sw;
+
+ /* There may be more than one pending request */
+ while (1) {
+ ret = emac_get_tx_ts(emac, &tsr);
+ if (ret) /* nothing more */
+ break;
+
+ if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
+ !emac->tx_ts_skb[tsr.cookie]) {
+ netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ tsr.cookie);
+ break;
+ }
+
+ skb = emac->tx_ts_skb[tsr.cookie];
+ emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
+ if (!skb) {
+ netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
+ break;
+ }
+
+ hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+
+ if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
+ break;
+ }
+}
+
+int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * the EMAC hardware transmit queue. We don't wait for completion here;
+ * TX completions are handled in emac_tx_complete_packets().
+ *
+ * Returns success (NETDEV_TX_OK) or an error code (typically out of
+ * descriptors).
+ */
+static int emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ int i, ret = 0, q_idx;
+ bool in_tx_ts = false;
+ int tx_ts_cookie;
+ u32 pkt_len;
+ u32 *epib;
+
+ pkt_len = skb_headlen(skb);
+ q_idx = skb_get_queue_mapping(skb);
+
+ tx_chn = &emac->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: failed to map skb buffer\n");
+ ret = -EINVAL;
+ goto drop_stop_q;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ ret = -ENOMEM;
+ goto drop_stop_q_busy;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = true;
+ }
+ }
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
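+ /* e.g. port_id 1 with q_idx 2 yields dst tag 0x0201 */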
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
+
+ if (!skb_is_nonlinear(skb))
+ goto tx_push;
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ netdev_err(ndev,
+ "tx: failed to allocate frag. descriptor\n");
+ ret = -ENOMEM;
+ goto cleanup_tx_ts;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: Failed to map skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ret = -EINVAL;
+ goto cleanup_tx_ts;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON(pkt_len != skb->len);
+
+tx_push:
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ /* cppi5_desc_dump(first_desc, 64); */
+
+ skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS)
+ netif_tx_wake_queue(netif_txq);
+ }
+
+ return NETDEV_TX_OK;
+
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+drop_stop_q:
+ netif_tx_stop_queue(netif_txq);
+ dev_kfree_skb_any(skb);
+
+ /* error */
+ ndev->stats.tx_dropped++;
+ netdev_err(ndev, "tx: error: %d\n", ret);
+
+ return ret;
+
+drop_stop_q_busy:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
+static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
+}
+
+static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ /* currently only TX timestamp is being returned */
+ tx_ts_work(emac);
+
+ return IRQ_HANDLED;
+}
+
+/* get one packet from requested flow_id
+ *
+ * Returns a page pointer if a packet was found, else NULL.
+ * The caller must recycle or release the returned page.
+ */
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
+ struct page_pool *pool = rx_chn->pg_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
+ return NULL;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
+ return NULL;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ /* Fix FW bug about incorrect PSDATA size */
+ if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
+ cppi5_hdesc_update_psdata_size(desc_rx,
+ PRUETH_NAV_PS_DATA_SIZE);
+ }
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ page = swdata->data.page;
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with old page to prevent a stall
+ */
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ netdev_err(ndev,
+ "page alloc failed, dropped mgm pkt from flow %d\n",
+ flow_id);
+ new_page = page;
+ page = NULL;
+ }
+
+ /* queue another DMA */
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_mgm_chn, new_page,
+ PRUETH_MAX_PKT_SIZE);
+ if (WARN_ON(ret < 0))
+ page_pool_recycle_direct(pool, new_page);
+
+ return page;
+}
+
+static void prueth_tx_ts_sr1(struct prueth_emac *emac,
+ struct emac_tx_ts_response_sr1 *tsr)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+
+ ns = (u64)tsr->hi_ts << 32 | tsr->lo_ts;
+
+ if (tsr->cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
+ netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ tsr->cookie);
+ return;
+ }
+
+ skb = emac->tx_ts_skb[tsr->cookie];
+ emac->tx_ts_skb[tsr->cookie] = NULL; /* free slot */
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+}
+
+static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct page *page;
+ void *data;
+
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP);
+ if (!page)
+ return IRQ_NONE;
+
+ data = page_address(page) + PRUETH_HEADROOM;
+ prueth_tx_ts_sr1(emac, (struct emac_tx_ts_response_sr1 *)data);
+ page_pool_recycle_direct(emac->rx_mgm_chn.pg_pool, page);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct page *page;
+ void *data;
+ u32 rsp;
+
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE);
+ if (!page)
+ return IRQ_NONE;
+
+ data = page_address(page) + PRUETH_HEADROOM;
+ /* Process command response */
+ rsp = le32_to_cpu(*(u32 *)data);
+ if ((rsp & 0xffff0000) == ICSSG_SHUTDOWN_CMD) {
+ netdev_dbg(emac->ndev,
+ "f/w Shutdown cmd resp %x\n", rsp);
+ complete(&emac->cmd_complete);
+ } else if ((rsp & 0xffff0000) ==
+ ICSSG_PSTATE_SPEED_DUPLEX_CMD) {
+ netdev_dbg(emac->ndev,
+ "f/w Speed/Duplex cmd rsp %x\n",
+ rsp);
+ complete(&emac->cmd_complete);
+ }
+
+ page_pool_recycle_direct(emac->rx_mgm_chn.pg_pool, page);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&emac->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+struct icssg_firmwares {
+ char *pru;
+ char *rtu;
+ char *txpru;
+};
+
+static struct icssg_firmwares icssg_switch_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
+ }
+};
+
+static struct icssg_firmwares icssg_emac_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
+ }
+};
+
+static struct icssg_firmwares icssg_emac_firmwares_sr1[] = {
+ {
+ .pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
+ }
+};
+
+static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+{
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+ int slice, ret;
+
+ if (prueth->is_switch_mode)
+ firmwares = icssg_switch_firmwares;
+ else if (prueth->is_sr1)
+ firmwares = icssg_emac_firmwares_sr1;
+ else
+ firmwares = icssg_emac_firmwares;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0) {
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ /* Set Load time configuration */
+ if (emac->is_sr1) {
+ icssg_config_sr1(prueth, emac, slice);
+ } else {
+ ret = icssg_config_sr2(prueth, emac, slice);
+ if (ret)
+ return ret;
+ }
+
+ ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to set PRU%d firmware: %d\n", slice, ret);
+ return ret;
+ }
+ ret = rproc_boot(prueth->pru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ return -EINVAL;
+ }
+
+ ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to set RTU%d firmware: %d\n", slice, ret);
+ goto halt_pru;
+ }
+ ret = rproc_boot(prueth->rtu[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ goto halt_pru;
+ }
+
+ if (emac->is_sr1)
+ goto done;
+
+ ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to set TX_PRU%d firmware: %d\n", slice, ret);
+ goto halt_rtu;
+ }
+ ret = rproc_boot(prueth->txpru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ goto halt_rtu;
+ }
+
+done:
+ emac->fw_running = 1;
+ return 0;
+
+halt_rtu:
+ rproc_shutdown(prueth->rtu[slice]);
+
+halt_pru:
+ rproc_shutdown(prueth->pru[slice]);
+
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ emac->fw_running = 0;
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
+static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+
+/* called back by the PHY layer if there is a change in link state of the HW port */
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = emac->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ unsigned long flags;
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+ /* defaults for no link */
+
+ /* f/w should support 100 & 1000 */
+ emac->speed = SPEED_1000;
+
+ /* half duplex may not be supported by f/w */
+ emac->duplex = DUPLEX_FULL;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+
+ /* update RGMII and MII configuration based on PHY negotiated
+ * values
+ */
+ if (emac->link) {
+ if (emac->duplex == DUPLEX_HALF)
+ icssg_config_half_duplex(emac);
+ /* Set the RGMII cfg for gig en and full duplex */
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* update the Tx IPG based on 100M/1G speed */
+ spin_lock_irqsave(&emac->lock, flags);
+ icssg_config_ipg(emac);
+ spin_unlock_irqrestore(&emac->lock, flags);
+ icssg_config_set_speed(emac);
+ if (!emac->is_sr1)
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+
+ } else {
+ if (!emac->is_sr1)
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ }
+
+ /* send command to firmware to change speed and duplex
+ * setting when link is up.
+ */
+ if (emac->link) {
+ emac_change_port_speed_duplex(emac);
+ icssg_qos_link_up(ndev);
+ } else {
+ icssg_qos_link_down(ndev);
+ }
+ }
+
+ if (emac->link) {
+ /* link ON */
+ netif_carrier_on(ndev);
+ /* reactivate the transmit queue */
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ /* link OFF */
+ netif_carrier_off(ndev);
+ netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
+ }
+}
+
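+/* NAPI RX poll: drain flows from the highest flow id downwards,
+ * sharing the budget across flows; once less than the full budget is
+ * consumed, complete NAPI and re-enable the data flow RX IRQ.
+ */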
+static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ int num_rx = 0;
+ int flow = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS_SR2;
+ int rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA_SR2;
+ int cur_budget;
+ int ret;
+ int xdp_state;
+ int xdp_state_or = 0;
+
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+ if (num_rx < budget) {
+ napi_complete(napi_rx);
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
+
+ return num_rx;
+}
+
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static struct page *prueth_get_page_from_rx_chn(struct prueth_rx_chn *chn)
+{
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ struct page *page;
+
+ k3_udma_glue_pop_rx_chn(chn->rx_chn, 0, &desc_dma);
+ desc_rx = k3_cppi_desc_pool_dma2virt(chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ page = swdata->data.page;
+
+ return page;
+}
+
+static int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size)
+{
+ struct page_pool *pool;
+ struct page *page;
+ int i, ret, j;
+
+ pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ chn->pg_pool = pool;
+
+ for (i = 0; i < chn->descs_num; i++) {
+ /* NOTE: we're not using memory efficiently here:
+ * a full page (typically 4KB) is used per buffer even though
+ * PRUETH_MAX_PKT_SIZE (~1.5KB) would suffice.
+ */
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
+
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit page for rx chan %s ret %d\n",
+ chn->name, ret);
+ page_pool_recycle_direct(pool, page);
+ goto recycle_alloc_pg;
+ }
+ }
+
+ return 0;
+
+recycle_alloc_pg:
+ for (j = 0; j < i; j++) {
+ page = prueth_get_page_from_rx_chn(chn);
+ page_pool_recycle_direct(pool, page);
+ }
+ page_pool_destroy(pool);
+ chn->pg_pool = NULL;
+
+ return ret;
+}
+
+static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb)
+{
+ int i;
+
+ for (i = 0; i < ch_num; i++) {
+ if (free_skb)
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
+static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable)
+{
+ int i;
+
+ for (i = 0; i < num_flows; i++)
+ k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
+ prueth_rx_cleanup, !!i);
+ if (disable)
+ k3_udma_glue_disable_rx_chn(chn->rx_chn);
+}
+
+u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
+{
+ u32 hi_rollover_count, hi_rollover_count_r;
+ struct prueth_emac *emac = clockops_data;
+ struct prueth *prueth = emac->prueth;
+ void __iomem *fw_hi_r_count_addr;
+ void __iomem *fw_count_hi_addr;
+ u32 iepcount_hi, iepcount_hi_r;
+ unsigned long flags;
+ u32 iepcount_lo;
+ u64 ts = 0;
+
+ fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
+ fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
+
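+ /* Retry, seqlock-style, until the hi count and rollover count
+ * read the same before and after sampling the low count, so that
+ * the snapshot is consistent across a low-counter rollover.
+ */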
+ local_irq_save(flags);
+ do {
+ iepcount_hi = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi += readl(fw_count_hi_addr);
+ hi_rollover_count = readl(fw_hi_r_count_addr);
+ ptp_read_system_prets(sts);
+ iepcount_lo = icss_iep_get_count_low(emac->iep);
+ ptp_read_system_postts(sts);
+
+ iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi_r += readl(fw_count_hi_addr);
+ hi_rollover_count_r = readl(fw_hi_r_count_addr);
+ } while ((iepcount_hi_r != iepcount_hi) ||
+ (hi_rollover_count != hi_rollover_count_r));
+ local_irq_restore(flags);
+
+ ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
+ ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
+
+ return ts;
+}
+
+static void prueth_iep_settime(void *clockops_data, u64 ns)
+{
+ struct icssg_setclock_desc sc_desc, *sc_descp;
+ struct prueth_emac *emac = clockops_data;
+ u64 cyclecount;
+ u32 cycletime;
+ int timeout;
+
+ if (!emac->fw_running)
+ return;
+
+ sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+
+ cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+ cyclecount = ns / cycletime;
+
+ memset(&sc_desc, 0, sizeof(sc_desc));
+ sc_desc.margin = cycletime - 1000;
+ sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
+ sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
+ sc_desc.iepcount_set = ns % cycletime;
+ sc_desc.CMP0_current = cycletime - 4; /* count from 0 to (cycle time) - 4 */
+
+ memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
+
+ writeb(1, &sc_descp->request);
+
+ timeout = 5; /* fw should take 2-3 ms */
+ while (timeout--) {
+ if (readb(&sc_descp->acknowledgment))
+ return;
+
+ usleep_range(500, 1000);
+ }
+
+ dev_err(emac->prueth->dev, "settime timeout\n");
+}
+
+static int prueth_perout_enable(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp)
+{
+ struct prueth_emac *emac = clockops_data;
+ u32 reduction_factor = 0, offset = 0;
+ struct timespec64 ts;
+ u64 ns_period;
+
+ if (!on)
+ return 0;
+
+ /* Any firmware specific stuff for PPS/PEROUT handling */
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ /* f/w doesn't support period less than cycle time */
+ if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
+ return -ENXIO;
+
+ reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
+ offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
+
+ /* f/w requires at least 1uS within a cycle so CMP
+ * can trigger after SYNC is enabled
+ */
+ if (offset < 5 * NSEC_PER_USEC)
+ offset = 5 * NSEC_PER_USEC;
+
+ /* if offset is close to cycle time then we will miss
+ * the CMP event for last tick when IEP rolls over.
+ * In normal mode, IEP tick is 4ns.
+ * In slow compensation it could be 0ns or 8ns at
+ * every slow compensation cycle.
+ */
+ if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
+ offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
+
+ /* we're in shadow mode so need to set upper 32-bits */
+ *cmp = (u64)offset << 32;
+
+ writel(reduction_factor, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
+
+ /* HACK: till f/w supports START_TIME cyclecount we set it to 0 */
+ writel(0, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+
+ return 0;
+}
+
+const struct icss_iep_clockops prueth_iep_clockops = {
+ .settime = prueth_iep_settime,
+ .gettime = prueth_iep_gettime,
+ /* FIXME: add adjtime to use relative mode */
+ .perout_enable = prueth_perout_enable,
+};
+
+static int emac_phy_connect(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+
+ /* connect PHY */
+ emac->phydev = of_phy_connect(emac->ndev, emac->phy_node,
+ &emac_adjust_link, 0, emac->phy_if);
+ if (!emac->phydev) {
+ dev_err(prueth->dev, "couldn't connect to phy %s\n",
+ emac->phy_node->full_name);
+ return -ENODEV;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII)
+ phy_set_max_speed(emac->phydev, SPEED_100);
+
+ return 0;
+}
+
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+/**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when system wants to start the interface.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret, i, num_data_chn = emac->tx_ch_num;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct device *dev = prueth->dev;
+ int max_rx_flows;
+ int rx_flow;
+
+ /* clear SMEM and MSMC settings for all slices */
+ if (!prueth->emacs_initialized) {
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+ }
+
+ if (emac->is_sr1) {
+ /* For SR1, high priority channel is used exclusively for
+ * management messages, so reduce the number of data channels.
+ */
+ num_data_chn--;
+ }
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+ if (!emac->is_sr1)
+ icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+ icssg_class_default(prueth->miig_rt, slice, 0, emac->is_sr1);
+
+ netif_carrier_off(ndev);
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
+ if (ret) {
+ dev_err(dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ init_completion(&emac->cmd_complete);
+ ret = prueth_init_tx_chns(emac);
+ if (ret) {
+ dev_err(dev, "failed to init tx channel: %d\n", ret);
+ return ret;
+ }
+
+ max_rx_flows = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS_SR2;
+ ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
+ max_rx_flows, PRUETH_MAX_RX_DESC);
+ if (ret) {
+ dev_err(dev, "failed to init rx channel: %d\n", ret);
+ goto cleanup_tx;
+ }
+
+ if (emac->is_sr1) {
+ ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
+ PRUETH_MAX_RX_MGM_FLOWS,
+ PRUETH_MAX_RX_MGM_DESC);
+ if (ret) {
+ dev_err(dev, "failed to init rx mgmt channel: %d\n",
+ ret);
+ goto cleanup_rx;
+ }
+ }
+
+ ret = prueth_ndev_add_tx_napi(emac);
+ if (ret)
+ goto cleanup_rx_mgm;
+
+ /* we use only the highest priority flow for now i.e. @irq[3] */
+ rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA_SR2;
+ ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX IRQ\n");
+ goto cleanup_napi;
+ }
+
+ if (!emac->is_sr1)
+ goto skip_mgm_irq;
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE],
+ NULL, prueth_rx_mgm_rsp_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management RSP IRQ\n");
+ goto free_rx_irq;
+ }
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP],
+ NULL, prueth_rx_mgm_ts_thread_sr1,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management TS IRQ\n");
+ goto free_rx_mgm_rsp_irq;
+ }
+
+skip_mgm_irq:
+ /* reset and start PRU firmware */
+ ret = prueth_emac_start(prueth, emac);
+ if (ret)
+ goto free_rx_mgmt_ts_irq;
+
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
+ if (!emac->is_sr1 && !prueth->emacs_initialized) {
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ dev_err(dev, "failed to init iep: %d\n", ret);
+ goto stop;
+ }
+ }
+
+ if (!emac->is_sr1) {
+ ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ IRQF_ONESHOT, dev_name(dev), emac);
+ if (ret)
+ goto stop;
+ }
+
+ /* Prepare RX */
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ goto free_rx_ts_irq;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ if (emac->is_sr1) {
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+ }
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_mgm_chn;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ }
+
+ /* Enable NAPI in Tx and Rx direction */
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ napi_enable(&emac->napi_rx);
+
+ icssg_qos_init(ndev);
+
+ /* start PHY */
+ phy_start(emac->phydev);
+
+ prueth->emacs_initialized++;
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "started\n");
+
+ if (prueth->is_switch_mode) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
+ BIT(emac->port_id), true);
+ icssg_set_pvid(emac->prueth, emac->port_vlan, emac->port_id);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+
+ queue_work(system_long_wq, &emac->stats_work.work);
+
+ return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB for completion. So pass free_skb as false.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+reset_rx_mgm_chn:
+ if (emac->is_sr1)
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS, true);
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_xdp_rxqs:
+ prueth_destroy_xdp_rxqs(emac);
+free_rx_ts_irq:
+ if (!emac->is_sr1)
+ free_irq(emac->tx_ts_irq, emac);
+stop:
+ prueth_emac_stop(emac);
+free_rx_mgmt_ts_irq:
+ if (emac->is_sr1)
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP],
+ emac);
+free_rx_mgm_rsp_irq:
+ if (emac->is_sr1)
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE],
+ emac);
+free_rx_irq:
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+cleanup_napi:
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+cleanup_rx_mgm:
+ if (emac->is_sr1)
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS);
+cleanup_rx:
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+cleanup_tx:
+ prueth_cleanup_tx_chns(emac);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret, i;
+ int max_rx_flows;
+ int rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA_SR2;
+
+ /* inform the upper layers. */
+ netif_tx_stop_all_queues(ndev);
+
+ /* block packets from wire */
+ phy_stop(emac->phydev);
+
+ icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+
+ /* send shutdown command */
+ emac_shutdown(ndev);
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(ndev, "tx teardown timeout\n");
+
+ prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_disable(&emac->tx_chns[i].napi_tx);
+
+ max_rx_flows = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS_SR2;
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+ if (emac->is_sr1) {
+ /* Teardown RX MGM channel */
+ k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS, true);
+ }
+
+ prueth_destroy_xdp_rxqs(emac);
+
+ napi_disable(&emac->napi_rx);
+
+ if (!emac->is_sr1 && prueth->emacs_initialized == 1)
+ icss_iep_exit(emac->iep);
+
+ cancel_work_sync(&emac->rx_mode_work);
+
+ /* destroy the queued stats work in ndo_stop() */
+ cancel_delayed_work_sync(&emac->stats_work);
+
+ /* stop PRUs */
+ prueth_emac_stop(emac);
+
+ if (!emac->is_sr1)
+ free_irq(emac->tx_ts_irq, emac);
+
+ if (emac->is_sr1) {
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP],
+ emac);
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE],
+ emac);
+ }
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+
+ if (emac->is_sr1)
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_tx_chns(emac);
+
+ prueth->emacs_initialized--;
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (netif_msg_tx_err(emac))
+ netdev_err(ndev, "xmit timeout\n");
+
+ ndev->stats.tx_errors++;
+
+ /* TODO: can we recover or need to reboot firmware? */
+}
+
+static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ bool promisc = ndev->flags & IFF_PROMISC;
+ bool allmulti = ndev->flags & IFF_ALLMULTI;
+
+ if (promisc) {
+ icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
+ return;
+ }
+
+ if (allmulti) {
+ icssg_class_default(prueth->miig_rt, slice, 1, emac->is_sr1);
+ return;
+ }
+
+ icssg_class_default(prueth->miig_rt, slice, 0, emac->is_sr1);
+ if (!netdev_mc_empty(ndev)) {
+ /* program multicast address list into Classifier */
+ icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
+ return;
+ }
+}
+
+static void emac_ndo_set_rx_mode_work(struct work_struct *work)
+{
+ struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
+ struct net_device *ndev = emac->ndev;
+ bool promisc, allmulti;
+
+ if (!(ndev->flags & IFF_UP))
+ return;
+
+ promisc = ndev->flags & IFF_PROMISC;
+ allmulti = ndev->flags & IFF_ALLMULTI;
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+
+ if (promisc) {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+
+ if (allmulti) {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+
+ if (!netdev_mc_empty(ndev)) {
+ /* TODO: Add FDB entries for multicast. till then enable allmulti */
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+}
+
+/**
+ * emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when system wants to set the receive mode of the device.
+ *
+ */
+static void emac_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ if (prueth->is_sr1) {
+ emac_ndo_set_rx_mode_sr1(ndev);
+ return;
+ }
+
+ queue_work(emac->cmd_wq, &emac->rx_mode_work);
+}
+
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
+ if (!emac->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(emac->phydev, ifr, cmd);
+}
+
+static struct devlink_port *emac_ndo_get_devlink_port(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return &emac->devlink_port;
+}
+
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @page: page from page pool if already DMA mapped
+ * @q_idx: queue id
+ **/
+static int emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ u32 *epib;
+ int ret;
+
+ void *data = xdpf->data;
+ u32 pkt_len = xdpf->len;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ if (page) { /* already DMA mapped by page_pool */
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ if (!page)
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (page) {
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+ /* we assume page came from RX channel page pool */
+ swdata->rx_chn = &emac->rx_chns;
+ } else {
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+ }
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
+ *
+ * Returns the number of frames successfully sent. Failed frames
+ * are freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ unsigned int q_idx;
+ int nxmit = 0;
+ int i;
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ if (err != ICSSG_XDP_TX)
+ break;
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @page: page with RX data if already DMA mapped
+ **/
+static int emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page)
+{
+ int err, result = ICSSG_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 act;
+ int q_idx;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+
+ if (!xdp_prog)
+ return result;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+ result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ if (result == ICSSG_XDP_CONSUMED)
+ goto drop;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+ result = ICSSG_XDP_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ result = ICSSG_XDP_CONSUMED;
+ page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: netdev bpf command carrying the XDP program (NULL to remove)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
+ return 0;
+}
+
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @xdp: XDP command
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_ndo_open,
+ .ndo_stop = emac_ndo_stop,
+ .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_set_rx_mode = emac_ndo_set_rx_mode,
+ .ndo_do_ioctl = emac_ndo_ioctl,
+ .ndo_get_devlink_port = emac_ndo_get_devlink_port,
+ .ndo_setup_tc = icssg_qos_ndo_setup_tc,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int prueth_node_port(struct device_node *eth_node)
+{
+ if (!strcmp(eth_node->name, "ethernet-mii0"))
+ return PRUETH_PORT_MII0;
+ else if (!strcmp(eth_node->name, "ethernet-mii1"))
+ return PRUETH_PORT_MII1;
+ else
+ return -EINVAL;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int prueth_node_mac(struct device_node *eth_node)
+{
+ if (!strcmp(eth_node->name, "ethernet-mii0"))
+ return PRUETH_MAC0;
+ else if (!strcmp(eth_node->name, "ethernet-mii1"))
+ return PRUETH_MAC1;
+ else
+ return -EINVAL;
+}
+
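+/* The "syscon-rgmii-delay" property is a <phandle, offset> pair: the
+ * CTRLMMR syscon regmap and the ICSSGn_CTRL register offset within it
+ * where the RGMII TX internal delay mode bit lives; the bit is set
+ * for the RGMII_ID/RGMII_TXID PHY modes and cleared otherwise.
+ */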
+static int prueth_config_rgmiidelay(struct prueth *prueth,
+ struct device_node *eth_np,
+ phy_interface_t phy_if)
+{
+ struct device *dev = prueth->dev;
+ struct regmap *ctrl_mmr;
+ u32 rgmii_tx_id = 0;
+ u32 icssgctrl_reg;
+
+ if (!phy_interface_mode_is_rgmii(phy_if))
+ return 0;
+
+ ctrl_mmr = syscon_regmap_lookup_by_phandle(eth_np, "syscon-rgmii-delay");
+ if (IS_ERR(ctrl_mmr)) {
+ dev_err(dev, "couldn't get syscon-rgmii-delay\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32_index(eth_np, "syscon-rgmii-delay", 1,
+ &icssgctrl_reg)) {
+ dev_err(dev, "couldn't get rgmii-delay reg. offset\n");
+ return -ENODEV;
+ }
+
+ if (phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_tx_id |= ICSSG_CTRL_RGMII_ID_MODE;
+
+ regmap_update_bits(ctrl_mmr, icssgctrl_reg, ICSSG_CTRL_RGMII_ID_MODE, rgmii_tx_id);
+
+ return 0;
+}
+
+extern const struct ethtool_ops icssg_ethtool_ops;
+
+static int prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ const char *irq_name;
+ enum prueth_mac mac;
+ const u8 *mac_addr;
+
+ port = prueth_node_port(eth_node);
+ if (port < 0)
+ return -EINVAL;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac < 0)
+ return -EINVAL;
+
+ /* Use 1 channel for management messages on SR1 */
+ if (prueth->is_sr1)
+ num_tx_chn--;
+
+ ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
+ if (!ndev)
+ return -ENOMEM;
+
+ emac = netdev_priv(ndev);
+ prueth->emac[mac] = emac;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+ emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
+ if (!emac->cmd_wq) {
+ ret = -ENOMEM;
+ goto free_ndev;
+ }
+ INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
+
+ emac_ethtool_stats_init(emac);
+ INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+
+ ret = pruss_request_mem_region(prueth->pruss,
+ port == PRUETH_PORT_MII0 ?
+ PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
+ &emac->dram);
+ if (ret) {
+ dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
+ ret = -ENOMEM;
+ goto free_wq;
+ }
+
+ emac->is_sr1 = prueth->is_sr1;
+ emac->tx_ch_num = 1;
+ if (emac->is_sr1) {
+ /* use a dedicated high priority channel for management
+ * messages which is +1 of highest priority data channel.
+ */
+ emac->tx_ch_num++;
+ goto skip_irq;
+ }
+
+ irq_name = "tx_ts0";
+ if (emac->port_id == PRUETH_PORT_MII1)
+ irq_name = "tx_ts1";
+ emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
+ if (emac->tx_ts_irq < 0) {
+ ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
+ goto free;
+ }
+
+skip_irq:
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ emac->msg_enable = netif_msg_init(debug_level, PRUETH_EMAC_DEBUG);
+ spin_lock_init(&emac->lock);
+ mutex_init(&emac->cmd_lock);
+
+ emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
+ if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
+ dev_err(prueth->dev, "couldn't find phy-handle\n");
+ ret = -ENODEV;
+ goto free;
+ } else if (of_phy_is_fixed_link(eth_node)) {
+ ret = of_phy_register_fixed_link(eth_node);
+ if (ret) {
+ ret = dev_err_probe(prueth->dev, ret,
+ "failed to register fixed-link phy\n");
+ goto free;
+ }
+
+ emac->phy_node = eth_node;
+ }
+
+ ret = of_get_phy_mode(eth_node, &emac->phy_if);
+ if (ret) {
+ dev_err(prueth->dev, "could not get phy-mode property\n");
+ goto free;
+ }
+
+ if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(emac->phy_if)) {
+ dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
+ ret = -EINVAL;
+ goto free;
+ }
+
+ ret = prueth_config_rgmiidelay(prueth, eth_node, emac->phy_if);
+ if (ret)
+ goto free;
+
+ /* get mac address from DT and set private and netdev addr */
+ mac_addr = of_get_mac_address(eth_node);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
+ ndev->max_mtu = PRUETH_MAX_MTU;
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &icssg_ethtool_ops;
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &emac->napi_rx,
+ emac_napi_rx_poll, NAPI_POLL_WEIGHT);
+
+ return 0;
+
+free:
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+free_wq:
+ destroy_workqueue(emac->cmd_wq);
+free_ndev:
+ free_netdev(ndev);
+ prueth->emac[mac] = NULL;
+
+ return ret;
+}
+
+static void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac < 0)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ if (of_phy_is_fixed_link(emac->phy_node))
+ of_phy_deregister_fixed_link(emac->phy_node);
+
+ netif_napi_del(&emac->napi_rx);
+
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+ destroy_workqueue(emac->cmd_wq);
+ free_netdev(emac->ndev);
+ prueth->emac[mac] = NULL;
+}
+
+static int prueth_get_cores(struct prueth *prueth, int slice)
+{
+ enum pruss_pru_id pruss_id;
+ struct device *dev = prueth->dev;
+ struct device_node *np = dev->of_node;
+ int pru, rtu, txpru = -1, ret;
+
+ switch (slice) {
+ case ICSS_SLICE0:
+ pru = 0;
+ rtu = 1;
+ if (!prueth->is_sr1)
+ txpru = 2;
+ break;
+ case ICSS_SLICE1:
+ if (prueth->is_sr1) {
+ pru = 2;
+ rtu = 3;
+ } else {
+ pru = 3;
+ rtu = 4;
+ txpru = 5;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prueth->pru[slice] = pru_rproc_get(np, pru, &pruss_id);
+ if (IS_ERR(prueth->pru[slice])) {
+ ret = PTR_ERR(prueth->pru[slice]);
+ prueth->pru[slice] = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get PRU%d: %d\n", slice, ret);
+ return ret;
+ }
+ prueth->pru_id[slice] = pruss_id;
+
+ prueth->rtu[slice] = pru_rproc_get(np, rtu, NULL);
+ if (IS_ERR(prueth->rtu[slice])) {
+ ret = PTR_ERR(prueth->rtu[slice]);
+ prueth->rtu[slice] = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get RTU%d: %d\n", slice, ret);
+ return ret;
+ }
+
+ if (prueth->is_sr1)
+ return 0;
+
+ prueth->txpru[slice] = pru_rproc_get(np, txpru, NULL);
+ if (IS_ERR(prueth->txpru[slice])) {
+ ret = PTR_ERR(prueth->txpru[slice]);
+ prueth->txpru[slice] = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get TX_PRU%d: %d\n",
+ slice, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
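+/* The core indices above follow the "ti,prus" phandle packing per slice:
+ * SR2.0 slices carry PRU, RTU and TX_PRU (entries 0-2 for slice 0, 3-5 for
+ * slice 1), while SR1.0 slices carry only PRU and RTU (entries 0-1 and
+ * 2-3).
+ */
+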
+static void prueth_put_cores(struct prueth *prueth, int slice)
+{
+ if (prueth->txpru[slice])
+ pru_rproc_put(prueth->txpru[slice]);
+
+ if (prueth->rtu[slice])
+ pru_rproc_put(prueth->rtu[slice]);
+
+ if (prueth->pru[slice])
+ pru_rproc_put(prueth->pru[slice]);
+}
+
+static void prueth_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i;
+
+ if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
+ set_val = 1;
+
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ struct prueth_emac *emac = prueth->emac[i];
+
+ if (!emac || !emac->ndev)
+ continue;
+
+ emac->offload_fwd_mark = set_val;
+ }
+}
+
+bool prueth_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return emac->prueth->is_switch_mode;
+ }
+
+ return false;
+}
+
+static int prueth_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ if (!prueth->is_switch_mode)
+ return NOTIFY_DONE;
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* Adding a port to a second bridge is not supported */
+ if (prueth->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ prueth_offload_fwd_mark_update(prueth);
+
+ return NOTIFY_DONE;
+}
+
+static void prueth_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ prueth_offload_fwd_mark_update(prueth);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int prueth_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = prueth_netdevice_port_link(ndev, info->upper_dev);
+ else
+ prueth_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int prueth_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
+ ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ if (ret) {
+ dev_err(prueth->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = prueth_switchdev_register_notifiers(prueth);
+ if (ret)
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+
+ return ret;
+}
+
+static void prueth_unregister_notifiers(struct prueth *prueth)
+{
+ prueth_switchdev_unregister_notifiers(prueth);
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
+}
+
+static const struct devlink_ops prueth_devlink_ops = {};
+
+static int prueth_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct prueth_devlink *dl_priv = devlink_priv(dl);
+ struct prueth *prueth = dl_priv->prueth;
+
+ dev_dbg(prueth->dev, "%s id:%u\n", __func__, id);
+
+ if (id != PRUETH_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = prueth->is_switch_mode;
+
+ return 0;
+}
+
+static int prueth_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct prueth_devlink *dl_priv = devlink_priv(dl);
+ struct prueth *prueth = dl_priv->prueth;
+ bool switch_en = ctx->val.vbool;
+ int ret = 0;
+ int i;
+
+ dev_dbg(prueth->dev, "%s id:%u\n", __func__, id);
+
+ if (id != PRUETH_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == prueth->is_switch_mode)
+ return 0;
+
+ if (!switch_en && prueth->br_members) {
+ dev_err(prueth->dev, "Remove ports from bridge before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ /* all interfaces must be down to switch between modes */
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ struct prueth_emac *emac = prueth->emac[i];
+
+ if (!emac || !emac->ndev || !netif_running(emac->ndev))
+ continue;
+
+ dev_err(prueth->dev, "Cannot switch modes while interfaces are up\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ prueth->default_vlan = 1;
+ prueth->is_switch_mode = switch_en;
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ struct prueth_emac *emac = prueth->emac[i];
+
+ if (!emac)
+ continue;
+
+ emac->port_vlan = switch_en ? prueth->default_vlan : 0;
+ }
+
+ dev_info(prueth->dev, "Enabling %s mode\n",
+ switch_en ? "switch" : "dual EMAC");
+
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static const struct devlink_param prueth_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(PRUETH_DL_PARAM_SWITCH_MODE, "switch_mode",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ prueth_dl_switch_mode_get,
+ prueth_dl_switch_mode_set, NULL),
+};
+
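+/* Once registered, the parameter can be driven from user space with the
+ * standard devlink tool, e.g. (the device instance name is board specific
+ * and shown here only for illustration):
+ *
+ *	# devlink dev param set platform/icssg1-eth \
+ *		name switch_mode value true cmode runtime
+ */
+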
+static void prueth_unregister_devlink_ports(struct prueth *prueth)
+{
+ struct devlink_port *dl_port;
+ struct prueth_emac *emac;
+ int i;
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ if (!emac)
+ continue;
+
+ dl_port = &emac->devlink_port;
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
+ }
+}
+
+static int prueth_register_devlink(struct prueth *prueth)
+{
+ struct devlink_port_attrs attrs = {};
+ struct prueth_devlink *dl_priv;
+ struct device *dev = prueth->dev;
+ struct devlink_port *dl_port;
+ struct prueth_emac *emac;
+ int ret = 0;
+ int i;
+
+ prueth->devlink =
+ devlink_alloc(&prueth_devlink_ops, sizeof(*dl_priv));
+ if (!prueth->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(prueth->devlink);
+ dl_priv->prueth = prueth;
+
+ ret = devlink_register(prueth->devlink, dev);
+ if (ret) {
+ dev_err(dev, "devlink reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ /* Provide a devlink hook for switching modes when multiple external
+ * ports are present and the switchdev driver is enabled.
+ */
+ if (prueth->is_switchmode_supported) {
+ ret = devlink_params_register(prueth->devlink,
+ prueth_devlink_params,
+ ARRAY_SIZE(prueth_devlink_params));
+ if (ret) {
+ dev_err(dev, "devlink params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+ devlink_params_publish(prueth->devlink);
+ }
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ if (!emac)
+ continue;
+
+ dl_port = &emac->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = emac->port_id;
+ attrs.switch_id.id_len = sizeof(resource_size_t);
+ memcpy(attrs.switch_id.id, prueth->switch_id, attrs.switch_id.id_len);
+ devlink_port_attrs_set(dl_port, &attrs);
+
+ ret = devlink_port_register(prueth->devlink, dl_port, emac->port_id);
+ if (ret) {
+ dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
+ emac->port_id, ret);
+ goto dl_port_unreg;
+ }
+ }
+
+ return ret;
+
+dl_port_unreg:
+ prueth_unregister_devlink_ports(prueth);
+dl_unreg:
+ devlink_unregister(prueth->devlink);
+dl_free:
+ devlink_free(prueth->devlink);
+
+ return ret;
+}
+
+static void prueth_unregister_devlink(struct prueth *prueth)
+{
+ if (prueth->is_switchmode_supported) {
+ devlink_params_unpublish(prueth->devlink);
+ devlink_params_unregister(prueth->devlink, prueth_devlink_params,
+ ARRAY_SIZE(prueth_devlink_params));
+ }
+
+ prueth_unregister_devlink_ports(prueth);
+ devlink_unregister(prueth->devlink);
+ devlink_free(prueth->devlink);
+}
+
+static const struct of_device_id prueth_dt_match[];
+
+static int prueth_probe(struct platform_device *pdev)
+{
+ struct prueth *prueth;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *eth0_node, *eth1_node;
+ const struct of_device_id *match;
+ struct pruss *pruss;
+ int i, ret;
+ u32 msmc_ram_size;
+ struct genpool_data_align gp_data = {
+ .align = SZ_64K,
+ };
+
+ if (sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE) {
+ dev_err(dev, "insufficient SW_DATA size: %d vs %ld\n",
+ PRUETH_NAV_SW_DATA_SIZE, sizeof(struct prueth_swdata));
+ return -ENOMEM;
+ }
+
+ match = of_match_device(prueth_dt_match, dev);
+ if (!match)
+ return -ENODEV;
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, prueth);
+ prueth->pdev = pdev;
+ prueth->pdata = *(const struct prueth_pdata *)match->data;
+
+ if (of_device_is_compatible(np, "ti,am654-icssg-prueth-sr1"))
+ prueth->is_sr1 = true;
+
+ prueth->dev = dev;
+ eth0_node = of_get_child_by_name(np, "ethernet-mii0");
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+
+ eth1_node = of_get_child_by_name(np, "ethernet-mii1");
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither ethernet-mii0 nor ethernet-mii1 node available\n");
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "mii-g-rt");
+ if (IS_ERR(prueth->miig_rt)) {
+ dev_err(dev, "couldn't get mii-g-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get mii-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0);
+ if (ret)
+ goto put_cores;
+ }
+
+ if (eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE1);
+ if (ret)
+ goto put_cores;
+ }
+
+ pruss = pruss_get(eth0_node ?
+ prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_cores;
+ }
+
+ prueth->pruss = pruss;
+
+ ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
+ &prueth->shram);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+ goto put_mem;
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+ goto put_mem;
+ }
+
+ msmc_ram_size = prueth->is_sr1 ? MSMC_RAM_SIZE_SR1 : MSMC_RAM_SIZE_SR2;
+ prueth->is_switchmode_supported = prueth->pdata.switch_mode;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = MSMC_RAM_SIZE_SR2_SWITCH_MODE;
+
+ if (prueth->is_sr1) {
+ prueth->msmcram.va =
+ (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ msmc_ram_size);
+ } else {
+ /* TEMP: FW bug needs buffer base to be 64KB aligned */
+ prueth->msmcram.va =
+ (void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
+ msmc_ram_size,
+ gen_pool_first_fit_align,
+ &gp_data);
+ }
+
+ if (!prueth->msmcram.va) {
+ ret = -ENOMEM;
+ dev_err(dev, "unable to allocate MSMC resource\n");
+ goto put_mem;
+ }
+ prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va);
+ prueth->msmcram.size = msmc_ram_size;
+ memset(prueth->msmcram.va, 0, msmc_ram_size);
+ dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
+ prueth->msmcram.va, prueth->msmcram.size);
+
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
+ prueth->iep0 = NULL;
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+ goto free_pool;
+ }
+
+ if (prueth->is_sr1) {
+ ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to init iep0\n");
+ goto free_iep;
+ }
+
+ ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to init iep1\n");
+ icss_iep_exit(prueth->iep0);
+ goto free_iep;
+ }
+ } else if (prueth->pdata.quirk_10m_link_issue) {
+ /* Enable IEP1 for the firmware in 64-bit mode as a workaround
+ * for the 10M full-duplex link detect issue under TX traffic.
+ */
+ icss_iep_init_fw(prueth->iep1);
+ }
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth0_node->name, ret);
+ }
+ goto exit_iep;
+ }
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
+ }
+
+ if (eth1_node) {
+ ret = prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth1_node->name, ret);
+ }
+ goto netdev_exit;
+ }
+
+ if (prueth->is_sr1)
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
+ else
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
+ }
+
+ ret = prueth_register_devlink(prueth);
+ if (ret)
+ goto netdev_exit;
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0");
+ goto netdev_exit;
+ }
+
+ devlink_port_type_eth_set(&prueth->emac[PRUETH_MAC0]->devlink_port,
+ prueth->emac[PRUETH_MAC0]->ndev);
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+
+ emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ /* Get attached phy details */
+ phy_attached_info(prueth->emac[PRUETH_MAC0]->phydev);
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1");
+ goto netdev_unregister;
+ }
+
+ devlink_port_type_eth_set(&prueth->emac[PRUETH_MAC1]->devlink_port,
+ prueth->emac[PRUETH_MAC1]->ndev);
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+
+ emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ /* Get attached phy details */
+ phy_attached_info(prueth->emac[PRUETH_MAC1]->phydev);
+ }
+
+ if (prueth->is_switchmode_supported) {
+ ret = prueth_register_notifiers(prueth);
+ if (ret)
+ goto netdev_unregister;
+
+ snprintf(prueth->switch_id, sizeof(prueth->switch_id), "%s",
+ dev_name(dev));
+ }
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ if (prueth->emac[i]->phydev) {
+ phy_disconnect(prueth->emac[i]->phydev);
+ prueth->emac[i]->phydev = NULL;
+ }
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ struct device_node *eth_node;
+
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+exit_iep:
+ if (prueth->is_sr1) {
+ icss_iep_exit(prueth->iep1);
+ icss_iep_exit(prueth->iep0);
+ } else if (prueth->pdata.quirk_10m_link_issue) {
+ icss_iep_exit_fw(prueth->iep1);
+ }
+
+free_iep:
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va, msmc_ram_size);
+
+put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+ pruss_put(prueth->pruss);
+
+put_cores:
+ if (eth1_node) {
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
+ }
+
+ if (eth0_node) {
+ prueth_put_cores(prueth, ICSS_SLICE0);
+ of_node_put(eth0_node);
+ }
+
+ return ret;
+}
+
+static int prueth_remove(struct platform_device *pdev)
+{
+ struct device_node *eth_node;
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ int i;
+
+ prueth_unregister_notifiers(prueth);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ phy_disconnect(prueth->emac[i]->phydev);
+ prueth->emac[i]->phydev = NULL;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+ prueth_unregister_devlink(prueth);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+ if (prueth->is_sr1) {
+ icss_iep_exit(prueth->iep1);
+ icss_iep_exit(prueth->iep0);
+ } else if (prueth->pdata.quirk_10m_link_issue) {
+ icss_iep_exit_fw(prueth->iep1);
+ }
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va,
+ prueth->is_sr1 ? MSMC_RAM_SIZE_SR1 : MSMC_RAM_SIZE_SR2);
+
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC1])
+ prueth_put_cores(prueth, ICSS_SLICE1);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ prueth_put_cores(prueth, ICSS_SLICE0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
+
+static const struct prueth_pdata am654_icssg_pdata_sr1 = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct prueth_pdata am654_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .quirk_10m_link_issue = 1,
+ .switch_mode = 1,
+};
+
+static const struct prueth_pdata am64x_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am654-icssg-prueth-sr1", .data = &am654_icssg_pdata_sr1 },
+ { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
+ { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = prueth_probe,
+ .remove = prueth_remove,
+ .driver = {
+ .name = "icssg-prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/ti/icssg_prueth.h b/drivers/net/ethernet/ti/icssg_prueth.h
new file mode 100644
index 000000000000..3e0e764a8d27
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_prueth.h
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_PRUETH_H
+#define __NET_TI_ICSSG_PRUETH_H
+
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/pruss.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/remoteproc.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include <net/devlink.h>
+#include <net/page_pool.h>
+
+#include "icssg_config.h"
+#include "icss_iep.h"
+#include "icssg_switch_map.h"
+#include "icssg_qos.h"
+
+#define ICSS_SLICE0 0
+#define ICSS_SLICE1 1
+
+#define ICSS_FW_PRU 0
+#define ICSS_FW_RTU 1
+
+#define ICSSG_MAX_RFLOWS 8 /* per slice */
+
+/* Firmware status codes */
+#define ICSS_HS_FW_READY 0x55555555
+#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
+
+/* Firmware command codes */
+#define ICSS_HS_CMD_BUSY 0x40000000
+#define ICSS_HS_CMD_DONE 0x80000000
+#define ICSS_HS_CMD_CANCEL 0x10000000
+
+/* Firmware commands */
+#define ICSS_CMD_SPAD 0x20
+#define ICSS_CMD_RXTX 0x10
+#define ICSS_CMD_ADD_FDB 0x1
+#define ICSS_CMD_DEL_FDB 0x2
+#define ICSS_CMD_SET_RUN 0x4
+#define ICSS_CMD_GET_FDB_SLOT 0x5
+#define ICSS_CMD_ENABLE_VLAN 0x5
+#define ICSS_CMD_DISABLE_VLAN 0x6
+#define ICSS_CMD_ADD_FILTER 0x7
+#define ICSS_CMD_ADD_MAC 0x8
+
+/* Firmware flags */
+#define ICSS_SET_RUN_FLAG_VLAN_ENABLE BIT(0) /* switch only */
+#define ICSS_SET_RUN_FLAG_FLOOD_UNICAST BIT(1) /* switch only */
+#define ICSS_SET_RUN_FLAG_PROMISC BIT(2) /* MAC only */
+#define ICSS_SET_RUN_FLAG_MULTICAST_PROMISC BIT(3) /* MAC only */
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addresses, but Linux
+ * sees only the host-side port; the other 2 are the switch ports.
+ * In EMAC mode there are 2 real ports, i.e. 2 MAC addresses, and Linux
+ * sees both of them.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port RG/SG MII 0 */
+ PRUETH_PORT_MII1, /* physical port RG/SG MII 1 */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+};
+
+struct prueth_tx_chn {
+ struct device *dma_dev;
+ struct napi_struct napi_tx;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ struct prueth_emac *emac;
+ u32 id;
+ u32 descs_num;
+ unsigned int irq;
+ char name[32];
+};
+
+struct prueth_rx_chn {
+ struct device *dev;
+ struct device *dma_dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
+ char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+enum prueth_devlink_param_id {
+ PRUETH_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ PRUETH_DL_PARAM_SWITCH_MODE,
+};
+
+struct prueth_devlink {
+ struct prueth *prueth;
+};
+
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+};
+
+union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+};
+
+struct prueth_swdata {
+ union prueth_data data;
+ struct prueth_rx_chn *rx_chn;
+ enum prueth_swdata_type type;
+};
+
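+/* One prueth_swdata instance rides in the software-data area of each CPPI
+ * descriptor; the probe-time check against PRUETH_NAV_SW_DATA_SIZE ensures
+ * it fits.
+ */
+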
+/* There are 4 TX DMA channels; the highest priority is CH3 (thread 3) and
+ * the lower three are lower-priority channels (threads).
+ */
+#define PRUETH_MAX_TX_QUEUES 4
+
+#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
+/* data for each emac port */
+struct prueth_emac {
+ bool is_sr1;
+ bool fw_running;
+ struct prueth *prueth;
+ struct net_device *ndev;
+ u8 mac_addr[6];
+ struct napi_struct napi_rx;
+ u32 msg_enable;
+
+ int link;
+ int speed;
+ int duplex;
+
+ const char *phy_id;
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ struct phy_device *phydev;
+ enum prueth_port port_id;
+ struct icss_iep *iep;
+ unsigned int rx_ts_enabled : 1;
+ unsigned int tx_ts_enabled : 1;
+ unsigned int half_duplex : 1;
+
+ /* DMA related */
+ struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+ struct prueth_rx_chn rx_chns;
+ int rx_flow_id_base;
+ int tx_ch_num;
+
+ /* SR1.0 Management channel */
+ struct prueth_rx_chn rx_mgm_chn;
+ int rx_mgm_flow_id_base;
+
+ spinlock_t lock; /* serialize access */
+
+ /* TX HW Timestamping */
+ /* TX TS cookie will be index to the tx_ts_skb array */
+ struct sk_buff *tx_ts_skb[PRUETH_MAX_TX_TS_REQUESTS];
+ atomic_t tx_ts_pending;
+ int tx_ts_irq;
+
+ u8 cmd_seq;
+ /* shutdown related */
+ u32 cmd_data[4];
+ struct completion cmd_complete;
+ /* Mutex to serialize access to firmware command interface */
+ struct mutex cmd_lock;
+ struct work_struct rx_mode_work;
+ struct workqueue_struct *cmd_wq;
+
+ struct pruss_mem_region dram;
+
+ bool offload_fwd_mark;
+ struct devlink_port devlink_port;
+ int port_vlan;
+
+ struct prueth_qos qos;
+ struct work_struct ts_work;
+ struct delayed_work stats_work;
+ u64 *stats;
+
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
+};
+
+/* The buf includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
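+/* Worked example (values are arch/config dependent, for illustration only):
+ * with XDP_PACKET_HEADROOM = 256, NET_SKB_PAD = 64 and NET_IP_ALIGN = 2,
+ * PRUETH_HEADROOM = ALIGN(max(256, 64) + 2, 8) = 264 bytes on a 64-bit
+ * build.
+ */
+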
+/**
+ * struct prueth_pdata - PRUeth platform data
+ * @fdqring_mode: Free desc queue mode
+ * @quirk_10m_link_issue: 10M link detect errata
+ * @switch_mode: switch firmware support
+ */
+struct prueth_pdata {
+ enum k3_ring_mode fdqring_mode;
+
+ u32 quirk_10m_link_issue:1;
+ u32 switch_mode:1;
+};
+
+/**
+ * struct prueth - PRUeth structure
+ * @is_sr1: device is pg1.0 (pg1.0 will be deprecated upstream)
+ * @dev: device
+ * @pruss: pruss handle
+ * @pru: rproc instances of PRUs
+ * @rtu: rproc instances of RTUs
+ * @txpru: rproc instances of TX_PRUs
+ * @shram: PRUSS shared RAM region
+ * @sram_pool: MSMC RAM pool for buffers
+ * @msmcram: MSMC RAM region
+ * @eth_node: DT node for the port
+ * @emac: private EMAC data structure
+ * @registered_netdevs: list of registered netdevs
+ * @fw_data: firmware names to be used with PRU remoteprocs
+ * @config: firmware load time configuration per slice
+ * @miig_rt: regmap to mii_g_rt block
+ * @mii_rt: regmap to mii_rt block
+ * @pa_stats: regmap to pa_stats block
+ * @pru_id: ID for each of the PRUs
+ * @pdev: pointer to ICSSG platform device
+ * @iep0: pointer to IEP0 device
+ * @iep1: pointer to IEP1 device
+ * @pdata: pointer to platform data for ICSSG driver
+ * @vlan_tbl: VLAN-FID table pointer
+ * @icssg_hwcmdseq: sequence counter for HWQ messages
+ * @emacs_initialized: num of EMACs/ext ports that are up/running
+ * @hw_bridge_dev: pointer to HW bridge net device
+ * @br_members: bitmask of bridge member ports
+ * @prueth_netdevice_nb: netdevice notifier block
+ * @prueth_switchdev_nb: switchdev notifier block
+ * @prueth_switchdev_bl_nb: switchdev blocking notifier block
+ * @is_switch_mode: flag to indicate if device is in Switch mode
+ * @is_switchmode_supported: indicates platform support for switch mode
+ * @switch_id: ID for mapping switch ports to bridge
+ * @default_vlan: Default VLAN for host
+ * @devlink: pointer to devlink
+ */
+struct prueth {
+ bool is_sr1;
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru[PRUSS_NUM_PRUS];
+ struct rproc *rtu[PRUSS_NUM_PRUS];
+ struct rproc *txpru[PRUSS_NUM_PRUS];
+ struct pruss_mem_region shram;
+ struct gen_pool *sram_pool;
+ struct pruss_mem_region msmcram;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+ const struct prueth_private_data *fw_data;
+ struct icssg_config_sr1 config[PRUSS_NUM_PRUS];
+ struct regmap *miig_rt;
+ struct regmap *mii_rt;
+ struct regmap *pa_stats;
+
+ enum pruss_pru_id pru_id[PRUSS_NUM_PRUS];
+ struct platform_device *pdev;
+ struct icss_iep *iep0;
+ struct icss_iep *iep1;
+ struct prueth_pdata pdata;
+ struct prueth_vlan_tbl *vlan_tbl;
+ u8 icssg_hwcmdseq;
+
+ int emacs_initialized;
+
+ struct net_device *hw_bridge_dev;
+ u8 br_members;
+ struct notifier_block prueth_netdevice_nb;
+ struct notifier_block prueth_switchdev_nb;
+ struct notifier_block prueth_switchdev_bl_nb;
+ bool is_switch_mode;
+ bool is_switchmode_supported;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+ int default_vlan;
+ struct devlink *devlink;
+};
+
+struct emac_tx_ts_response_sr1 {
+ u32 lo_ts;
+ u32 hi_ts;
+ u32 reserved;
+ u32 cookie;
+};
+
+struct emac_tx_ts_response {
+ u32 reserved[2];
+ u32 cookie;
+ u32 lo_ts;
+ u32 hi_ts;
+};
+
+/* Classifier helpers */
+void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
+void icssg_class_set_host_mac_addr(struct regmap *miig_rt, u8 *mac);
+void icssg_class_disable(struct regmap *miig_rt, int slice);
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1);
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice);
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev);
+void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr);
+
+/* Buffer queue helpers */
+int icssg_queue_pop(struct prueth *prueth, u8 queue);
+void icssg_queue_push(struct prueth *prueth, int queue, u16 addr);
+u32 icssg_queue_level(struct prueth *prueth, int queue);
+
+/* get PRUSS SLICE number from prueth_emac */
+static inline int prueth_emac_slice(struct prueth_emac *emac)
+{
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ return ICSS_SLICE0;
+ case PRUETH_PORT_MII1:
+ return ICSS_SLICE1;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* config helpers */
+void icssg_config_ipg(struct prueth_emac *emac);
+void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
+ int slice);
+int icssg_config_sr2(struct prueth *prueth, struct prueth_emac *emac,
+ int slice);
+int emac_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd state);
+void icssg_config_set_speed(struct prueth_emac *emac);
+void icssg_config_half_duplex(struct prueth_emac *emac);
+
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp);
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add);
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid);
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add);
+u16 icssg_get_pvid(struct prueth_emac *emac);
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+#define prueth_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct prueth_tx_chn, napi_tx)
+
+u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts);
+void emac_stats_work_handler(struct work_struct *work);
+void emac_ethtool_stats_init(struct prueth_emac *emac);
+#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg_qos.c b/drivers/net/ethernet/ti/icssg_qos.c
new file mode 100644
index 000000000000..4c341d777184
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_qos.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG PRUETH QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/printk.h>
+#include "icssg_prueth.h"
+#include "icssg_switch_map.h"
+
+/* in msec */
+#define ICSSG_IET_FPE_VERIFY_TIMEOUT_MS 1000
+
+static void icssg_qos_tas_init(struct net_device *ndev);
+static int icssg_prueth_iet_fpe_enable(struct prueth_emac *emac);
+static void icssg_prueth_iet_fpe_disable(struct prueth_qos_iet *iet);
+static void icssg_qos_enable_ietfpe(struct work_struct *work);
+
+void icssg_qos_init(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+
+ icssg_qos_tas_init(ndev);
+
+ if (!iet->fpe_configured)
+ return;
+
+ /* Init work queue for IET MAC verify process */
+ iet->emac = emac;
+ INIT_WORK(&iet->fpe_config_task, icssg_qos_enable_ietfpe);
+ init_completion(&iet->fpe_config_compl);
+
+ /* As the worker may be sleeping, it checks this flag to abort
+ * as soon as it comes out of sleep, cancelling the FPE config
+ * task.
+ */
+ atomic_set(&iet->cancel_fpe_config, 0);
+}
+
+static void tas_update_fw_list_pointers(struct prueth_emac *emac)
+{
+ struct tas_config *tas = &emac->qos.tas.config;
+
+ if ((readb(tas->active_list)) == TAS_LIST0) {
+ tas->firmware_active_list = emac->dram.va + TAS_GATE_MASK_LIST0;
+ tas->firmware_shadow_list = emac->dram.va + TAS_GATE_MASK_LIST1;
+ } else {
+ tas->firmware_active_list = emac->dram.va + TAS_GATE_MASK_LIST1;
+ tas->firmware_shadow_list = emac->dram.va + TAS_GATE_MASK_LIST0;
+ }
+}
+
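+/* The firmware double-buffers the gate command lists: the byte at
+ * TAS_ACTIVE_LIST_INDEX names the list it is currently executing, and the
+ * driver always builds the other (shadow) list before asking the firmware
+ * to swap them.
+ */
+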
+void icssg_qos_link_up(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+
+ if (!iet->fpe_configured)
+ return;
+
+ icssg_prueth_iet_fpe_enable(emac);
+}
+
+void icssg_qos_link_down(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+
+ if (iet->fpe_configured)
+ icssg_prueth_iet_fpe_disable(iet);
+}
+
+static void tas_update_maxsdu_table(struct prueth_emac *emac)
+{
+ struct tas_config *tas = &emac->qos.tas.config;
+ u16 *max_sdu_tbl_ptr;
+ u8 gate_idx;
+
+ /* update the maxsdu table */
+ max_sdu_tbl_ptr = emac->dram.va + TAS_QUEUE_MAX_SDU_LIST;
+
+ for (gate_idx = 0; gate_idx < TAS_MAX_NUM_QUEUES; gate_idx++)
+ writew(tas->max_sdu_table.max_sdu[gate_idx], &max_sdu_tbl_ptr[gate_idx]);
+}
+
+static void tas_reset(struct prueth_emac *emac)
+{
+ struct tas_config *tas = &emac->qos.tas.config;
+ int i;
+
+ for (i = 0; i < TAS_MAX_NUM_QUEUES; i++)
+ tas->max_sdu_table.max_sdu[i] = 2048;
+
+ tas_update_maxsdu_table(emac);
+
+ writeb(TAS_LIST0, tas->active_list);
+
+ memset_io(tas->firmware_active_list, 0, sizeof(*tas->firmware_active_list));
+ memset_io(tas->firmware_shadow_list, 0, sizeof(*tas->firmware_shadow_list));
+}
+
+static int tas_set_state(struct prueth_emac *emac, enum tas_state state)
+{
+ struct tas_config *tas = &emac->qos.tas.config;
+ int ret;
+
+ if (tas->state == state)
+ return 0;
+
+ switch (state) {
+ case TAS_STATE_RESET:
+ tas_reset(emac);
+ ret = emac_set_port_state(emac, ICSSG_EMAC_PORT_TAS_RESET);
+ tas->state = TAS_STATE_RESET;
+ break;
+ case TAS_STATE_ENABLE:
+ ret = emac_set_port_state(emac, ICSSG_EMAC_PORT_TAS_ENABLE);
+ tas->state = TAS_STATE_ENABLE;
+ break;
+ case TAS_STATE_DISABLE:
+ ret = emac_set_port_state(emac, ICSSG_EMAC_PORT_TAS_DISABLE);
+ tas->state = TAS_STATE_DISABLE;
+ break;
+ default:
+ netdev_err(emac->ndev, "%s: unsupported state\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ netdev_err(emac->ndev, "TAS set state failed %d\n", ret);
+ return ret;
+}
+
+static int tas_set_trigger_list_change(struct prueth_emac *emac)
+{
+ struct tc_taprio_qopt_offload *admin_list = emac->qos.tas.taprio_admin;
+ struct tas_config *tas = &emac->qos.tas.config;
+ struct ptp_system_timestamp sts;
+ u32 change_cycle_count;
+ u32 cycle_time;
+ u64 base_time;
+ u64 cur_time;
+
+ cycle_time = admin_list->cycle_time - 4; /* -4ns to compensate for IEP wraparound time */
+ base_time = admin_list->base_time;
+ cur_time = prueth_iep_gettime(emac, &sts);
+
+ if (base_time > cur_time)
+ change_cycle_count = DIV_ROUND_UP_ULL(base_time - cur_time, cycle_time);
+ else
+ change_cycle_count = 1;
+
+ writel(cycle_time, emac->dram.va + TAS_ADMIN_CYCLE_TIME);
+ writel(change_cycle_count, emac->dram.va + TAS_CONFIG_CHANGE_CYCLE_COUNT);
+ writeb(admin_list->num_entries, emac->dram.va + TAS_ADMIN_LIST_LENGTH);
+
+ /* config_change cleared by f/w to ack reception of new shadow list */
+ writeb(1, &tas->config_list->config_change);
+ /* config_pending cleared by f/w when new shadow list is copied to active list */
+ writeb(1, &tas->config_list->config_pending);
+
+ return emac_set_port_state(emac, ICSSG_EMAC_PORT_TAS_TRIGGER);
+}
+
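+/* Worked example for the trigger above (numbers purely illustrative): with
+ * cycle_time = 1 ms and base_time 10 ms ahead of cur_time,
+ * change_cycle_count = DIV_ROUND_UP(10 ms, 1 ms) = 10, i.e. the firmware
+ * swaps in the shadow list ten cycles from now.
+ */
+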
+static int tas_update_oper_list(struct prueth_emac *emac)
+{
+ struct tas_config *tas = &emac->qos.tas.config;
+ struct tc_taprio_qopt_offload *admin_list = emac->qos.tas.taprio_admin;
+ int ret;
+ u8 win_idx, gate_idx, val;
+ u32 tas_acc_gate_close_time = 0;
+
+ tas_update_fw_list_pointers(emac);
+
+ for (win_idx = 0; win_idx < admin_list->num_entries; win_idx++) {
+ tas->firmware_shadow_list->gate_mask_list[win_idx] = admin_list->entries[win_idx].gate_mask;
+ tas_acc_gate_close_time += admin_list->entries[win_idx].interval;
+
+ /* extend last entry till end of cycle time */
+ if (win_idx == admin_list->num_entries - 1)
+ tas->firmware_shadow_list->window_end_time_list[win_idx] = admin_list->cycle_time;
+ else
+ tas->firmware_shadow_list->window_end_time_list[win_idx] = tas_acc_gate_close_time;
+ }
+
+ /* clear remaining entries */
+ for (win_idx = admin_list->num_entries; win_idx < TAS_MAX_CMD_LISTS; win_idx++) {
+ tas->firmware_shadow_list->gate_mask_list[win_idx] = 0;
+ tas->firmware_shadow_list->window_end_time_list[win_idx] = 0;
+ }
+
+ /* update the Array of gate close time for each queue in each window */
+ for (win_idx = 0; win_idx < admin_list->num_entries; win_idx++) {
+ /* On Linux, only PRUETH_MAX_TX_QUEUES are supported per port */
+ for (gate_idx = 0; gate_idx < PRUETH_MAX_TX_QUEUES; gate_idx++) {
+ u32 gate_close_time = 0;
+
+ if (tas->firmware_shadow_list->gate_mask_list[win_idx] & BIT(gate_idx))
+ gate_close_time = tas->firmware_shadow_list->window_end_time_list[win_idx];
+
+ tas->firmware_shadow_list->gate_close_time_list[win_idx][gate_idx] = gate_close_time;
+ }
+ }
+
+ /* tell f/w to swap active & shadow list */
+ ret = tas_set_trigger_list_change(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "failed to swap f/w config list: %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for completion */
+ ret = readb_poll_timeout(&tas->config_list->config_change, val, !val,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC);
+ if (ret) {
+ netdev_err(emac->ndev, "TAS list change completion time out\n");
+ return ret;
+ }
+
+ tas_update_fw_list_pointers(emac);
+
+ return 0;
+}
+
+static int emac_set_taprio(struct prueth_emac *emac)
+{
+ int ret;
+ struct tc_taprio_qopt_offload *taprio = emac->qos.tas.taprio_admin;
+
+ if (!taprio->enable)
+ return tas_set_state(emac, TAS_STATE_DISABLE);
+
+ ret = tas_update_oper_list(emac);
+ if (ret)
+ return ret;
+
+ return tas_set_state(emac, TAS_STATE_ENABLE);
+}
+
+static void emac_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int emac_setup_taprio(struct net_device *ndev, struct tc_taprio_qopt_offload *taprio)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct tc_taprio_qopt_offload *est_new;
+ int ret, win_idx;
+
+ if (!netif_running(ndev)) {
+ netdev_err(ndev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (taprio->cycle_time_extension) {
+ netdev_err(ndev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ if (taprio->num_entries == 0 ||
+ taprio->num_entries > TAS_MAX_CMD_LISTS) {
+ netdev_err(ndev, "unsupported num_entries %ld in taprio config\n",
+ taprio->num_entries);
+ return -EINVAL;
+ }
+
+ /* If any time_interval is 0 in between the list, then exit */
+ for (win_idx = 0; win_idx < taprio->num_entries; win_idx++) {
+ if (taprio->entries[win_idx].interval == 0) {
+ netdev_err(ndev, "0 interval in taprio config not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if (emac->qos.tas.taprio_admin) {
+ devm_kfree(&ndev->dev, emac->qos.tas.taprio_admin);
+ emac->qos.tas.taprio_admin = NULL;
+ }
+
+ est_new = devm_kzalloc(&ndev->dev,
+ struct_size(est_new, entries, taprio->num_entries),
+ GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ emac_cp_taprio(taprio, est_new);
+ emac->qos.tas.taprio_admin = est_new;
+ ret = emac_set_taprio(emac);
+ if (ret) {
+ devm_kfree(&ndev->dev, est_new);
+ emac->qos.tas.taprio_admin = NULL;
+ }
+
+ return ret;
+}
+
+int icssg_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (emac->prueth->is_sr1)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return emac_setup_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void icssg_qos_tas_init(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct tas_config *tas = &emac->qos.tas.config;
+ bool need_setup = false;
+
+ if (emac->prueth->is_sr1)
+ return;
+
+ if (tas->state == TAS_STATE_ENABLE)
+ need_setup = true;
+
+ tas->config_list = emac->dram.va + TAS_CONFIG_CHANGE_TIME;
+ tas->active_list = emac->dram.va + TAS_ACTIVE_LIST_INDEX;
+
+ tas_update_fw_list_pointers(emac);
+
+ tas_set_state(emac, TAS_STATE_RESET);
+
+ if (need_setup)
+ emac_set_taprio(emac);
+}
+
+static int icssg_config_ietfpe(struct prueth_qos_iet *iet, bool enable)
+{
+ void *config = iet->emac->dram.va + ICSSG_CONFIG_OFFSET;
+ u8 val;
+ int ret, i;
+
+ /* If FPE is to be enabled, first configure MAC Verify state
+ * machine in firmware as firmware kicks the Verify process
+ * as soon as ICSSG_EMAC_PORT_PREMPT_TX_ENABLE command is
+ * received.
+ */
+ if (enable && iet->mac_verify_configured) {
+ writeb(1, config + PRE_EMPTION_ENABLE_VERIFY);
+ /* Should be a multiple of 64. TODO: make this configurable
+ * through ethtool.
+ */
+ writew(64, config + PRE_EMPTION_ADD_FRAG_SIZE_LOCAL);
+ writel(ICSSG_IET_FPE_VERIFY_TIMEOUT_MS, config + PRE_EMPTION_VERIFY_TIME);
+ }
+
+ /* Send command to enable FPE Tx side. Rx is always enabled */
+ ret = emac_set_port_state(iet->emac,
+ enable ? ICSSG_EMAC_PORT_PREMPT_TX_ENABLE :
+ ICSSG_EMAC_PORT_PREMPT_TX_DISABLE);
+ if (ret) {
+ netdev_err(iet->emac->ndev, "TX pre-empt %s command failed\n",
+ enable ? "enable" : "disable");
+ writeb(0, config + PRE_EMPTION_ENABLE_VERIFY);
+ return ret;
+ }
+
+ /* Update the FPE TX enable bit. Firmware uses this bit and, if
+ * everything looks good on its side, sets PRE_EMPTION_ACTIVE_TX.
+ */
+ writeb(enable ? 1 : 0, config + PRE_EMPTION_ENABLE_TX);
+
+ if (enable && iet->mac_verify_configured) {
+ ret = readb_poll_timeout(config + PRE_EMPTION_VERIFY_STATUS, val,
+ (val == ICSSG_IETFPE_STATE_SUCCEEDED),
+ USEC_PER_MSEC, 5 * USEC_PER_SEC);
+ if (ret == -ETIMEDOUT) {
+ netdev_err(iet->emac->ndev,
+ "timeout for MAC Verify: status %x\n",
+ val);
+ return ret;
+ }
+ } else {
+ /* Give f/w some time to update PRE_EMPTION_ACTIVE_TX state */
+ usleep_range(100, 200);
+ }
+
+ if (enable) {
+ val = readb(config + PRE_EMPTION_ACTIVE_TX);
+ if (val != 1) {
+ netdev_err(iet->emac->ndev,
+ "F/w fails to activate IET/FPE\n");
+ writeb(0, config + PRE_EMPTION_ENABLE_TX);
+ return -ENODEV;
+ }
+ } else {
+ return ret;
+ }
+
+ /* Configure the highest queue as express: bit 4 set marks a
+ * queue preemptible (FPE), bit 4 clear marks it express.
+ */
+
+ /* first set all 8 queues as Pre-emptive */
+ for (i = 0; i < PRUETH_MAX_TX_QUEUES * PRUETH_NUM_MACS; i++)
+ writeb(BIT(4), config + EXPRESS_PRE_EMPTIVE_Q_MAP + i);
+
+ /* set highest priority channel queue as express */
+ writeb(0, config + EXPRESS_PRE_EMPTIVE_Q_MAP + iet->emac->tx_ch_num - 1);
+
+ /* set up queue mask for FPE. 1 means express */
+ writeb(BIT(iet->emac->tx_ch_num - 1), config + EXPRESS_PRE_EMPTIVE_Q_MASK);
+
+ iet->fpe_enabled = true;
+
+ return ret;
+}
+
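+/* For example, with tx_ch_num = 4 the code above first marks all queues
+ * preemptible (bit 4 set), then clears the map entry of queue 3 and writes
+ * a queue mask of BIT(3) = 0x08, leaving queue 3 as the only express queue
+ * (illustrative for a four-queue port).
+ */
+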
+static void icssg_qos_enable_ietfpe(struct work_struct *work)
+{
+ struct prueth_qos_iet *iet =
+ container_of(work, struct prueth_qos_iet, fpe_config_task);
+ int ret;
+
+ /* Set the required flag and send a command to ICSSG firmware to
+ * enable FPE and start MAC verify
+ */
+ ret = icssg_config_ietfpe(iet, true);
+
+ /* If verify is configured, icssg_config_ietfpe() has already
+ * polled for the status; either way, report and complete.
+ */
+ if (!ret)
+ netdev_dbg(iet->emac->ndev, "IET FPE configured successfully\n");
+ else
+ netdev_err(iet->emac->ndev, "IET FPE config error\n");
+ complete(&iet->fpe_config_compl);
+}
+
+static void icssg_prueth_iet_fpe_disable(struct prueth_qos_iet *iet)
+{
+ int ret;
+
+ atomic_set(&iet->cancel_fpe_config, 1);
+ cancel_work_sync(&iet->fpe_config_task);
+ ret = icssg_config_ietfpe(iet, false);
+ if (!ret)
+ netdev_dbg(iet->emac->ndev, "IET FPE disabled successfully\n");
+ else
+ netdev_err(iet->emac->ndev, "IET FPE disable failed\n");
+}
+
+static int icssg_prueth_iet_fpe_enable(struct prueth_emac *emac)
+{
+ struct prueth_qos_iet *iet = &emac->qos.iet;
+ int ret;
+
+ /* Schedule MAC Verify and enable IET FPE if configured */
+ atomic_set(&iet->cancel_fpe_config, 0);
+ reinit_completion(&iet->fpe_config_compl);
+ schedule_work(&iet->fpe_config_task);
+ /* Empirically the verify process takes about 1.5 s, so wait
+ * for up to 10 s.
+ */
+ ret = wait_for_completion_timeout(&iet->fpe_config_compl,
+ msecs_to_jiffies(10000));
+ if (!ret) {
+ netdev_err(emac->ndev,
+ "IET verify completion timeout\n");
+ /* cancel verify in progress */
+ atomic_set(&iet->cancel_fpe_config, 1);
+ cancel_work_sync(&iet->fpe_config_task);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/icssg_qos.h b/drivers/net/ethernet/ti/icssg_qos.h
new file mode 100644
index 000000000000..f29363bb1058
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_qos.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __NET_TI_ICSSG_QOS_H
+#define __NET_TI_ICSSG_QOS_H
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+/**
+ * Maximum number of gate command entries in each list.
+ */
+#define TAS_MAX_CMD_LISTS (16)
+
+/**
+ * Maximum number of transmit queues supported by implementation
+ */
+#define TAS_MAX_NUM_QUEUES (8)
+
+/**
+ * Minimum cycle time supported by implementation (in ns)
+ */
+#define TAS_MIN_CYCLE_TIME (1000000)
+
+/**
+ * Minimum TAS window duration supported by implementation (in ns)
+ */
+#define TAS_MIN_WINDOW_DURATION (10000)
+
+/**
+ * List number 0 or 1. Also the value at memory location TAS_ACTIVE_LIST_INDEX
+ */
+enum tas_list_num {
+ TAS_LIST0 = 0,
+ TAS_LIST1 = 1
+};
+
+/**
+ * state of TAS in f/w
+ */
+enum tas_state {
+ /* PRU's are idle */
+ TAS_STATE_DISABLE = 0,
+ /* Enable TAS */
+ TAS_STATE_ENABLE = 1,
+ /* Firmware will reset the state machine */
+ TAS_STATE_RESET = 2,
+};
+
+/**
+ * Config state machine variables. See IEEE Std 802.1Q-2018 8.6.8.4
+ */
+struct tas_config_list {
+ /* New list is copied at this time */
+ u64 config_change_time;
+ /* config change error counter, incremented if
+ * admin->BaseTime < current time and TAS_enabled is true
+ */
+ u32 config_change_error_counter;
+ /* True if list update is pending */
+ u8 config_pending;
+ /* Set to true when the application triggers an update of the
+ * admin list to the active list, cleared when configChangeTime
+ * is updated
+ */
+ u8 config_change;
+};
+
+/**
+ * Max SDU table. See IEEE Std 802.1Q-2018 12.29.1.1
+ */
+struct tas_max_sdu_table {
+ u16 max_sdu[TAS_MAX_NUM_QUEUES];
+};
+
+/**
+ * TAS List Structure based on firmware memory map
+ */
+struct tas_firmware_list {
+ /* window gate mask list */
+ u8 gate_mask_list[TAS_MAX_CMD_LISTS];
+ /* window end time list */
+ u32 window_end_time_list[TAS_MAX_CMD_LISTS];
+ /* Array of gate close time for each queue in each window */
+ u32 gate_close_time_list[TAS_MAX_CMD_LISTS][TAS_MAX_NUM_QUEUES];
+};
+
+/**
+ * Main Time Aware Shaper Handle
+ */
+struct tas_config {
+ enum tas_state state;
+ struct tas_max_sdu_table max_sdu_table;
+ /* Config change variables */
+ struct tas_config_list __iomem *config_list;
+ /* Whether list 0 or list 1 is the operating list */
+ u8 __iomem *active_list;
+ /* active list pointer, used by firmware */
+ struct tas_firmware_list __iomem *firmware_active_list;
+ /* shadow list pointer, used by driver */
+ struct tas_firmware_list __iomem *firmware_shadow_list;
+};
+
+struct prueth_qos_tas {
+ struct tc_taprio_qopt_offload *taprio_admin;
+ struct tc_taprio_qopt_offload *taprio_oper;
+ struct tas_config config;
+};
+
+struct prueth_qos_iet {
+ struct work_struct fpe_config_task;
+ struct completion fpe_config_compl;
+ struct prueth_emac *emac;
+ atomic_t cancel_fpe_config;
+ /* Set through priv flags to enable IET frame preemption */
+ bool fpe_configured;
+ /* Set if IET FPE is active */
+ bool fpe_enabled;
+ /* Set through priv flags to enable IET MAC Verify state machine
+ * in firmware
+ */
+ bool mac_verify_configured;
+};
+
+struct prueth_qos {
+ struct prueth_qos_iet iet;
+ struct prueth_qos_tas tas;
+};
+
+void icssg_qos_init(struct net_device *ndev);
+int icssg_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void icssg_qos_link_up(struct net_device *ndev);
+void icssg_qos_link_down(struct net_device *ndev);
+#endif /* __NET_TI_ICSSG_QOS_H */
diff --git a/drivers/net/ethernet/ti/icssg_queues.c b/drivers/net/ethernet/ti/icssg_queues.c
new file mode 100644
index 000000000000..3c34f61ad40b
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_queues.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ICSSG Buffer queue helpers
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/regmap.h>
+#include "icssg_prueth.h"
+
+#define ICSSG_QUEUES_MAX 64
+#define ICSSG_QUEUE_OFFSET 0xd00
+#define ICSSG_QUEUE_PEEK_OFFSET 0xe00
+#define ICSSG_QUEUE_CNT_OFFSET 0xe40
+#define ICSSG_QUEUE_RESET_OFFSET 0xf40
+
+int icssg_queue_pop(struct prueth *prueth, u8 queue)
+{
+ u32 val, cnt;
+
+ if (queue >= ICSSG_QUEUES_MAX)
+ return -EINVAL;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &cnt);
+ if (!cnt)
+ return -EINVAL;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, &val);
+
+ return val;
+}
+
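+/* Reading the per-queue register at ICSSG_QUEUE_OFFSET pops one entry from
+ * the hardware queue (hence the "pop" helper name), which is why the count
+ * register is checked first: an empty queue returns -EINVAL instead of a
+ * stale buffer address.
+ */
+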
+void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
+{
+ if (queue >= ICSSG_QUEUES_MAX)
+ return;
+
+ regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr);
+}
+
+u32 icssg_queue_level(struct prueth *prueth, int queue)
+{
+ u32 reg;
+
+ if (queue >= ICSSG_QUEUES_MAX)
+ return 0;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &reg);
+
+ return reg;
+}
diff --git a/drivers/net/ethernet/ti/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg_switch_map.h
new file mode 100644
index 000000000000..a5e56b9cd820
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_switch_map.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_SWITCH_MAP_H
+#define __NET_TI_ICSSG_SWITCH_MAP_H
+
+/************************* Ethernet Switch Constants *********************/
+
+/* If the bucket size is changed in firmware then this too should be
+ * changed, because it directly impacts the FDB ageing calculation.
+ */
+#define NUMBER_OF_FDB_BUCKET_ENTRIES (4)
+#define SIZE_OF_FDB (2048) /* This is fixed in ICSSG */
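+/* With 4 entries per bucket, 2048 total entries correspond to
+ * 2048 / 4 = 512 hash buckets (assuming SIZE_OF_FDB counts entries).
+ */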
+
+/* Memory Usage of : SHARED_MEMORY
+ *
+ */
+
+#define FW_LINK_SPEED_1G (0x00)
+#define FW_LINK_SPEED_100M (0x01)
+#define FW_LINK_SPEED_10M (0x02)
+#define FW_LINK_SPEED_HD (0x80)
+
+/* Time after which FDB entries are checked for aged-out values, in nanoseconds */
+#define FDB_AGEING_TIMEOUT_OFFSET 0x0014
+/* Default VLAN tag for Host Port */
+#define HOST_PORT_DF_VLAN_OFFSET 0x001C
+/* Same as HOST_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET HOST_PORT_DF_VLAN_OFFSET
+/* Default VLAN tag for P1 Port */
+#define P1_PORT_DF_VLAN_OFFSET 0x0020
+/* Same as P1_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET P1_PORT_DF_VLAN_OFFSET
+/* Default VLAN tag for P2 Port */
+#define P2_PORT_DF_VLAN_OFFSET 0x0024
+/* Same as P2_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET P2_PORT_DF_VLAN_OFFSET
+/* VLAN-FID table offset. 4096 VIDs, 2B per VID = 8KB = 0x2000 */
+#define VLAN_STATIC_REG_TABLE_OFFSET 0x0100
+/* VLAN-FID table offset for EMAC */
+#define EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET VLAN_STATIC_REG_TABLE_OFFSET
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC0_HI 0x2104
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC0_LO 0x2F6C
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC1_HI 0x3DD4
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC1_LO 0x4C3C
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC0_HI 0x5AA4
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC0_LO 0x5F0C
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC1_HI 0x6374
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC1_LO 0x67DC
+/* Special packet descriptor Q reserved memory */
+#define HOST_SPPD0 0x7AAC
+/* Special packet descriptor Q reserved memory */
+#define HOST_SPPD1 0x7EAC
+/* _Small_Description_ */
+#define TIMESYNC_FW_WC_CYCLECOUNT_OFFSET 0x83EC
+/* IEP count hi roll over count */
+#define TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET 0x83F4
+/* _Small_Description_ */
+#define TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET 0x83F8
+/* Set clock descriptor */
+#define TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET 0x83FC
+/* _Small_Description_ */
+#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET 0x843C
+/* _Small_Description_ */
+#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_COUNT_OFFSET 0x8440
+/* _Small_Description_ */
+#define TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET 0x8444
+/* Control variable to generate SYNC1 */
+#define TIMESYNC_FW_WC_ISOM_PIN_SIGNAL_EN_OFFSET 0x844C
+/* SystemTime Sync0 periodicity */
+#define TIMESYNC_FW_ST_SYNCOUT_PERIOD_OFFSET 0x8450
+/* pktTxDelay for P1 = link-speed-dependent P1 MAC delay + P1 PHY delay */
+#define TIMESYNC_FW_WC_PKTTXDELAY_P1_OFFSET 0x8454
+/* pktTxDelay for P2 = link-speed-dependent P2 MAC delay + P2 PHY delay */
+#define TIMESYNC_FW_WC_PKTTXDELAY_P2_OFFSET 0x8458
+/* Set clock operation done signal for next task */
+#define TIMESYNC_FW_SIG_PNFW_OFFSET 0x845C
+/* Set clock operation done signal for next task */
+#define TIMESYNC_FW_SIG_TIMESYNCFW_OFFSET 0x8460
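/* Editorial note: a minimal sketch of programming one of the shared-memory
 * locations above, assuming 'smem' is the ioremapped base of the ICSSG
 * shared RAM; the VLAN/PCP field packing shown here is an assumption.
 */
static inline void icssg_write_host_df_vlan(void __iomem *smem, u16 vid, u8 pcp)
{
	u32 val = vid | ((u32)pcp << 13);	/* hypothetical packing */

	writel(val, smem + HOST_PORT_DF_VLAN_OFFSET);
}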
+
+/* Memory usage of: MSMC */
+
+/* Memory usage of: DMEM0 */
+
+/* New list is copied at this time */
+#define TAS_CONFIG_CHANGE_TIME 0x000C
+/* Config change error counter */
+#define TAS_CONFIG_CHANGE_ERROR_COUNTER 0x0014
+/* TAS list update pending flag */
+#define TAS_CONFIG_PENDING 0x0018
+/* TAS list update trigger flag */
+#define TAS_CONFIG_CHANGE 0x0019
+/* List length for new TAS schedule */
+#define TAS_ADMIN_LIST_LENGTH 0x001A
+/* Currently active TAS list index */
+#define TAS_ACTIVE_LIST_INDEX 0x001B
+/* Cycle time for the new TAS schedule */
+#define TAS_ADMIN_CYCLE_TIME 0x001C
+/* Cycle counts remaining till the TAS list update */
+#define TAS_CONFIG_CHANGE_CYCLE_COUNT 0x0020
+/* Base flow ID for sending packets to Host for Slice0 */
+#define PSI_L_REGULAR_FLOW_ID_BASE_OFFSET 0x0024
+/* Same as PSI_L_REGULAR_FLOW_ID_BASE_OFFSET */
+#define EMAC_ICSSG_SWITCH_PSI_L_REGULAR_FLOW_ID_BASE_OFFSET PSI_L_REGULAR_FLOW_ID_BASE_OFFSET
+/* Base flow ID for sending mgmt and Tx TS to Host for Slice0 */
+#define PSI_L_MGMT_FLOW_ID_OFFSET 0x0026
+/* Same as PSI_L_MGMT_FLOW_ID_OFFSET */
+#define EMAC_ICSSG_SWITCH_PSI_L_MGMT_FLOW_ID_BASE_OFFSET PSI_L_MGMT_FLOW_ID_OFFSET
+/* Queue number for special packets is written here */
+#define SPL_PKT_DEFAULT_PRIORITY 0x0028
+/* Express Preemptible Queue Mask */
+#define EXPRESS_PRE_EMPTIVE_Q_MASK 0x0029
+/* Port1/Port2 default queue number for untagged packets, only 1B is used */
+#define QUEUE_NUM_UNTAGGED 0x002A
+/* Stores the table used for priority regeneration, 1B per PCP/Queue */
+#define PORT_Q_PRIORITY_REGEN_OFFSET 0x002C
+/* For marking a packet as priority/express (this feature is disabled) or cut-through/S&F */
+#define EXPRESS_PRE_EMPTIVE_Q_MAP 0x0034
+/* Stores the table used for priority mapping, 1B per PCP/Queue */
+#define PORT_Q_PRIORITY_MAPPING_OFFSET 0x003C
+/* Used to notify the FW of the current link speed */
+#define PORT_LINK_SPEED_OFFSET 0x00A8
+/* TAS gate mask for window list0 */
+#define TAS_GATE_MASK_LIST0 0x0100
+/* TAS gate mask for window list1 */
+#define TAS_GATE_MASK_LIST1 0x0350
+/* Memory to enable/disable preemption on TX side */
+#define PRE_EMPTION_ENABLE_TX 0x05A0
+/* Active state of preemption on TX side */
+#define PRE_EMPTION_ACTIVE_TX 0x05A1
+/* Memory to enable/disable verify state machine preemption */
+#define PRE_EMPTION_ENABLE_VERIFY 0x05A2
+/* Verify status of state machine */
+#define PRE_EMPTION_VERIFY_STATUS 0x05A3
+/* Non-final fragment size supported by link partner */
+#define PRE_EMPTION_ADD_FRAG_SIZE_REMOTE 0x05A4
+/* Non-final fragment size supported by firmware */
+#define PRE_EMPTION_ADD_FRAG_SIZE_LOCAL 0x05A6
+/* Time in ms the state machine waits for a response packet */
+#define PRE_EMPTION_VERIFY_TIME 0x05A8
+/* Memory used for R30 related management commands */
+#define MGR_R30_CMD_OFFSET 0x05AC
+/* HW buffer pool0 base address */
+#define BUFFER_POOL_0_ADDR_OFFSET 0x05BC
+/* 16B for Host Egress MSMC Q (preemptible) context */
+#define HOST_RX_Q_PRE_CONTEXT_OFFSET 0x0684
+/* Buffer for 8 FDB entries to be added by the 'Add Multiple FDB entries' IOCTL */
+#define FDB_CMD_BUFFER 0x0894
+/* TAS queue max SDU length list */
+#define TAS_QUEUE_MAX_SDU_LIST 0x08FA
+/* Used by FW to generate a random number with the SEED value */
+#define HD_RAND_SEED_OFFSET 0x0934
+/* 16B for Host Egress MSMC Q (Express) context */
+#define HOST_RX_Q_EXP_CONTEXT_OFFSET 0x0940
+
+/* Memory usage of: DMEM1 */
+
+/* Memory usage of: PA_STAT */
+
+/* Start of 32-bit PA_STAT counters */
+#define PA_STAT_32b_START_OFFSET 0x0080
+
+#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
diff --git a/drivers/net/ethernet/ti/icssg_switchdev.c b/drivers/net/ethernet/ti/icssg_switchdev.c
new file mode 100644
index 000000000000..e42ff73b60d3
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_switchdev.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 ICSSG Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "icssg_prueth.h"
+#include "icssg_switchdev.h"
+#include "icss_mii_rt.h"
+
+struct prueth_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
+static int prueth_switchdev_stp_state_set(struct prueth_emac *emac,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ enum icssg_port_state_cmd emac_state;
+ int ret = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ emac_state = ICSSG_EMAC_PORT_FORWARD;
+ break;
+ case BR_STATE_DISABLED:
+ emac_state = ICSSG_EMAC_PORT_DISABLE;
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ emac_state = ICSSG_EMAC_PORT_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ emac_set_port_state(emac, emac_state);
+ netdev_dbg(emac->ndev, "STP state: %u\n", emac_state);
+
+ return ret;
+}
+
+static int prueth_switchdev_attr_br_flags_set(struct prueth_emac *emac,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ unsigned long brport_flags)
+{
+ enum icssg_port_state_cmd emac_state;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (brport_flags & BR_MCAST_FLOOD)
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_ENABLE;
+ else
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_DISABLE;
+
+ netdev_dbg(emac->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ emac_state, emac->port_id);
+
+ emac_set_port_state(emac, emac_state);
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, emac->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_pre_set(ndev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = prueth_switchdev_stp_state_set(emac, trans,
+ attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_set(emac, trans, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void prueth_switchdev_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void prueth_switchdev_event_work(struct work_struct *work)
+{
+ struct prueth_switchdev_event_work *switchdev_work =
+ container_of(work, struct prueth_switchdev_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ int port_id = emac->port_id;
+ int ret;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(emac->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ break;
+
+ ret = icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), true);
+ if (!ret)
+ prueth_switchdev_fdb_offload_notify(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(emac->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ break;
+ icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), false);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(emac->ndev);
+}
+
+static int prueth_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct prueth_switchdev_event_work *switchdev_work;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ int err;
+
+ if (!prueth_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
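/* Editorial note: the FDB notifier above can be called in atomic context,
 * which is why the allocation uses GFP_ATOMIC and the actual FDB
 * programming is deferred to system_long_wq; dev_hold()/dev_put() keep
 * the netdev alive until prueth_switchdev_event_work() has run.
 */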
+
+static int prueth_switchdev_vlan_add(struct prueth_emac *emac, bool untag, bool pvid,
+ u8 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ if (untag)
+ untag_mask = port_mask;
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X PVID %d\n",
+ vid, port_mask, untag_mask, pvid);
+
+ if (!pvid)
+ return ret;
+
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlan_del(struct prueth_emac *emac, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ icssg_vtbl_modify(emac, vid, port_mask, 0, false);
+
+ if (cpu_port)
+ icssg_fdb_add_del(emac, emac->mac_addr, vid,
+ BIT(PRUETH_PORT_HOST), false);
+
+ if (vid == icssg_get_pvid(emac))
+ icssg_set_pvid(emac->prueth, 0, emac->port_id);
+
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X\n",
+ vid, port_mask);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlans_add(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(emac->ndev, "VID add vid:%u flags:%X\n",
+ vlan->vid_begin, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (vlan->vid_begin > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_add(emac, untag, pvid, vlan->vid_begin,
+ orig_dev);
+}
+
+static int prueth_switchdev_vlans_del(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ if (vlan->vid_begin > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_del(emac, vlan->vid_begin,
+ vlan->obj.orig_dev);
+}
+
+static int prueth_switchdev_mdb_add(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ u8 port_mask, fid_c2;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
+ err = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 | port_mask, true);
+ netdev_dbg(emac->ndev, "MDB add vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int prueth_switchdev_mdb_del(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int del_mask, ret, fid_c2;
+
+ if (cpu_port)
+ del_mask = BIT(PRUETH_PORT_HOST);
+ else
+ del_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
+ if (fid_c2 & ~del_mask)
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 & ~del_mask, true);
+ else
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, 0, false);
+
+ netdev_dbg(emac->ndev, "MDB del vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, del_mask);
+
+ return ret;
+}
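/* Editorial note: a worked example of the mask arithmetic above, assuming
 * PRUETH_PORT_HOST is bit 0 and an entry currently maps to host + port1
 * (fid_c2 = 0b011). Deleting port1 (del_mask = 0b010) leaves 0b001, so
 * the entry is rewritten with the remaining members; deleting the last
 * member makes (fid_c2 & ~del_mask) zero and the entry is removed.
 */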
+
+static int prueth_switchdev_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_add(emac, vlan, trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_add(emac, mdb, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_del(emac, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_del(emac, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_switchdev_nb.notifier_call = &prueth_switchdev_event;
+ ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ if (ret) {
+ dev_err(prueth->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ prueth->prueth_switchdev_bl_nb.notifier_call = &prueth_switchdev_blocking_event;
+ ret = register_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ if (ret) {
+ dev_err(prueth->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ }
+
+ return ret;
+}
+
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+}
diff --git a/drivers/net/ethernet/ti/icssg_switchdev.h b/drivers/net/ethernet/ti/icssg_switchdev.h
new file mode 100644
index 000000000000..0e64e7760a00
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+#ifndef __NET_TI_ICSSG_SWITCHDEV_H
+#define __NET_TI_ICSSG_SWITCHDEV_H
+
+#include "icssg_prueth.h"
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth);
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth);
+bool prueth_dev_check(const struct net_device *ndev);
+
+#endif /* __NET_TI_ICSSG_SWITCHDEV_H */
diff --git a/drivers/net/ethernet/ti/j721e-cpsw-virt-mac.c b/drivers/net/ethernet/ti/j721e-cpsw-virt-mac.c
new file mode 100644
index 000000000000..aa7200d43d82
--- /dev/null
+++ b/drivers/net/ethernet/ti/j721e-cpsw-virt-mac.c
@@ -0,0 +1,1614 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 J721E Virtual Ethernet Switch MAC Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+
+#include "k3-cppi-desc-pool.h"
+
+#define VIRT_CPSW_DRV_VER "0.1"
+
+#define VIRT_CPSW_MAX_TX_QUEUES 1
+#define VIRT_CPSW_MAX_RX_QUEUES 1
+#define VIRT_CPSW_MAX_RX_FLOWS 1
+
+#define VIRT_CPSW_MIN_PACKET_SIZE ETH_ZLEN
+#define VIRT_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+/* Number of TX/RX descriptors */
+#define VIRT_CPSW_MAX_TX_DESC 500
+#define VIRT_CPSW_MAX_RX_DESC 500
+
+#define VIRT_CPSW_NAV_PS_DATA_SIZE 16
+#define VIRT_CPSW_NAV_SW_DATA_SIZE 16
+
+#define VIRT_CPSW_DRV_NAME "j721e-cpsw-virt-mac"
+
+struct virt_cpsw_tx_chn {
+ struct device *dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ u32 descs_num;
+ unsigned int irq;
+ u32 id;
+};
+
+struct virt_cpsw_rx_chn {
+ struct device *dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ unsigned int irq;
+};
+
+struct virt_cpsw_port {
+ struct virt_cpsw_common *common;
+ struct net_device *ndev;
+ const char *name;
+ u8 local_mac_addr[ETH_ALEN];
+};
+
+struct virt_cpsw_common {
+ struct device *dev;
+ struct virt_cpsw_port ports;
+
+ struct virt_cpsw_tx_chn tx_chns;
+ struct napi_struct napi_tx;
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout;
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+ struct virt_cpsw_rx_chn rx_chns;
+ struct napi_struct napi_rx;
+ bool rx_irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
+ u32 mac_only_port;
+
+ const char *rdev_name;
+ struct rpmsg_remotedev *rdev;
+ struct rpmsg_remotedev_eth_switch_ops *rdev_switch_ops;
+ u32 rdev_features;
+ u32 rdev_mtu;
+ u8 rdev_mac_addr[ETH_ALEN];
+ u32 rdev_tx_psil_dst_id;
+ u32 tx_psil_id_base;
+ u32 rdev_rx_flow_id;
+ struct notifier_block virt_cpsw_inetaddr_nb;
+ struct work_struct rx_mode_work;
+ struct workqueue_struct *cmd_wq;
+ struct netdev_hw_addr_list mc_list;
+ unsigned int mac_only:1;
+ unsigned int mc_filter:1;
+};
+
+struct virt_cpsw_ndev_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct virt_cpsw_ndev_priv {
+ struct virt_cpsw_ndev_stats __percpu *stats;
+ struct virt_cpsw_port *port;
+};
+
+#define virt_ndev_to_priv(ndev) \
+ ((struct virt_cpsw_ndev_priv *)netdev_priv(ndev))
+#define virt_ndev_to_port(ndev) (virt_ndev_to_priv(ndev)->port)
+#define virt_ndev_to_common(ndev) (virt_ndev_to_port(ndev)->common)
+
+static void virt_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
+ unsigned int txqueue)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct virt_cpsw_tx_chn *tx_chn = &common->tx_chns;
+ struct netdev_queue *netif_txq;
+ unsigned long trans_start;
+
+	/* process every txq */
+ netif_txq = netdev_get_tx_queue(ndev, txqueue);
+ trans_start = netif_txq->trans_start;
+
+ netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
+ txqueue,
+ netif_tx_queue_stopped(netif_txq),
+ jiffies_to_msecs(jiffies - trans_start),
+ dql_avail(&netif_txq->dql),
+ k3_cppi_desc_pool_avail(tx_chn->desc_pool));
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* try recover if stopped by us */
+ txq_trans_update(netif_txq);
+ netif_tx_wake_queue(netif_txq);
+ }
+}
+
+static int virt_cpsw_nuss_rx_push(struct virt_cpsw_common *common,
+ struct sk_buff *skb)
+{
+ struct cppi5_host_desc_t *desc_rx;
+ struct virt_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct device *dev = common->dev;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ u32 pkt_len = skb_tailroom(skb);
+ void *swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ dev_err(dev, "Failed to map rx skb buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ VIRT_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *((void **)swdata) = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+}
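/* Editorial note: the swdata area stashes the skb pointer so that the
 * completion paths (virt_cpsw_nuss_rx_packets() and
 * virt_cpsw_nuss_rx_cleanup()) can recover it from the descriptor alone,
 * since the descriptor DMA address is all the hardware hands back.
 */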
+
+static int virt_cpsw_nuss_common_open(struct virt_cpsw_common *common,
+ netdev_features_t features)
+{
+ struct sk_buff *skb;
+ int i, ret;
+
+ for (i = 0; i < common->rx_chns.descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL,
+ VIRT_CPSW_MAX_PACKET_SIZE,
+ GFP_KERNEL);
+ if (!skb) {
+ dev_err(common->dev, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ ret = virt_cpsw_nuss_rx_push(common, skb);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit skb to channel rx, error %d\n",
+ ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+ ret = k3_udma_glue_rx_flow_enable(common->rx_chns.rx_chn, 0);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns.tx_chn);
+ if (ret)
+ return ret;
+
+ napi_enable(&common->napi_tx);
+ napi_enable(&common->napi_rx);
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
+
+ return 0;
+}
+
+static void virt_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
+static void virt_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+
+static void virt_cpsw_nuss_common_stop(struct virt_cpsw_common *common)
+{
+ int i;
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, VIRT_CPSW_MAX_TX_QUEUES);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ k3_udma_glue_tdown_tx_chn(common->tx_chns.tx_chn, false);
+
+ i = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "tx teardown timeout\n");
+
+ k3_udma_glue_reset_tx_chn(common->tx_chns.tx_chn,
+ &common->tx_chns,
+ virt_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(common->tx_chns.tx_chn);
+ napi_disable(&common->napi_tx);
+ hrtimer_cancel(&common->tx_hrtimer);
+
+ k3_udma_glue_rx_flow_disable(common->rx_chns.rx_chn, 0);
+ /* Need some delay to process RX ring before reset */
+ msleep(100);
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
+ &common->rx_chns,
+ virt_cpsw_nuss_rx_cleanup, false);
+ napi_disable(&common->napi_rx);
+ hrtimer_cancel(&common->rx_hrtimer);
+ cancel_work_sync(&common->rx_mode_work);
+}
+
+static int virt_cpsw_nuss_del_mc(struct net_device *ndev, const u8 *addr);
+
+static int virt_cpsw_nuss_ndo_stop(struct net_device *ndev)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct device *dev = common->dev;
+ int ret;
+
+ rdev_ops = common->rdev_switch_ops;
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+
+ ret = rdev_ops->unregister_mac(common->rdev, ndev->dev_addr,
+ common->rdev_rx_flow_id);
+ if (ret)
+ dev_err(dev, "unregister_mac rpmsg - fail %d\n", ret);
+
+ __dev_mc_unsync(ndev, virt_cpsw_nuss_del_mc);
+ __hw_addr_init(&common->mc_list);
+ virt_cpsw_nuss_common_stop(common);
+
+ dev_info(common->dev, "virt_cpsw_nuss mac stopped\n");
+ return 0;
+}
+
+static int virt_cpsw_nuss_ndo_open(struct net_device *ndev)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct device *dev = common->dev;
+ int ret;
+
+ rdev_ops = common->rdev_switch_ops;
+ netdev_tx_reset_queue(netdev_get_tx_queue(ndev, 0));
+
+ ret = virt_cpsw_nuss_common_open(common, ndev->features);
+ if (ret)
+ return ret;
+
+ ret = rdev_ops->register_mac(common->rdev,
+ ndev->dev_addr,
+ common->rdev_rx_flow_id);
+ if (ret) {
+ dev_err(dev, "register_mac rpmsg - fail %d\n", ret);
+ virt_cpsw_nuss_common_stop(common);
+ return ret;
+ }
+
+ netif_tx_wake_all_queues(ndev);
+ netif_carrier_on(ndev);
+
+ dev_info(common->dev, "virt_cpsw_nuss mac started\n");
+ return 0;
+}
+
+static void virt_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct virt_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+/* RX psdata[2] word format - checksum information */
+#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
+#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
+#define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
+#define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
+#define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
+#define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
+
+static void virt_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
+{
+	/* HW can verify the checksum of IPv4/IPv6 TCP/UDP packets.
+	 * The csum information is provided in the psdata[2] word:
+	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
+	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
+	 * bits - indicate an IPv4/IPv6 packet
+	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
+	 * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
+	 * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
+	 */
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
+ AM65_CPSW_RX_PSD_IPV4_VALID)) &&
+ !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
+ /* csum for fragmented packets is unsupported */
+ if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
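/* Editorial note: a worked example of the decode above, assuming
 * csum_info == 0x0010FFFF: IPV4_VALID (bit 20) is set, CSUM_ERR (bit 16)
 * and IS_FRAGMENT (bit 17) are clear, and CSUM_ADD == 0xFFFF, so the
 * frame is marked CHECKSUM_UNNECESSARY.
 */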
+
+static int virt_cpsw_nuss_rx_packets(struct virt_cpsw_common *common,
+ u32 flow_idx)
+{
+ struct virt_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct device *dev = common->dev;
+ struct virt_cpsw_ndev_priv *ndev_priv;
+ struct virt_cpsw_ndev_stats *stats;
+ struct net_device *ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ int ret = 0;
+ void **swdata;
+ u32 *psdata;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "RX: pop chn fail %d\n", ret);
+ return ret;
+ }
+
+ if (desc_dma & 0x1) {
+ dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ return 0;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
+ __func__, flow_idx, &desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ /* read port for dbg */
+ dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
+ ndev = common->ports.ndev;
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ csum_info = psdata[2];
+ dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ if (unlikely(!netif_running(skb->dev))) {
+ dev_kfree_skb_any(skb);
+ return -ENODEV;
+ }
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, VIRT_CPSW_MAX_PACKET_SIZE);
+ if (new_skb) {
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ virt_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ kmemleak_not_leak(new_skb);
+ } else {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ }
+
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_dropped++;
+ return -ENODEV;
+ }
+
+ ret = virt_cpsw_nuss_rx_push(common, new_skb);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static enum hrtimer_restart virt_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
+{
+ struct virt_cpsw_common *common =
+ container_of(timer, struct virt_cpsw_common, rx_hrtimer);
+
+ enable_irq(common->rx_chns.irq);
+ return HRTIMER_NORESTART;
+}
+
+static int virt_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct virt_cpsw_common *common =
+ container_of(napi_rx, struct virt_cpsw_common, napi_rx);
+ int num_rx = 0;
+ int cur_budget;
+ int ret;
+
+ /* process every flow */
+ cur_budget = budget;
+
+ while (cur_budget--) {
+ ret = virt_cpsw_nuss_rx_packets(common, 0);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ if (unlikely(common->rx_pace_timeout)) {
+ hrtimer_start(&common->rx_hrtimer,
+ ns_to_ktime(common->rx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(common->rx_chns.irq);
+ }
+ }
+ }
+
+ return num_rx;
+}
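/* Editorial note: when rx_pace_timeout is non-zero the IRQ is not
 * re-enabled directly on NAPI completion; the hrtimer above defers it,
 * batching interrupts to pace the RX path at the configured interval
 * (see the ethtool coalescing hooks further down).
 */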
+
+static void virt_cpsw_nuss_xmit_free(struct virt_cpsw_tx_chn *tx_chn,
+ struct device *dev,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_page(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void virt_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct virt_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ virt_cpsw_nuss_xmit_free(tx_chn, tx_chn->dev, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int virt_cpsw_nuss_tx_compl_packets(struct virt_cpsw_common *common,
+ int chn, unsigned int budget, bool *tdown)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ struct device *dev = common->dev;
+ struct netdev_queue *netif_txq;
+ struct virt_cpsw_tx_chn *tx_chn;
+ struct net_device *ndev;
+ unsigned int total_bytes = 0;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &common->tx_chns;
+
+ while (budget--) {
+ struct virt_cpsw_ndev_priv *ndev_priv;
+ struct virt_cpsw_ndev_stats *stats;
+
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (desc_dma & 0x1) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ *tdown = true;
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ virt_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
+
+ ndev = skb->dev;
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, 0);
+
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+ dev_dbg(dev, "compl 0 %d Bytes\n", total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* Check whether the queue is stopped due to stalled tx dma,
+ * if the queue is stopped then wake the queue as
+ * we have free desc for tx
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static enum hrtimer_restart virt_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
+{
+ struct virt_cpsw_common *common =
+ container_of(timer, struct virt_cpsw_common, tx_hrtimer);
+
+ enable_irq(common->tx_chns.irq);
+ return HRTIMER_NORESTART;
+}
+
+static int virt_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct virt_cpsw_common *common =
+ container_of(napi_tx, struct virt_cpsw_common, napi_tx);
+ bool tdown = false;
+ int num_tx;
+
+ /* process every unprocessed channel */
+ num_tx = virt_cpsw_nuss_tx_compl_packets(common, 0, budget, &tdown);
+
+ if (num_tx >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx)) {
+ if (unlikely(common->tx_pace_timeout && !tdown)) {
+ hrtimer_start(&common->tx_hrtimer,
+ ns_to_ktime(common->tx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(common->tx_chns.irq);
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t virt_cpsw_nuss_rx_irq(int irq, void *dev_id)
+{
+ struct virt_cpsw_common *common = dev_id;
+
+ common->rx_irq_disabled = true;
+ disable_irq_nosync(irq);
+ napi_schedule(&common->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t virt_cpsw_nuss_tx_irq(int irq, void *dev_id)
+{
+ struct virt_cpsw_common *common = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&common->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+static netdev_tx_t virt_cpsw_nuss_ndo_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct device *dev = common->dev;
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct virt_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ dma_addr_t desc_dma, buf_dma;
+ int ret, i;
+ u32 pkt_len;
+ void **swdata;
+ u32 *psdata;
+
+ /* padding enabled in hw */
+ pkt_len = skb_headlen(skb);
+
+ tx_chn = &common->tx_chns;
+ netif_txq = netdev_get_tx_queue(ndev, 0);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb buffer\n");
+ ndev->stats.tx_errors++;
+ goto drop_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ dev_dbg(dev, "Failed to allocate descriptor\n");
+ dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto busy_stop_q;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ VIRT_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
+ cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ /* target port has to be 0 */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, common->mac_only_port);
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *(swdata) = skb;
+ psdata = cppi5_hdesc_get_psdata(first_desc);
+
+ /* HW csum offload if enabled */
+ psdata[2] = 0;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int cs_start, cs_offset;
+
+ cs_start = skb_transport_offset(skb);
+ cs_offset = cs_start + skb->csum_offset;
+		/* HW numbers bytes starting from 1 */
+ psdata[2] = ((cs_offset + 1) << 24) |
+ ((cs_start + 1) << 16) | (skb->len - cs_start);
+ dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
+ }
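	/* Editorial note: a worked example of the encoding above, assuming
	 * IPv4/TCP with 14B Ethernet + 20B IP headers: cs_start = 34 and
	 * skb->csum_offset = 16, so cs_offset = 50 and psdata[2] becomes
	 * (51 << 24) | (35 << 16) | (skb->len - 34), i.e. 1-based result
	 * and start bytes plus the number of bytes to sum.
	 */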
+
+ if (!skb_is_nonlinear(skb))
+ goto done_tx;
+
+ dev_dbg(dev, "fragmented SKB\n");
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ dev_err(dev, "Failed to allocate descriptor\n");
+ goto busy_free_descs;
+ }
+
+ buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ndev->stats.tx_errors++;
+ goto drop_free_descs;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON(pkt_len != skb->len);
+
+done_tx:
+ skb_tx_timestamp(skb);
+
+ /* report bql before sending packet */
+ dev_dbg(dev, "push 0 %d Bytes\n", pkt_len);
+
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ dev_err(dev, "can't push desc %d\n", ret);
+ /* inform bql */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto drop_free_descs;
+ }
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+ dev_dbg(dev, "netif_tx_stop_queue %d\n", 0);
+
+ /* re-check for smp */
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS) {
+ netif_tx_wake_queue(netif_txq);
+ dev_dbg(dev, "netif_tx_wake_queue %d\n", 0);
+ }
+ }
+
+ return NETDEV_TX_OK;
+
+drop_free_descs:
+ virt_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+drop_free_skb:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+busy_free_descs:
+ virt_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+busy_stop_q:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
+static void virt_cpsw_nuss_ndo_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct virt_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct virt_cpsw_ndev_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int virt_cpsw_nuss_add_mc(struct net_device *ndev, const u8 *addr)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct device *dev;
+ int ret;
+
+ dev = common->dev;
+ rdev_ops = common->rdev_switch_ops;
+
+ ret = rdev_ops->filter_add_mc(common->rdev, addr, 0, common->rdev_rx_flow_id);
+ if (ret) {
+ dev_err(dev, "filter_add_mc rpmsg - fail %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int virt_cpsw_nuss_del_mc(struct net_device *ndev, const u8 *addr)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct device *dev;
+ int ret;
+
+ dev = common->dev;
+ rdev_ops = common->rdev_switch_ops;
+
+ ret = rdev_ops->filter_del_mc(common->rdev, addr, 0, common->rdev_rx_flow_id);
+ if (ret) {
+		dev_err(dev, "filter_del_mc rpmsg - fail %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void virt_cpsw_nuss_ndo_set_rx_mode_work(struct work_struct *work)
+{
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct virt_cpsw_common *common;
+ struct net_device *ndev;
+ struct device *dev;
+ int ret;
+
+ common = container_of(work, struct virt_cpsw_common, rx_mode_work);
+ dev = common->dev;
+ rdev_ops = common->rdev_switch_ops;
+
+ if (common->mac_only) {
+ ret = rdev_ops->set_promisc_mode(common->rdev,
+ common->ports.ndev->flags & IFF_PROMISC);
+ if (ret) {
+ dev_err(dev, "set_promisc rpmsg - fail %d\n", ret);
+ return;
+ }
+ } else if (common->mc_filter) {
+ ndev = common->ports.ndev;
+
+ /* make a mc list copy */
+ netif_addr_lock_bh(ndev);
+ __hw_addr_sync(&common->mc_list, &ndev->mc, ndev->addr_len);
+ netif_addr_unlock_bh(ndev);
+
+ __hw_addr_sync_dev(&common->mc_list, ndev,
+ virt_cpsw_nuss_add_mc, virt_cpsw_nuss_del_mc);
+ }
+}
+
+static void virt_cpsw_nuss_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+
+ if (common->mac_only || common->mc_filter)
+ queue_work(common->cmd_wq, &common->rx_mode_work);
+}
+
+static const struct net_device_ops virt_cpsw_nuss_netdev_ops = {
+ .ndo_open = virt_cpsw_nuss_ndo_open,
+ .ndo_stop = virt_cpsw_nuss_ndo_stop,
+ .ndo_start_xmit = virt_cpsw_nuss_ndo_xmit,
+ .ndo_get_stats64 = virt_cpsw_nuss_ndo_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = virt_cpsw_nuss_ndo_host_tx_timeout,
+ .ndo_set_rx_mode = virt_cpsw_nuss_ndo_set_rx_mode,
+};
+
+static void virt_cpsw_nuss_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ rdev_ops = common->rdev_switch_ops;
+
+ strlcpy(info->driver, dev_driver_string(common->dev),
+ sizeof(info->driver));
+ strlcpy(info->version, VIRT_CPSW_DRV_VER, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+
+ rdev_ops->get_fw_ver(common->rdev, fw_version, ETHTOOL_FWVERS_LEN);
+ strlcpy(info->fw_version, fw_version, ETHTOOL_FWVERS_LEN);
+}
+
+static const char virt_cpsw_nuss_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
+ "RPMSG Ping test",
+ "RPMSG Read reg",
+ "RPMSG Dump stat",
+};
+
+static int
+virt_cpsw_nuss_get_sset_count(struct net_device __always_unused *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return ARRAY_SIZE(virt_cpsw_nuss_ethtool_priv_flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+virt_cpsw_nuss_get_strings(struct net_device __always_unused *ndev,
+ u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, virt_cpsw_nuss_ethtool_priv_flags,
+ sizeof(virt_cpsw_nuss_ethtool_priv_flags));
+ break;
+ }
+}
+
+static void virt_cpsw_nuss_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+ struct device *dev = common->dev;
+ static const char ping_data[] = "0123456789";
+	u32 reg_val = 0;
+ int ret;
+
+ data[0] = 0;
+ ret = common->rdev_switch_ops->ping(common->rdev,
+ ping_data, strlen(ping_data));
+ if (ret) {
+ dev_err(dev, "rpmsg ping fail %d\n", ret);
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[0] = 1;
+ }
+
+ data[1] = 0;
+ ret = common->rdev_switch_ops->read_reg(common->rdev,
+ 0x0C000000, &reg_val);
+ if (ret) {
+ dev_err(dev, "rpmsg read_reg fail %d\n", ret);
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[1] = 1;
+ }
+ dev_dbg(dev, "read_reg rpmsg cpsw_nuss_ver - 0x0C000000:%08X\n",
+ reg_val);
+
+ ret = common->rdev_switch_ops->read_reg(common->rdev,
+ 0x0C020000, &reg_val);
+ if (ret) {
+ dev_err(dev, "rpmsg read_reg fail %d\n", ret);
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[1] = 1;
+ }
+ dev_dbg(dev, "read_reg rpmsg cpsw_ver - 0x0C020000:%08X\n",
+ reg_val);
+
+ ret = 0;
+ data[2] = 0;
+ if (common->rdev_features & RPMSG_KDRV_ETHSWITCH_FEATURE_DUMP_STATS)
+ ret = common->rdev_switch_ops->dbg_dump_stats(common->rdev);
+ if (ret) {
+ dev_err(dev, "rpmsg dump_stats fail %d\n", ret);
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[2] = 1;
+ }
+}
+
+static int virt_cpsw_nuss_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+
+ coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
+ coal->tx_coalesce_usecs = common->tx_pace_timeout / 1000;
+ return 0;
+}
+
+static int virt_cpsw_nuss_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ coal->rx_coalesce_usecs = 20;
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ coal->tx_coalesce_usecs = 20;
+
+ common->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+ common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
+ return 0;
+}
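/* Editorial note: these hooks back the standard ethtool coalescing
 * interface, e.g. "ethtool -C <iface> rx-usecs 50 tx-usecs 50"; non-zero
 * values below 20us are silently raised to the 20us floor above.
 */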
+
+const struct ethtool_ops virt_cpsw_nuss_ethtool_ops = {
+ .get_drvinfo = virt_cpsw_nuss_get_drvinfo,
+ .get_sset_count = virt_cpsw_nuss_get_sset_count,
+ .get_strings = virt_cpsw_nuss_get_strings,
+ .self_test = virt_cpsw_nuss_self_test,
+ .get_link = ethtool_op_get_link,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_coalesce = virt_cpsw_nuss_get_coalesce,
+ .set_coalesce = virt_cpsw_nuss_set_coalesce,
+};
+
+static void virt_cpsw_nuss_free_tx_chns(void *data)
+{
+ struct virt_cpsw_common *common = data;
+ struct virt_cpsw_tx_chn *tx_chn = &common->tx_chns;
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+}
+
+static int virt_cpsw_nuss_init_tx_chns(struct virt_cpsw_common *common)
+{
+ u32 max_desc_num = ALIGN(VIRT_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
+ struct virt_cpsw_tx_chn *tx_chn = &common->tx_chns;
+ struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
+ struct device *dev = common->dev;
+ struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0
+ };
+ char tx_chn_name[IFNAMSIZ];
+ u32 hdesc_size, tx_chn_num;
+ int ret = 0, ret1;
+
+ /* convert to tx chn offset */
+ tx_chn_num = common->rdev_tx_psil_dst_id - common->tx_psil_id_base;
+ snprintf(tx_chn_name, sizeof(tx_chn_name), "tx%d", tx_chn_num);
+
+ init_completion(&common->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, VIRT_CPSW_NAV_PS_DATA_SIZE,
+ VIRT_CPSW_NAV_SW_DATA_SIZE);
+
+ tx_cfg.swdata_size = VIRT_CPSW_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+ tx_cfg.tx_cfg.size = max_desc_num;
+ tx_cfg.txcq_cfg.size = max_desc_num;
+
+ tx_chn->dev = dev;
+ tx_chn->id = 0;
+ tx_chn->descs_num = max_desc_num;
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+		dev_err(dev, "Failed to create pool %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->tx_chn = k3_udma_glue_request_tx_chn(dev, tx_chn_name, &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ dev_err(dev, "Failed to request tx dma channel %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq);
+ ret = -ENXIO;
+ }
+
+err:
+ ret1 = devm_add_action(dev, virt_cpsw_nuss_free_tx_chns, common);
+ if (ret1) {
+ dev_err(dev, "failed to add free_tx_chns action %d", ret1);
+ return ret1;
+ }
+
+ return ret;
+}
+
+static void virt_cpsw_nuss_free_rx_chns(void *data)
+{
+ struct virt_cpsw_common *common = data;
+ struct virt_cpsw_rx_chn *rx_chn = &common->rx_chns;
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+
+static int virt_cpsw_nuss_init_rx_chns(struct virt_cpsw_common *common)
+{
+ struct virt_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct k3_udma_glue_rx_channel_cfg rx_cfg = {0};
+ u32 max_desc_num = VIRT_CPSW_MAX_RX_DESC;
+ struct device *dev = common->dev;
+ u32 hdesc_size;
+ int ret = 0, ret1;
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .flags = 0,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ hdesc_size = cppi5_hdesc_calc_size(true, VIRT_CPSW_NAV_PS_DATA_SIZE,
+ VIRT_CPSW_NAV_SW_DATA_SIZE);
+
+ rx_cfg.swdata_size = VIRT_CPSW_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = VIRT_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_base = common->rdev_rx_flow_id;
+ rx_cfg.remote = true;
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+		dev_err(dev, "Failed to create rx pool %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+ goto err;
+ }
+
+ common->rdev_rx_flow_id =
+ k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ dev_dbg(dev, "used flow-id-base %u\n", common->rdev_rx_flow_id);
+
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ 0, &rx_flow_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to init rx flow%d %d\n", 0, ret);
+ goto err;
+ }
+
+ rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, 0);
+ if (rx_chn->irq <= 0) {
+ ret = -ENXIO;
+ dev_err(dev, "Failed to get rx dma irq %d\n", rx_chn->irq);
+ }
+
+err:
+ ret1 = devm_add_action(dev, virt_cpsw_nuss_free_rx_chns, common);
+ if (ret1) {
+ dev_err(dev, "failed to add free_rx_chns action %d", ret1);
+ return ret1;
+ }
+
+ return ret;
+}
+
+static int virt_cpsw_nuss_of(struct virt_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *port_np;
+ struct virt_cpsw_port *port;
+ const void *mac_addr;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "ti,psil-base",
+ &common->tx_psil_id_base);
+ if (ret) {
+ dev_err(dev, "ti,psil-base read fail %d\n", ret);
+ return ret;
+ }
+
+ port_np = of_get_child_by_name(dev->of_node, "virt_emac_port");
+ if (!port_np)
+ return -ENOENT;
+
+ port = &common->ports;
+ port->common = common;
+ port->name = of_get_property(port_np, "ti,label", NULL);
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(port->local_mac_addr, mac_addr);
+
+ of_node_put(port_np);
+ return 0;
+}
+
+static int virt_cpsw_nuss_rdev_init(struct virt_cpsw_common *common)
+{
+ struct rpmsg_rdev_eth_switch_attach_ext_info attach_info = { 0 };
+ struct device *dev = common->dev;
+ int ret;
+
+ ret = common->rdev_switch_ops->attach_ext(common->rdev, &attach_info);
+ if (ret) {
+ dev_err(dev, "rpmsg attach - fail %d\n", ret);
+ return ret;
+ }
+ dev_err(dev, "rpmsg attach_ext - rx_mtu:%d features:%08X tx_mtu[0]:%d flow_idx:%d tx_cpsw_psil_dst_id:%d mac_addr:%pM mac-only:%d\n",
+ attach_info.rx_mtu, attach_info.features,
+ attach_info.tx_mtu[0],
+ attach_info.flow_idx,
+ attach_info.tx_cpsw_psil_dst_id,
+ attach_info.mac_addr,
+ attach_info.mac_only_port);
+ common->rdev_features = attach_info.features;
+ common->rdev_mtu = VIRT_CPSW_MAX_PACKET_SIZE;
+ common->rdev_tx_psil_dst_id = attach_info.tx_cpsw_psil_dst_id &
+ (~0x8000);
+ common->rdev_rx_flow_id = attach_info.flow_idx;
+ ether_addr_copy(common->rdev_mac_addr, attach_info.mac_addr);
+
+ if (common->rdev_features & RPMSG_KDRV_ETHSWITCH_FEATURE_MAC_ONLY) {
+ common->mac_only = true;
+ common->mac_only_port = attach_info.mac_only_port;
+ }
+
+ if (common->rdev_features & RPMSG_KDRV_ETHSWITCH_FEATURE_MC_FILTER)
+ common->mc_filter = true;
+
+ if (!common->mac_only && common->mac_only_port)
+ return -EINVAL;
+
+ return 0;
+}
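/* Editorial note: the attach response carries the absolute PSI-L
 * destination thread ID; bit 15 conventionally marks destination threads
 * on K3 PSI-L, so it is masked off above before the ID is converted into
 * a local tx channel offset in virt_cpsw_nuss_init_tx_chns().
 */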
+
+static int virt_cpsw_nuss_init_ndev(struct virt_cpsw_common *common)
+{
+ struct virt_cpsw_ndev_priv *ndev_priv;
+ struct device *dev = common->dev;
+ struct virt_cpsw_port *port;
+ int ret;
+
+ port = &common->ports;
+
+ /* alloc netdev */
+ port->ndev = devm_alloc_etherdev_mqs(common->dev,
+ sizeof(struct virt_cpsw_ndev_priv),
+ 1, 1);
+ if (!port->ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ ndev_priv = netdev_priv(port->ndev);
+ ndev_priv->port = port;
+ SET_NETDEV_DEV(port->ndev, dev);
+
+ if (is_valid_ether_addr(port->local_mac_addr))
+ ether_addr_copy(port->ndev->dev_addr, port->local_mac_addr);
+ else if (is_valid_ether_addr(common->rdev_mac_addr))
+ ether_addr_copy(port->ndev->dev_addr, common->rdev_mac_addr);
+
+ port->ndev->min_mtu = VIRT_CPSW_MIN_PACKET_SIZE;
+ port->ndev->max_mtu = VIRT_CPSW_MAX_PACKET_SIZE;
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ port->ndev->features = port->ndev->hw_features;
+ port->ndev->vlan_features |= NETIF_F_SG;
+ port->ndev->netdev_ops = &virt_cpsw_nuss_netdev_ops;
+ port->ndev->ethtool_ops = &virt_cpsw_nuss_ethtool_ops;
+
+ /* TX checksum offload if supported */
+ if (common->rdev_features & RPMSG_KDRV_ETHSWITCH_FEATURE_TXCSUM)
+ port->ndev->features |= NETIF_F_HW_CSUM;
+
+ ndev_priv->stats = netdev_alloc_pcpu_stats(struct virt_cpsw_ndev_stats);
+ if (!ndev_priv->stats)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))free_percpu,
+ ndev_priv->stats);
+ if (ret) {
+ dev_err(dev, "failed to add percpu stat free action %d", ret);
+ return ret;
+ }
+
+ netif_tx_napi_add(port->ndev, &common->napi_tx,
+ virt_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(port->ndev, &common->napi_rx,
+ virt_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+
+ hrtimer_init(&common->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ common->tx_hrtimer.function = &virt_cpsw_nuss_tx_timer_callback;
+ hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ common->rx_hrtimer.function = &virt_cpsw_nuss_rx_timer_callback;
+
+ ret = register_netdev(port->ndev);
+ if (ret)
+ dev_err(dev, "error registering slave net device %d\n", ret);
+
+	/* can't auto unregister ndev using devm_add_action() due to the
+	 * broken devres release sequence in the driver core
+	 */
+
+ return ret;
+}
+
+static void virt_cpsw_nuss_cleanup_ndev(struct virt_cpsw_common *common)
+{
+ if (common->ports.ndev)
+ unregister_netdev(common->ports.ndev);
+}
+
+static bool virt_cpsw_dev_check(const struct net_device *ndev)
+{
+ struct virt_cpsw_common *common = virt_ndev_to_common(ndev);
+
+ return ndev->netdev_ops == &virt_cpsw_nuss_netdev_ops && !common->mac_only;
+}
+
+static int virt_cpsw_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct rpmsg_remotedev_eth_switch_ops *rdev_ops;
+ struct net_device *ndev = ifa->ifa_dev->dev;
+ struct virt_cpsw_common *common;
+ int ret = 0;
+
+ if (!virt_cpsw_dev_check(ndev))
+ goto out;
+
+ common = virt_ndev_to_common(ndev);
+ rdev_ops = common->rdev_switch_ops;
+ switch (event) {
+ case NETDEV_UP:
+ ret = rdev_ops->register_ipv4(common->rdev,
+ ndev->dev_addr,
+ ifa->ifa_address);
+ if (ret)
+ dev_err(common->dev, "register_ipv4 rpmsg - fail %d\n",
+ ret);
+ dev_dbg(common->dev, "NETDEV_UP %pI4 %s\n",
+ &ifa->ifa_address, ifa->ifa_label);
+ break;
+
+ case NETDEV_DOWN:
+ ret = rdev_ops->unregister_ipv4(common->rdev,
+ ifa->ifa_address);
+ if (ret)
+ dev_err(common->dev, "unregister_ipv4 rpmsg - fail %d\n",
+ ret);
+ dev_dbg(common->dev, "NETDEV_DOWN %pI4\n", &ifa->ifa_address);
+ break;
+ }
+
+out:
+ return notifier_from_errno(ret);
+}
+
+static const struct of_device_id virt_cpsw_virt_of_mtable[] = {
+ { .compatible = "ti,j721e-cpsw-virt-mac", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, virt_cpsw_virt_of_mtable);
+
+static int virt_cpsw_nuss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct virt_cpsw_common *common;
+ int ret;
+
+ common = devm_kzalloc(dev, sizeof(struct virt_cpsw_common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ ret = of_property_read_string(dev->of_node, "ti,remote-name",
+ &common->rdev_name);
+ if (ret < 0) {
+ dev_info(dev, "remote-name is not set %d\n", ret);
+ return ret;
+ }
+
+ common->rdev = rpmsg_remotedev_get_named_device(common->rdev_name);
+ if (!common->rdev)
+ return -EPROBE_DEFER;
+ if (IS_ERR(common->rdev)) {
+ ret = PTR_ERR(common->rdev);
+ return ret;
+ }
+ common->rdev_switch_ops = common->rdev->device.eth_switch.ops;
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))rpmsg_remotedev_put_device,
+ common->rdev);
+ if (ret) {
+ dev_err(dev, "add remotedev put device action fail:%d", ret);
+ return ret;
+ }
+
+ ret = virt_cpsw_nuss_of(common);
+ if (ret)
+ return ret;
+
+ ret = virt_cpsw_nuss_rdev_init(common);
+ if (ret)
+ return ret;
+ /* init tx channels */
+ ret = virt_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+ ret = virt_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ return ret;
+
+ if (common->tx_chns.irq == 0 || common->rx_chns.irq == 0)
+ return -ENXIO;
+
+ dev_set_drvdata(dev, common);
+ __hw_addr_init(&common->mc_list);
+ INIT_WORK(&common->rx_mode_work, virt_cpsw_nuss_ndo_set_rx_mode_work);
+ common->cmd_wq = create_singlethread_workqueue("virt_cpsw");
+ if (!common->cmd_wq) {
+ dev_err(dev, "failure requesting wq\n");
+ return -ENOMEM;
+ }
+
+ ret = virt_cpsw_nuss_init_ndev(common);
+ if (ret)
+ return ret;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "error setting dma mask: %d\n", ret);
+ goto unreg_ndev;
+ }
+
+ ret = devm_request_irq(dev, common->tx_chns.irq,
+ virt_cpsw_nuss_tx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting tx irq %u, %d\n",
+ common->tx_chns.irq, ret);
+ goto unreg_ndev;
+ }
+
+ ret = devm_request_irq(dev, common->rx_chns.irq,
+ virt_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ common->rx_chns.irq, ret);
+ goto unreg_ndev;
+ }
+
+ if (!common->mac_only) {
+ common->virt_cpsw_inetaddr_nb.notifier_call = &virt_cpsw_inetaddr_event;
+ register_inetaddr_notifier(&common->virt_cpsw_inetaddr_nb);
+ }
+
+ dev_info(common->dev, "virt_cpsw_nuss mac loaded\n");
+ dev_info(dev, "rdev_features:%08X rdev_mtu:%d flow_id:%d tx_psil_dst_id:%04X mac_only:%d\n",
+ common->rdev_features,
+ common->rdev_mtu,
+ common->rdev_rx_flow_id,
+ common->rdev_tx_psil_dst_id,
+ common->mac_only_port);
+ dev_info(dev, "local_mac_addr:%pM rdev_mac_addr:%pM\n",
+ common->ports.local_mac_addr,
+ common->rdev_mac_addr);
+
+ return 0;
+
+unreg_ndev:
+ virt_cpsw_nuss_cleanup_ndev(common);
+ return ret;
+}
+
+static int virt_cpsw_nuss_remove(struct platform_device *pdev)
+{
+ struct virt_cpsw_common *common = platform_get_drvdata(pdev);
+ struct device *dev = common->dev;
+ int ret;
+
+ if (!common->mac_only)
+ unregister_inetaddr_notifier(&common->virt_cpsw_inetaddr_nb);
+
+	/* must unregister ndevs here because the driver core's
+	 * release_driver routine calls dma_deconfigure(dev) before
+	 * devres_release_all(dev)
+	 */
+	virt_cpsw_nuss_cleanup_ndev(common);
+	destroy_workqueue(common->cmd_wq);
+
+ ret = common->rdev_switch_ops->detach(common->rdev);
+ if (ret)
+ dev_err(dev, "rpmsg detach - fail %d\n", ret);
+
+ return 0;
+}
+
+static struct platform_driver virt_cpsw_nuss_driver = {
+ .driver = {
+ .name = VIRT_CPSW_DRV_NAME,
+ .of_match_table = virt_cpsw_virt_of_mtable,
+ },
+ .probe = virt_cpsw_nuss_probe,
+ .remove = virt_cpsw_nuss_remove,
+};
+
+module_platform_driver(virt_cpsw_nuss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI J721E VIRT CPSW Ethernet mac driver");
diff --git a/drivers/net/ethernet/ti/prueth.h b/drivers/net/ethernet/ti/prueth.h
new file mode 100644
index 000000000000..6bfbdc491e35
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth.h
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PRU ICSS Ethernet driver
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_PRUETH_H
+#define __NET_TI_PRUETH_H
+
+#include <linux/types.h>
+#include <linux/phy.h>
+#include <linux/pruss.h>
+#include <net/lredev.h>
+
+#include "icss_switch.h"
+#include "prueth_ptp.h"
+
+#define PRUETH_NUMQUEUES 5
+
+/* PRUSS local memory map */
+#define ICSS_LOCAL_SHARED_RAM 0x00010000
+
+#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
+#define EMAC_MAX_PKTLEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+
+#define PRUETH_NSP_TIMER_MS (100) /* Refresh NSP counters every 100ms */
+/* default timer for NSP and HSR/PRP */
+#define PRUETH_TIMER_MS (10)
+
+#define PRUETH_REG_DUMP_VER 1
+
+/* Encoding: bits 31-16: reserved, 15-8: reg dump version, 7-0: ethertype */
+#define PRUETH_REG_DUMP_GET_VER(x) ((PRUETH_REG_DUMP_VER << 8) | ((x)->eth_type))
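+/* e.g. with PRUETH_REG_DUMP_VER == 1 and eth_type == PRUSS_ETHTYPE_PRP (2),
+ * PRUETH_REG_DUMP_GET_VER() yields (1 << 8) | 2 = 0x0102
+ */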
+
+/* PRU Ethernet Type - Ethernet functionality (protocol
+ * implemented) provided by the PRU firmware being loaded.
+ */
+enum pruss_ethtype {
+ PRUSS_ETHTYPE_EMAC = 0,
+ PRUSS_ETHTYPE_HSR,
+ PRUSS_ETHTYPE_PRP,
+ PRUSS_ETHTYPE_SWITCH,
+ PRUSS_ETHTYPE_MAX,
+};
+
+#define PRUETH_IS_EMAC(p) ((p)->eth_type == PRUSS_ETHTYPE_EMAC)
+#define PRUETH_IS_SWITCH(p) ((p)->eth_type == PRUSS_ETHTYPE_SWITCH)
+#define PRUETH_IS_HSR(p) ((p)->eth_type == PRUSS_ETHTYPE_HSR)
+#define PRUETH_IS_PRP(p) ((p)->eth_type == PRUSS_ETHTYPE_PRP)
+#define PRUETH_IS_LRE(p) (PRUETH_IS_HSR(p) || PRUETH_IS_PRP(p))
+
+/**
+ * struct prueth_queue_desc - Queue descriptor
+ * @rd_ptr: Read pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @wr_ptr: Write pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @busy_s: Slave queue busy flag, set by the slave (us) to request access
+ * from the master (PRU).
+ * @status: Bit field status register, Bits:
+ * 0: Master queue busy flag.
+ * 1: Packet has been placed in collision queue.
+ * 2: Packet has been discarded due to overflow.
+ * @max_fill_level: Maximum queue usage seen.
+ * @overflow_cnt: Count of queue overflows.
+ *
+ * Each port has up to 4 queues of variable length. A queue is processed
+ * as a ring buffer with read and write pointers. Both pointers are address
+ * pointers and increment by 4 for each buffer descriptor position. Each
+ * queue has a length defined in constants and a status.
+ */
+struct prueth_queue_desc {
+ u16 rd_ptr;
+ u16 wr_ptr;
+ u8 busy_s;
+ u8 status;
+ u8 max_fill_level;
+ u8 overflow_cnt;
+} __packed;
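+
+/* For a tx queue, rd_ptr == wr_ptr means the queue is empty: the host
+ * advances wr_ptr when enqueuing (see prueth_tx_enqueue()) and the PRU
+ * advances rd_ptr as it transmits.
+ */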
+
+/**
+ * struct prueth_queue_info - Information about a queue in memory
+ * @buffer_offset: buffer offset in OCMC RAM
+ * @queue_desc_offset: queue descriptor offset in Shared RAM
+ * @buffer_desc_offset: buffer descriptors offset in Shared RAM
+ * @buffer_desc_end: end address of buffer descriptors in Shared RAM
+ */
+struct prueth_queue_info {
+ u16 buffer_offset;
+ u16 queue_desc_offset;
+ u16 buffer_desc_offset;
+ u16 buffer_desc_end;
+} __packed;
+
+/**
+ * struct prueth_packet_info - Info about a packet in buffer
+ * @start_offset: start offset of the frame in the buffer for HSR/PRP
+ * @shadow: this packet is stored in the collision queue
+ * @port: port packet is on
+ * @length: length of packet
+ * @broadcast: this packet is a broadcast packet
+ * @error: this packet has an error
+ * @sv_frame: indicate if the frame is a SV frame for HSR/PRP
+ * @lookup_success: src mac found in FDB
+ * @flood: packet is to be flooded
+ * @timestamp: Specifies if a timestamp is appended to the packet
+ */
+struct prueth_packet_info {
+ bool start_offset;
+ bool shadow;
+ unsigned int port;
+ unsigned int length;
+ bool broadcast;
+ bool error;
+ bool sv_frame;
+ bool lookup_success;
+ bool flood;
+ bool timestamp;
+};
+
+/**
+ * struct port_statistics - Statistics structure for capturing statistics
+ * on PRUs
+ * @tx_bcast: Number of broadcast packets sent
+ * @tx_mcast: Number of multicast packets sent
+ * @tx_ucast: Number of unicast packets sent
+ *
+ * @tx_octets: Number of bytes sent
+ *
+ * @rx_bcast: Number of broadcast packets rcvd
+ * @rx_mcast: Number of multicast packets rcvd
+ * @rx_ucast: Number of unicast packets rcvd
+ *
+ * @rx_octets: Number of bytes rcvd
+ *
+ * @tx64byte: Number of 64 byte packets sent
+ * @tx65_127byte: Number of 65-127 byte packets sent
+ * @tx128_255byte: Number of 128-255 byte packets sent
+ * @tx256_511byte: Number of 256-511 byte packets sent
+ * @tx512_1023byte: Number of 512-1023 byte packets sent
+ * @tx1024byte: Number of 1024 and larger size packets sent
+ *
+ * @rx64byte: Number of 64 byte packets rcvd
+ * @rx65_127byte: Number of 65-127 byte packets rcvd
+ * @rx128_255byte: Number of 128-255 byte packets rcvd
+ * @rx256_511byte: Number of 256-511 byte packets rcvd
+ * @rx512_1023byte: Number of 512-1023 byte packets rcvd
+ * @rx1024byte: Number of 1024 and larger size packets rcvd
+ *
+ * @late_coll: Number of late collisions (Half Duplex)
+ * @single_coll: Number of single collisions (Half Duplex)
+ * @multi_coll: Number of multiple collisions (Half Duplex)
+ * @excess_coll: Number of excess collisions (Half Duplex)
+ *
+ * @rx_misalignment_frames: Number of non multiple of 8 byte frames rcvd
+ * @stormprev_counter_bc: Number of broadcast packets dropped by storm
+ * prevention
+ * @stormprev_counter_mc: Number of multicast packets dropped by storm
+ * prevention
+ * @stormprev_counter_uc: Number of unicast packets dropped by storm
+ * prevention
+ * @mac_rxerror: Number of MAC receive errors
+ * @sfd_error: Number of invalid SFD
+ * @def_tx: Number of transmissions deferred
+ * @mac_txerror: Number of MAC transmit errors
+ * @rx_oversized_frames: Number of oversized frames rcvd
+ * @rx_undersized_frames: Number of undersized frames rcvd
+ * @rx_crc_frames: Number of CRC error frames rcvd
+ * @dropped_packets: Number of packets dropped due to link down on opposite port
+ *
+ * @tx_hwq_overflow: Hardware Tx Queue (on PRU) overflow count
+ * @tx_hwq_underflow: Hardware Tx Queue (on PRU) underflow count
+ *
+ * @cs_error: Number of carrier sense errors
+ * @sqe_test_error: Number of SQE test errors
+ *
+ * The above fields are laid out to match the memory layout in PRU DRAM,
+ * to facilitate easy memcpy. Don't change the order of the fields.
+ *
+ * @vlan_dropped: Number of VLAN tagged packets dropped
+ * @multicast_dropped: Number of multicast packets dropped
+ */
+struct port_statistics {
+ u32 tx_bcast;
+ u32 tx_mcast;
+ u32 tx_ucast;
+
+ u32 tx_octets;
+
+ u32 rx_bcast;
+ u32 rx_mcast;
+ u32 rx_ucast;
+
+ u32 rx_octets;
+
+ u32 tx64byte;
+ u32 tx65_127byte;
+ u32 tx128_255byte;
+ u32 tx256_511byte;
+ u32 tx512_1023byte;
+ u32 tx1024byte;
+
+ u32 rx64byte;
+ u32 rx65_127byte;
+ u32 rx128_255byte;
+ u32 rx256_511byte;
+ u32 rx512_1023byte;
+ u32 rx1024byte;
+
+ u32 late_coll;
+ u32 single_coll;
+ u32 multi_coll;
+ u32 excess_coll;
+
+ u32 rx_misalignment_frames;
+ u32 stormprev_counter_bc;
+ u32 stormprev_counter_mc;
+ u32 stormprev_counter_uc;
+ u32 mac_rxerror;
+ u32 sfd_error;
+ u32 def_tx;
+ u32 mac_txerror;
+ u32 rx_oversized_frames;
+ u32 rx_undersized_frames;
+ u32 rx_crc_frames;
+ u32 dropped_packets;
+
+ u32 tx_hwq_overflow;
+ u32 tx_hwq_underflow;
+
+ u32 cs_error;
+ u32 sqe_test_error;
+
+ u32 vlan_dropped;
+ u32 multicast_dropped;
+} __packed;
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addresses;
+ * however, Linux sees only the host side port. The other 2 ports
+ * are the switch ports.
+ * In emac mode there are 2 real ports, i.e. 2 MAC addresses, and
+ * Linux sees both of them.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port MII 0 */
+ PRUETH_PORT_MII1, /* physical port MII 1 */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+};
+
+/* In both switch & emac modes there are 3 port queues
+ * EMAC mode:
+ * RX packets for both MII0 & MII1 ports come on
+ * QUEUE_HOST.
+ * TX packets for MII0 go on QUEUE_MII0, TX packets
+ * for MII1 go on QUEUE_MII1.
+ * Switch mode:
+ * Host port RX packets come on QUEUE_HOST
+ * TX packets might have to go on MII0 or MII1 or both.
+ * MII0 TX queue is QUEUE_MII0 and MII1 TX queue is
+ * QUEUE_MII1.
+ */
+enum prueth_port_queue_id {
+ PRUETH_PORT_QUEUE_HOST = 0,
+ PRUETH_PORT_QUEUE_MII0,
+ PRUETH_PORT_QUEUE_MII1,
+ PRUETH_PORT_QUEUE_MAX,
+};
+
+/* Each port queue has 4 queues and 1 collision queue */
+enum prueth_queue_id {
+ PRUETH_QUEUE1 = 0,
+ PRUETH_QUEUE2,
+ PRUETH_QUEUE3,
+ PRUETH_QUEUE4,
+ PRUETH_COLQUEUE, /* collision queue */
+};
+
+/* PRUeth memory range identifiers */
+enum prueth_mem {
+ PRUETH_MEM_DRAM0 = 0,
+ PRUETH_MEM_DRAM1,
+ PRUETH_MEM_SHARED_RAM,
+ PRUETH_MEM_OCMC,
+ PRUETH_MEM_MAX,
+};
+
+/* Firmware offsets/size information */
+struct prueth_fw_offsets {
+ u32 index_array_offset;
+ u32 bin_array_offset;
+ u32 nt_array_offset;
+ u32 index_array_loc;
+ u32 bin_array_loc;
+ u32 nt_array_loc;
+ u32 index_array_max_entries;
+ u32 bin_array_max_entries;
+ u32 nt_array_max_entries;
+ u32 vlan_ctrl_byte;
+ u32 vlan_filter_tbl;
+ u32 mc_ctrl_byte;
+ u32 mc_filter_mask;
+ u32 mc_filter_tbl;
+ /* IEP wrap is used in the rx packet ordering logic and
+ * is different for ICSSM v1.0 vs 2.1
+ */
+ u32 iep_wrap;
+ u16 hash_mask;
+};
+
+/**
+ * struct prueth_firmware - PRU firmware names
+ * @fw_name: firmware name to run on the PRU for each Ethernet type
+ */
+struct prueth_firmware {
+ const char *fw_name[PRUSS_ETHTYPE_MAX];
+};
+
+/**
+ * struct prueth_private_data - PRU Ethernet private data
+ * @fw_pru: firmware names to be used for PRUSS ethernet usecases
+ * @support_lre: firmware supports HSR/PRP (LRE) mode
+ * @support_switch: firmware supports switch mode
+ */
+struct prueth_private_data {
+ const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+ bool support_lre;
+ bool support_switch;
+};
+
+struct nsp_counter {
+ unsigned long cookie;
+ u16 credit;
+};
+
+/* data for each emac port */
+struct prueth_emac {
+ struct prueth *prueth;
+ struct net_device *ndev;
+ u8 mac_addr[6];
+ u32 msg_enable;
+
+ int link;
+ int speed;
+ int duplex;
+
+ const char *phy_id;
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ struct phy_device *phydev;
+ struct rproc *pru;
+
+ enum prueth_port port_id;
+ enum prueth_port_queue_id tx_port_queue;
+
+ enum prueth_queue_id rx_queue_start;
+ enum prueth_queue_id rx_queue_end;
+
+ enum prueth_mem dram;
+
+ int rx_irq;
+ int tx_irq;
+
+ struct prueth_queue_desc __iomem *rx_queue_descs;
+ struct prueth_queue_desc __iomem *tx_queue_descs;
+
+ struct port_statistics stats; /* stats holder when i/f is down */
+ unsigned char mc_filter_mask[ETH_ALEN]; /* for multicast filtering */
+
+ spinlock_t lock; /* serialize access */
+ spinlock_t addr_lock; /* serialize access to VLAN/MC filter table */
+
+ struct nsp_counter nsp_bc;
+ struct nsp_counter nsp_mc;
+ struct nsp_counter nsp_uc;
+ bool nsp_enabled;
+
+ int offload_fwd_mark;
+
+ struct sk_buff *ptp_skb[PRUETH_PTP_TS_EVENTS];
+ struct sk_buff *ptp_ct_skb[PRUETH_PTP_TS_EVENTS];
+ spinlock_t ptp_skb_lock; /* serialize access */
+ int emac_ptp_tx_irq;
+ int hsr_ptp_tx_irq;
+ bool ptp_tx_enable;
+ bool ptp_rx_enable;
+};
+
+struct prueth_ndev_priority {
+ struct net_device *ndev;
+ int priority;
+};
+
+/**
+ * struct prueth - PRUeth structure
+ * @dev: device
+ * @pruss: pruss handle
+ * @pru0: rproc instance to PRU0
+ * @pru1: rproc instance to PRU1
+ * @mem: PRUSS memory resources we need to access
+ * @sram_pool: OCMC ram pool for buffers
+ * @mii_rt: regmap to mii_rt block
+ * @iep: Pointer to ICSS IEP data
+ *
+ * @eth_node: DT node for each emac port
+ * @emac: emac data for the two physical ports
+ * @registered_netdevs: net device for each registered emac
+ *
+ * @hw_bridge_dev: pointer to hw_bridge device
+ * @fdb_tbl: pointer to FDB table struct
+ *
+ * @prueth_ndev_nb: netdev notifier block
+ * @prueth_sw_switchdev_notifier: non blocking switchdev notifier block
+ * @prueth_sw_switchdev_bl_notifier: blocking switchdev notifier block
+ *
+ * @emac_configured: bit mask to configured ports
+ * @br_members: bit mask indicating ports that are part of the bridge
+ * @eth_type: flag indicate firmware mode (Dual emac vs Switch etc)
+ * @base_mac: random mac used as physical ID for each port of a switch
+ */
+struct prueth {
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru0, *pru1;
+ struct pruss_mem_region mem[PRUETH_MEM_MAX];
+ struct gen_pool *sram_pool;
+ struct regmap *mii_rt;
+ struct icss_iep *iep;
+ struct hrtimer tbl_check_timer;
+ const struct prueth_private_data *fw_data;
+ struct prueth_fw_offsets *fw_offsets;
+
+ /* HSR-PRP */
+ bool support_lre;
+ struct prueth_ndev_priority *hp, *lp;
+ int rx_lpq_irq;
+ int rx_hpq_irq;
+ unsigned int hsr_mode;
+ unsigned int tbl_check_period;
+ unsigned int node_table_clear;
+ unsigned int node_table_clear_last_cmd;
+ unsigned int tbl_check_mask;
+ enum iec62439_3_tr_modes prp_tr_mode;
+ struct node_tbl *nt;
+ struct nt_queue_t *mac_queue;
+ struct kthread_worker *nt_kworker;
+ struct kthread_work nt_work;
+ u32 rem_cnt;
+ /* lock between kthread worker and rx packet processing code */
+ spinlock_t nt_lock;
+ struct lre_statistics *lre_stats;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+ struct device_node *prueth_np;
+
+ struct net_device *hw_bridge_dev;
+ struct fdb_tbl *fdb_tbl;
+
+ struct notifier_block prueth_ndev_nb;
+ struct notifier_block prueth_sw_switchdev_notifier;
+ struct notifier_block prueth_sw_switchdev_bl_notifier;
+
+ unsigned int eth_type;
+	/* mutex to enter critical region in ndo_open() and
+	 * ndo_stop() as common resources for switch based firmware are
+	 * to be initialized for the first port in ndo_open() and
+	 * cleaned up on the last port in ndo_stop().
+ */
+ u8 emac_configured;
+ u8 br_members;
+ u8 base_mac[ETH_ALEN];
+};
+
+int emac_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+void parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info);
+int emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue);
+int emac_add_del_vid(struct prueth_emac *emac,
+ bool add, __be16 proto, u16 vid);
+irqreturn_t prueth_ptp_tx_irq_handle(int irq, void *dev);
+irqreturn_t prueth_ptp_tx_irq_work(int irq, void *dev);
+
+extern const struct prueth_queue_desc queue_descs[][NUM_QUEUES];
+
+void emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash);
+void emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash);
+u8 emac_get_mc_hash(u8 *mac, u8 *mask);
+#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/prueth_core.c b/drivers/net/ethernet/ti/prueth_core.c
new file mode 100644
index 000000000000..c4cb25422c8a
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_core.c
@@ -0,0 +1,3526 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PRU ICSS Ethernet Driver
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ * Roger Quadros <rogerq@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/pruss.h>
+#include <linux/ptp_classify.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <net/pkt_cls.h>
+
+#include "prueth.h"
+#include "icss_mii_rt.h"
+#include "icss_vlan_mcast_filter_mmap.h"
+#include "prueth_lre.h"
+#include "prueth_switch.h"
+#include "icss_iep.h"
+
+#define PRUETH_MODULE_VERSION "0.2"
+#define PRUETH_MODULE_DESCRIPTION "PRUSS Ethernet driver"
+
+#define OCMC_RAM_SIZE (SZ_64K - SZ_8K)
+#define PRUETH_ETH_TYPE_OFFSET 12
+#define PRUETH_ETH_TYPE_UPPER_SHIFT 8
+
+/* TX Minimum Inter packet gap */
+#define TX_MIN_IPG 0xb8
+
+#define TX_START_DELAY 0x40
+#define TX_CLK_DELAY_100M 0x6
+#define TX_CLK_DELAY_10M 0
+
+/* PRUSS_IEP_GLOBAL_CFG register definitions */
+#define PRUSS_IEP_GLOBAL_CFG 0
+
+#define PRUSS_IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+static int debug_level = -1;
+module_param(debug_level, int, 0444);
+MODULE_PARM_DESC(debug_level, "PRUETH debug level (NETIF_MSG bits)");
+
+/* ensure that the order of PRUSS mem regions is the same as enum prueth_mem */
+static enum pruss_mem pruss_mem_ids[] = { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2 };
+
+static struct prueth_fw_offsets fw_offsets_v2_1 = {
+ .hash_mask = ICSS_LRE_V2_1_HASH_MASK,
+ .index_array_offset = ICSS_LRE_V2_1_INDEX_ARRAY_NT,
+ .bin_array_offset = ICSS_LRE_V2_1_BIN_ARRAY,
+ .nt_array_offset = ICSS_LRE_V2_1_NODE_TABLE_NEW,
+ .index_array_loc = ICSS_LRE_V2_1_INDEX_ARRAY_LOC,
+ .bin_array_loc = ICSS_LRE_V2_1_BIN_ARRAY_LOC,
+ .nt_array_loc = ICSS_LRE_V2_1_NODE_TABLE_LOC,
+ .index_array_max_entries = ICSS_LRE_V2_1_INDEX_TBL_MAX_ENTRIES,
+ .bin_array_max_entries = ICSS_LRE_V2_1_BIN_TBL_MAX_ENTRIES,
+ .nt_array_max_entries = ICSS_LRE_V2_1_NODE_TBL_MAX_ENTRIES,
+ .iep_wrap = 0xffffffff,
+};
+
+static void prueth_set_fw_offsets(struct prueth *prueth)
+{
+ /* Set VLAN/Multicast filter control and table offsets */
+ if (PRUETH_IS_EMAC(prueth) || PRUETH_IS_SWITCH(prueth)) {
+ prueth->fw_offsets->vlan_ctrl_byte =
+ ICSS_EMAC_FW_VLAN_FILTER_CTRL_BITMAP_OFFSET;
+ prueth->fw_offsets->vlan_filter_tbl =
+ ICSS_EMAC_FW_VLAN_FLTR_TBL_BASE_ADDR;
+
+ prueth->fw_offsets->mc_ctrl_byte =
+ ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_OFFSET;
+ prueth->fw_offsets->mc_filter_mask =
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_OFFSET;
+ prueth->fw_offsets->mc_filter_tbl =
+ ICSS_EMAC_FW_MULTICAST_FILTER_TABLE;
+
+ } else {
+ prueth->fw_offsets->vlan_ctrl_byte =
+ ICSS_LRE_FW_VLAN_FLTR_CTRL_BYTE;
+ prueth->fw_offsets->vlan_filter_tbl =
+ ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR;
+
+ prueth->fw_offsets->mc_ctrl_byte =
+ ICSS_LRE_FW_MULTICAST_TABLE_SEARCH_OP_CONTROL_BIT;
+ prueth->fw_offsets->mc_filter_mask =
+ ICSS_LRE_FW_MULTICAST_FILTER_MASK;
+ prueth->fw_offsets->mc_filter_tbl =
+ ICSS_LRE_FW_MULTICAST_FILTER_TABLE;
+
+ }
+}
+
+static inline u32 prueth_read_reg(struct prueth *prueth,
+ enum prueth_mem region,
+ unsigned int reg)
+{
+ return readl_relaxed(prueth->mem[region].va + reg);
+}
+
+static inline void prueth_write_reg(struct prueth *prueth,
+ enum prueth_mem region,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, prueth->mem[region].va + reg);
+}
+
+static inline void prueth_ptp_ts_enable(struct prueth_emac *emac)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u8 val = 0;
+
+ if (emac->ptp_tx_enable) {
+ /* Disable fw background task */
+ val &= ~TIMESYNC_CTRL_BG_ENABLE;
+ /* Enable forced 2-step */
+ val |= TIMESYNC_CTRL_FORCED_2STEP;
+ }
+
+ writeb(val, sram + TIMESYNC_CTRL_VAR_OFFSET);
+ /* disable PTP forwarding for switch */
+ if (PRUETH_IS_SWITCH(emac->prueth))
+ writeb(1, sram + DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET);
+}
+
+static inline void prueth_ptp_tx_ts_enable(struct prueth_emac *emac, bool enable)
+{
+ emac->ptp_tx_enable = enable;
+ prueth_ptp_ts_enable(emac);
+}
+
+static inline bool prueth_ptp_tx_ts_is_enabled(struct prueth_emac *emac)
+{
+ return !!emac->ptp_tx_enable;
+}
+
+static inline void prueth_ptp_rx_ts_enable(struct prueth_emac *emac, bool enable)
+{
+ emac->ptp_rx_enable = enable;
+ prueth_ptp_ts_enable(emac);
+}
+
+static inline bool prueth_ptp_rx_ts_is_enabled(struct prueth_emac *emac)
+{
+ return !!emac->ptp_rx_enable;
+}
+
+static inline
+void prueth_set_reg(struct prueth *prueth, enum prueth_mem region,
+ unsigned int reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = prueth_read_reg(prueth, region, reg);
+ val &= ~mask;
+ val |= (set & mask);
+ prueth_write_reg(prueth, region, reg, val);
+}
+
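+/* Note: each queue descriptor is 8 bytes (sizeof(struct prueth_queue_desc)),
+ * hence the HOST_QUEUE_DESC_OFFSET + 8/16/24 strides for the host queues
+ * below.
+ */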
+static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ { .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
+ { .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
+ { .rd_ptr = P0_Q3_BD_OFFSET, .wr_ptr = P0_Q3_BD_OFFSET, },
+ { .rd_ptr = P0_Q4_BD_OFFSET, .wr_ptr = P0_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ { .rd_ptr = P1_Q1_BD_OFFSET, .wr_ptr = P1_Q1_BD_OFFSET, },
+ { .rd_ptr = P1_Q2_BD_OFFSET, .wr_ptr = P1_Q2_BD_OFFSET, },
+ { .rd_ptr = P1_Q3_BD_OFFSET, .wr_ptr = P1_Q3_BD_OFFSET, },
+ { .rd_ptr = P1_Q4_BD_OFFSET, .wr_ptr = P1_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ { .rd_ptr = P2_Q1_BD_OFFSET, .wr_ptr = P2_Q1_BD_OFFSET, },
+ { .rd_ptr = P2_Q2_BD_OFFSET, .wr_ptr = P2_Q2_BD_OFFSET, },
+ { .rd_ptr = P2_Q3_BD_OFFSET, .wr_ptr = P2_Q3_BD_OFFSET, },
+ { .rd_ptr = P2_Q4_BD_OFFSET, .wr_ptr = P2_Q4_BD_OFFSET, },
+ }
+};
+
+static void prueth_hostconfig(struct prueth *prueth)
+{
+ void __iomem *sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *sram;
+
+ /* queue size lookup table */
+ sram = sram_base + HOST_QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, sram);
+ writew(HOST_QUEUE_2_SIZE, sram + 2);
+ writew(HOST_QUEUE_3_SIZE, sram + 4);
+ writew(HOST_QUEUE_4_SIZE, sram + 6);
+
+ /* queue information table */
+ sram = sram_base + HOST_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(sram, queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer offset table */
+ sram = sram_base + HOST_QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, sram);
+ writew(P0_Q2_BUFFER_OFFSET, sram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, sram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, sram + 6);
+
+	/* buffer descriptor offset table */
+ sram = sram_base + HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, sram);
+ writew(P0_Q2_BD_OFFSET, sram + 2);
+ writew(P0_Q3_BD_OFFSET, sram + 4);
+ writew(P0_Q4_BD_OFFSET, sram + 6);
+
+ /* queue table */
+ sram = sram_base + HOST_QUEUE_DESC_OFFSET;
+ memcpy_toio(sram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
+#define prueth_mii_set(dir, port, mask, set) \
+ regmap_update_bits(prueth->mii_rt, PRUSS_MII_RT_##dir##CFG##port, \
+ PRUSS_MII_RT_##dir##CFG_##dir##_##mask, set)
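+
+/* For example, prueth_mii_set(RX, 0, ENABLE, PRUSS_MII_RT_RXCFG_RX_ENABLE)
+ * expands to regmap_update_bits(prueth->mii_rt, PRUSS_MII_RT_RXCFG0,
+ * PRUSS_MII_RT_RXCFG_RX_ENABLE, PRUSS_MII_RT_RXCFG_RX_ENABLE).
+ */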
+
+static void prueth_mii_init(struct prueth *prueth)
+{
+ /* Configuration of Port 0 Rx */
+ prueth_mii_set(RX, 0, ENABLE, PRUSS_MII_RT_RXCFG_RX_ENABLE);
+ prueth_mii_set(RX, 0, DATA_RDY_MODE_DIS,
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS);
+ prueth_mii_set(RX, 0, MUX_SEL, 0x0);
+ prueth_mii_set(RX, 0, L2_EN, PRUSS_MII_RT_RXCFG_RX_L2_EN);
+ prueth_mii_set(RX, 0, CUT_PREAMBLE, PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE);
+ prueth_mii_set(RX, 0, L2_EOF_SCLR_DIS,
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS);
+
+ /* Configuration of Port 0 Tx */
+ prueth_mii_set(TX, 0, ENABLE, PRUSS_MII_RT_TXCFG_TX_ENABLE);
+ prueth_mii_set(TX, 0, AUTO_PREAMBLE,
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE);
+ prueth_mii_set(TX, 0, 32_MODE_EN, PRUSS_MII_RT_TXCFG_TX_32_MODE_EN);
+ if (!PRUETH_IS_EMAC(prueth))
+ prueth_mii_set(TX, 0, MUX_SEL, PRUSS_MII_RT_TXCFG_TX_MUX_SEL);
+ else
+ prueth_mii_set(TX, 0, MUX_SEL, 0x0);
+ prueth_mii_set(TX, 0, START_DELAY_MASK,
+ TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT);
+ prueth_mii_set(TX, 0, CLK_DELAY_MASK,
+ TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+
+ /* Configuration of Port 1 Rx */
+ prueth_mii_set(RX, 1, ENABLE, PRUSS_MII_RT_RXCFG_RX_ENABLE);
+ prueth_mii_set(RX, 1,
+ DATA_RDY_MODE_DIS, PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS);
+ prueth_mii_set(RX, 1, MUX_SEL, PRUSS_MII_RT_RXCFG_RX_MUX_SEL);
+ prueth_mii_set(RX, 1, L2_EN, PRUSS_MII_RT_RXCFG_RX_L2_EN);
+ prueth_mii_set(RX, 1, CUT_PREAMBLE, PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE);
+ prueth_mii_set(RX, 1, L2_EOF_SCLR_DIS,
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS);
+
+ /* Configuration of Port 1 Tx */
+ prueth_mii_set(TX, 1, ENABLE, PRUSS_MII_RT_TXCFG_TX_ENABLE);
+ prueth_mii_set(TX, 1, AUTO_PREAMBLE,
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE);
+ prueth_mii_set(TX, 1, 32_MODE_EN, PRUSS_MII_RT_TXCFG_TX_32_MODE_EN);
+ if (!PRUETH_IS_EMAC(prueth))
+ prueth_mii_set(TX, 1, MUX_SEL, 0x0);
+ else
+ prueth_mii_set(TX, 1, MUX_SEL, PRUSS_MII_RT_TXCFG_TX_MUX_SEL);
+ prueth_mii_set(TX, 1, START_DELAY_MASK,
+ TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT);
+ prueth_mii_set(TX, 1, CLK_DELAY_MASK,
+ TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+
+	/* The min frame length should be set to 64 to allow reception of
+	 * standard Ethernet frames such as PTP and LLDP that will not have
+	 * the tag/RCT. The actual size written to the register is size - 1
+	 * per TRM. This also includes the CRC/FCS.
+	 */
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS0,
+ PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT);
+
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS1,
+ PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT);
+
+	/* For EMAC, set the max frame size to 1522, i.e. the size with VLAN,
+	 * and for HSR/PRP set it to 1528, i.e. the size with tag or RCT. The
+	 * actual size written to the register is size - 1 as per TRM. Since
+	 * the driver supports run-time change of protocol, it must overwrite
+	 * the values based on the Ethernet type.
+ */
+ if (PRUETH_IS_LRE(prueth)) {
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS0,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS1,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ } else {
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS0,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+
+ regmap_update_bits(prueth->mii_rt,
+ PRUSS_MII_RT_RX_FRMS1,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX - 1) <<
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ }
+}
+
+static void prueth_clearmem(struct prueth *prueth, enum prueth_mem region)
+{
+ memset_io(prueth->mem[region].va, 0, prueth->mem[region].size);
+}
+
+static void prueth_hostinit(struct prueth *prueth)
+{
+ /* Clear shared RAM */
+ prueth_clearmem(prueth, PRUETH_MEM_SHARED_RAM);
+
+ /* Clear OCMC RAM */
+ prueth_clearmem(prueth, PRUETH_MEM_OCMC);
+
+ /* Clear data RAMs */
+ if (prueth->eth_node[PRUETH_MAC0])
+ prueth_clearmem(prueth, PRUETH_MEM_DRAM0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
+
+ /* Initialize host queues in shared RAM */
+ if (!PRUETH_IS_EMAC(prueth))
+ prueth_sw_hostconfig(prueth);
+ else
+ prueth_hostconfig(prueth);
+
+ /* Configure MII_RT */
+ prueth_mii_init(prueth);
+}
+
+/* This function initializes the driver in EMAC, HSR or PRP mode
+ * based on eth_type
+ */
+static void prueth_init_ethernet_mode(struct prueth *prueth)
+{
+ prueth_set_fw_offsets(prueth);
+ prueth_hostinit(prueth);
+ if (PRUETH_IS_LRE(prueth))
+ prueth_lre_config(prueth);
+}
+
+static void prueth_port_enable(struct prueth_emac *emac, bool enable)
+{
+ void __iomem *port_ctrl, *vlan_ctrl;
+ struct prueth *prueth = emac->prueth;
+ u32 vlan_ctrl_offset = prueth->fw_offsets->vlan_ctrl_byte;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+
+ port_ctrl = ram + PORT_CONTROL_ADDR;
+ writeb(!!enable, port_ctrl);
+
+ /* HSR/PRP firmware use a different memory and offset
+ * for VLAN filter control
+ */
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ vlan_ctrl = ram + vlan_ctrl_offset;
+ writeb(!!enable, vlan_ctrl);
+}
+
+static int prueth_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+
+ /* PRU needs local shared RAM address for C28 */
+ u32 sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+
+	/* PRU needs real global OCMC address for C30 */
+ u32 ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *dram_base;
+ void __iomem *mac_addr;
+ void __iomem *dram;
+
+ /* Clear data RAM */
+ prueth_clearmem(prueth, emac->dram);
+
+ dram_base = prueth->mem[emac->dram].va;
+
+ /* setup mac address */
+ mac_addr = dram_base + PORT_MAC_ADDR;
+ memcpy_toio(mac_addr, emac->mac_addr, 6);
+
+ /* queue information table */
+ dram = dram_base + TX_CONTEXT_Q1_OFFSET_ADDR;
+ memcpy_toio(dram, queue_infos[emac->port_id],
+ sizeof(queue_infos[emac->port_id]));
+
+ /* queue table */
+ dram = dram_base + PORT_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[emac->port_id],
+ sizeof(queue_descs[emac->port_id]));
+
+ emac->rx_queue_descs = sram + HOST_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram;
+
+ /* Set in constant table C28 of PRU0 to ICSS Shared memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRU0 to OCMC memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C30, ocmcaddr);
+
+ return 0;
+}
+
+/* update phy/port status information for firmware */
+static void emac_update_phystatus(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ enum prueth_mem region;
+ u32 phy_speed, port_status = 0;
+ u8 delay;
+
+ region = emac->dram;
+ phy_speed = emac->speed;
+ prueth_write_reg(prueth, region, PHY_SPEED_OFFSET, phy_speed);
+
+ if (phy_speed == SPEED_10)
+ delay = TX_CLK_DELAY_10M;
+ else
+ delay = TX_CLK_DELAY_100M;
+
+ if (emac->port_id) {
+ prueth_mii_set(TX, 1, CLK_DELAY_MASK,
+ delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+ } else {
+ prueth_mii_set(TX, 0, CLK_DELAY_MASK,
+ delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+ }
+
+ if (emac->duplex == DUPLEX_HALF)
+ port_status |= PORT_IS_HD_MASK;
+ if (emac->link)
+ port_status |= PORT_LINK_MASK;
+ writeb(port_status, prueth->mem[region].va + PORT_STATUS_OFFSET);
+}
+
+/* called back by the PHY layer if there is a change in the link state of the hw port */
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = emac->phydev;
+ unsigned long flags;
+ bool new_state = false;
+
+ spin_lock_irqsave(&emac->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+ /* defaults for no link */
+
+ /* f/w only support 10 or 100 */
+ emac->speed = SPEED_100;
+
+ /* half duplex may not be supported by f/w */
+ emac->duplex = DUPLEX_FULL;
+ }
+
+ emac_update_phystatus(emac);
+
+ if (new_state)
+ phy_print_status(phydev);
+
+ if (emac->link) {
+ /* link ON */
+ netif_carrier_on(ndev);
+
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ /* link OFF */
+ netif_carrier_off(ndev);
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+
+ spin_unlock_irqrestore(&emac->lock, flags);
+}
+
+/**
+ * emac_tx_hardirq - EMAC Tx interrupt handler
+ * @irq: interrupt number
+ * @dev_id: pointer to net_device
+ *
+ * This is called whenever a packet has finished being transmitted; it
+ * clears up hardware buffer space, and our only task is to re-enable
+ * the transmit queue if it was previously disabled because the
+ * hardware queue was full.
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_tx_hardirq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
+
+static u8 prueth_ptp_ts_event_type(struct sk_buff *skb, u8 *ptp_msgtype)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ struct ptp_header *hdr;
+ u8 msgtype, event_type;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return PRUETH_PTP_TS_EVENTS;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return PRUETH_PTP_TS_EVENTS;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+	/* Treat E2E Delay Req/Resp messages the same as P2P peer delay
+	 * req/resp in the driver here, since firmware stores timestamps in
+	 * the same memory location for either (they cannot operate
+	 * simultaneously anyway)
+ */
+ switch (msgtype) {
+ case PTP_MSGTYPE_SYNC:
+ event_type = PRUETH_PTP_SYNC;
+ break;
+ case PTP_MSGTYPE_DELAY_REQ:
+ case PTP_MSGTYPE_PDELAY_REQ:
+ event_type = PRUETH_PTP_DLY_REQ;
+ break;
+	/* TODO: Check why PTP_MSGTYPE_DELAY_RESP needs a timestamp
+	 * and whether it is needed at all.
+	 */
+	case 0x9: /* PTP_MSGTYPE_DELAY_RESP */
+ case PTP_MSGTYPE_PDELAY_RESP:
+ event_type = PRUETH_PTP_DLY_RESP;
+ break;
+ default:
+ event_type = PRUETH_PTP_TS_EVENTS;
+ }
+
+ if (ptp_msgtype)
+ *ptp_msgtype = msgtype;
+
+ return event_type;
+}
+
+static void prueth_ptp_tx_ts_reset(struct prueth_emac *emac, u8 event)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u32 ts_notify_offs, ts_offs;
+
+ ts_offs = prueth_tx_ts_offs_get(emac->port_id - 1, event);
+ ts_notify_offs = prueth_tx_ts_notify_offs_get(emac->port_id - 1, event);
+
+ writeb(0, sram + ts_notify_offs);
+ memset_io(sram + ts_offs, 0, sizeof(u64));
+}
+
+static int prueth_ptp_tx_ts_enqueue(struct prueth_emac *emac, struct sk_buff *skb)
+{
+ struct skb_redundant_info *sred = skb_redinfo(skb);
+ u8 event, changed = 0;
+ unsigned long flags;
+
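+	/* temporarily strip any VLAN/HSR encapsulation so that
+	 * ptp_classify_raw() sees the PTP header at the expected offset;
+	 * it is restored below via __skb_push(skb, changed)
+	 */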
+ if (skb_vlan_tagged(skb)) {
+ __skb_pull(skb, VLAN_HLEN);
+ changed += VLAN_HLEN;
+ }
+
+ if (sred && sred->ethertype == ETH_P_HSR) {
+ __skb_pull(skb, ICSS_LRE_TAG_RCT_SIZE);
+ changed += ICSS_LRE_TAG_RCT_SIZE;
+ }
+
+ skb_reset_mac_header(skb);
+ event = prueth_ptp_ts_event_type(skb, NULL);
+ __skb_push(skb, changed);
+ if (event == PRUETH_PTP_TS_EVENTS) {
+ netdev_err(emac->ndev, "invalid PTP event\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&emac->ptp_skb_lock, flags);
+ if (emac->ptp_skb[event]) {
+ dev_consume_skb_any(emac->ptp_skb[event]);
+ prueth_ptp_tx_ts_reset(emac, event);
+ netdev_warn(emac->ndev, "Dropped event waiting for tx ts.\n");
+ }
+
+ skb_get(skb);
+ emac->ptp_skb[event] = skb;
+ spin_unlock_irqrestore(&emac->ptp_skb_lock, flags);
+
+ return 0;
+}
+
+irqreturn_t prueth_ptp_tx_irq_handle(int irq, void *dev)
+{
+ struct net_device *ndev = (struct net_device *)dev;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_wake_queue(ndev);
+
+ if (prueth_ptp_tx_ts_is_enabled(emac))
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static u64 prueth_ptp_ts_get(struct prueth_emac *emac, u32 ts_offs)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u64 cycles;
+
+ memcpy_fromio(&cycles, sram + ts_offs, sizeof(cycles));
+ memset_io(sram + ts_offs, 0, sizeof(cycles));
+
+ return cycles;
+}
+
+static void prueth_ptp_tx_ts_get(struct prueth_emac *emac, u8 event)
+{
+ struct skb_shared_hwtstamps *red_ssh;
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ unsigned long flags;
+ bool ct_ts = false;
+ u64 ns;
+
+ /* get the msg from list */
+ spin_lock_irqsave(&emac->ptp_skb_lock, flags);
+ skb = emac->ptp_skb[event];
+ emac->ptp_skb[event] = NULL;
+ if (!skb) {
+		/* In the case of HSR, a tx timestamp may be generated for
+		 * cut-through packets such as SYNC; pass this ts in skb redinfo.
+ */
+ skb = emac->ptp_ct_skb[event];
+ emac->ptp_ct_skb[event] = NULL;
+ ct_ts = true;
+ }
+ spin_unlock_irqrestore(&emac->ptp_skb_lock, flags);
+ if (!skb) {
+		/* TS for a cut-through packet might have already been read by
+		 * emac_rx_packet(), so ignore this interrupt for HSR.
+ */
+ if (!PRUETH_IS_HSR(emac->prueth))
+ netdev_err(emac->ndev, "no tx msg %u found waiting for ts\n", event);
+ return;
+ }
+
+ /* get timestamp */
+ ns = prueth_ptp_ts_get(emac,
+ prueth_tx_ts_offs_get(emac->port_id - 1, event));
+ if (ct_ts) {
+ /* Save the cut-through tx ts in skb redinfo. */
+ red_ssh = skb_redinfo_hwtstamps(skb);
+ memset(red_ssh, 0, sizeof(*red_ssh));
+ red_ssh->hwtstamp = ns_to_ktime(ns);
+ skb->protocol = eth_type_trans(skb, emac->ndev);
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ } else {
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+ }
+}
+
+irqreturn_t prueth_ptp_tx_irq_work(int irq, void *dev)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ u32 ts_notify_offs, ts_notify_mask, i;
+ void __iomem *sram;
+
+ /* get and reset the ts notifications */
+ sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ for (i = 0; i < PRUETH_PTP_TS_EVENTS; i++) {
+ ts_notify_offs = prueth_tx_ts_notify_offs_get(emac->port_id - 1,
+ i);
+ memcpy_fromio(&ts_notify_mask, sram + ts_notify_offs,
+ PRUETH_PTP_TS_NOTIFY_SIZE);
+ memset_io(sram + ts_notify_offs, 0, PRUETH_PTP_TS_NOTIFY_SIZE);
+
+ if (ts_notify_mask & PRUETH_PTP_TS_NOTIFY_MASK)
+ prueth_ptp_tx_ts_get(emac, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * prueth_tx_enqueue - queue a packet to firmware for transmission
+ *
+ * @emac: EMAC data structure
+ * @skb: packet data buffer
+ * @queue_id: priority queue id
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int prueth_tx_enqueue(struct prueth_emac *emac, struct sk_buff *skb,
+ enum prueth_queue_id queue_id)
+{
+ struct net_device *ndev = emac->ndev;
+ struct prueth *prueth = emac->prueth;
+ int pktlen;
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *txqueue;
+ u16 bd_rd_ptr, bd_wr_ptr, update_wr_ptr;
+ int write_block, read_block, free_blocks, update_block, pkt_block_size;
+ unsigned int buffer_desc_count;
+ bool buffer_wrapped = false;
+ void *src_addr;
+ void *dst_addr;
+
+ /* OCMC RAM is not cached and write order is not important */
+ void *ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *dram;
+ u32 wr_buf_desc;
+ int txport = emac->tx_port_queue; /* which port to tx: MII0 or MII1 */
+
+ if (!PRUETH_IS_EMAC(prueth))
+ dram = prueth->mem[PRUETH_MEM_DRAM1].va;
+ else
+ dram = emac->prueth->mem[emac->dram].va;
+
+ if (eth_skb_pad(skb)) {
+ if (netif_msg_tx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "packet pad failed");
+ return -ENOMEM;
+ }
+ src_addr = skb->data;
+ pktlen = skb->len;
+
+ /* Get the tx queue */
+ queue_desc = emac->tx_queue_descs + queue_id;
+ if (!PRUETH_IS_EMAC(prueth))
+ txqueue = &sw_queue_infos[txport][queue_id];
+ else
+ txqueue = &queue_infos[txport][queue_id];
+ buffer_desc_count = txqueue->buffer_desc_end -
+ txqueue->buffer_desc_offset;
+ buffer_desc_count /= BD_SIZE;
+ buffer_desc_count++;
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+	/* The PRU firmware deals mostly in pointers already offset into
+	 * RAM; for code simplicity we would like to deal in indexes within
+	 * the queue we are working with, so calculate them here
+ */
+ write_block = (bd_wr_ptr - txqueue->buffer_desc_offset) / BD_SIZE;
+ read_block = (bd_rd_ptr - txqueue->buffer_desc_offset) / BD_SIZE;
+ if (write_block > read_block) {
+ free_blocks = buffer_desc_count - write_block;
+ free_blocks += read_block;
+ } else if (write_block < read_block) {
+ free_blocks = read_block - write_block;
+ } else { /* they are all free */
+ free_blocks = buffer_desc_count;
+ }
+ pkt_block_size = DIV_ROUND_UP(pktlen, ICSS_BLOCK_SIZE);
+ if (pkt_block_size > free_blocks) /* out of queue space */
+ return -ENOBUFS;
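+
+	/* Worked example, assuming ICSS_BLOCK_SIZE is 32 bytes: with 32
+	 * descriptors, write_block = 30 and read_block = 4 give
+	 * free_blocks = (32 - 30) + 4 = 6, so a 700 byte frame needing
+	 * DIV_ROUND_UP(700, 32) = 22 blocks is rejected above.
+	 */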
+
+ /* calculate end BD address post write */
+ update_block = write_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ buffer_wrapped = true;
+ }
+
+ dst_addr = ocmc_ram + txqueue->buffer_offset +
+ (write_block * ICSS_BLOCK_SIZE);
+
+	/* Copy the data from the socket buffer (DRAM) to PRU buffers (OCMC) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - write_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+
+		/* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+		 * entire packet may have fit within the last BD if pktlen is
+		 * not an integral multiple of ICSS_BLOCK_SIZE
+ */
+ if (pktlen < bytes)
+ bytes = pktlen;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ src_addr += bytes;
+ remaining = pktlen - bytes;
+ dst_addr = ocmc_ram + txqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ } else {
+ memcpy(dst_addr, src_addr, pktlen);
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ prueth_ptp_tx_ts_is_enabled(emac)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ prueth_ptp_tx_ts_enqueue(emac, skb);
+ }
+
+ /* update first buffer descriptor */
+ wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) & PRUETH_BD_LENGTH_MASK;
+ if (PRUETH_IS_HSR(prueth))
+ wr_buf_desc |= BIT(PRUETH_BD_HSR_FRAME_SHIFT);
+
+ if (!PRUETH_IS_EMAC(prueth))
+ writel(wr_buf_desc, sram + bd_wr_ptr);
+ else
+ writel(wr_buf_desc, dram + bd_wr_ptr);
+
+ /* update the write pointer in this queue descriptor, the firmware
+ * polls for this change so this will signal the start of transmission
+ */
+ update_wr_ptr = txqueue->buffer_desc_offset + (update_block * BD_SIZE);
+ writew(update_wr_ptr, &queue_desc->wr_ptr);
+
+ return 0;
+}
+
+void parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info)
+{
+	/* For HSR, start_offset indicates that the tag is not present and
+	 * the actual data starts at an offset of 6 bytes from the start of
+	 * the buffer. For example, for a supervisor frame start_offset is
+	 * set, but for a data frame it is reset. For PRP, start_offset
+	 * indicates whether the RCT is present in the data or not, i.e.
+	 * depending on the LRE_TRANSPARENT_RECEPTION state the RCT is to be
+	 * stripped or not before passing data to the upper layer. Software
+	 * adjusts skb->len accordingly. TODO: support for
+	 * LRE_TRANSPARENT_RECEPTION set to passRCT is TBD.
+ */
+ if (PRUETH_IS_LRE(prueth))
+ pkt_info->start_offset = !!(buffer_descriptor &
+ PRUETH_BD_START_FLAG_MASK);
+ else
+ pkt_info->start_offset = false;
+
+ pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
+ pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
+ PRUETH_BD_PORT_SHIFT;
+ pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
+ PRUETH_BD_LENGTH_SHIFT;
+ pkt_info->broadcast = !!(buffer_descriptor & PRUETH_BD_BROADCAST_MASK);
+ pkt_info->error = !!(buffer_descriptor & PRUETH_BD_ERROR_MASK);
+ if (PRUETH_IS_LRE(prueth))
+ pkt_info->sv_frame = !!(buffer_descriptor &
+ PRUETH_BD_SUP_HSR_FRAME_MASK);
+ else
+ pkt_info->sv_frame = false;
+ pkt_info->lookup_success = !!(buffer_descriptor &
+ PRUETH_BD_LOOKUP_SUCCESS_MASK);
+ pkt_info->flood = !!(buffer_descriptor & PRUETH_BD_SW_FLOOD_MASK);
+ pkt_info->timestamp = !!(buffer_descriptor & PRUETH_BD_TIMESTAMP_MASK);
+}
+
+static int prueth_hsr_ptp_ct_tx_ts_enqueue(struct prueth_emac *emac, struct sk_buff *skb, u16 type)
+{
+ struct prueth_emac *other_emac = emac->prueth->emac[other_port_id(emac->port_id) - 1];
+ struct skb_shared_hwtstamps *red_ssh;
+ unsigned long flags;
+ u8 ptp_type, event;
+ int changed = 0;
+ u64 ns;
+
+ if (type == ETH_P_8021Q) {
+ __skb_pull(skb, VLAN_HLEN);
+ changed += VLAN_HLEN;
+ }
+
+ __skb_pull(skb, ICSS_LRE_TAG_RCT_SIZE);
+ changed += ICSS_LRE_TAG_RCT_SIZE;
+
+ skb_reset_mac_header(skb);
+ event = prueth_ptp_ts_event_type(skb, &ptp_type);
+
+ __skb_push(skb, changed);
+
+	/* Store skbs only for cut-through packets */
+ if (event == PRUETH_PTP_TS_EVENTS ||
+ (ptp_type != PTP_MSGTYPE_SYNC &&
+ ptp_type != PTP_MSGTYPE_DELAY_REQ))
+ return 0;
+
+	/* A cut-through packet might have already been forwarded before the rx packet
+	 * reaches the host; in that case the tx irq handler ignores the interrupt as
+	 * there is no skb stored. So check if the ts is already available before
+	 * storing the skb.
+ */
+ ns = prueth_ptp_ts_get(other_emac, prueth_tx_ts_offs_get(other_emac->port_id - 1, event));
+ if (ns || !other_emac->link) {
+ /* Save the cut-through tx ts in skb redinfo. */
+ red_ssh = skb_redinfo_hwtstamps(skb);
+ memset(red_ssh, 0, sizeof(*red_ssh));
+ red_ssh->hwtstamp = ns_to_ktime(ns);
+ return 0;
+ }
+
+ /* Store the skb so that tx irq handler will populate the ts */
+ spin_lock_irqsave(&other_emac->ptp_skb_lock, flags);
+ if (other_emac->ptp_ct_skb[event]) {
+ netdev_warn(other_emac->ndev, "Dropped cut through event waiting for tx ts.\n");
+ dev_consume_skb_any(other_emac->ptp_ct_skb[event]);
+ prueth_ptp_tx_ts_reset(other_emac, event);
+ }
+
+ other_emac->ptp_ct_skb[event] = skb;
+ spin_unlock_irqrestore(&other_emac->ptp_skb_lock, flags);
+
+ return -EAGAIN;
+}
+
+/* get a packet from the queue
+ * returns 0 on success, negative on error
+ */
+int emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue)
+{
+ struct net_device *ndev = emac->ndev;
+ struct prueth *prueth = emac->prueth;
+ int read_block, update_block, pkt_block_size;
+ bool buffer_wrapped = false, prp_rct = false;
+ unsigned int buffer_desc_count;
+ struct sk_buff *skb;
+ void *src_addr;
+ void *dst_addr;
+ u64 ts;
+
+ void *nt_dst_addr;
+ u8 macid[6];
+ /* OCMC RAM is not cached and read order is not important */
+ void *ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+ struct skb_shared_hwtstamps *ssh;
+ unsigned int actual_pkt_len;
+ u16 start_offset = 0, type;
+ u8 offset = 0, *ptr;
+ int ret;
+
+ if (PRUETH_IS_HSR(prueth))
+ start_offset = (pkt_info->start_offset ?
+ ICSS_LRE_TAG_RCT_SIZE : 0);
+ else if (PRUETH_IS_PRP(prueth) && pkt_info->start_offset)
+ prp_rct = true;
+
+	/* The PRU firmware deals mostly in pointers already offset into
+	 * RAM; for code simplicity we would like to deal in indexes within
+	 * the queue we are working with, so calculate them here
+ */
+ buffer_desc_count = rxqueue->buffer_desc_end -
+ rxqueue->buffer_desc_offset;
+ buffer_desc_count /= BD_SIZE;
+ buffer_desc_count++;
+ read_block = (*bd_rd_ptr - rxqueue->buffer_desc_offset) / BD_SIZE;
+ pkt_block_size = DIV_ROUND_UP(pkt_info->length, ICSS_BLOCK_SIZE);
+ if (pkt_info->timestamp)
+ pkt_block_size++;
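+	/* the firmware appends the rx timestamp in an extra block after the
+	 * frame data; it is read out with the roundup() further below
+	 */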
+
+ /* calculate end BD address post read */
+ update_block = read_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ if (update_block)
+ buffer_wrapped = true;
+ }
+
+ /* calculate new pointer in ram */
+ *bd_rd_ptr = rxqueue->buffer_desc_offset + (update_block * BD_SIZE);
+
+	/* Pkt len with the HSR tag removed, if applicable */
+ actual_pkt_len = pkt_info->length - start_offset;
+
+ /* Allocate a socket buffer for this packet */
+ skb = netdev_alloc_skb_ip_align(ndev, actual_pkt_len);
+ if (!skb) {
+ if (netif_msg_rx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "failed rx buffer alloc\n");
+ return -ENOMEM;
+ }
+ dst_addr = skb->data;
+ nt_dst_addr = dst_addr;
+
+ /* Get the start address of the first buffer from
+ * the read buffer description
+ */
+ if (pkt_info->shadow) {
+ src_addr = ocmc_ram + P0_COL_BUFFER_OFFSET;
+ } else {
+ src_addr = ocmc_ram + rxqueue->buffer_offset +
+ (read_block * ICSS_BLOCK_SIZE);
+ }
+ src_addr += start_offset;
+
+	/* Copy the data from PRU buffers (OCMC) to the socket buffer (DRAM) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - read_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+
+		/* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+		 * entire packet may have fit within the last BD if
+		 * pkt_info->length is not an integral multiple of ICSS_BLOCK_SIZE
+ */
+ if (pkt_info->length < bytes)
+ bytes = pkt_info->length;
+
+ /* If applicable, account for the HSR tag removed */
+ bytes -= start_offset;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ dst_addr += bytes;
+ remaining = actual_pkt_len - bytes;
+ if (pkt_info->shadow)
+ src_addr += bytes;
+ else
+ src_addr = ocmc_ram + rxqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ src_addr += remaining;
+ } else {
+ memcpy(dst_addr, src_addr, actual_pkt_len);
+ src_addr += actual_pkt_len;
+ }
+
+ if (pkt_info->timestamp) {
+ src_addr = (void *)roundup((uintptr_t)src_addr, ICSS_BLOCK_SIZE);
+ dst_addr = &ts;
+ memcpy(dst_addr, src_addr, sizeof(ts));
+ }
+
+ if (PRUETH_IS_SWITCH(emac->prueth)) {
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
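+ /* on a lookup miss, learn the source MAC, which starts
+ * at offset ETH_ALEN after the destination MAC
+ */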
+ if (!pkt_info->lookup_success)
+ prueth_sw_learn_fdb(emac, skb->data + ETH_ALEN);
+ }
+
+ /* Check if VLAN tag is present since SV payload location will change
+ * based on that
+ */
+ if (PRUETH_IS_LRE(prueth)) {
+ ptr = nt_dst_addr + PRUETH_ETH_TYPE_OFFSET;
+ type = (*ptr++) << PRUETH_ETH_TYPE_UPPER_SHIFT;
+ type |= *ptr++;
+ if (type == ETH_P_8021Q)
+ offset = 4;
+ }
+
+ /* TODO. The check for FW_REV_V1_0 is a workaround since
+ * lookup of MAC address in Node table by this version of firmware
+ * is not reliable. Once this issue is fixed in firmware, this driver
+ * check has to be removed.
+ */
+ if (PRUETH_IS_LRE(prueth) && !pkt_info->lookup_success) {
+ if (PRUETH_IS_PRP(prueth)) {
+ memcpy(macid,
+ ((pkt_info->sv_frame) ?
+ nt_dst_addr + LRE_SV_FRAME_OFFSET + offset :
+ nt_dst_addr + ICSS_LRE_TAG_RCT_SIZE),
+ ICSS_LRE_TAG_RCT_SIZE);
+
+ prueth_lre_nt_insert(prueth, macid, emac->port_id,
+ pkt_info->sv_frame,
+ LRE_PROTO_PRP);
+
+ } else if (pkt_info->sv_frame) {
+ memcpy(macid,
+ nt_dst_addr + LRE_SV_FRAME_OFFSET + offset,
+ ICSS_LRE_TAG_RCT_SIZE);
+ prueth_lre_nt_insert(prueth, macid, emac->port_id,
+ pkt_info->sv_frame,
+ LRE_PROTO_HSR);
+ }
+ }
+
+ /* For PRP, the firmware always sends us the RCT, so strip the
+ * tag if prp_tr_mode is IEC62439_3_TR_REMOVE_RCT
+ */
+ if (prp_rct && prueth->prp_tr_mode == IEC62439_3_TR_REMOVE_RCT)
+ actual_pkt_len -= ICSS_LRE_TAG_RCT_SIZE;
+
+ if (!pkt_info->sv_frame) {
+ skb_put(skb, actual_pkt_len);
+
+ if (prueth_ptp_rx_ts_is_enabled(emac) && pkt_info->timestamp) {
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ts);
+ if (PRUETH_IS_HSR(prueth)) {
+ ret = prueth_hsr_ptp_ct_tx_ts_enqueue(emac, skb, type);
+ if (ret == -EAGAIN)
+ goto out;
+ }
+ }
+
+ /* send packet up the stack */
+ skb->protocol = eth_type_trans(skb, ndev);
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+out:
+
+ /* update stats */
+ ndev->stats.rx_bytes += actual_pkt_len;
+ ndev->stats.rx_packets++;
+
+ return 0;
+}
+
+/**
+ * emac_rx_thread - EMAC Rx interrupt thread handler
+ * @irq: interrupt number
+ * @dev_id: pointer to net_device
+ *
+ * EMAC Rx interrupt thread handler - processes received frames in an
+ * irq thread. Only limited buffering is available at the ingress to
+ * queue frames, so they must be drained as quickly as possible to avoid
+ * overflow; a NAPI-poll based implementation loses packets to overflow
+ * at the ingress queues, while industrial use cases require loss-free
+ * processing. Tests show that with threaded irq based processing no
+ * overflow occurs when receiving MTU-sized frames at ~92Mbps, which
+ * meets the industrial requirement.
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_rx_thread(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int start_queue, end_queue;
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *rxqueue;
+ u8 overflow_cnt;
+ u16 bd_rd_ptr, bd_wr_ptr, update_rd_ptr;
+ u32 rd_buf_desc;
+ void __iomem *shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ struct prueth_packet_info pkt_info;
+ struct net_device_stats *ndevstats = &emac->ndev->stats;
+ int i, ret, used = 0;
+ struct prueth_emac *other_emac;
+
+ other_emac = prueth->emac[other_port_id(emac->port_id) - 1];
+
+ if (PRUETH_IS_SWITCH(prueth)) {
+ start_queue = PRUETH_QUEUE1;
+ end_queue = PRUETH_QUEUE4;
+ } else {
+ start_queue = emac->rx_queue_start;
+ end_queue = emac->rx_queue_end;
+ }
+
+retry:
+ /* search host queues for packets */
+ for (i = start_queue; i <= end_queue; i++) {
+ queue_desc = emac->rx_queue_descs + i;
+ if (PRUETH_IS_SWITCH(emac->prueth))
+ rxqueue = &sw_queue_infos[PRUETH_PORT_HOST][i];
+ else
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
+
+ overflow_cnt = readb(&queue_desc->overflow_cnt);
+ if (overflow_cnt > 0) {
+ emac->ndev->stats.rx_over_errors += overflow_cnt;
+
+ /* reset to zero */
+ writeb(0, &queue_desc->overflow_cnt);
+ }
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+ /* while packets are available in this queue */
+ while (bd_rd_ptr != bd_wr_ptr) {
+ /* get packet info from the read buffer descriptor */
+ rd_buf_desc = readl(shared_ram + bd_rd_ptr);
+ parse_packet_info(prueth, rd_buf_desc, &pkt_info);
+
+ if (pkt_info.length <= 0) {
+ /* a packet length of zero will cause us to
+ * never move the read pointer ahead, locking
+ * up the driver, so we have to manually move
+ * it to the write pointer, discarding all
+ * remaining packets in this queue. This
+ * should never happen.
+ */
+ update_rd_ptr = bd_wr_ptr;
+ ndevstats->rx_length_errors++;
+ } else if (pkt_info.length > EMAC_MAX_PKTLEN) {
+ /* if the packet is too large we skip it but we
+ * still need to move the read pointer ahead
+ * and assume something is wrong with the read
+ * pointer as the firmware should be filtering
+ * these packets
+ */
+ update_rd_ptr = bd_wr_ptr;
+ ndevstats->rx_length_errors++;
+ } else {
+ update_rd_ptr = bd_rd_ptr;
+ if (PRUETH_IS_SWITCH(emac->prueth)) {
+ if (pkt_info.port ==
+ other_emac->port_id) {
+ emac = other_emac;
+ }
+ }
+
+ ret = emac_rx_packet(emac, &update_rd_ptr,
+ &pkt_info, rxqueue);
+ if (ret)
+ return IRQ_HANDLED;
+ used++;
+ }
+
+ /* after reading the buffer descriptor we clear it
+ * to prevent improperly moved read pointer errors
+ * from simply looking like old packets.
+ */
+ writel(0, shared_ram + bd_rd_ptr);
+
+ /* update read pointer in queue descriptor */
+ writew(update_rd_ptr, &queue_desc->rd_ptr);
+ bd_rd_ptr = update_rd_ptr;
+ }
+ }
+
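+ /* if anything was received, rescan all queues: more frames may
+ * have arrived in the meantime
+ */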
+ if (used) {
+ used = 0;
+ goto retry;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* get statistics maintained by the PRU firmware into @pstats */
+static void emac_get_stats(struct prueth_emac *emac,
+ struct port_statistics *pstats)
+{
+ void __iomem *dram;
+
+ dram = emac->prueth->mem[emac->dram].va;
+ memcpy_fromio(pstats, dram + STATISTICS_OFFSET, STAT_SIZE);
+
+ pstats->vlan_dropped =
+ readl(dram + ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_OFFSET);
+ pstats->multicast_dropped =
+ readl(dram + ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET);
+}
+
+/* set PRU firmware statistics */
+static void emac_set_stats(struct prueth_emac *emac,
+ struct port_statistics *pstats)
+{
+ void __iomem *dram;
+
+ dram = emac->prueth->mem[emac->dram].va;
+ memcpy_toio(dram + STATISTICS_OFFSET, pstats, STAT_SIZE);
+
+ writel(pstats->vlan_dropped, dram +
+ ICSS_EMAC_FW_VLAN_FILTER_DROP_CNT_OFFSET);
+ writel(pstats->multicast_dropped, dram +
+ ICSS_EMAC_FW_MULTICAST_FILTER_DROP_CNT_OFFSET);
+}
+
+static int emac_set_boot_pru(struct prueth_emac *emac, struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ struct prueth *prueth = emac->prueth;
+ const char *fw_name;
+ int ret = 0;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[emac->port_id - 1];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ if (!fw_name) {
+ netdev_err(ndev, "eth_type %d not supported\n",
+ prueth->eth_type);
+ return -ENODEV;
+ }
+
+ ret = rproc_set_firmware(emac->pru, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU firmware %s: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = rproc_boot(emac->pru);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int emac_request_irqs(struct prueth_emac *emac)
+{
+ struct net_device *ndev = emac->ndev;
+ int ret = 0;
+
+ ret = request_threaded_irq(emac->rx_irq, NULL, emac_rx_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request RX IRQ\n");
+ return ret;
+ }
+
+ if (PRUETH_IS_EMAC(emac->prueth) && emac->tx_irq > 0) {
+ ret = request_irq(emac->tx_irq, emac_tx_hardirq,
+ IRQF_TRIGGER_HIGH, ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request TX IRQ\n");
+ free_irq(emac->rx_irq, ndev);
+ return ret;
+ }
+ }
+
+ if (emac->emac_ptp_tx_irq) {
+ ret = request_threaded_irq(emac->emac_ptp_tx_irq,
+ prueth_ptp_tx_irq_handle,
+ prueth_ptp_tx_irq_work,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request PTP TX IRQ\n");
+ free_irq(emac->rx_irq, ndev);
+ if (PRUETH_IS_EMAC(emac->prueth) && emac->tx_irq > 0)
+ free_irq(emac->tx_irq, ndev);
+ }
+ }
+
+ return ret;
+}
+
+static int emac_sanitize_feature_flags(struct prueth_emac *emac)
+{
+ if ((PRUETH_IS_HSR(emac->prueth) || PRUETH_IS_PRP(emac->prueth)) &&
+ !(emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)) {
+ netdev_err(emac->ndev, "Error: Turn ON HSR offload\n");
+ return -EINVAL;
+ }
+
+ if ((PRUETH_IS_EMAC(emac->prueth) || PRUETH_IS_SWITCH(emac->prueth)) &&
+ (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)) {
+ netdev_err(emac->ndev, "Error: Turn OFF HSR offload\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Function to free memory related to sw/lre */
+static void prueth_free_memory(struct prueth *prueth)
+{
+ if (PRUETH_IS_SWITCH(prueth))
+ prueth_sw_free_fdb_table(prueth);
+ if (PRUETH_IS_LRE(prueth))
+ prueth_lre_free_memory(prueth);
+}
+
+static void icss_ptp_dram_init(struct prueth_emac *emac)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u64 temp64;
+
+ writew(0, sram + MII_RX_CORRECTION_OFFSET);
+ writew(0, sram + MII_TX_CORRECTION_OFFSET);
+
+ /* Initialize RCF to 1 (Linux N/A) */
+ writel(1 * 1024, sram + TIMESYNC_TC_RCF_OFFSET);
+
+ /* This flag will be set and cleared by firmware */
+ /* Write Sync0 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
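+ /* 200000000 / 50 = 4000000 IEP cycles, a 20 ms wide pulse
+ * assuming the 200 MHz IEP clock
+ */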
+ writel(200000000 / 50, sram + TIMESYNC_SYNC0_WIDTH_OFFSET);
+
+ /* Write CMP1 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ temp64 = 1000000;
+ memcpy_toio(sram + TIMESYNC_CMP1_CMP_OFFSET, &temp64, sizeof(temp64));
+
+ /* Write CMP1 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(1000000, sram + TIMESYNC_CMP1_PERIOD_OFFSET);
+
+ /* Configures domainNumber list. Firmware supports 2 domains */
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST);
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST + 1);
+
+ /* Configure 1-step/2-step */
+ writeb(1, sram + DISABLE_SWITCH_SYNC_RELAY_OFFSET);
+
+ /* Configures the setting to Link local frame without HSR tag */
+ writeb(0, sram + LINK_LOCAL_FRAME_HAS_HSR_TAG);
+
+ /* Enable E2E/UDP PTP message timestamping */
+ writeb(1, sram + PTP_IPV4_UDP_E2E_ENABLE);
+}
+
+/**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when system wants to start the interface.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret;
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ netif_carrier_off(ndev);
+
+ if (!prueth->emac_configured)
+ prueth_init_ethernet_mode(prueth);
+
+ ret = emac_sanitize_feature_flags(emac);
+ if (ret)
+ return ret;
+
+ /* reset and start PRU firmware */
+ if (!PRUETH_IS_EMAC(prueth)) {
+ ret = prueth_sw_emac_config(emac);
+ if (ret)
+ return ret;
+
+ if (PRUETH_IS_SWITCH(prueth)) {
+ ret = prueth_sw_init_fdb_table(prueth);
+ } else {
+ /* HSR/PRP */
+ prueth_lre_config_check_flags(prueth);
+ ret = prueth_lre_init_node_table(prueth);
+ }
+ } else {
+ prueth_emac_config(emac);
+ }
+
+ if (ret)
+ return ret;
+
+ /* restore stats */
+ emac_set_stats(emac, &emac->stats);
+ if (PRUETH_IS_LRE(prueth))
+ prueth_lre_set_stats(prueth, prueth->lre_stats);
+
+ if (!prueth->emac_configured) {
+ icss_ptp_dram_init(emac);
+ ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
+ if (ret) {
+ netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
+ goto free_mem;
+ }
+ }
+
+ if (!PRUETH_IS_EMAC(prueth)) {
+ ret = prueth_sw_boot_prus(prueth, ndev);
+ if (ret)
+ goto iep_exit;
+ } else {
+ /* boot the PRU */
+ ret = emac_set_boot_pru(emac, ndev);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU: %d\n", ret);
+ goto iep_exit;
+ }
+ }
+
+ if (PRUETH_IS_EMAC(prueth) || PRUETH_IS_SWITCH(prueth))
+ ret = emac_request_irqs(emac);
+ else
+ ret = prueth_lre_request_irqs(emac);
+ if (ret)
+ goto rproc_shutdown;
+
+ /* start PHY */
+ phy_start(emac->phydev);
+
+ /* enable the port and vlan */
+ prueth_port_enable(emac, true);
+
+ prueth->emac_configured |= BIT(emac->port_id);
+ if (PRUETH_IS_SWITCH(prueth))
+ prueth_sw_port_set_stp_state(prueth, emac->port_id,
+ BR_STATE_LEARNING);
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "started\n");
+
+ return 0;
+
+rproc_shutdown:
+ if (!PRUETH_IS_EMAC(prueth))
+ prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
+iep_exit:
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+free_mem:
+ prueth_free_memory(emac->prueth);
+ return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int i;
+
+ prueth->emac_configured &= ~BIT(emac->port_id);
+
+ /* disable the mac port */
+ prueth_port_enable(emac, false);
+
+ /* stop PHY */
+ phy_stop(emac->phydev);
+
+ /* inform the upper layers. */
+ netif_stop_queue(ndev);
+
+ netif_carrier_off(ndev);
+
+ /* stop the PRU */
+ if (!PRUETH_IS_EMAC(prueth))
+ prueth_sw_shutdown_prus(emac, ndev);
+ else
+ rproc_shutdown(emac->pru);
+
+ /* save emac and lre stats */
+ emac_get_stats(emac, &emac->stats);
+ if (PRUETH_IS_LRE(prueth) && !prueth->emac_configured) {
+ prueth_lre_get_stats(prueth, prueth->lre_stats);
+ prueth_lre_cleanup(prueth);
+ }
+
+ /* free table memory of the switch */
+ if (PRUETH_IS_SWITCH(emac->prueth))
+ prueth_sw_free_fdb_table(prueth);
+
+ /* Clean up PTP-related state for all protocols */
+ prueth_ptp_tx_ts_enable(emac, 0);
+ prueth_ptp_rx_ts_enable(emac, 0);
+ for (i = 0; i < PRUETH_PTP_TS_EVENTS; i++) {
+ if (emac->ptp_skb[i]) {
+ prueth_ptp_tx_ts_reset(emac, i);
+ dev_consume_skb_any(emac->ptp_skb[i]);
+ emac->ptp_skb[i] = NULL;
+ }
+ if (emac->ptp_ct_skb[i]) {
+ prueth_ptp_tx_ts_reset(emac, i);
+ dev_consume_skb_any(emac->ptp_ct_skb[i]);
+ emac->ptp_ct_skb[i] = NULL;
+ }
+ }
+
+ /* free rx and tx interrupts */
+ if (PRUETH_IS_EMAC(emac->prueth) && emac->tx_irq > 0)
+ free_irq(emac->tx_irq, ndev);
+ /* For EMAC and Switch, interrupt is per port.
+ * So free interrupts same way
+ */
+ if (PRUETH_IS_EMAC(emac->prueth) || PRUETH_IS_SWITCH(prueth)) {
+ free_irq(emac->rx_irq, ndev);
+ if (emac->emac_ptp_tx_irq)
+ free_irq(emac->emac_ptp_tx_irq, ndev);
+
+ } else {
+ /* Free interrupts on last port */
+ prueth_lre_free_irqs(emac);
+ }
+
+ /* free memory related to sw/lre */
+ prueth_free_memory(emac->prueth);
+
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static void prueth_change_to_switch_mode(struct prueth *prueth)
+{
+ bool portstatus[PRUETH_NUM_MACS];
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ portstatus[i] = netif_running(ndev);
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d\n", ret);
+ return;
+ }
+ }
+
+ prueth->eth_type = PRUSS_ETHTYPE_SWITCH;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d\n", ret);
+ return;
+ }
+ }
+
+ dev_info(prueth->dev, "TI PRU ethernet now in Switch mode\n");
+}
+
+static void prueth_change_to_emac_mode(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ bool portstatus[PRUETH_NUM_MACS];
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ portstatus[i] = netif_running(ndev);
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d\n", ret);
+ return;
+ }
+ }
+
+ prueth->eth_type = PRUSS_ETHTYPE_EMAC;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ emac = prueth->emac[i];
+ ndev = emac->ndev;
+
+ if (!portstatus[i])
+ continue;
+
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d\n", ret);
+ return;
+ }
+ }
+
+ dev_info(prueth->dev, "TI PRU ethernet now in Dual EMAC mode\n");
+}
+
+/* VLAN-tag PCP to priority queue map for EMAC used by driver. Should be
+ * in sync with fw_pcp_default_priority_queue_map[]
+ * Index is PCP val.
+ * low - pcp 0..1 maps to Q4
+ * 2..3 maps to Q3
+ * 4..5 maps to Q2
+ * high - pcp 6..7 maps to Q1.
+ *
+ * VLAN-tag PCP to priority queue map for Switch/HSR/PRP used by driver
+ * Index is PCP val / 2.
+ * low - pcp 0..3 maps to Q4 for Host
+ * high - pcp 4..7 maps to Q3 for Host
+ * low - pcp 0..3 maps to Q2 for PRU-x where x = 1 for PRUETH_PORT_MII0
+ * 0 for PRUETH_PORT_MII1
+ * high - pcp 4..7 maps to Q1 for PRU-x
+ */
+static const unsigned short emac_pcp_tx_priority_queue_map[] = {
+ PRUETH_QUEUE4, PRUETH_QUEUE4,
+ PRUETH_QUEUE3, PRUETH_QUEUE3,
+ PRUETH_QUEUE2, PRUETH_QUEUE2,
+ PRUETH_QUEUE1, PRUETH_QUEUE1,
+};
+
+static u16 prueth_get_tx_queue_id(struct prueth *prueth, struct sk_buff *skb)
+{
+ u16 vlan_tci, pcp;
+ int err;
+
+ err = vlan_get_tag(skb, &vlan_tci);
+ if (likely(err))
+ pcp = 0;
+ else
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
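+ /* e.g. vlan_tci 0xa064 has PCP 5, which maps to QUEUE2 on
+ * Dual EMAC and, after the shift below, to QUEUE3 on HSR/PRP
+ */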
+ /* For HSR/PRP, we use only QUEUE4 and QUEUE3 at the egress. QUEUE2 and
+ * QUEUE1 are used for port to port traffic. Current version of SWITCH
+ * firmware uses 4 egress queues.
+ */
+ if (PRUETH_IS_LRE(prueth))
+ pcp >>= 1;
+
+ return emac_pcp_tx_priority_queue_map[pcp];
+}
+
+/**
+ * emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
+ */
+static int emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret = 0;
+ u16 qid;
+
+ if (unlikely(!emac->link)) {
+ if (netif_msg_tx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "No link to transmit\n");
+ goto fail_tx;
+ }
+
+ qid = prueth_get_tx_queue_id(emac->prueth, skb);
+ ret = prueth_tx_enqueue(emac, skb, qid);
+ if (ret) {
+ if (ret != -ENOBUFS && netif_msg_tx_err(emac) &&
+ net_ratelimit())
+ netdev_err(ndev, "packet queue failed: %d\n", ret);
+ goto fail_tx;
+ }
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ if (ret == -ENOBUFS) {
+ /* no free TX queue */
+ if (emac->tx_irq > 0)
+ netif_stop_queue(ndev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ /* error */
+ ndev->stats.tx_dropped++;
+ ret = NET_XMIT_DROP;
+ }
+
+ return ret;
+}
+
+/**
+ * emac_ndo_tx_timeout - EMAC Transmit timeout function
+ * @ndev: The EMAC network adapter
+ * @txqueue: TX queue being used
+ *
+ * Called when the system detects that an skb timeout period has expired,
+ * potentially due to a fault in the adapter preventing it from sending
+ * the skb out on the wire.
+ */
+static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (netif_msg_tx_err(emac))
+ netdev_err(ndev, "xmit timeout\n");
+
+ ndev->stats.tx_errors++;
+
+ /* TODO: can we recover or need to reboot firmware? */
+
+ netif_wake_queue(ndev);
+}
+
+/**
+ * emac_ndo_get_stats - EMAC get statistics function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when the system wants to get statistics from the device.
+ *
+ * Returns the statistics in a net_device_stats structure pulled from emac
+ */
+static struct net_device_stats *emac_ndo_get_stats(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct port_statistics pstats;
+ struct net_device_stats *stats = &ndev->stats;
+
+ emac_get_stats(emac, &pstats);
+ stats->collisions = pstats.late_coll + pstats.single_coll +
+ pstats.multi_coll + pstats.excess_coll;
+ stats->multicast = pstats.rx_mcast;
+
+ return stats;
+}
+
+/* enable/disable MC filter */
+static void emac_mc_filter_ctrl(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+ u32 mc_ctrl_byte = prueth->fw_offsets->mc_ctrl_byte;
+ void __iomem *mc_filter_ctrl;
+ u32 reg;
+
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ mc_filter_ctrl = ram + mc_ctrl_byte;
+
+ if (enable)
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_ENABLED;
+ else
+ reg = ICSS_EMAC_FW_MULTICAST_FILTER_CTRL_DISABLED;
+
+ writeb(reg, mc_filter_ctrl);
+}
+
+/* reset MC filter bins */
+static void emac_mc_filter_reset(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+ u32 mc_filter_tbl_base = prueth->fw_offsets->mc_filter_tbl;
+ void __iomem *mc_filter_tbl;
+
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ mc_filter_tbl = ram + mc_filter_tbl_base;
+ memset_io(mc_filter_tbl, 0, ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES);
+}
+
+/* set MC filter hashmask */
+static void emac_mc_filter_hashmask(struct prueth_emac *emac,
+ u8 mask[ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES])
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+ u32 mc_filter_mask_base = prueth->fw_offsets->mc_filter_mask;
+ void __iomem *mc_filter_mask;
+
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ mc_filter_mask = ram + mc_filter_mask_base;
+ memcpy_toio(mc_filter_mask, mask,
+ ICSS_EMAC_FW_MULTICAST_FILTER_MASK_SIZE_BYTES);
+}
+
+static void emac_mc_filter_bin_update(struct prueth_emac *emac, u8 hash, u8 val)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 mc_filter_tbl_base = prueth->fw_offsets->mc_filter_tbl;
+ void __iomem *mc_filter_tbl;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ mc_filter_tbl = ram + mc_filter_tbl_base;
+ writeb(val, mc_filter_tbl + hash);
+}
+
+void emac_mc_filter_bin_allow(struct prueth_emac *emac, u8 hash)
+{
+ emac_mc_filter_bin_update(emac, hash, ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_ALLOWED);
+}
+
+void emac_mc_filter_bin_disallow(struct prueth_emac *emac, u8 hash)
+{
+ emac_mc_filter_bin_update(emac, hash, ICSS_EMAC_FW_MULTICAST_FILTER_HOST_RCV_NOT_ALLOWED);
+}
+
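+/* XOR-fold the masked MAC address into a single byte that indexes a bin
+ * in the multicast filter table; e.g. with the default all-ones mask,
+ * 01:00:5e:00:00:01 folds to 0x01 ^ 0x5e ^ 0x01 = 0x5e.
+ */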
+u8 emac_get_mc_hash(u8 *mac, u8 *mask)
+{
+ int j;
+ u8 hash;
+
+ for (j = 0, hash = 0; j < ETH_ALEN; j++)
+ hash ^= (mac[j] & mask[j]);
+
+ return hash;
+}
+
+/**
+ * emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when the system wants to set the receive mode of the device.
+ */
+static void emac_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u32 reg = readl(sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+ bool promisc = ndev->flags & IFF_PROMISC;
+ struct netdev_hw_addr *ha;
+ unsigned long flags;
+ u32 mask;
+ u8 hash;
+
+ if (PRUETH_IS_SWITCH(prueth)) {
+ netdev_dbg(ndev,
+ "%s: promisc/mc filtering not supported for switch\n",
+ __func__);
+ return;
+ }
+
+ if (promisc && PRUETH_IS_LRE(prueth)) {
+ netdev_dbg(ndev,
+ "%s: promisc mode not supported for LRE\n",
+ __func__);
+ return;
+ }
+
+ /* for LRE, it is a shared table. So lock the access */
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ /* Disable and reset multicast filter, allows allmulti */
+ emac_mc_filter_ctrl(emac, false);
+ emac_mc_filter_reset(emac);
+ emac_mc_filter_hashmask(emac, emac->mc_filter_mask);
+
+ if (PRUETH_IS_EMAC(prueth)) {
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ mask = EMAC_P1_PROMISCUOUS_BIT;
+ break;
+ case PRUETH_PORT_MII1:
+ mask = EMAC_P2_PROMISCUOUS_BIT;
+ break;
+ default:
+ netdev_err(ndev, "%s: invalid port\n", __func__);
+ goto unlock;
+ }
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ reg |= mask;
+ } else {
+ /* Disable promiscuous mode */
+ reg &= ~mask;
+ }
+
+ writel(reg, sram + EMAC_PROMISCUOUS_MODE_OFFSET);
+
+ if (promisc)
+ goto unlock;
+ }
+
+ if (ndev->flags & IFF_ALLMULTI && !PRUETH_IS_SWITCH(prueth))
+ goto unlock;
+
+ emac_mc_filter_ctrl(emac, true); /* all multicast blocked */
+
+ if (netdev_mc_empty(ndev))
+ goto unlock;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash = emac_get_mc_hash(ha->addr, emac->mc_filter_mask);
+ emac_mc_filter_bin_allow(emac, hash);
+ }
+
+ /* Add bridge device's MC addresses as well */
+ if (prueth->hw_bridge_dev) {
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ hash = emac_get_mc_hash(ha->addr, emac->mc_filter_mask);
+ emac_mc_filter_bin_allow(emac, hash);
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+}
+
+static int emac_hwtstamp_config_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ prueth_ptp_rx_ts_enable(emac, 0);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ prueth_ptp_rx_ts_enable(emac, 1);
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ default:
+ return -ERANGE;
+ }
+
+ prueth_ptp_tx_ts_enable(emac, cfg.tx_type == HWTSTAMP_TX_ON);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int emac_hwtstamp_config_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config cfg;
+
+ cfg.flags = 0;
+ cfg.tx_type = prueth_ptp_tx_ts_is_enabled(emac) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = prueth_ptp_rx_ts_is_enabled(emac) ?
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return emac_hwtstamp_config_set(ndev, ifr);
+ case SIOCGHWTSTAMP:
+ return emac_hwtstamp_config_get(ndev, ifr);
+ }
+
+ return phy_mii_ioctl(emac->phydev, ifr, cmd);
+}
+
+int emac_add_del_vid(struct prueth_emac *emac,
+ bool add, __be16 proto, u16 vid)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 vlan_filter_tbl = prueth->fw_offsets->vlan_filter_tbl;
+ void __iomem *ram = prueth->mem[emac->dram].va;
+ unsigned long flags;
+ u8 bit_index, val;
+ u16 byte_index;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ if (vid >= ICSS_EMAC_FW_VLAN_FILTER_VID_MAX)
+ return -EINVAL;
+
+ if (PRUETH_IS_LRE(prueth))
+ ram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ /* By default, VLAN ID 0 (priority tagged packets) is routed to
+ * host, so nothing to be done if vid = 0
+ */
+ if (!vid)
+ return 0;
+
+ /* for LRE, it is a shared table. So lock the access */
+ spin_lock_irqsave(&emac->addr_lock, flags);
+
+ /* The VLAN filter table is a 512 byte (4096 bit) bitmap; each
+ * bit enables or disables the corresponding VID. The byte index
+ * that controls a given VID can therefore be calculated as
+ * vid / 8, and the bit within that byte that controls the VID
+ * is given by vid % 8. Untagged frames are allowed to the host
+ * by default.
+ */
+ byte_index = vid / BITS_PER_BYTE;
+ bit_index = vid % BITS_PER_BYTE;
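+ /* e.g. vid 100 is controlled by byte 100 / 8 = 12, bit 100 % 8 = 4 */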
+ val = readb(ram + vlan_filter_tbl + byte_index);
+ if (add)
+ val |= BIT(bit_index);
+ else
+ val &= ~BIT(bit_index);
+ writeb(val, ram + vlan_filter_tbl + byte_index);
+
+ spin_unlock_irqrestore(&emac->addr_lock, flags);
+
+ netdev_dbg(emac->ndev, "%s VID bit at index %d and bit %d\n",
+ add ? "Setting" : "Clearing", byte_index, bit_index);
+
+ return 0;
+}
+
+static int emac_ndo_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+
+ return emac_add_del_vid(emac, true, proto, vid);
+}
+
+static int emac_ndo_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+
+ return emac_add_del_vid(emac, false, proto, vid);
+}
+
+static int emac_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct prueth *prueth = emac->prueth;
+
+ ppid->id_len = sizeof(prueth->base_mac);
+ memcpy(&ppid->id, &prueth->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err;
+
+ err = snprintf(name, len, "p%d", emac->port_id);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * emac_ndo_set_features - function to set feature flags
+ * @ndev: The network adapter device
+ * @features: the requested feature set
+ *
+ * Called when the ethtool -K option is invoked by the user.
+ *
+ * Changes the eth_type in the prueth structure based on the hsr or prp
+ * offload options requested by the user through ethtool -K. If this
+ * device or the other paired device is running, the change is not
+ * accepted; otherwise the ethernet type and offload feature flag are
+ * updated.
+ *
+ * Returns success if eth_type and feature flags are updated, an error
+ * otherwise.
+ */
+static int emac_ndo_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct prueth_emac *emac = netdev_priv(ndev), *other_emac;
+ struct prueth *prueth = emac->prueth;
+ enum prueth_port other_port;
+ netdev_features_t wanted = features & NETIF_F_HW_HSR_TAG_RM;
+ netdev_features_t have = ndev->features & NETIF_F_HW_HSR_TAG_RM;
+ bool change_request = ((wanted ^ have) != 0);
+ int ret = -EBUSY;
+
+ if (!prueth->support_lre)
+ return 0;
+
+ if (PRUETH_IS_SWITCH(prueth)) {
+ /* Don't allow switching to the HSR/PRP ethtype from Switch
+ * mode. The user must first remove the eth ports from the
+ * bridge, which automatically puts the ethtype back to EMAC.
+ */
+ netdev_err(ndev,
+ "Switch to HSR/PRP/EMAC not allowed\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(ndev) && change_request) {
+ netdev_err(ndev,
+ "Can't change feature when device runs\n");
+ return ret;
+ }
+
+ other_port = other_port_id(emac->port_id);
+ /* MAC instance index starts from 0. So index by port_id - 1 */
+ other_emac = prueth->emac[other_port - 1];
+ if (other_emac && netif_running(other_emac->ndev) && change_request) {
+ netdev_err(ndev,
+ "Can't change feature when other device runs\n");
+ return ret;
+ }
+
+ if (features & NETIF_F_HW_HSR_TAG_RM) {
+ ndev->features |= NETIF_F_HW_HSR_TAG_RM;
+ } else if (features & NETIF_F_HW_HSR_FWD) {
+ ndev->features |= NETIF_F_HW_HSR_FWD;
+ } else {
+ prueth->eth_type = PRUSS_ETHTYPE_EMAC;
+ ndev->features &= ~(NETIF_F_HW_HSR_TAG_RM | NETIF_F_HW_HSR_FWD);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_ndo_open,
+ .ndo_stop = emac_ndo_stop,
+ .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_get_stats = emac_ndo_get_stats,
+ .ndo_set_rx_mode = emac_ndo_set_rx_mode,
+ .ndo_do_ioctl = emac_ndo_ioctl,
+ .ndo_set_features = emac_ndo_set_features,
+ .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = emac_ndo_setup_tc,
+ .ndo_get_port_parent_id = emac_get_port_parent_id,
+ .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
+};
+
+/**
+ * emac_get_drvinfo - Get EMAC driver information
+ * @ndev: The network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Fills in the EMAC driver information (name and version)
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, PRUETH_MODULE_DESCRIPTION, sizeof(info->driver));
+ strlcpy(info->version, PRUETH_MODULE_VERSION, sizeof(info->version));
+}
+
+/**
+ * emac_get_link_ksettings - Get EMAC settings
+ * @ndev: The network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool get command
+ */
+static int emac_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(emac->phydev, ecmd);
+ return 0;
+}
+
+/**
+ * emac_set_link_ksettings - Set EMAC settings
+ * @ndev: The EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool set command
+ */
+static int emac_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (!emac->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(emac->phydev, ecmd);
+}
+
+#define PRUETH_STAT_OFFSET(m) offsetof(struct port_statistics, m)
+
+static const struct {
+ char string[ETH_GSTRING_LEN];
+ u32 offset;
+} prueth_ethtool_stats[] = {
+ {"txBcast", PRUETH_STAT_OFFSET(tx_bcast)},
+ {"txMcast", PRUETH_STAT_OFFSET(tx_mcast)},
+ {"txUcast", PRUETH_STAT_OFFSET(tx_ucast)},
+ {"txOctets", PRUETH_STAT_OFFSET(tx_octets)},
+ {"rxBcast", PRUETH_STAT_OFFSET(rx_bcast)},
+ {"rxMcast", PRUETH_STAT_OFFSET(rx_mcast)},
+ {"rxUcast", PRUETH_STAT_OFFSET(rx_ucast)},
+ {"rxOctets", PRUETH_STAT_OFFSET(rx_octets)},
+
+ {"tx64byte", PRUETH_STAT_OFFSET(tx64byte)},
+ {"tx65_127byte", PRUETH_STAT_OFFSET(tx65_127byte)},
+ {"tx128_255byte", PRUETH_STAT_OFFSET(tx128_255byte)},
+ {"tx256_511byte", PRUETH_STAT_OFFSET(tx256_511byte)},
+ {"tx512_1023byte", PRUETH_STAT_OFFSET(tx512_1023byte)},
+ {"tx1024byte", PRUETH_STAT_OFFSET(tx1024byte)},
+ {"rx64byte", PRUETH_STAT_OFFSET(rx64byte)},
+ {"rx65_127byte", PRUETH_STAT_OFFSET(rx65_127byte)},
+ {"rx128_255byte", PRUETH_STAT_OFFSET(rx128_255byte)},
+ {"rx256_511byte", PRUETH_STAT_OFFSET(rx256_511byte)},
+ {"rx512_1023byte", PRUETH_STAT_OFFSET(rx512_1023byte)},
+ {"rx1024byte", PRUETH_STAT_OFFSET(rx1024byte)},
+
+ {"lateColl", PRUETH_STAT_OFFSET(late_coll)},
+ {"singleColl", PRUETH_STAT_OFFSET(single_coll)},
+ {"multiColl", PRUETH_STAT_OFFSET(multi_coll)},
+ {"excessColl", PRUETH_STAT_OFFSET(excess_coll)},
+
+ {"rxMisAlignmentFrames", PRUETH_STAT_OFFSET(rx_misalignment_frames)},
+ {"stormPrevCounterBC", PRUETH_STAT_OFFSET(stormprev_counter_bc)},
+ {"stormPrevCounterMC", PRUETH_STAT_OFFSET(stormprev_counter_mc)},
+ {"stormPrevCounterUC", PRUETH_STAT_OFFSET(stormprev_counter_uc)},
+ {"macRxError", PRUETH_STAT_OFFSET(mac_rxerror)},
+ {"SFDError", PRUETH_STAT_OFFSET(sfd_error)},
+ {"defTx", PRUETH_STAT_OFFSET(def_tx)},
+ {"macTxError", PRUETH_STAT_OFFSET(mac_txerror)},
+ {"rxOverSizedFrames", PRUETH_STAT_OFFSET(rx_oversized_frames)},
+ {"rxUnderSizedFrames", PRUETH_STAT_OFFSET(rx_undersized_frames)},
+ {"rxCRCFrames", PRUETH_STAT_OFFSET(rx_crc_frames)},
+ {"droppedPackets", PRUETH_STAT_OFFSET(dropped_packets)},
+
+ {"txHWQOverFlow", PRUETH_STAT_OFFSET(tx_hwq_overflow)},
+ {"txHWQUnderFlow", PRUETH_STAT_OFFSET(tx_hwq_underflow)},
+ {"vlanDropped", PRUETH_STAT_OFFSET(vlan_dropped)},
+ {"multicastDropped", PRUETH_STAT_OFFSET(multicast_dropped)},
+};
+
+static int emac_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int a_size;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ a_size = ARRAY_SIZE(prueth_ethtool_stats);
+ a_size += prueth_lre_get_sset_count(emac->prueth);
+
+ return a_size;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(prueth_ethtool_stats); i++) {
+ memcpy(p, prueth_ethtool_stats[i].string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ prueth_lre_get_strings(emac->prueth, p);
+ break;
+ default:
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct port_statistics pstats;
+ u32 val;
+ int i;
+ void *ptr;
+
+ emac_get_stats(emac, &pstats);
+
+ for (i = 0; i < ARRAY_SIZE(prueth_ethtool_stats); i++) {
+ ptr = &pstats;
+ ptr += prueth_ethtool_stats[i].offset;
+ val = *(u32 *)ptr;
+ data[i] = val;
+ }
+ prueth_lre_update_stats(emac->prueth, &data[i]);
+}
+
+static int emac_get_regs_len(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ /* The VLAN table is at the end of the memory map, after the
+ * multicast filter region, so VLAN table base + size gives the
+ * entire size of the register dump in case of Dual-EMAC
+ * firmware.
+ */
+ if (PRUETH_IS_EMAC(prueth) || PRUETH_IS_SWITCH(prueth)) {
+ return ICSS_EMAC_FW_VLAN_FLTR_TBL_BASE_ADDR +
+ ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES;
+ }
+
+ /* The multicast table and VLAN filter table are in different
+ * memories in case of HSR/PRP firmware. Therefore add the sizes
+ * of the individual regions.
+ */
+ if (PRUETH_IS_LRE(prueth)) {
+ return ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR +
+ ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES +
+ ICSS_LRE_FW_MULTICAST_FILTER_TABLE +
+ ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES;
+ }
+
+ return 0;
+}
+
+static void emac_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ void __iomem *ram;
+ u8 *reg = p;
+
+ regs->version = PRUETH_REG_DUMP_GET_VER(prueth);
+
+ /* Dump firmware's VLAN and MC tables */
+ if (PRUETH_IS_EMAC(prueth) || PRUETH_IS_SWITCH(prueth)) {
+ ram = prueth->mem[emac->dram].va;
+ memcpy_fromio(reg, ram, emac_get_regs_len(ndev));
+ return;
+ }
+
+ if (PRUETH_IS_LRE(prueth)) {
+ size_t len = ICSS_LRE_FW_VLAN_FLTR_TBL_BASE_ADDR +
+ ICSS_EMAC_FW_VLAN_FILTER_TABLE_SIZE_BYTES;
+
+ ram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ memcpy_fromio(reg, ram, len);
+
+ reg += len;
+
+ ram = prueth->mem[PRUETH_MEM_DRAM1].va;
+ len = ICSS_LRE_FW_MULTICAST_FILTER_TABLE +
+ ICSS_EMAC_FW_MULTICAST_TABLE_SIZE_BYTES;
+ memcpy_fromio(reg, ram, len);
+ }
+}
+
+static int emac_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if ((PRUETH_IS_EMAC(emac->prueth) && !emac->emac_ptp_tx_irq) ||
+ (PRUETH_IS_LRE(emac->prueth) && !emac->hsr_ptp_tx_irq))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = icss_iep_get_ptp_clock_idx(emac->prueth->iep);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+/* Ethtool support for EMAC adapter */
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link_ksettings = emac_get_link_ksettings,
+ .set_link_ksettings = emac_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = emac_get_ts_info,
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+ .get_regs = emac_get_regs,
+ .get_regs_len = emac_get_regs_len,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int prueth_node_port(struct device_node *eth_node)
+{
+ if (!strcmp(eth_node->name, "ethernet-mii0"))
+ return PRUETH_PORT_MII0;
+ else if (!strcmp(eth_node->name, "ethernet-mii1"))
+ return PRUETH_PORT_MII1;
+ else
+ return -EINVAL;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int prueth_node_mac(struct device_node *eth_node)
+{
+ if (!strcmp(eth_node->name, "ethernet-mii0"))
+ return PRUETH_MAC0;
+ else if (!strcmp(eth_node->name, "ethernet-mii1"))
+ return PRUETH_MAC1;
+ else
+ return -EINVAL;
+}
+
+static int prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ int port;
+ int mac;
+ struct net_device *ndev;
+ struct prueth_emac *emac;
+ const u8 *mac_addr;
+ int ret;
+
+ port = prueth_node_port(eth_node);
+ if (port < 0)
+ return -EINVAL;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac < 0)
+ return -EINVAL;
+
+ ndev = devm_alloc_etherdev(prueth->dev, sizeof(*emac));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ emac = netdev_priv(ndev);
+ prueth->emac[mac] = emac;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+ memset(&emac->mc_filter_mask[0], 0xff, ETH_ALEN); /* default mask */
+
+ /* by default eth_type is EMAC */
+ switch (port) {
+ case PRUETH_PORT_MII0:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII0;
+
+ /* packets from MII0 are on queues 1 through 2 */
+ emac->rx_queue_start = PRUETH_QUEUE1;
+ emac->rx_queue_end = PRUETH_QUEUE2;
+
+ emac->dram = PRUETH_MEM_DRAM0;
+ emac->pru = prueth->pru0;
+ break;
+ case PRUETH_PORT_MII1:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII1;
+
+ /* packets from MII1 are on queues 3 through 4 */
+ emac->rx_queue_start = PRUETH_QUEUE3;
+ emac->rx_queue_end = PRUETH_QUEUE4;
+
+ emac->dram = PRUETH_MEM_DRAM1;
+ emac->pru = prueth->pru1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ emac->rx_irq = of_irq_get_byname(eth_node, "rx");
+ if (emac->rx_irq < 0) {
+ ret = emac->rx_irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(prueth->dev, "could not get rx irq\n");
+ goto free;
+ }
+ emac->tx_irq = of_irq_get_byname(eth_node, "tx");
+ if (emac->tx_irq < 0) {
+ if (emac->tx_irq != -EPROBE_DEFER)
+ dev_dbg(prueth->dev, "tx irq not configured\n");
+ }
+
+ emac->emac_ptp_tx_irq = of_irq_get_byname(eth_node, "emac_ptp_tx");
+ if (emac->emac_ptp_tx_irq < 0) {
+ emac->emac_ptp_tx_irq = 0;
+ dev_err(prueth->dev, "could not get ptp tx irq. Skipping PTP support\n");
+ }
+
+ emac->hsr_ptp_tx_irq = of_irq_get_byname(eth_node, "hsr_ptp_tx");
+ if (emac->hsr_ptp_tx_irq < 0) {
+ emac->hsr_ptp_tx_irq = 0;
+ dev_err(prueth->dev, "could not get hsr ptp tx irq. Skipping PTP support\n");
+ }
+
+ emac->msg_enable = netif_msg_init(debug_level, PRUETH_EMAC_DEBUG);
+ spin_lock_init(&emac->lock);
+ spin_lock_init(&emac->ptp_skb_lock);
+ spin_lock_init(&emac->addr_lock);
+
+ /* get mac address from DT and set private and netdev addr */
+ mac_addr = of_get_mac_address(eth_node);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
+ if (!emac->phy_node) {
+ dev_err(prueth->dev, "couldn't find phy-handle\n");
+ ret = -ENODEV;
+ goto free;
+ }
+
+ ret = of_get_phy_mode(eth_node, &emac->phy_if);
+ if (ret) {
+ dev_err(prueth->dev, "could not get phy-mode property err %d\n", ret);
+ goto free;
+ }
+
+ /* connect PHY */
+ emac->phydev = of_phy_connect(ndev, emac->phy_node,
+ &emac_adjust_link, 0, emac->phy_if);
+ if (!emac->phydev) {
+ dev_dbg(prueth->dev, "couldn't connect to phy %s\n",
+ emac->phy_node->full_name);
+ ret = -EPROBE_DEFER;
+ goto free;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+
+ if (of_property_read_bool(eth_node, "ti,no-half-duplex")) {
+ phy_remove_link_mode(emac->phydev,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+
+ if (prueth->support_lre)
+ ndev->hw_features |= (NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_TAG_RM);
+
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &emac_ethtool_ops;
+#if IS_ENABLED(CONFIG_HSR)
+ if (prueth->support_lre)
+ ndev->lredev_ops = &prueth_lredev_ops;
+#endif
+
+ /* for HSR/PRP */
+ if (prueth->support_lre && emac->port_id == PRUETH_PORT_MII0) {
+ prueth->hp->ndev = ndev;
+ prueth->hp->priority = 0;
+ prueth->lp->ndev = ndev;
+ prueth->lp->priority = 1;
+ }
+
+ return 0;
+
+free:
+ prueth->emac[mac] = NULL;
+
+ return ret;
+}
+
+static void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ int mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac < 0)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ phy_disconnect(emac->phydev);
+
+ prueth->emac[mac] = NULL;
+}
+
+bool prueth_sw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return false;
+
+ if (ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ return true;
+
+ return false;
+}
+
+static void prueth_port_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i;
+ u8 all_slaves = BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1);
+
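+ /* mark frames as offload-forwarded only when both MII ports are
+ * members of the same bridge
+ */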
+ if (prueth->br_members == all_slaves)
+ set_val = 1;
+
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d, mbrs=0x%x\n",
+ set_val, prueth->br_members);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++)
+ prueth->emac[i]->offload_fwd_mark = set_val;
+
+ /* Bridge is created, load switch firmware, if not already in
+ * that mode
+ */
+ if (set_val && !PRUETH_IS_SWITCH(prueth))
+ prueth_change_to_switch_mode(prueth);
+
+ /* Bridge is deleted, switch to Dual EMAC mode */
+ if (!prueth->br_members && !PRUETH_IS_EMAC(prueth))
+ prueth_change_to_emac_mode(prueth);
+}
+
+static int prueth_ndev_port_link(struct net_device *ndev,
+ struct net_device *br_ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ dev_dbg(prueth->dev, "%s: br_mbrs=0x%x %s\n",
+ __func__, prueth->br_members, ndev->name);
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge, which is
+ * unsupported
+ */
+ if (prueth->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ prueth_port_offload_fwd_mark_update(prueth);
+
+ return NOTIFY_DONE;
+}
+
+static void prueth_ndev_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ dev_dbg(prueth->dev, "emac_sw_ndev_port_unlink\n");
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ prueth_port_offload_fwd_mark_update(prueth);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+}
+
+static int prueth_ndev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret = NOTIFY_DONE;
+ enum hsr_version ver;
+
+ if (!prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (ndev->features & NETIF_F_HW_HSR_TAG_RM) {
+ if (is_hsr_master(info->upper_dev)) {
+ hsr_get_version(info->upper_dev, &ver);
+ if (ver == HSR_V1)
+ prueth->eth_type = PRUSS_ETHTYPE_HSR;
+ else if (ver == PRP_V1)
+ prueth->eth_type = PRUSS_ETHTYPE_PRP;
+ }
+ }
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = prueth_ndev_port_link(ndev,
+ info->upper_dev);
+ else
+ prueth_ndev_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int prueth_register_notifiers(struct prueth *prueth)
+{
+ struct notifier_block *nb;
+ int ret;
+
+ nb = &prueth->prueth_ndev_nb;
+ nb->notifier_call = prueth_ndev_event;
+ ret = register_netdevice_notifier(nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register netdevice notifier failed ret: %d\n", ret);
+ return ret;
+ }
+
+ ret = prueth_sw_register_notifiers(prueth);
+ if (ret) {
+ unregister_netdevice_notifier(nb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id prueth_dt_match[];
+
+static int prueth_probe(struct platform_device *pdev)
+{
+ struct prueth *prueth;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *eth0_node, *eth1_node;
+ const struct of_device_id *match;
+ enum pruss_pru_id pruss_id0, pruss_id1;
+ bool has_lre = false;
+ struct pruss *pruss;
+ int i, ret;
+
+ if (!np)
+ return -ENODEV; /* we don't support non-DT */
+
+ match = of_match_device(prueth_dt_match, dev);
+ if (!match)
+ return -ENODEV;
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, prueth);
+
+ prueth->dev = dev;
+ prueth->fw_data = match->data;
+ prueth->prueth_np = np;
+ prueth->fw_offsets = &fw_offsets_v2_1;
+
+ eth0_node = of_get_child_by_name(np, "ethernet-mii0");
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+
+ eth1_node = of_get_child_by_name(np, "ethernet-mii1");
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither ethernet-mii0 nor ethernet-mii1 node available\n");
+ ret = -ENODEV;
+ goto put_node;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get mii-rt syscon regmap\n");
+ ret = -ENODEV;
+ goto put_node;
+ }
+
+ if (eth0_node) {
+ prueth->pru0 = pru_rproc_get(np, 0, &pruss_id0);
+ if (IS_ERR(prueth->pru0)) {
+ ret = PTR_ERR(prueth->pru0);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get PRU0: %d\n", ret);
+ goto put_node;
+ }
+ }
+
+ if (eth1_node) {
+ prueth->pru1 = pru_rproc_get(np, 1, &pruss_id1);
+ if (IS_ERR(prueth->pru1)) {
+ ret = PTR_ERR(prueth->pru1);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get PRU1: %d\n", ret);
+ goto put_pru0;
+ }
+ }
+
+ pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_pru1;
+ }
+ prueth->pruss = pruss;
+
+ ret = pruss_cfg_ocp_master_ports(prueth->pruss, 1);
+ if (ret) {
+ dev_err(dev, "couldn't enable OCP master ports: %d\n", ret);
+ goto put_pruss;
+ }
+
+ /* Configure PRUSS */
+ if (eth0_node)
+ pruss_cfg_gpimode(pruss, pruss_id0, PRUSS_GPI_MODE_MII);
+ if (eth1_node)
+ pruss_cfg_gpimode(pruss, pruss_id1, PRUSS_GPI_MODE_MII);
+ pruss_cfg_miirt_enable(pruss, true);
+ pruss_cfg_xfr_enable(pruss, true);
+
+ /* Get PRUSS mem resources */
+ /* OCMC is a system resource which we get separately */
+ for (i = 0; i < ARRAY_SIZE(pruss_mem_ids); i++) {
+ /* skip appropriate DRAM if not required */
+ if (!eth0_node && i == PRUETH_MEM_DRAM0)
+ continue;
+
+ if (!eth1_node && i == PRUETH_MEM_DRAM1)
+ continue;
+
+ ret = pruss_request_mem_region(pruss, pruss_mem_ids[i],
+ &prueth->mem[i]);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS resource %d: %d\n",
+ i, ret);
+ goto put_mem;
+ }
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+
+ goto put_mem;
+ }
+ prueth->mem[PRUETH_MEM_OCMC].va =
+ (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ OCMC_RAM_SIZE);
+ if (!prueth->mem[PRUETH_MEM_OCMC].va) {
+ dev_err(dev, "unable to allocate OCMC resource\n");
+ ret = -ENOMEM;
+ goto put_mem;
+ }
+ prueth->mem[PRUETH_MEM_OCMC].pa =
+ gen_pool_virt_to_phys(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va);
+ prueth->mem[PRUETH_MEM_OCMC].size = OCMC_RAM_SIZE;
+ dev_dbg(dev, "ocmc: pa %pa va %p size %#zx\n",
+ &prueth->mem[PRUETH_MEM_OCMC].pa,
+ prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->mem[PRUETH_MEM_OCMC].size);
+
+ if (IS_ENABLED(CONFIG_HSR) && prueth->fw_data->support_lre)
+ has_lre = true;
+
+ /* if lre is supported, both eth nodes must be present in the
+ * DT. If either is missing, reset the support flag
+ */
+ if (has_lre && (!eth0_node || !eth1_node))
+ has_lre = false;
+
+ if (has_lre) {
+ /* interrupts need to be configured per queue and are common
+ * to both ports
+ */
+ prueth->hp = devm_kzalloc(dev,
+ sizeof(struct prueth_ndev_priority),
+ GFP_KERNEL);
+ if (!prueth->hp) {
+ ret = -ENOMEM;
+ goto free_pool;
+ }
+ prueth->lp = devm_kzalloc(dev,
+ sizeof(struct prueth_ndev_priority),
+ GFP_KERNEL);
+ if (!prueth->lp) {
+ ret = -ENOMEM;
+ goto free_pool;
+ }
+
+ prueth->lre_stats = devm_kzalloc(dev,
+ sizeof(*prueth->lre_stats),
+ GFP_KERNEL);
+ if (!prueth->lre_stats) {
+ ret = -ENOMEM;
+ goto free_pool;
+ }
+
+ prueth->rx_lpq_irq = of_irq_get_byname(np, "rx_lre_lp");
+ prueth->rx_hpq_irq = of_irq_get_byname(np, "rx_lre_hp");
+ if (prueth->rx_lpq_irq < 0 || prueth->rx_hpq_irq < 0)
+ has_lre = false;
+ }
+ prueth->support_lre = has_lre;
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth0_node->name, ret);
+ }
+ goto free_pool;
+ }
+ }
+
+ if (eth1_node) {
+ ret = prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth1_node->name, ret);
+ }
+ goto netdev_exit;
+ }
+ }
+
+ prueth->iep = icss_iep_get(np);
+ if (IS_ERR(prueth->iep)) {
+ ret = PTR_ERR(prueth->iep);
+ dev_err(dev, "unable to get IEP\n");
+ goto netdev_exit;
+ }
+
+ prueth_set_fw_offsets(prueth);
+ prueth_hostinit(prueth);
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0\n");
+ goto iep_put;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1\n");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+ }
+
+ ret = prueth_register_notifiers(prueth);
+ if (ret) {
+ dev_err(dev, "can't register switchdev notifiers");
+ goto netdev_unregister;
+ }
+
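+ /* random id reported through .ndo_get_port_parent_id so
+ * switchdev sees both ports under one parent
+ */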
+ eth_random_addr(prueth->base_mac);
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+iep_put:
+ icss_iep_put(prueth->iep);
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ struct device_node *eth_node;
+
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va, OCMC_RAM_SIZE);
+
+put_mem:
+ pruss_cfg_ocp_master_ports(prueth->pruss, 0);
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(pruss, &prueth->mem[i]);
+ }
+
+put_pruss:
+ pruss_put(prueth->pruss);
+
+put_pru1:
+ if (eth1_node)
+ pru_rproc_put(prueth->pru1);
+put_pru0:
+ if (eth0_node)
+ pru_rproc_put(prueth->pru0);
+
+put_node:
+ of_node_put(eth1_node);
+ of_node_put(eth0_node);
+
+ return ret;
+}
+
+static int prueth_remove(struct platform_device *pdev)
+{
+ struct device_node *eth_node;
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ int i;
+
+ unregister_netdevice_notifier(&prueth->prueth_ndev_nb);
+ prueth_sw_unregister_notifiers(prueth);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ of_node_put(eth_node);
+ }
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ OCMC_RAM_SIZE);
+
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(prueth->pruss, &prueth->mem[i]);
+ }
+
+ icss_iep_put(prueth->iep);
+
+ pruss_put(prueth->pruss);
+
+	if (prueth->eth_node[PRUETH_MAC1])
+		pru_rproc_put(prueth->pru1);
+	if (prueth->eth_node[PRUETH_MAC0])
+		pru_rproc_put(prueth->pru0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ pruss_cfg_ocp_master_ports(prueth->pruss, 0);
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ pruss_cfg_ocp_master_ports(prueth->pruss, 1);
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
+
+/* AM33xx SoC-specific firmware data */
+static struct prueth_private_data am335x_prueth_pdata = {
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru1-prueth-fw.elf",
+ },
+ .support_lre = false,
+};
+
+/* AM437x SoC-specific firmware data */
+static struct prueth_private_data am437x_prueth_pdata = {
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru1-prueth-fw.elf",
+ },
+ .support_lre = false,
+};
+
+/* AM57xx SoC-specific firmware data */
+static struct prueth_private_data am57xx_prueth_pdata = {
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru0-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_HSR] =
+ "ti-pruss/am57xx-pru0-pruhsr-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_PRP] =
+ "ti-pruss/am57xx-pru0-pruprp-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru0-prusw-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru1-prueth-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_HSR] =
+ "ti-pruss/am57xx-pru1-pruhsr-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_PRP] =
+ "ti-pruss/am57xx-pru1-pruprp-fw.elf",
+ .fw_name[PRUSS_ETHTYPE_SWITCH] =
+ "ti-pruss/am57xx-pru1-prusw-fw.elf",
+ },
+ .support_lre = true,
+ .support_switch = true,
+};
+
+/* 66AK2G SoC-specific firmware data */
+static struct prueth_private_data k2g_prueth_pdata = {
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/k2g-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/k2g-pru1-prueth-fw.elf",
+ },
+ .support_lre = false,
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am57-prueth", .data = &am57xx_prueth_pdata, },
+ { .compatible = "ti,am4376-prueth", .data = &am437x_prueth_pdata, },
+ { .compatible = "ti,am3359-prueth", .data = &am335x_prueth_pdata, },
+ { .compatible = "ti,k2g-prueth", .data = &k2g_prueth_pdata, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = prueth_probe,
+ .remove = prueth_remove,
+ .driver = {
+ .name = "prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PRU Ethernet Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/ti/prueth_fdb_tbl.h b/drivers/net/ethernet/ti/prueth_fdb_tbl.h
new file mode 100644
index 000000000000..1c004185a11a
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_fdb_tbl.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021 Texas Instruments Incorporated - https://www.ti.com */
+#ifndef __NET_TI_PRUSS_FDB_TBL_H
+#define __NET_TI_PRUSS_FDB_TBL_H
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "prueth.h"
+
+#define ETHER_ADDR_LEN 6
+
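+/* The FDB appears to be a two-level structure: an index-table entry
+ * (selected by a hash of the MAC address) records the first
+ * bucket-table entry for that address and the number of entries
+ * sharing it.
+ */
+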
+/* 4 bytes */
+struct fdb_index_tbl_entry_t {
+ u16 bucket_idx; /* Bucket Table index of first Bucket
+ * with this MAC address
+ */
+ u16 bucket_entries; /* Number of entries in this bucket */
+} __packed;
+
+/* 4 * 256 = 1024 = 0x400 bytes */
+struct fdb_index_array_t {
+ struct fdb_index_tbl_entry_t index_tbl_entry[FDB_INDEX_TBL_MAX_ENTRIES];
+} __packed;
+
+/* 10 bytes */
+struct fdb_mac_tbl_entry_t {
+ u8 mac[ETHER_ADDR_LEN];
+ u16 age;
+ u8 port; /* 0 based: 0=port1, 1=port2 */
+ u8 is_static:1;
+ u8 active:1;
+} __packed;
+
+/* 10 * 256 = 2560 = 0xa00 bytes */
+struct fdb_mac_tbl_array_t {
+ struct fdb_mac_tbl_entry_t mac_tbl_entry[FDB_MAC_TBL_MAX_ENTRIES];
+} __packed;
+
+/* 1 byte */
+struct fdb_stp_config {
+ u8 state; /* per-port STP state (defined in FW header) */
+} __packed;
+
+/* 1 byte */
+struct fdb_flood_config {
+ u8 host_flood_enable:1;
+ u8 port1_flood_enable:1;
+ u8 port2_flood_enable:1;
+} __packed;
+
+/* 2 bytes */
+struct fdb_arbitration {
+ u8 host_lock;
+ u8 pru_locks;
+} __packed;
+
+struct fdb_tbl {
+ struct fdb_index_array_t *index_a; /* fdb index table */
+ struct fdb_mac_tbl_array_t *mac_tbl_a; /* fdb mac table */
+	struct fdb_stp_config *port1_stp_cfg;   /* port 1 STP config */
+	struct fdb_stp_config *port2_stp_cfg;   /* port 2 STP config */
+ struct fdb_flood_config *flood_enable_flags; /* per-port flood enable */
+ struct fdb_arbitration *locks; /* fdb locking mechanism */
+ u16 total_entries; /* total num entries in hash table */
+};
+
+#endif
diff --git a/drivers/net/ethernet/ti/prueth_lre.c b/drivers/net/ethernet/ti/prueth_lre.c
new file mode 100644
index 000000000000..bb23a72a64ad
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_lre.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments PRUETH HSR/PRP Link Redundancy Entity (LRE) Driver.
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/spinlock_types.h>
+
+#include "icss_lre_firmware.h"
+#include "prueth.h"
+#include "prueth_lre.h"
+#include "prueth_switch.h"
+
+void prueth_lre_config_check_flags(struct prueth *prueth)
+{
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ /* HSR/PRP: initialize check table when first port is up */
+ if (prueth->emac_configured)
+ return;
+
+ prueth->tbl_check_mask = (ICSS_LRE_HOST_TIMER_NODE_TABLE_CHECK_BIT |
+ ICSS_LRE_HOST_TIMER_HOST_TABLE_CHECK_BIT);
+ if (PRUETH_IS_HSR(prueth))
+ prueth->tbl_check_mask |=
+ ICSS_LRE_HOST_TIMER_PORT_TABLE_CHECK_BITS;
+ writel(prueth->tbl_check_mask, dram1 + ICSS_LRE_HOST_TIMER_CHECK_FLAGS);
+}
+
+/* A group of PCPs is mapped to a queue. This is the size of the
+ * firmware mapping array in shared memory
+ */
+#define PCP_GROUP_TO_QUEUE_MAP_SIZE 4
+
+/* PRU firmware default PCP to priority Queue map for ingress & egress
+ *
+ * At ingress to Host
+ * ==================
+ * byte 0 => PRU 1, PCP 0-3 => Q3
+ * byte 1 => PRU 1, PCP 4-7 => Q2
+ * byte 2 => PRU 0, PCP 0-3 => Q1
+ * byte 3 => PRU 0, PCP 4-7 => Q0
+ *
+ * At egress to wire/network on PRU-0 and PRU-1
+ * ============================================
+ * byte 0 => Host, PCP 0-3 => Q3
+ * byte 1 => Host, PCP 4-7 => Q2
+ *
+ * PRU-0
+ * -----
+ * byte 2 => PRU-1, PCP 0-3 => Q1
+ * byte 3 => PRU-1, PCP 4-7 => Q0
+ *
+ * PRU-1
+ * -----
+ * byte 2 => PRU-0, PCP 0-3 => Q1
+ * byte 3 => PRU-0, PCP 4-7 => Q0
+ *
+ * The queue names below are 1-based, i.e. PRUETH_QUEUE1 is Q0,
+ * PRUETH_QUEUE2 is Q1 and so forth. The firmware convention is
+ * that a lower queue number has higher priority than a higher
+ * queue number.
+ */
+static u8 fw_pcp_default_priority_queue_map[PCP_GROUP_TO_QUEUE_MAP_SIZE] = {
+ /* port 2 or PRU 1 */
+ PRUETH_QUEUE4, PRUETH_QUEUE3,
+ /* port 1 or PRU 0 */
+ PRUETH_QUEUE2, PRUETH_QUEUE1,
+};
+
+static void prueth_lre_pcp_queue_map_config(struct prueth *prueth)
+{
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ memcpy_toio(sram + ICSS_LRE_QUEUE_2_PCP_MAP_OFFSET,
+ &fw_pcp_default_priority_queue_map[0],
+ PCP_GROUP_TO_QUEUE_MAP_SIZE);
+}
+
+static void prueth_lre_host_table_init(struct prueth *prueth)
+{
+ void __iomem *dram0 = prueth->mem[PRUETH_MEM_DRAM0].va;
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ memset_io(dram0 + ICSS_LRE_DUPLICATE_HOST_TABLE, 0,
+ ICSS_LRE_DUPLICATE_HOST_TABLE_DMEM_SIZE);
+
+ writel(ICSS_LRE_DUPLICATE_HOST_TABLE_SIZE_INIT,
+ dram1 + ICSS_LRE_DUPLICATE_HOST_TABLE_SIZE);
+
+ writel(ICSS_LRE_TABLE_CHECK_RESOLUTION_10_MS,
+ dram1 + ICSS_LRE_DUPLI_HOST_CHECK_RESO);
+
+ writel(ICSS_LRE_MASTER_SLAVE_BUSY_BITS_CLEAR,
+ dram1 + ICSS_LRE_HOST_DUPLICATE_ARBITRATION);
+}
+
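+/* The PRU firmware keeps MAC addresses packed into 32-bit words, so
+ * the octet order differs from host order: reverse the first four
+ * octets and swap the last two.
+ */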
+static void pru2host_mac(u8 *mac)
+{
+ swap(mac[0], mac[3]);
+ swap(mac[1], mac[2]);
+ swap(mac[4], mac[5]);
+}
+
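+/* Fold all MAC octets together with XOR, then mask the result to get
+ * the bin index. A quick sanity check with hypothetical values: for
+ * MAC 01:02:03:04:05:06 and hash_mask 0xff,
+ * 0x01 ^ 0x02 ^ 0x03 ^ 0x04 ^ 0x05 ^ 0x06 = 0x07, i.e. bin 7.
+ */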
+static u16 get_hash(u8 *mac, u16 hash_mask)
+{
+ int j;
+ u16 hash;
+
+ for (j = 0, hash = 0; j < ETH_ALEN; j++)
+ hash ^= mac[j];
+ hash = hash & hash_mask;
+
+ return hash;
+}
+
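+/* Two-flag handshake with the PRU firmware: raise arm_lock, and if the
+ * firmware already holds fw_lock, drop arm_lock and retry, effectively
+ * giving the firmware priority over the host.
+ */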
+static void pru_spin_lock(struct node_tbl *nt)
+{
+ while (1) {
+ nt->nt_info->arm_lock = 1;
+ if (!nt->nt_info->fw_lock)
+ break;
+ nt->nt_info->arm_lock = 0;
+ }
+}
+
+static inline void pru_spin_unlock(struct node_tbl *nt)
+{
+ nt->nt_info->arm_lock = 0;
+}
+
+int prueth_lre_nt_insert(struct prueth *prueth,
+ u8 *mac, int port, int sv_frame, int proto)
+{
+ struct nt_queue_t *q = prueth->mac_queue;
+ unsigned long flags;
+ int ret = LRE_OK;
+
+	/* mac_queue may be NULL if we are in the middle of ndo_close,
+	 * so check and return early; otherwise repeated ifdown can
+	 * crash the kernel.
+	 */
+ if (!q)
+ return ret;
+
+ spin_lock_irqsave(&prueth->nt_lock, flags);
+ if (q->full) {
+ ret = LRE_ERR;
+ } else {
+ memcpy(q->nt_queue[q->wr_ind].mac, mac, ETH_ALEN);
+ q->nt_queue[q->wr_ind].sv_frame = sv_frame;
+ q->nt_queue[q->wr_ind].port_id = port;
+ q->nt_queue[q->wr_ind].proto = proto;
+
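+		/* the queue is a power-of-two ring (PRUETH_MAC_QUEUE_MAX
+		 * is BIT(6), i.e. 64 entries), so wrapping the write
+		 * index is a simple mask
+		 */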
+ q->wr_ind++;
+ q->wr_ind &= (PRUETH_MAC_QUEUE_MAX - 1);
+ if (q->wr_ind == q->rd_ind)
+ q->full = true;
+ }
+ spin_unlock_irqrestore(&prueth->nt_lock, flags);
+
+ return ret;
+}
+
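+/* A node is considered expired once every relevant last-seen timer has
+ * passed forget_time: LAN A, LAN B, and the supervision timer unless
+ * the remote is a SAN (which sends no supervision frames).
+ */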
+static inline bool node_expired(struct node_tbl *nt, u16 node, u16 forget_time)
+{
+ struct node_tbl_t nt_node = nt->nt_array->node_tbl[node];
+
+ return ((nt_node.time_last_seen_s > forget_time ||
+ nt_node.status & ICSS_LRE_NT_REM_NODE_TYPE_SANAB) &&
+ nt_node.time_last_seen_a > forget_time &&
+ nt_node.time_last_seen_b > forget_time);
+}
+
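+/* Shorthands for the three-level node table: the index table, keyed by
+ * MAC hash, gives a bin offset and entry count; the bin table stores
+ * MAC addresses contiguously per hash and points into the node table,
+ * which holds the per-node state and counters.
+ */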
+#define IND_BIN_NO(x) nt->index_array->index_tbl[x].bin_no_entries
+#define IND_BINOFS(x) nt->index_array->index_tbl[x].bin_offset
+#define BIN_NODEOFS(x) nt->bin_array->bin_tbl[x].node_tbl_offset
+
+static void _prueth_lre_init_node_table(struct prueth *prueth)
+{
+ struct nt_queue_t *q = prueth->mac_queue;
+ struct node_tbl *nt = prueth->nt;
+ int j;
+
+ const struct prueth_fw_offsets *fw_offsets = prueth->fw_offsets;
+
+ nt->nt_array = prueth->mem[fw_offsets->nt_array_loc].va +
+ fw_offsets->nt_array_offset;
+ memset_io(nt->nt_array, 0, sizeof(struct node_tbl_t) *
+ fw_offsets->nt_array_max_entries);
+
+ nt->bin_array = prueth->mem[fw_offsets->bin_array_loc].va +
+ fw_offsets->bin_array_offset;
+ memset_io(nt->bin_array, 0, sizeof(struct bin_tbl_t) *
+ fw_offsets->bin_array_max_entries);
+
+ nt->index_array = prueth->mem[fw_offsets->index_array_loc].va +
+ fw_offsets->index_array_offset;
+ memset_io(nt->index_array, 0, sizeof(struct node_index_tbl_t) *
+ fw_offsets->index_array_max_entries);
+
+ nt->nt_info = prueth->mem[fw_offsets->nt_array_loc].va +
+ fw_offsets->nt_array_offset +
+ (sizeof(struct node_tbl_t) *
+ fw_offsets->nt_array_max_entries);
+ memset_io(nt->nt_info, 0, sizeof(struct node_tbl_info_t));
+
+ nt->nt_lre_cnt =
+ prueth->mem[PRUETH_MEM_SHARED_RAM].va + ICSS_LRE_CNT_NODES;
+ memset_io(nt->nt_lre_cnt, 0, sizeof(struct node_tbl_lre_cnt_t));
+
+ nt->nt_array_max_entries = fw_offsets->nt_array_max_entries;
+ nt->bin_array_max_entries = fw_offsets->bin_array_max_entries;
+ nt->index_array_max_entries = fw_offsets->index_array_max_entries;
+ nt->hash_mask = fw_offsets->hash_mask;
+
+ for (j = 0; j < fw_offsets->index_array_max_entries; j++)
+ IND_BINOFS(j) = fw_offsets->bin_array_max_entries;
+ for (j = 0; j < fw_offsets->bin_array_max_entries; j++)
+ BIN_NODEOFS(j) = fw_offsets->nt_array_max_entries;
+ for (j = 0; j < fw_offsets->nt_array_max_entries; j++)
+ nt->nt_array->node_tbl[j].entry_state = ICSS_LRE_NODE_FREE;
+
+ q->rd_ind = 0;
+ q->wr_ind = 0;
+ q->full = false;
+}
+
+static u16 find_free_bin(struct node_tbl *nt)
+{
+ u16 j;
+
+ for (j = 0; j < nt->bin_array_max_entries; j++)
+ if (BIN_NODEOFS(j) == nt->nt_array_max_entries)
+ break;
+
+ return j;
+}
+
+/* find first free node table slot and write it to the next_free_slot */
+static u16 next_free_slot_update(struct node_tbl *nt)
+{
+ int j;
+
+ nt->nt_info->next_free_slot = nt->nt_array_max_entries;
+ for (j = 0; j < nt->nt_array_max_entries; j++) {
+ if (nt->nt_array->node_tbl[j].entry_state ==
+ ICSS_LRE_NODE_FREE) {
+ nt->nt_info->next_free_slot = j;
+ break;
+ }
+ }
+
+ return nt->nt_info->next_free_slot;
+}
+
+static void inc_time(u16 *t)
+{
+ *t += 1;
+ if (*t > ICSS_LRE_MAX_FORGET_TIME)
+ *t = ICSS_LRE_MAX_FORGET_TIME;
+}
+
+static void node_table_update_time(struct node_tbl *nt)
+{
+ int j;
+ u16 ofs;
+ struct nt_array_t *nt_arr = nt->nt_array;
+ struct node_tbl_t *node;
+
+ for (j = 0; j < nt->bin_array_max_entries; j++) {
+ ofs = nt->bin_array->bin_tbl[j].node_tbl_offset;
+ if (ofs < nt->nt_array_max_entries) {
+ node = &nt_arr->node_tbl[ofs];
+ inc_time(&node->time_last_seen_a);
+ inc_time(&node->time_last_seen_b);
+			/* increment time_last_seen_s if node is not a SAN */
+ if ((node->status &
+ ICSS_LRE_NT_REM_NODE_TYPE_SANAB) == 0)
+ inc_time(&node->time_last_seen_s);
+ }
+ }
+}
+
+static void write2node_slot(struct node_tbl *nt, u16 node, int port,
+ int sv_frame, int proto)
+{
+ memset(&nt->nt_array->node_tbl[node], 0, sizeof(struct node_tbl_t));
+ nt->nt_array->node_tbl[node].entry_state = ICSS_LRE_NODE_TAKEN;
+
+ if (port == 0x01) {
+ nt->nt_array->node_tbl[node].status =
+ ICSS_LRE_NT_REM_NODE_TYPE_SANA;
+ nt->nt_array->node_tbl[node].cnt_ra = 1;
+ if (sv_frame)
+ nt->nt_array->node_tbl[node].cnt_rx_sup_a = 1;
+ } else {
+ nt->nt_array->node_tbl[node].status =
+ ICSS_LRE_NT_REM_NODE_TYPE_SANB;
+ nt->nt_array->node_tbl[node].cnt_rb = 1;
+ if (sv_frame)
+ nt->nt_array->node_tbl[node].cnt_rx_sup_b = 1;
+ }
+
+ if (sv_frame) {
+ nt->nt_array->node_tbl[node].status = (proto == LRE_PROTO_PRP) ?
+ ICSS_LRE_NT_REM_NODE_TYPE_DAN :
+ ICSS_LRE_NT_REM_NODE_TYPE_DAN |
+ ICSS_LRE_NT_REM_NODE_HSR_BIT;
+ }
+}
+
+/* We assume that the _start_ cannot point to the middle of a bin */
+static void update_indexes(u16 start, u16 end, struct node_tbl *nt)
+{
+ u16 hash, hash_prev;
+
+ hash_prev = 0xffff; /* invalid hash */
+ for (; start <= end; start++) {
+ hash = get_hash(nt->bin_array->bin_tbl[start].src_mac_id,
+ nt->hash_mask);
+ if (hash != hash_prev)
+ IND_BINOFS(hash) = start;
+ hash_prev = hash;
+ }
+}
+
+/* start > end */
+static void move_up(u16 start, u16 end, struct node_tbl *nt,
+ bool update)
+{
+ u16 j = end;
+
+ pru_spin_lock(nt);
+
+ for (; j < start; j++)
+ memcpy(&nt->bin_array->bin_tbl[j],
+ &nt->bin_array->bin_tbl[j + 1],
+ sizeof(struct bin_tbl_t));
+
+ BIN_NODEOFS(start) = nt->nt_array_max_entries;
+
+ if (update)
+ update_indexes(end, start + 1, nt);
+
+ pru_spin_unlock(nt);
+}
+
+/* start < end */
+static void move_down(u16 start, u16 end, struct node_tbl *nt,
+ bool update)
+{
+ u16 j = end;
+
+ pru_spin_lock(nt);
+
+ for (; j > start; j--)
+ memcpy(&nt->bin_array->bin_tbl[j],
+ &nt->bin_array->bin_tbl[j - 1],
+ sizeof(struct bin_tbl_t));
+
+ nt->bin_array->bin_tbl[start].node_tbl_offset =
+ nt->nt_array_max_entries;
+
+ if (update)
+ update_indexes(start + 1, end, nt);
+
+ pru_spin_unlock(nt);
+}
+
+static int node_table_insert_from_queue(struct node_tbl *nt,
+ struct nt_queue_entry *entry)
+{
+ u8 macid[ETH_ALEN];
+ u16 hash;
+ u16 index;
+ u16 free_node;
+ bool not_found;
+ u16 empty_slot;
+
+ if (!nt)
+ return LRE_ERR;
+
+ memcpy(macid, entry->mac, ETH_ALEN);
+ pru2host_mac(macid);
+
+ hash = get_hash(macid, nt->hash_mask);
+
+ not_found = 1;
+ if (IND_BIN_NO(hash) == 0) {
+ /* there is no bin for this hash, create one */
+ index = find_free_bin(nt);
+ if (index == nt->bin_array_max_entries)
+ return LRE_ERR;
+
+ IND_BINOFS(hash) = index;
+ } else {
+ for (index = IND_BINOFS(hash);
+ index < IND_BINOFS(hash) + IND_BIN_NO(hash); index++) {
+ if ((memcmp(nt->bin_array->bin_tbl[index].src_mac_id,
+ macid, ETH_ALEN) == 0)) {
+ not_found = 0;
+ break;
+ }
+ }
+ }
+
+ if (not_found) {
+ free_node = next_free_slot_update(nt);
+
+		/* At this point we may create a new bin and set
+		 * bin_offset in the index table. That is only possible
+		 * if we found a free slot in the bin table, which
+		 * implies a free slot in the node table as well, so we
+		 * will not exit here in that case. Hence we don't have
+		 * to fix up IND_BINOFS() when returning LRE_ERR.
+		 */
+ if (free_node >= nt->nt_array_max_entries)
+ return LRE_ERR;
+
+ /* if we are here, we have at least one empty slot in the bin
+ * table and one slot at the node table
+ */
+
+ IND_BIN_NO(hash)++;
+
+ /* look for an empty slot downwards */
+ for (empty_slot = index;
+ (BIN_NODEOFS(empty_slot) != nt->nt_array_max_entries) &&
+ (empty_slot < nt->nt_array_max_entries);
+ empty_slot++)
+ ;
+
+		/* if empty_slot != max entries an empty slot was found
+		 * below; otherwise no space is available downwards, so
+		 * look upwards
+		 */
+ if (empty_slot != nt->nt_array_max_entries) {
+ move_down(index, empty_slot, nt, true);
+ } else {
+ for (empty_slot = index - 1;
+ (BIN_NODEOFS(empty_slot) !=
+ nt->nt_array_max_entries) &&
+ (empty_slot > 0);
+ empty_slot--)
+ ;
+			/* we are guaranteed to find space here since
+			 * the node table has an empty slot, so there is
+			 * no need to check the value of empty_slot
+			 */
+ move_up(index, empty_slot, nt, true);
+ }
+
+ /* space created, now populate the values*/
+ BIN_NODEOFS(index) = free_node;
+ memcpy(nt->bin_array->bin_tbl[index].src_mac_id, macid,
+ ETH_ALEN);
+ write2node_slot(nt, free_node, entry->port_id, entry->sv_frame,
+ entry->proto);
+
+ nt->nt_lre_cnt->lre_cnt++;
+ }
+
+ return LRE_OK;
+}
+
+static void node_table_check_and_remove(struct node_tbl *nt, u16 forget_time)
+{
+ int j, end_bin;
+ u16 node;
+ u16 hash;
+
+	/* loop to remove nodes that have reached NODE_FORGET_TIME */
+ for (j = 0; j < nt->bin_array_max_entries; j++) {
+ node = BIN_NODEOFS(j);
+ if (node >= nt->nt_array_max_entries)
+ continue;
+
+ if (node_expired(nt, node, forget_time)) {
+ hash = get_hash(nt->bin_array->bin_tbl[j].src_mac_id,
+ nt->hash_mask);
+
+ /* remove entry from bin array */
+ end_bin = IND_BINOFS(hash) + IND_BIN_NO(hash) - 1;
+
+ move_up(end_bin, j, nt, false);
+ (IND_BIN_NO(hash))--;
+
+ if (!IND_BIN_NO(hash))
+ IND_BINOFS(hash) = nt->bin_array_max_entries;
+
+ nt->nt_array->node_tbl[node].entry_state =
+ ICSS_LRE_NODE_FREE;
+ BIN_NODEOFS(end_bin) = nt->nt_array_max_entries;
+
+ nt->nt_lre_cnt->lre_cnt--;
+ }
+ }
+}
+
+static int pop_queue(struct prueth *prueth, spinlock_t *lock)
+{
+ unsigned long flags;
+ struct node_tbl *nt = prueth->nt;
+ struct nt_queue_t *q = prueth->mac_queue;
+ struct nt_queue_entry one_mac;
+ int ret = 0;
+
+ spin_lock_irqsave(lock, flags);
+ if (!q->full && q->wr_ind == q->rd_ind) { /* queue empty */
+ ret = 1;
+ } else {
+ memcpy(&one_mac, &q->nt_queue[q->rd_ind],
+ sizeof(struct nt_queue_entry));
+ spin_unlock_irqrestore(lock, flags);
+ node_table_insert_from_queue(nt, &one_mac);
+ spin_lock_irqsave(lock, flags);
+ q->rd_ind++;
+ q->rd_ind &= (PRUETH_MAC_QUEUE_MAX - 1);
+ q->full = false;
+ }
+ spin_unlock_irqrestore(lock, flags);
+
+ return ret;
+}
+
+static void pop_queue_process(struct prueth *prueth, spinlock_t *lock)
+{
+ while (pop_queue(prueth, lock) == 0)
+ ;
+}
+
+static void prueth_lre_port_table_init(struct prueth *prueth)
+{
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ if (PRUETH_IS_HSR(prueth)) {
+ memset_io(dram1 + ICSS_LRE_DUPLICATE_PORT_TABLE_PRU0, 0,
+ ICSS_LRE_DUPLICATE_PORT_TABLE_DMEM_SIZE);
+ memset_io(dram1 + ICSS_LRE_DUPLICATE_PORT_TABLE_PRU1, 0,
+ ICSS_LRE_DUPLICATE_PORT_TABLE_DMEM_SIZE);
+
+ writel(ICSS_LRE_DUPLICATE_PORT_TABLE_SIZE_INIT,
+ dram1 + ICSS_LRE_DUPLICATE_PORT_TABLE_SIZE);
+ } else {
+ writel(0, dram1 + ICSS_LRE_DUPLICATE_PORT_TABLE_SIZE);
+ }
+
+ writel(ICSS_LRE_TABLE_CHECK_RESOLUTION_10_MS,
+ dram1 + ICSS_LRE_DUPLI_PORT_CHECK_RESO);
+}
+
+static void prueth_lre_init(struct prueth *prueth)
+{
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ memset_io(sram + ICSS_LRE_START, 0, ICSS_LRE_STATS_DMEM_SIZE);
+
+ writel(ICSS_LRE_IEC62439_CONST_DUPLICATE_DISCARD,
+ sram + ICSS_LRE_DUPLICATE_DISCARD);
+ writel(ICSS_LRE_IEC62439_CONST_TRANSP_RECEPTION_REMOVE_RCT,
+ sram + ICSS_LRE_TRANSPARENT_RECEPTION);
+ prueth->prp_tr_mode = IEC62439_3_TR_REMOVE_RCT;
+}
+
+static void prueth_lre_dbg_init(struct prueth *prueth)
+{
+ void __iomem *dram0 = prueth->mem[PRUETH_MEM_DRAM0].va;
+
+ memset_io(dram0 + ICSS_LRE_DBG_START, 0,
+ ICSS_LRE_DEBUG_COUNTER_DMEM_SIZE);
+}
+
+static void prueth_lre_protocol_init(struct prueth *prueth)
+{
+ void __iomem *dram0 = prueth->mem[PRUETH_MEM_DRAM0].va;
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ if (PRUETH_IS_HSR(prueth))
+ writew(prueth->hsr_mode, dram0 + ICSS_LRE_HSR_MODE);
+
+ writel(ICSS_LRE_DUPLICATE_FORGET_TIME_400_MS,
+ dram1 + ICSS_LRE_DUPLI_FORGET_TIME);
+ writel(ICSS_LRE_SUP_ADDRESS_INIT_OCTETS_HIGH,
+ dram1 + ICSS_LRE_SUP_ADDR);
+ writel(ICSS_LRE_SUP_ADDRESS_INIT_OCTETS_LOW,
+ dram1 + ICSS_LRE_SUP_ADDR_LOW);
+}
+
+static void prueth_lre_config_packet_timestamping(struct prueth *prueth)
+{
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ writeb(1, sram + ICSS_LRE_PRIORITY_INTRS_STATUS_OFFSET);
+ writeb(1, sram + ICSS_LRE_TIMESTAMP_PKTS_STATUS_OFFSET);
+}
+
+static void prueth_lre_process_check_flags_event(struct prueth *prueth)
+{
+ void __iomem *dram = prueth->mem[PRUETH_MEM_DRAM1].va;
+ unsigned long flags;
+
+ if (prueth->node_table_clear) {
+ pru_spin_lock(prueth->nt);
+ spin_lock_irqsave(&prueth->nt_lock, flags);
+ _prueth_lre_init_node_table(prueth);
+ spin_unlock_irqrestore(&prueth->nt_lock, flags);
+		/* no need to release the PRU lock;
+		 * _prueth_lre_init_node_table() clears it anyway
+		 */
+ prueth->node_table_clear = 0;
+ } else {
+ prueth->tbl_check_mask &=
+ ~ICSS_LRE_HOST_TIMER_NODE_TABLE_CLEAR_BIT;
+ }
+
+ /* schedule work here */
+ kthread_queue_work(prueth->nt_kworker, &prueth->nt_work);
+
+ writel(prueth->tbl_check_mask, dram + ICSS_LRE_HOST_TIMER_CHECK_FLAGS);
+}
+
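+/* Periodic table maintenance: the hrtimer re-arms itself every
+ * PRUETH_TIMER_MS and runs the check-flags work only once both MII
+ * ports are up.
+ */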
+static enum hrtimer_restart prueth_lre_timer(struct hrtimer *timer)
+{
+ struct prueth *prueth = container_of(timer, struct prueth,
+ tbl_check_timer);
+ unsigned int timeout = PRUETH_TIMER_MS;
+
+ hrtimer_forward_now(timer, ms_to_ktime(timeout));
+ if (prueth->emac_configured !=
+ (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
+ return HRTIMER_RESTART;
+
+ prueth_lre_process_check_flags_event(prueth);
+
+ return HRTIMER_RESTART;
+}
+
+static void prueth_lre_init_timer(struct prueth *prueth)
+{
+ hrtimer_init(&prueth->tbl_check_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ prueth->tbl_check_timer.function = prueth_lre_timer;
+}
+
+static void prueth_lre_start_timer(struct prueth *prueth)
+{
+ unsigned int timeout = PRUETH_TIMER_MS;
+
+ if (hrtimer_active(&prueth->tbl_check_timer))
+ return;
+
+ hrtimer_start(&prueth->tbl_check_timer, ms_to_ktime(timeout),
+ HRTIMER_MODE_REL);
+}
+
+void prueth_lre_config(struct prueth *prueth)
+{
+ if (PRUETH_IS_HSR(prueth))
+ prueth->hsr_mode = ICSS_LRE_MODEH;
+
+ prueth_lre_init_timer(prueth);
+ prueth_lre_start_timer(prueth);
+ prueth_lre_pcp_queue_map_config(prueth);
+ prueth_lre_host_table_init(prueth);
+ prueth_lre_port_table_init(prueth);
+ prueth_lre_init(prueth);
+ prueth_lre_dbg_init(prueth);
+ prueth_lre_protocol_init(prueth);
+	/* for HSR/PRP the LRE driver orders frames based on
+	 * their packet timestamps
+	 */
+ prueth_lre_config_packet_timestamping(prueth);
+}
+
+void prueth_lre_cleanup(struct prueth *prueth)
+{
+ hrtimer_cancel(&prueth->tbl_check_timer);
+}
+
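+/* Deferred node-table maintenance: drain the MAC queue into the node
+ * table on every invocation and age entries out roughly once per 100
+ * invocations, using the 60 s forget time.
+ */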
+static void nt_updater(struct kthread_work *work)
+{
+ struct prueth *prueth = container_of(work, struct prueth, nt_work);
+
+ pop_queue_process(prueth, &prueth->nt_lock);
+
+ node_table_update_time(prueth->nt);
+ if (++prueth->rem_cnt >= 100) {
+ node_table_check_and_remove(prueth->nt,
+ ICSS_LRE_NODE_FORGET_TIME_60000_MS);
+ prueth->rem_cnt = 0;
+ }
+}
+
+static irqreturn_t prueth_lre_emac_rx_packets(struct prueth_emac *emac,
+					      u8 qid1, u8 qid2)
+{
+ struct prueth *prueth = emac->prueth;
+ void *ocmc_ram = (__force void *)prueth->mem[PRUETH_MEM_OCMC].va;
+ u16 bd_rd_ptr, bd_wr_ptr, update_rd_ptr, bd_rd_ptr_o, bd_wr_ptr_o;
+ void __iomem *shared_ram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ struct prueth_queue_desc __iomem *queue_desc, *queue_desc_o;
+ struct net_device_stats *ndevstats = &emac->ndev->stats;
+ int ret, used = 0, port, port0_q_empty, port1_q_empty;
+ unsigned int emac_max_pktlen = PRUETH_MAX_PKTLEN_LRE;
+ const struct prueth_queue_info *rxqueue, *rxqueue_o;
+ struct prueth_packet_info pkt_info, pkt_info_o;
+ const struct prueth_queue_info *rxqueue_p;
+ struct prueth_packet_info *pkt_info_p;
+ struct net_device_stats *ndevstats_o;
+ struct net_device_stats *ndevstats_p;
+ u8 overflow_cnt, overflow_cnt_o;
+ u32 rd_buf_desc, rd_buf_desc_o;
+ struct prueth_emac *other_emac;
+ u16 *bd_rd_ptr_p, *bd_wr_ptr_p;
+ struct prueth_emac *emac_p;
+ u32 pkt_ts, pkt_ts_o;
+ u32 iep_wrap;
+
+ other_emac = prueth->emac[(emac->port_id ^ 0x3) - 1];
+ ndevstats_o = &other_emac->ndev->stats;
+
+ /* use the correct wrap value based on ICSSM version */
+ iep_wrap = prueth->fw_offsets->iep_wrap;
+ /* search host queues for packets */
+ queue_desc = emac->rx_queue_descs + qid1;
+ queue_desc_o = other_emac->rx_queue_descs + qid2;
+
+ rxqueue = &sw_queue_infos[PRUETH_PORT_HOST][qid1];
+ rxqueue_o = &sw_queue_infos[PRUETH_PORT_HOST][qid2];
+
+retry:
+ overflow_cnt = readb(&queue_desc->overflow_cnt);
+ overflow_cnt_o = readb(&queue_desc_o->overflow_cnt);
+
+ if (overflow_cnt > 0) {
+ emac->ndev->stats.rx_over_errors += overflow_cnt;
+ /* reset to zero */
+ writeb(0, &queue_desc->overflow_cnt);
+ }
+ if (overflow_cnt_o > 0) {
+ other_emac->ndev->stats.rx_over_errors += overflow_cnt_o;
+
+ /* reset to zero */
+ writeb(0, &queue_desc_o->overflow_cnt);
+ }
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+ bd_rd_ptr_o = readw(&queue_desc_o->rd_ptr);
+ bd_wr_ptr_o = readw(&queue_desc_o->wr_ptr);
+
+ port0_q_empty = (bd_rd_ptr == bd_wr_ptr) ? 1 : 0;
+ port1_q_empty = (bd_rd_ptr_o == bd_wr_ptr_o) ? 1 : 0;
+
+ /* while packets are available in this queue */
+ while (!port0_q_empty || !port1_q_empty) {
+ /* get packet info from the read buffer descriptor */
+ rd_buf_desc = readl(shared_ram + bd_rd_ptr);
+ rd_buf_desc_o = readl(shared_ram + bd_rd_ptr_o);
+
+ parse_packet_info(prueth, rd_buf_desc, &pkt_info);
+ parse_packet_info(prueth, rd_buf_desc_o, &pkt_info_o);
+
+ pkt_ts = readl(ocmc_ram + ICSS_LRE_TIMESTAMP_ARRAY_OFFSET +
+ bd_rd_ptr - SRAM_START_OFFSET);
+ pkt_ts_o = readl(ocmc_ram + ICSS_LRE_TIMESTAMP_ARRAY_OFFSET +
+ bd_rd_ptr_o - SRAM_START_OFFSET);
+
+ if (!port0_q_empty && !port1_q_empty) {
+ /* Packets in both port queues */
+ /* Calculate diff b/n timestamps and account for
+ * wraparound
+ */
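+			/* The queue whose head packet is older is
+			 * drained first. E.g. (hypothetical values)
+			 * pkt_ts = 10 and pkt_ts_o = iep_wrap - 10:
+			 * the raw difference exceeds iep_wrap / 2, so
+			 * the IEP counter wrapped between the two
+			 * arrivals and the port-1 packet is in fact
+			 * the older one.
+			 */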
+ if (pkt_ts > pkt_ts_o)
+ port = (pkt_ts - pkt_ts_o) > (iep_wrap / 2) ?
+ 0 : 1;
+ else
+ port = (pkt_ts_o - pkt_ts) > (iep_wrap / 2) ?
+ 1 : 0;
+
+ } else if (!port0_q_empty) {
+ /* Packet(s) in port0 queue only */
+ port = 0;
+ } else {
+ /* Packet(s) in port1 queue only */
+ port = 1;
+ }
+
+ /* Select correct data structures for queue/packet selected */
+ if (port == 0) {
+ pkt_info_p = &pkt_info;
+ bd_wr_ptr_p = &bd_wr_ptr;
+ bd_rd_ptr_p = &bd_rd_ptr;
+ emac_p = emac;
+ ndevstats_p = ndevstats;
+ rxqueue_p = rxqueue;
+ } else {
+ pkt_info_p = &pkt_info_o;
+ bd_wr_ptr_p = &bd_wr_ptr_o;
+ bd_rd_ptr_p = &bd_rd_ptr_o;
+ emac_p = other_emac;
+ ndevstats_p = ndevstats_o;
+ rxqueue_p = rxqueue_o;
+ }
+
+		if (pkt_info_p->length <= 0) {
+			/* a packet length of zero would leave the read
+			 * pointer stuck, locking up the driver, so
+			 * manually advance it to the write pointer,
+			 * discarding all remaining packets in this
+			 * queue. This should never happen.
+			 */
+ update_rd_ptr = *bd_wr_ptr_p;
+ ndevstats_p->rx_length_errors++;
+		} else if (pkt_info_p->length > emac_max_pktlen) {
+ /* if the packet is too large we skip it but we
+ * still need to move the read pointer ahead
+ * and assume something is wrong with the read
+ * pointer as the firmware should be filtering
+ * these packets
+ */
+ update_rd_ptr = *bd_wr_ptr_p;
+ ndevstats_p->rx_length_errors++;
+ } else {
+ update_rd_ptr = *bd_rd_ptr_p;
+ ret = emac_rx_packet(emac_p, &update_rd_ptr,
+ pkt_info_p, rxqueue_p);
+ if (ret)
+ return IRQ_HANDLED;
+
+ used++;
+ }
+
+ /* after reading the buffer descriptor we clear it
+ * to prevent improperly moved read pointer errors
+ * from simply looking like old packets.
+ */
+
+ /* update read pointer in queue descriptor */
+ if (port == 0) {
+ writel(0, shared_ram + bd_rd_ptr);
+ writew(update_rd_ptr, &queue_desc->rd_ptr);
+ bd_rd_ptr = update_rd_ptr;
+ } else {
+ writel(0, shared_ram + bd_rd_ptr_o);
+ writew(update_rd_ptr, &queue_desc_o->rd_ptr);
+ bd_rd_ptr_o = update_rd_ptr;
+ }
+
+ port0_q_empty = (bd_rd_ptr == bd_wr_ptr) ? 1 : 0;
+ port1_q_empty = (bd_rd_ptr_o == bd_wr_ptr_o) ? 1 : 0;
+ }
+
+ if (used) {
+ used = 0;
+ goto retry;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_lre_emac_rx_hardirq_lp(int irq, void *dev_id)
+{
+ struct prueth_ndev_priority *ndev_prio =
+ (struct prueth_ndev_priority *)dev_id;
+ struct net_device *ndev = ndev_prio->ndev;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return prueth_lre_emac_rx_packets(emac, PRUETH_QUEUE2, PRUETH_QUEUE4);
+}
+
+static irqreturn_t prueth_lre_emac_rx_hardirq_hp(int irq, void *dev_id)
+{
+ struct prueth_ndev_priority *ndev_prio =
+ (struct prueth_ndev_priority *)dev_id;
+ struct net_device *ndev = ndev_prio->ndev;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return prueth_lre_emac_rx_packets(emac, PRUETH_QUEUE1, PRUETH_QUEUE3);
+}
+
+int prueth_lre_request_irqs(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int ret;
+
+ if (emac->hsr_ptp_tx_irq) {
+ ret = request_threaded_irq(emac->hsr_ptp_tx_irq,
+ prueth_ptp_tx_irq_handle,
+ prueth_ptp_tx_irq_work,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ emac->ndev->name, emac->ndev);
+ if (ret) {
+ netdev_err(emac->ndev, "unable to request PTP TX IRQ\n");
+ return ret;
+ }
+ }
+
+	/* HSR/PRP: request the common RX IRQs only when the first port is initialized */
+ if (prueth->emac_configured)
+ return 0;
+
+ ret = request_threaded_irq(prueth->rx_hpq_irq, NULL, prueth_lre_emac_rx_hardirq_hp,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "eth_hp_int", prueth->hp);
+ if (ret) {
+ netdev_err(emac->ndev, "unable to request RX HPQ IRQ\n");
+ goto free_ptp_irq;
+ }
+
+ ret = request_threaded_irq(prueth->rx_lpq_irq, NULL, prueth_lre_emac_rx_hardirq_lp,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "eth_lp_int", prueth->lp);
+ if (ret) {
+ netdev_err(emac->ndev, "unable to request RX LPQ IRQ\n");
+ goto free_rx_hpq_irq;
+ }
+
+ return 0;
+
+free_rx_hpq_irq:
+ free_irq(prueth->rx_hpq_irq, prueth->hp);
+free_ptp_irq:
+ if (emac->hsr_ptp_tx_irq)
+ free_irq(emac->hsr_ptp_tx_irq, emac->ndev);
+
+ return ret;
+}
+
+void prueth_lre_free_irqs(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+
+ if (emac->hsr_ptp_tx_irq)
+ free_irq(emac->hsr_ptp_tx_irq, emac->ndev);
+
+	/* HSR/PRP: free the common RX IRQs only when the last port is down */
+ if (prueth->emac_configured)
+ return;
+
+ free_irq(prueth->rx_lpq_irq, prueth->lp);
+ free_irq(prueth->rx_hpq_irq, prueth->hp);
+}
+
+void prueth_lre_free_memory(struct prueth *prueth)
+{
+	/* HSR/PRP: free node table memory only when the last port is down */
+ if (prueth->emac_configured)
+ return;
+
+ kfree(prueth->nt);
+ kfree(prueth->mac_queue);
+ prueth->mac_queue = NULL;
+ prueth->nt = NULL;
+}
+
+#define PRUETH_LRE_STAT_OFS(m) offsetof(struct lre_statistics, m)
+static const struct {
+ char string[ETH_GSTRING_LEN];
+ u32 offset;
+} prueth_ethtool_lre_stats[] = {
+ {"lreTxA", PRUETH_LRE_STAT_OFS(cnt_tx_a)},
+ {"lreTxB", PRUETH_LRE_STAT_OFS(cnt_tx_b)},
+ {"lreTxC", PRUETH_LRE_STAT_OFS(cnt_tx_c)},
+
+ {"lreErrWrongLanA", PRUETH_LRE_STAT_OFS(cnt_errwronglan_a)},
+ {"lreErrWrongLanB", PRUETH_LRE_STAT_OFS(cnt_errwronglan_b)},
+ {"lreErrWrongLanC", PRUETH_LRE_STAT_OFS(cnt_errwronglan_c)},
+
+ {"lreRxA", PRUETH_LRE_STAT_OFS(cnt_rx_a)},
+ {"lreRxB", PRUETH_LRE_STAT_OFS(cnt_rx_b)},
+ {"lreRxC", PRUETH_LRE_STAT_OFS(cnt_rx_c)},
+
+ {"lreErrorsA", PRUETH_LRE_STAT_OFS(cnt_errors_a)},
+ {"lreErrorsB", PRUETH_LRE_STAT_OFS(cnt_errors_b)},
+ {"lreErrorsC", PRUETH_LRE_STAT_OFS(cnt_errors_c)},
+
+ {"lreNodes", PRUETH_LRE_STAT_OFS(cnt_nodes)},
+ {"lreProxyNodes", PRUETH_LRE_STAT_OFS(cnt_proxy_nodes)},
+
+ {"lreUniqueRxA", PRUETH_LRE_STAT_OFS(cnt_unique_rx_a)},
+ {"lreUniqueRxB", PRUETH_LRE_STAT_OFS(cnt_unique_rx_b)},
+ {"lreUniqueRxC", PRUETH_LRE_STAT_OFS(cnt_unique_rx_c)},
+
+ {"lreDuplicateRxA", PRUETH_LRE_STAT_OFS(cnt_duplicate_rx_a)},
+ {"lreDuplicateRxB", PRUETH_LRE_STAT_OFS(cnt_duplicate_rx_b)},
+ {"lreDuplicateRxC", PRUETH_LRE_STAT_OFS(cnt_duplicate_rx_c)},
+
+ {"lreMultiRxA", PRUETH_LRE_STAT_OFS(cnt_multiple_rx_a)},
+ {"lreMultiRxB", PRUETH_LRE_STAT_OFS(cnt_multiple_rx_b)},
+ {"lreMultiRxC", PRUETH_LRE_STAT_OFS(cnt_multiple_rx_c)},
+
+ {"lreOwnRxA", PRUETH_LRE_STAT_OFS(cnt_own_rx_a)},
+ {"lreOwnRxB", PRUETH_LRE_STAT_OFS(cnt_own_rx_b)},
+
+ {"lreDuplicateDiscard", PRUETH_LRE_STAT_OFS(duplicate_discard)},
+ {"lreTransRecept", PRUETH_LRE_STAT_OFS(transparent_reception)},
+
+ {"lreNtLookupErrA", PRUETH_LRE_STAT_OFS(node_table_lookup_error_a)},
+ {"lreNtLookupErrB", PRUETH_LRE_STAT_OFS(node_table_lookup_error_b)},
+ {"lreNodeTableFull", PRUETH_LRE_STAT_OFS(node_table_full)},
+ {"lreMulticastDropped", PRUETH_LRE_STAT_OFS(lre_multicast_dropped)},
+ {"lreVlanDropped", PRUETH_LRE_STAT_OFS(lre_vlan_dropped)},
+ {"lrePaceTimerExpired", PRUETH_LRE_STAT_OFS(lre_intr_tmr_exp)},
+ {"lreTotalRxA", PRUETH_LRE_STAT_OFS(lre_total_rx_a)},
+ {"lreTotalRxB", PRUETH_LRE_STAT_OFS(lre_total_rx_b)},
+ {"lreOverflowPru0", PRUETH_LRE_STAT_OFS(lre_overflow_pru0)},
+ {"lreOverflowPru1", PRUETH_LRE_STAT_OFS(lre_overflow_pru1)},
+ {"lreDDCountPru0", PRUETH_LRE_STAT_OFS(lre_cnt_dd_pru0)},
+ {"lreDDCountPru1", PRUETH_LRE_STAT_OFS(lre_cnt_dd_pru1)},
+ {"lreCntSupPru0", PRUETH_LRE_STAT_OFS(lre_cnt_sup_pru0)},
+ {"lreCntSupPru1", PRUETH_LRE_STAT_OFS(lre_cnt_sup_pru1)},
+};
+
+void prueth_lre_set_stats(struct prueth *prueth,
+ struct lre_statistics *pstats)
+{
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ if (prueth->emac_configured)
+ return;
+
+	/* these two fields are configuration, not statistics; read back
+	 * the live values so the write below preserves them
+	 */
+ pstats->duplicate_discard = readl(sram + ICSS_LRE_DUPLICATE_DISCARD);
+ pstats->transparent_reception =
+ readl(sram + ICSS_LRE_TRANSPARENT_RECEPTION);
+	memcpy_toio(sram + ICSS_LRE_START + 4, pstats, sizeof(*pstats));
+}
+
+void prueth_lre_get_stats(struct prueth *prueth,
+ struct lre_statistics *pstats)
+{
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ memcpy_fromio(pstats, sram + ICSS_LRE_CNT_TX_A, sizeof(*pstats));
+}
+
+int prueth_lre_get_sset_count(struct prueth *prueth)
+{
+ if (!PRUETH_IS_LRE(prueth))
+ return 0;
+
+ return ARRAY_SIZE(prueth_ethtool_lre_stats);
+}
+
+void prueth_lre_get_strings(struct prueth *prueth, u8 *data)
+{
+ int i;
+
+ if (!PRUETH_IS_LRE(prueth))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(prueth_ethtool_lre_stats); i++) {
+ memcpy(data, prueth_ethtool_lre_stats[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+void prueth_lre_update_stats(struct prueth *prueth, u64 *data)
+{
+ struct lre_statistics lre_stats;
+ void *ptr;
+ u32 val;
+ int i;
+
+ if (!PRUETH_IS_LRE(prueth))
+ return;
+
+ prueth_lre_get_stats(prueth, &lre_stats);
+ for (i = 0; i < ARRAY_SIZE(prueth_ethtool_lre_stats); i++) {
+ ptr = &lre_stats;
+ ptr += prueth_ethtool_lre_stats[i].offset;
+ val = *(u32 *)ptr;
+ data[i] = val;
+ }
+}
+
+static int prueth_lre_attr_get(struct net_device *ndev,
+ struct lredev_attr *attr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *dram0 = prueth->mem[PRUETH_MEM_DRAM0].va;
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+ int ret = 0;
+
+ netdev_dbg(ndev, "%d:%s, id %d\n", __LINE__, __func__, attr->id);
+
+ switch (attr->id) {
+ case LREDEV_ATTR_ID_HSR_MODE:
+ if (!PRUETH_IS_HSR(prueth))
+ return -EPERM;
+ attr->mode = readl(dram0 + ICSS_LRE_HSR_MODE);
+ break;
+ case LREDEV_ATTR_ID_DD_MODE:
+ attr->dd_mode = readl(sram + ICSS_LRE_DUPLICATE_DISCARD);
+ break;
+ case LREDEV_ATTR_ID_PRP_TR:
+ if (!PRUETH_IS_PRP(prueth))
+ return -EINVAL;
+ attr->tr_mode = prueth->prp_tr_mode;
+ break;
+ case LREDEV_ATTR_ID_DLRMT:
+ attr->dl_reside_max_time =
+ readl(dram1 + ICSS_LRE_DUPLI_FORGET_TIME) * 10;
+ break;
+ case LREDEV_ATTR_ID_CLEAR_NT:
+ attr->clear_nt_cmd = prueth->node_table_clear_last_cmd;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int prueth_lre_attr_set(struct net_device *ndev,
+ struct lredev_attr *attr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *dram0 = prueth->mem[PRUETH_MEM_DRAM0].va;
+ void __iomem *dram1 = prueth->mem[PRUETH_MEM_DRAM1].va;
+ int ret = 0;
+
+ netdev_dbg(ndev, "%d:%s, id = %d\n", __LINE__, __func__, attr->id);
+
+ switch (attr->id) {
+ case LREDEV_ATTR_ID_HSR_MODE:
+ if (!PRUETH_IS_HSR(prueth))
+ return -EPERM;
+ prueth->hsr_mode = attr->mode;
+ writel(prueth->hsr_mode, dram0 + ICSS_LRE_HSR_MODE);
+ break;
+ case LREDEV_ATTR_ID_DD_MODE:
+ writel(attr->dd_mode, sram + ICSS_LRE_DUPLICATE_DISCARD);
+ break;
+ case LREDEV_ATTR_ID_PRP_TR:
+ if (!PRUETH_IS_PRP(prueth))
+ return -EINVAL;
+ prueth->prp_tr_mode = attr->tr_mode;
+ break;
+ case LREDEV_ATTR_ID_DLRMT:
+		/* the input is in milliseconds; the firmware expects
+		 * units of 10 ms
+		 */
+ writel((attr->dl_reside_max_time / 10),
+ dram1 + ICSS_LRE_DUPLI_FORGET_TIME);
+ break;
+ case LREDEV_ATTR_ID_CLEAR_NT:
+		/* the corresponding get command must return the last
+		 * command received, so save it
+		 */
+ prueth->node_table_clear_last_cmd = attr->clear_nt_cmd;
+ if (attr->clear_nt_cmd == IEC62439_3_CLEAR_NT)
+ prueth->node_table_clear = 1;
+ else
+ prueth->node_table_clear = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int emac_lredev_update_node_entry(struct node_tbl_t *node,
+ struct lre_node_table_entry table[],
+ int j)
+{
+ u8 val, is_hsr, updated = 1;
+
+ table[j].time_last_seen_a = node->time_last_seen_a;
+ table[j].time_last_seen_b = node->time_last_seen_b;
+
+ is_hsr = node->status & ICSS_LRE_NT_REM_NODE_HSR_BIT;
+ val = (node->status & ICSS_LRE_NT_REM_NODE_TYPE_MASK) >>
+ ICSS_LRE_NT_REM_NODE_TYPE_SHIFT;
+ switch (val) {
+ case ICSS_LRE_NT_REM_NODE_TYPE_DAN:
+ if (is_hsr)
+ table[j].node_type = IEC62439_3_DANH;
+ else
+ table[j].node_type = IEC62439_3_DANP;
+ break;
+
+ case ICSS_LRE_NT_REM_NODE_TYPE_REDBOX:
+ if (is_hsr)
+ table[j].node_type = IEC62439_3_REDBOXH;
+ else
+ table[j].node_type = IEC62439_3_REDBOXP;
+ break;
+
+ case ICSS_LRE_NT_REM_NODE_TYPE_VDAN:
+ if (is_hsr)
+ table[j].node_type = IEC62439_3_VDANH;
+ else
+ table[j].node_type = IEC62439_3_VDANP;
+ break;
+ default:
+ updated = 0;
+ break;
+ }
+
+ return updated;
+}
+
+static int prueth_lre_get_node_table(struct net_device *ndev,
+ struct lre_node_table_entry table[],
+ int size)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct node_tbl *nt = prueth->nt;
+ struct bin_tbl_t *bin;
+ struct node_tbl_t *node;
+ int i, j = 0, updated;
+ unsigned long flags;
+
+ netdev_dbg(ndev, "%d:%s\n", __LINE__, __func__);
+
+ if (size < nt->nt_lre_cnt->lre_cnt)
+ netdev_warn(ndev,
+ "actual table size %d is < required size %d\n",
+ size, nt->nt_lre_cnt->lre_cnt);
+
+ spin_lock_irqsave(&prueth->nt_lock, flags);
+ for (i = 0; i < nt->bin_array_max_entries; i++) {
+ if (nt->bin_array->bin_tbl[i].node_tbl_offset <
+ nt->nt_array_max_entries) {
+ bin = &nt->bin_array->bin_tbl[i];
+ if (WARN_ON(bin->node_tbl_offset >=
+ nt->nt_array_max_entries))
+ continue;
+ node = &nt->nt_array->node_tbl[bin->node_tbl_offset];
+
+ if (!(node->entry_state & 0x1))
+ continue;
+
+ updated = emac_lredev_update_node_entry(node, table, j);
+ if (updated) {
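+				/* undo the PRU word byte order,
+				 * cf. pru2host_mac()
+				 */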
+ table[j].mac_address[0] = bin->src_mac_id[3];
+ table[j].mac_address[1] = bin->src_mac_id[2];
+ table[j].mac_address[2] = bin->src_mac_id[1];
+ table[j].mac_address[3] = bin->src_mac_id[0];
+ table[j].mac_address[4] = bin->src_mac_id[5];
+ table[j].mac_address[5] = bin->src_mac_id[4];
+ j++;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&prueth->nt_lock, flags);
+
+ return j;
+}
+
+static int prueth_lre_get_lre_stats(struct net_device *ndev,
+ struct lre_stats *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ void __iomem *sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ memcpy_fromio(stats, sram + ICSS_LRE_CNT_TX_A, sizeof(*stats));
+
+ return 0;
+}
+
+static int prueth_lre_set_sv_vlan_id(struct net_device *ndev, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ if (!PRUETH_IS_LRE(prueth))
+ return 0;
+
+ return emac_add_del_vid(emac, true, htons(ETH_P_8021Q), vid);
+}
+
+const struct lredev_ops prueth_lredev_ops = {
+ .lredev_attr_get = prueth_lre_attr_get,
+ .lredev_attr_set = prueth_lre_attr_set,
+ .lredev_get_node_table = prueth_lre_get_node_table,
+ .lredev_get_stats = prueth_lre_get_lre_stats,
+ .lredev_set_sv_vlan_id = prueth_lre_set_sv_vlan_id,
+};
+
+int prueth_lre_init_node_table(struct prueth *prueth)
+{
+ /* HSR/PRP: initialize node table when first port is up */
+ if (prueth->emac_configured)
+ return 0;
+
+ /* initialize for node table handling in driver for HSR/PRP */
+ prueth->mac_queue = kmalloc(sizeof(*prueth->mac_queue), GFP_KERNEL);
+ prueth->nt = kmalloc(sizeof(*prueth->nt), GFP_KERNEL);
+ if (!prueth->mac_queue || !prueth->nt) {
+ kfree(prueth->mac_queue);
+ kfree(prueth->nt);
+ prueth->mac_queue = NULL;
+ prueth->nt = NULL;
+ return -ENOMEM;
+ }
+
+ _prueth_lre_init_node_table(prueth);
+ spin_lock_init(&prueth->nt_lock);
+ kthread_init_work(&prueth->nt_work, nt_updater);
+	prueth->nt_kworker = kthread_create_worker(0, "prueth_nt");
+	if (IS_ERR(prueth->nt_kworker)) {
+		int ret = PTR_ERR(prueth->nt_kworker);
+
+		prueth->nt_kworker = NULL;
+		prueth_lre_free_memory(prueth);
+		return ret;
+	}
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/prueth_lre.h b/drivers/net/ethernet/ti/prueth_lre.h
new file mode 100644
index 000000000000..982298b77489
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_lre.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __NET_TI_PRUETH_LRE_H
+#define __NET_TI_PRUETH_LRE_H
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
+
+#include "prueth.h"
+#include "icss_lre_firmware.h"
+
+#define PRUETH_MAX_PKTLEN_LRE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN + \
+ ICSS_LRE_TAG_RCT_SIZE)
+#define PRUETH_MAC_QUEUE_MAX_SHIFT 6
+#define PRUETH_MAC_QUEUE_MAX BIT(PRUETH_MAC_QUEUE_MAX_SHIFT)
+#define PRUETH_LRE_INDEX_TBL_MAX_ENTRIES 256
+#define PRUETH_LRE_BIN_TBL_MAX_ENTRIES 256
+#define PRUETH_LRE_NODE_TBL_MAX_ENTRIES 256
+#define LRE_PROTO_HSR 0
+#define LRE_PROTO_PRP 1
+#define LRE_OK 0
+#define LRE_ERR -1
+#define LRE_SV_FRAME_OFFSET 20
+
+/* Link Redundancy Entity stats counters */
+struct lre_statistics {
+ u32 cnt_tx_a;
+ u32 cnt_tx_b;
+ u32 cnt_tx_c;
+
+ u32 cnt_errwronglan_a;
+ u32 cnt_errwronglan_b;
+ u32 cnt_errwronglan_c;
+
+ u32 cnt_rx_a;
+ u32 cnt_rx_b;
+ u32 cnt_rx_c;
+
+ u32 cnt_errors_a;
+ u32 cnt_errors_b;
+ u32 cnt_errors_c;
+
+ u32 cnt_nodes;
+ u32 cnt_proxy_nodes;
+
+ u32 cnt_unique_rx_a;
+ u32 cnt_unique_rx_b;
+ u32 cnt_unique_rx_c;
+
+ u32 cnt_duplicate_rx_a;
+ u32 cnt_duplicate_rx_b;
+ u32 cnt_duplicate_rx_c;
+
+ u32 cnt_multiple_rx_a;
+ u32 cnt_multiple_rx_b;
+ u32 cnt_multiple_rx_c;
+
+ u32 cnt_own_rx_a;
+ u32 cnt_own_rx_b;
+
+ u32 duplicate_discard;
+ u32 transparent_reception;
+
+ u32 node_table_lookup_error_a;
+ u32 node_table_lookup_error_b;
+ u32 node_table_full;
+ u32 lre_multicast_dropped;
+ u32 lre_vlan_dropped;
+ u32 lre_intr_tmr_exp;
+
+ /* additional debug counters */
+ u32 lre_total_rx_a; /* count of all frames received at port-A */
+ u32 lre_total_rx_b; /* count of all frames received at port-B */
+ u32 lre_overflow_pru0; /* count of overflow frames to host on PRU 0 */
+ u32 lre_overflow_pru1; /* count of overflow frames to host on PRU 1 */
+ u32 lre_cnt_dd_pru0; /* count of DD frames to host on PRU 0 */
+ u32 lre_cnt_dd_pru1; /* count of DD frames to host on PRU 1 */
+ u32 lre_cnt_sup_pru0; /* count of supervisor frames to host on PRU 0 */
+ u32 lre_cnt_sup_pru1; /* count of supervisor frames to host on PRU 1 */
+} __packed;
+
+/* node table info */
+struct prueth_lre_node {
+ u8 mac[6];
+ u8 state;
+ u8 status;
+
+ u32 cnt_rx_a;
+ u32 cnt_rx_b;
+
+ u32 prp_lid_err_a;
+ u32 prp_lid_err_b;
+
+ u8 cnt_rx_sup_a;
+ u8 cnt_rx_sup_b;
+ u16 time_last_seen_sup;
+
+ u16 time_last_seen_a;
+ u16 time_last_seen_b;
+} __packed;
+
+/* NT queue definitions */
+struct nt_queue_entry {
+ u8 mac[ETH_ALEN];
+ unsigned int sv_frame:1;
+ unsigned int proto:1;
+ int port_id:6;
+};
+
+struct nt_queue_t {
+ struct nt_queue_entry nt_queue[PRUETH_MAC_QUEUE_MAX];
+ int rd_ind;
+ int wr_ind;
+ bool full;
+};
+
+struct node_index_tbl_t {
+ u16 bin_offset;
+ u16 bin_no_entries;
+ u8 lin_bin; /* 0 - linear; 1 - binary; */
+ u8 res1;
+} __packed;
+
+struct bin_tbl_t {
+ u8 src_mac_id[ETH_ALEN];
+ u16 node_tbl_offset;
+} __packed;
+
+struct node_tbl_t {
+ u8 mac[ETH_ALEN];
+ u8 entry_state;
+ u8 status;
+ u32 cnt_ra;
+ u32 cnt_rb;
+ u32 err_wla;
+ u32 err_wlb;
+ u8 cnt_rx_sup_a;
+ u8 cnt_rx_sup_b;
+ u16 time_last_seen_s;
+ u16 time_last_seen_a;
+ u16 time_last_seen_b;
+} __packed;
+
+struct node_tbl_lre_cnt_t {
+ u16 lre_cnt;
+} __packed;
+
+struct node_tbl_info_t {
+ u32 next_free_slot;
+ u8 arm_lock;
+ u8 res;
+	u16 fw_lock;	/* the firmware uses this field as two independent
+			 * bytes: the first byte for PRU0, the second
+			 * for PRU1
+			 */
+} __packed;
+
+struct nt_array_t {
+ struct node_tbl_t node_tbl[PRUETH_LRE_NODE_TBL_MAX_ENTRIES];
+} __packed;
+struct index_array_t {
+ struct node_index_tbl_t index_tbl[PRUETH_LRE_INDEX_TBL_MAX_ENTRIES];
+} __packed;
+struct bin_array_t {
+ struct bin_tbl_t bin_tbl[PRUETH_LRE_BIN_TBL_MAX_ENTRIES];
+} __packed;
+
+struct node_tbl {
+ struct bin_array_t *bin_array;
+ struct index_array_t *index_array;
+ struct nt_array_t *nt_array;
+ struct node_tbl_info_t *nt_info;
+ struct node_tbl_lre_cnt_t *nt_lre_cnt;
+ u32 index_array_max_entries;
+ u32 bin_array_max_entries;
+ u32 nt_array_max_entries;
+ u16 hash_mask;
+};
+
+void prueth_lre_config(struct prueth *prueth);
+void prueth_lre_cleanup(struct prueth *prueth);
+int prueth_lre_init_node_table(struct prueth *prueth);
+int prueth_lre_request_irqs(struct prueth_emac *emac);
+void prueth_lre_free_irqs(struct prueth_emac *emac);
+int prueth_lre_get_sset_count(struct prueth *prueth);
+void prueth_lre_get_strings(struct prueth *prueth, u8 *data);
+void prueth_lre_update_stats(struct prueth *prueth, u64 *data);
+void prueth_lre_set_stats(struct prueth *prueth,
+ struct lre_statistics *pstats);
+void prueth_lre_get_stats(struct prueth *prueth,
+ struct lre_statistics *pstats);
+void prueth_lre_config_check_flags(struct prueth *prueth);
+void prueth_lre_free_memory(struct prueth *prueth);
+int prueth_lre_nt_insert(struct prueth *prueth,
+ u8 *mac, int port, int sv_frame, int proto);
+
+extern const struct lredev_ops prueth_lredev_ops;
+
+#endif /* __NET_TI_PRUETH_LRE_H */
diff --git a/drivers/net/ethernet/ti/prueth_ptp.h b/drivers/net/ethernet/ti/prueth_ptp.h
new file mode 100644
index 000000000000..d3cb8cceee4a
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_ptp.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#ifndef PRUETH_PTP_H
+#define PRUETH_PTP_H
+
+#define RX_SYNC_TIMESTAMP_OFFSET_P1 0x8 /* 8 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x14 /* 12 bytes */
+
+#define DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET 0x14 /* 1 byte */
+
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x20 /* 12 bytes */
+#define RX_SYNC_TIMESTAMP_OFFSET_P2 0x2c /* 12 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0x38 /* 12 bytes */
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0x44 /* 12 bytes */
+#define TIMESYNC_DOMAIN_NUMBER_LIST 0x50 /* 2 bytes */
+#define P1_SMA_LINE_DELAY_OFFSET 0x52 /* 4 bytes */
+#define P2_SMA_LINE_DELAY_OFFSET 0x56 /* 4 bytes */
+#define TIMESYNC_SECONDS_COUNT_OFFSET 0x5a /* 6 bytes */
+#define TIMESYNC_TC_RCF_OFFSET 0x60 /* 4 bytes */
+#define DUT_IS_MASTER_OFFSET 0x64 /* 1 byte */
+#define MASTER_PORT_NUM_OFFSET 0x65 /* 1 byte */
+#define SYNC_MASTER_MAC_OFFSET 0x66 /* 6 bytes */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P1 0x6c /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P1 0x6d /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P1 0x6e /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P2 0x6f /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P2 0x70 /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 0x71 /* 1 byte */
+#define TX_SYNC_TIMESTAMP_OFFSET_P1 0x72 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x7e /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x8a /* 12 bytes */
+#define TX_SYNC_TIMESTAMP_OFFSET_P2 0x96 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0xa2 /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0xae /* 12 bytes */
+#define TIMESYNC_CTRL_VAR_OFFSET 0xba /* 1 byte */
+#define DISABLE_SWITCH_SYNC_RELAY_OFFSET 0xbb /* 1 byte */
+#define MII_RX_CORRECTION_OFFSET 0xbc /* 2 bytes */
+#define MII_TX_CORRECTION_OFFSET 0xbe /* 2 bytes */
+#define TIMESYNC_CMP1_CMP_OFFSET 0xc0 /* 8 bytes */
+#define TIMESYNC_SYNC0_CMP_OFFSET 0xc8 /* 8 bytes */
+#define TIMESYNC_CMP1_PERIOD_OFFSET 0xd0 /* 4 bytes */
+#define TIMESYNC_SYNC0_WIDTH_OFFSET 0xd4 /* 4 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P1 0xd8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P1 0xe0 /* 8 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P2 0xe8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P2 0xf0 /* 8 bytes */
+#define LINK_LOCAL_FRAME_HAS_HSR_TAG 0xf8 /* 1 byte */
+#define PTP_PREV_TX_TIMESTAMP_P1 0xf9 /* 8 bytes */
+#define PTP_PREV_TX_TIMESTAMP_P2 0x101 /* 8 bytes */
+#define PTP_CLK_IDENTITY_OFFSET 0x109 /* 8 bytes */
+#define PTP_SCRATCH_MEM 0x111 /* 16 bytes */
+#define PTP_IPV4_UDP_E2E_ENABLE 0x121 /* 1 byte */
+
+enum {
+ PRUETH_PTP_SYNC,
+ PRUETH_PTP_DLY_REQ,
+ PRUETH_PTP_DLY_RESP,
+ PRUETH_PTP_TS_EVENTS,
+};
+
+#define PRUETH_PTP_TS_SIZE 12
+#define PRUETH_PTP_TS_NOTIFY_SIZE 1
+#define PRUETH_PTP_TS_NOTIFY_MASK 0xff
+
+/* Bit definitions for TIMESYNC_CTRL */
+#define TIMESYNC_CTRL_BG_ENABLE BIT(0)
+#define TIMESYNC_CTRL_FORCED_2STEP BIT(1)
+
+static inline u32 prueth_tx_ts_offs_get(u8 port, u8 event)
+{
+ return TX_SYNC_TIMESTAMP_OFFSET_P1 + port *
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_SIZE +
+ event * PRUETH_PTP_TS_SIZE;
+}
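+
+/* Sanity check of the offset arithmetic: port 1, event
+ * PRUETH_PTP_DLY_REQ yields 0x72 + 1 * 3 * 12 + 1 * 12 = 0xa2, which
+ * matches TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 above.
+ */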
+
+static inline u32 prueth_tx_ts_notify_offs_get(u8 port, u8 event)
+{
+ return TX_TS_NOTIFICATION_OFFSET_SYNC_P1 +
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_NOTIFY_SIZE * port +
+ event * PRUETH_PTP_TS_NOTIFY_SIZE;
+}
+
+#endif /* PRUETH_PTP_H */
diff --git a/drivers/net/ethernet/ti/prueth_qos.c b/drivers/net/ethernet/ti/prueth_qos.c
new file mode 100644
index 000000000000..008a7cbdf005
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_qos.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+#include <linux/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <net/pkt_cls.h>
+
+#include "icss_mii_rt.h"
+#include "icss_vlan_mcast_filter_mmap.h"
+#include "prueth.h"
+
+static void emac_nsp_enable(void __iomem *counter, u16 credit)
+{
+ writel((credit << PRUETH_NSP_CREDIT_SHIFT) | PRUETH_NSP_ENABLE,
+ counter);
+}
+
+static void prueth_enable_nsp(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *dram = prueth->mem[emac->dram].va;
+
+ if (emac->nsp_bc.cookie)
+ emac_nsp_enable(dram + STORM_PREVENTION_OFFSET_BC,
+ emac->nsp_bc.credit);
+ if (emac->nsp_mc.cookie)
+ emac_nsp_enable(dram + STORM_PREVENTION_OFFSET_MC,
+ emac->nsp_mc.credit);
+ if (emac->nsp_uc.cookie)
+ emac_nsp_enable(dram + STORM_PREVENTION_OFFSET_UC,
+ emac->nsp_uc.credit);
+}
+
+static int emac_flower_parse_policer(struct prueth_emac *emac,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_bytes_per_sec)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ u8 null_mac[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ struct nsp_counter *nsp = NULL;
+ char *str;
+ u32 pps;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null_mac,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(match.key->dst, bc_mac)) {
+ if (!emac->nsp_bc.cookie ||
+ emac->nsp_bc.cookie == cls->cookie)
+ nsp = &emac->nsp_bc;
+ else
+ NL_SET_ERR_MSG_MOD(extack, "BC Filter already set");
+ str = "Broad";
+ } else if (ether_addr_equal_masked(match.key->dst, mc_mac, mc_mac)) {
+ if (!emac->nsp_mc.cookie ||
+ emac->nsp_mc.cookie == cls->cookie)
+ nsp = &emac->nsp_mc;
+ else
+ NL_SET_ERR_MSG_MOD(extack, "MC Filter already set");
+ str = "Multi";
+ } else {
+ if (!emac->nsp_uc.cookie ||
+ emac->nsp_uc.cookie == cls->cookie)
+ nsp = &emac->nsp_uc;
+ else
+ NL_SET_ERR_MSG_MOD(extack, "UC Filter already set");
+ str = "Uni";
+ }
+
+ if (!nsp)
+ return -EOPNOTSUPP;
+
+ /* Calculate number of packets per second for given bps
+ * assuming min ethernet packet size
+ */
+ pps = div_u64(rate_bytes_per_sec, ETH_ZLEN);
+ /* Convert that to packets per 100ms */
+ pps /= MSEC_PER_SEC / PRUETH_NSP_TIMER_MS;
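+ /* e.g. a 10 Mbit/s policer: 1250000 B/s / 60 B (ETH_ZLEN) is
+ * ~20833 pps, i.e. ~2083 credits per 100 ms window
+ */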
+
+ nsp->cookie = cls->cookie;
+ nsp->credit = pps;
+ emac->nsp_enabled = emac->nsp_bc.cookie | emac->nsp_mc.cookie |
+ emac->nsp_uc.cookie;
+
+ prueth_enable_nsp(emac);
+
+ netdev_dbg(emac->ndev,
+ "%scast filter set to %d packets per %dms\n", str,
+ nsp->credit, PRUETH_NSP_TIMER_MS);
+
+ return 0;
+}
+
+static int emac_configure_clsflower(struct prueth_emac *emac,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return emac_flower_parse_policer(emac, extack, cls,
+ act->police.rate_bytes_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int emac_delete_clsflower(struct prueth_emac *emac,
+ struct flow_cls_offload *cls)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *dram = prueth->mem[emac->dram].va;
+
+ if (cls->cookie == emac->nsp_bc.cookie) {
+ emac->nsp_bc.cookie = 0;
+ emac->nsp_bc.credit = 0;
+ writel(0, dram + STORM_PREVENTION_OFFSET_BC);
+ } else if (cls->cookie == emac->nsp_mc.cookie) {
+ emac->nsp_mc.cookie = 0;
+ emac->nsp_mc.credit = 0;
+ writel(0, dram + STORM_PREVENTION_OFFSET_MC);
+ } else if (cls->cookie == emac->nsp_uc.cookie) {
+ emac->nsp_uc.cookie = 0;
+ emac->nsp_uc.credit = 0;
+ writel(0, dram + STORM_PREVENTION_OFFSET_UC);
+ }
+
+ emac->nsp_enabled = emac->nsp_bc.cookie | emac->nsp_mc.cookie |
+ emac->nsp_uc.cookie;
+
+ return 0;
+}
+
+static int emac_setup_tc_cls_flower(struct prueth_emac *emac,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return emac_configure_clsflower(emac, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return emac_delete_clsflower(emac, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int emac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct prueth_emac *emac = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(emac->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return emac_setup_tc_cls_flower(emac, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(emac_block_cb_list);
+
+int emac_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+
+ if (type == TC_SETUP_BLOCK) {
+ return flow_block_cb_setup_simple(type_data,
+ &emac_block_cb_list,
+ emac_setup_tc_block_cb,
+ emac, emac, true);
+ }
+
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/ti/prueth_switch.c b/drivers/net/ethernet/ti/prueth_switch.c
new file mode 100644
index 000000000000..b8436cc23f4e
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_switch.c
@@ -0,0 +1,1341 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments PRUETH Switch Driver
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include <net/switchdev.h>
+#include "prueth.h"
+#include "prueth_switch.h"
+#include "prueth_fdb_tbl.h"
+
+#define FDB_IDX_TBL() \
+ (&prueth->fdb_tbl->index_a->index_tbl_entry[0])
+
+#define FDB_IDX_TBL_ENTRY(n) \
+ (&prueth->fdb_tbl->index_a->index_tbl_entry[n])
+
+#define FDB_MAC_TBL() \
+ (&prueth->fdb_tbl->mac_tbl_a->mac_tbl_entry[0])
+
+#define FDB_MAC_TBL_ENTRY(n) \
+ (&prueth->fdb_tbl->mac_tbl_a->mac_tbl_entry[n])
+
+#define FDB_LEARN 1
+#define FDB_DELETE 2
+#define FDB_PURGE 3
+
+struct prueth_sw_fdb_work {
+ struct work_struct work;
+ struct prueth_emac *emac;
+ u8 addr[ETH_ALEN];
+ int event;
+};
+
+static inline
+u8 prueth_sw_port_get_stp_state(struct prueth *prueth, enum prueth_port port)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+ u8 state;
+
+ state = readb(port - 1 ?
+ &t->port2_stp_cfg->state : &t->port1_stp_cfg->state);
+ return state;
+}
+
+const struct prueth_queue_info sw_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ P0_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET +
+ ((QUEUE_1_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET +
+ ((QUEUE_2_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET +
+ ((QUEUE_3_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET +
+ ((QUEUE_4_SIZE - 1) * ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_info rx_queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET,
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 8,
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 16,
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_QUEUE_DESC_OFFSET + 24,
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET,
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 8,
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 16,
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_QUEUE_DESC_OFFSET + 24,
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_col_rx_context_info col_rx_context_infos[] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ P0_COL_BUFFER_OFFSET,
+ P0_COL_BUFFER_OFFSET,
+ P0_COL_QUEUE_DESC_OFFSET,
+ END_OF_BD_POOL,
+ END_OF_BD_POOL + ((COL_QUEUE_SIZE - 1) * BD_SIZE)
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_QUEUE_DESC_OFFSET + 8,
+ END_OF_BD_POOL,
+ END_OF_BD_POOL + ((COL_QUEUE_SIZE - 1) * BD_SIZE)
+ },
+
+ [PRUETH_PORT_QUEUE_MII1] = {
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_QUEUE_DESC_OFFSET + 16,
+ END_OF_BD_POOL,
+ END_OF_BD_POOL + ((COL_QUEUE_SIZE - 1) * BD_SIZE)
+ },
+};
+
+static const struct prueth_col_tx_context_info col_tx_context_infos[] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ P0_COL_BUFFER_OFFSET,
+ P0_COL_BUFFER_OFFSET,
+ P0_COL_BUFFER_OFFSET + ((COL_QUEUE_SIZE - 1) * ICSS_BLOCK_SIZE),
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + ((COL_QUEUE_SIZE - 1) * ICSS_BLOCK_SIZE),
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + (COL_QUEUE_SIZE * ICSS_BLOCK_SIZE),
+ P0_COL_BUFFER_OFFSET + ((COL_QUEUE_SIZE - 1) * ICSS_BLOCK_SIZE),
+ }
+};
+
+static const struct prueth_queue_desc col_queue_descs[3] = {
+ [PRUETH_PORT_QUEUE_MII0] = {
+ .rd_ptr = END_OF_BD_POOL, .wr_ptr = END_OF_BD_POOL, },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ .rd_ptr = END_OF_BD_POOL, .wr_ptr = END_OF_BD_POOL, }
+};
+
+void prueth_sw_free_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return;
+
+ kfree(prueth->fdb_tbl);
+ prueth->fdb_tbl = NULL;
+}
+
+void prueth_sw_hostconfig(struct prueth *prueth)
+{
+ void __iomem *dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+ void __iomem *dram;
+
+ /* queue information table */
+ dram = dram1_base + P0_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(dram, sw_queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(sw_queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ dram = dram1_base + COL_RX_CONTEXT_P0_OFFSET_ADDR;
+ memcpy_toio(dram, &col_rx_context_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(col_rx_context_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer descriptor offset table */
+ dram = dram1_base + QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, dram);
+ writew(P0_Q2_BD_OFFSET, dram + 2);
+ writew(P0_Q3_BD_OFFSET, dram + 4);
+ writew(P0_Q4_BD_OFFSET, dram + 6);
+
+ /* buffer offset table */
+ dram = dram1_base + QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, dram);
+ writew(P0_Q2_BUFFER_OFFSET, dram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, dram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, dram + 6);
+
+ /* queue size lookup table */
+ dram = dram1_base + QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, dram);
+ writew(HOST_QUEUE_2_SIZE, dram + 2);
+ writew(HOST_QUEUE_3_SIZE, dram + 4);
+ writew(HOST_QUEUE_4_SIZE, dram + 6);
+
+ /* queue table */
+ dram = dram1_base + P0_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
+static int prueth_sw_port_config(struct prueth *prueth,
+ enum prueth_port port_id)
+{
+ unsigned int tx_context_ofs_addr, col_tx_context_ofs_addr,
+ rx_context_ofs, col_rx_context_ofs_addr,
+ queue_desc_ofs, col_queue_desc_ofs;
+ void __iomem *dram, *dram_base, *dram_mac;
+ struct prueth_emac *emac;
+ void __iomem *dram1_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ emac = prueth->emac[port_id - 1];
+ switch (port_id) {
+ case PRUETH_PORT_MII0:
+ tx_context_ofs_addr = TX_CONTEXT_P1_Q1_OFFSET_ADDR;
+ col_tx_context_ofs_addr = COL_TX_CONTEXT_P1_Q1_OFFSET_ADDR;
+ rx_context_ofs = P1_Q1_RX_CONTEXT_OFFSET;
+ col_rx_context_ofs_addr = COL_RX_CONTEXT_P1_OFFSET_ADDR;
+ queue_desc_ofs = P1_QUEUE_DESC_OFFSET;
+ col_queue_desc_ofs = P1_COL_QUEUE_DESC_OFFSET;
+
+ /* For switch port MII0, the MAC address is in DRAM0. */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM0].va;
+ break;
+ case PRUETH_PORT_MII1:
+ tx_context_ofs_addr = TX_CONTEXT_P2_Q1_OFFSET_ADDR;
+ col_tx_context_ofs_addr = COL_TX_CONTEXT_P2_Q1_OFFSET_ADDR;
+ rx_context_ofs = P2_Q1_RX_CONTEXT_OFFSET;
+ col_rx_context_ofs_addr = COL_RX_CONTEXT_P2_OFFSET_ADDR;
+ queue_desc_ofs = P2_QUEUE_DESC_OFFSET;
+ col_queue_desc_ofs = P2_COL_QUEUE_DESC_OFFSET;
+
+ /* For switch port MII1, the MAC address is in DRAM1. */
+ dram_mac = prueth->mem[PRUETH_MEM_DRAM1].va;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ /* setup mac address */
+ memcpy_toio(dram_mac + PORT_MAC_ADDR, emac->mac_addr, 6);
+
+ /* Remaining switch port configs are in DRAM1 */
+ dram_base = prueth->mem[PRUETH_MEM_DRAM1].va;
+
+ /* queue information table */
+ memcpy_toio(dram_base + tx_context_ofs_addr,
+ sw_queue_infos[port_id],
+ sizeof(sw_queue_infos[port_id]));
+
+ memcpy_toio(dram_base + col_tx_context_ofs_addr,
+ &col_tx_context_infos[port_id],
+ sizeof(col_tx_context_infos[port_id]));
+
+ memcpy_toio(dram_base + rx_context_ofs,
+ rx_queue_infos[port_id],
+ sizeof(rx_queue_infos[port_id]));
+
+ memcpy_toio(dram_base + col_rx_context_ofs_addr,
+ &col_rx_context_infos[port_id],
+ sizeof(col_rx_context_infos[port_id]));
+
+ /* buffer descriptor offset table */
+ dram = dram_base + QUEUE_DESCRIPTOR_OFFSET_ADDR +
+ (port_id * NUM_QUEUES * sizeof(u16));
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_desc_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_desc_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_desc_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_desc_offset,
+ dram + 6);
+
+ /* buffer offset table */
+ dram = dram_base + QUEUE_OFFSET_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE1].buffer_offset, dram);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE2].buffer_offset,
+ dram + 2);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE3].buffer_offset,
+ dram + 4);
+ writew(sw_queue_infos[port_id][PRUETH_QUEUE4].buffer_offset,
+ dram + 6);
+
+ /* queue size lookup table */
+ dram = dram_base + QUEUE_SIZE_ADDR +
+ port_id * NUM_QUEUES * sizeof(u16);
+ writew(QUEUE_1_SIZE, dram);
+ writew(QUEUE_2_SIZE, dram + 2);
+ writew(QUEUE_3_SIZE, dram + 4);
+ writew(QUEUE_4_SIZE, dram + 6);
+
+ /* collision queue table */
+ memcpy_toio(dram_base + col_queue_desc_ofs,
+ &col_queue_descs[port_id],
+ sizeof(col_queue_descs[port_id]));
+
+ /* queue table */
+ memcpy_toio(dram_base + queue_desc_ofs,
+ &queue_descs[port_id][0],
+ 4 * sizeof(queue_descs[port_id][0]));
+
+ emac->rx_queue_descs = dram1_base + P0_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram1_base +
+ rx_queue_infos[port_id][PRUETH_QUEUE1].queue_desc_offset;
+
+ return 0;
+}
+
+int prueth_sw_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+
+ /* PRU needs local shared RAM address for C28 */
+ u32 sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+ /* PRU needs real global OCMC address for C30 */
+ u32 ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+ int ret;
+
+ if (prueth->emac_configured & BIT(emac->port_id))
+ return 0;
+
+ ret = prueth_sw_port_config(prueth, emac->port_id);
+ if (ret)
+ return ret;
+
+ if (!prueth->emac_configured) {
+ /* Set in constant table C28 of PRUn to ICSS Shared memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C28, sharedramaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRUn to OCMC memory */
+ pru_rproc_set_ctable(prueth->pru0, PRU_C30, ocmcaddr);
+ pru_rproc_set_ctable(prueth->pru1, PRU_C30, ocmcaddr);
+ }
+ return 0;
+}
+
+void prueth_sw_fdb_tbl_init(struct prueth *prueth)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+
+ t->index_a = prueth->mem[V2_1_FDB_TBL_LOC].va +
+ V2_1_FDB_TBL_OFFSET;
+
+ t->mac_tbl_a = (void __iomem *)t->index_a +
+ FDB_INDEX_TBL_MAX_ENTRIES *
+ sizeof(struct fdb_index_tbl_entry_t);
+
+ t->port1_stp_cfg = (void __iomem *)t->mac_tbl_a +
+ FDB_MAC_TBL_MAX_ENTRIES *
+ sizeof(struct fdb_mac_tbl_entry_t);
+
+ t->port2_stp_cfg = (void __iomem *)t->port1_stp_cfg +
+ sizeof(struct fdb_stp_config);
+
+ t->flood_enable_flags = (void __iomem *)t->port2_stp_cfg +
+ sizeof(struct fdb_stp_config);
+
+ t->locks = (void __iomem *)t->flood_enable_flags +
+ sizeof(struct fdb_flood_config);
+
+ t->flood_enable_flags->host_flood_enable = 1;
+ t->flood_enable_flags->port1_flood_enable = 1;
+ t->flood_enable_flags->port2_flood_enable = 1;
+ t->locks->host_lock = 0;
+ t->total_entries = 0;
+}
+
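+/* Host/PRU mutual exclusion over the shared FDB uses plain flags in
+ * shared memory: the host raises host_lock, then spins until both PRU
+ * lock flags read back clear before touching the table.
+ */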
+static void prueth_sw_fdb_spin_lock(struct fdb_tbl *fdb_tbl)
+{
+ /* Take the host lock */
+ writeb(1, &fdb_tbl->locks->host_lock);
+
+ /* Wait for the PRUs to release their locks */
+ while (readb(&fdb_tbl->locks->pru_locks))
+ ;
+}
+
+static inline void prueth_sw_fdb_spin_unlock(struct fdb_tbl *fdb_tbl)
+{
+ writeb(0, &fdb_tbl->locks->host_lock);
+}
+
+static void mac_copy(u8 *dst, const u8 *src)
+{
+ u8 i;
+
+ for (i = 0; i < 6; i++) {
+ *(dst) = *(src);
+ dst++;
+ src++;
+ }
+}
+
+/* -1 mac_a < mac_b
+ * 0 mac_a == mac_b
+ * 1 mac_a > mac_b
+ */
+static s8 mac_cmp(const u8 *mac_a, const u8 *mac_b)
+{
+ s8 ret = 0, i;
+
+ for (i = 0; i < 6; i++) {
+ if (mac_a[i] == mac_b[i])
+ continue;
+
+ ret = mac_a[i] < mac_b[i] ? -1 : 1;
+ break;
+ }
+
+ return ret;
+}
+
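+/* Fold all six MAC octets with XOR; the resulting u8 indexes the FDB
+ * bucket (index) table shared with the PRU firmware.
+ */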
+static inline u8 prueth_sw_fdb_hash(const u8 *mac)
+{
+ return mac[0] ^ mac[1] ^ mac[2] ^ mac[3] ^ mac[4] ^ mac[5];
+}
+
+static s16
+prueth_sw_fdb_search(struct fdb_mac_tbl_array_t *mac_tbl,
+ struct fdb_index_tbl_entry_t *bucket_info,
+ const u8 *mac)
+{
+ int i;
+ u8 mac_tbl_idx = bucket_info->bucket_idx;
+
+ for (i = 0; i < bucket_info->bucket_entries; i++, mac_tbl_idx++) {
+ if (!mac_cmp(mac, mac_tbl->mac_tbl_entry[mac_tbl_idx].mac))
+ return mac_tbl_idx;
+ }
+
+ return -ENODATA;
+}
+
+static u16 prueth_sw_fdb_find_open_slot(struct fdb_tbl *fdb_tbl)
+{
+ u16 i;
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ if (!fdb_tbl->mac_tbl_a->mac_tbl_entry[i].active)
+ break;
+ }
+
+ return i;
+}
+
+/* port: 0 based: 0=port1, 1=port2 */
+static s16
+prueth_sw_fdb_find_bucket_insert_point(struct fdb_tbl *fdb,
+ struct fdb_index_tbl_entry_t *bkt_info,
+ const u8 *mac, const u8 port)
+{
+ struct fdb_mac_tbl_array_t *mac_tbl = fdb->mac_tbl_a;
+ struct fdb_mac_tbl_entry_t *e;
+ int i;
+ u8 mac_tbl_idx;
+ s8 cmp;
+
+ mac_tbl_idx = bkt_info->bucket_idx;
+
+ for (i = 0; i < bkt_info->bucket_entries; i++, mac_tbl_idx++) {
+ e = &mac_tbl->mac_tbl_entry[mac_tbl_idx];
+ cmp = mac_cmp(mac, e->mac);
+ if (cmp < 0) {
+ return mac_tbl_idx;
+ } else if (cmp == 0) {
+ if (e->port != port) {
+ /* mac is already in FDB, only port is
+ * different. So just update the port.
+ * Note: total_entries and bucket_entries
+ * remain the same.
+ */
+ prueth_sw_fdb_spin_lock(fdb);
+ e->port = port;
+ prueth_sw_fdb_spin_unlock(fdb);
+ }
+
+ /* mac and port are the same, touch the fdb */
+ e->age = 0;
+ return -1;
+ }
+ }
+
+ return mac_tbl_idx;
+}
+
+static s16
+prueth_sw_fdb_check_empty_slot_left(struct fdb_mac_tbl_array_t *mac_tbl,
+ u8 mac_tbl_idx)
+{
+ s16 i;
+
+ for (i = mac_tbl_idx - 1; i > -1; i--) {
+ if (!mac_tbl->mac_tbl_entry[i].active)
+ break;
+ }
+
+ return i;
+}
+
+static s16
+prueth_sw_fdb_check_empty_slot_right(struct fdb_mac_tbl_array_t *mac_tbl,
+ u8 mac_tbl_idx)
+{
+ s16 i;
+
+ for (i = mac_tbl_idx; i < FDB_MAC_TBL_MAX_ENTRIES; i++) {
+ if (!mac_tbl->mac_tbl_entry[i].active)
+ return i;
+ }
+
+ return -1;
+}
+
+static void prueth_sw_fdb_move_range_left(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ u32 sz = sizeof(struct fdb_mac_tbl_entry_t);
+ u8 *src, *dst;
+ u16 i;
+
+ for (i = left; i < right; i++) {
+ dst = (u8 *)FDB_MAC_TBL_ENTRY(i);
+ src = (u8 *)FDB_MAC_TBL_ENTRY(i + 1);
+ memcpy_toio(dst, src, sz);
+ }
+}
+
+static void prueth_sw_fdb_move_range_right(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ u32 sz = sizeof(struct fdb_mac_tbl_entry_t);
+ u8 *src, *dst;
+ u16 i;
+
+ for (i = right; i > left; i--) {
+ dst = (u8 *)FDB_MAC_TBL_ENTRY(i);
+ src = (u8 *)FDB_MAC_TBL_ENTRY(i - 1);
+ memcpy_toio(dst, src, sz);
+ }
+}
+
+static void prueth_sw_fdb_update_index_tbl(struct prueth *prueth,
+ u16 left, u16 right)
+{
+ u16 i;
+ u8 hash, hash_prev;
+
+ /* To ensure we don't improperly update the
+ * bucket index, initialize with an invalid
+ * hash in case we are in leftmost slot
+ */
+ hash_prev = 0xff;
+
+ if (left > 0) {
+ hash_prev =
+ prueth_sw_fdb_hash(FDB_MAC_TBL_ENTRY(left - 1)->mac);
+ }
+
+ /* For each moved element, update the bucket index */
+ for (i = left; i <= right; i++) {
+ hash = prueth_sw_fdb_hash(FDB_MAC_TBL_ENTRY(i)->mac);
+
+ /* Only need to update buckets once */
+ if (hash != hash_prev)
+ FDB_IDX_TBL_ENTRY(hash)->bucket_idx = i;
+
+ hash_prev = hash;
+ }
+}
+
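+/* The MAC table is one contiguous array kept sorted so that each hash
+ * bucket occupies a consecutive range. Claiming a free slot may shift
+ * neighbouring entries left or right, after which any bucket whose
+ * start index moved is re-pointed via the index table.
+ */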
+static struct fdb_mac_tbl_entry_t *
+prueth_sw_get_empty_mac_tbl_entry(struct prueth *prueth,
+ struct fdb_index_tbl_entry_t *bucket_info,
+ u8 suggested_mac_tbl_idx,
+ bool *update_indexes)
+{
+ struct fdb_tbl *fdb = prueth->fdb_tbl;
+ struct fdb_mac_tbl_array_t *mt = fdb->mac_tbl_a;
+ s16 empty_slot_idx = 0, left = 0, right = 0;
+ u8 mti = suggested_mac_tbl_idx;
+
+ if (!FDB_MAC_TBL_ENTRY(mti)->active) {
+ /* Claim the entry */
+ FDB_MAC_TBL_ENTRY(mti)->active = 1;
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES)
+ return NULL;
+
+ empty_slot_idx = prueth_sw_fdb_check_empty_slot_left(mt, mti);
+ if (empty_slot_idx == -1) {
+ /* Nothing available on the left. But the table isn't full,
+ * so there must be space to the right.
+ */
+ empty_slot_idx = prueth_sw_fdb_check_empty_slot_right(mt, mti);
+
+ /* Shift right */
+ left = mti;
+ right = empty_slot_idx;
+ prueth_sw_fdb_move_range_right(prueth, left, right);
+
+ /* Claim the entry */
+ FDB_MAC_TBL_ENTRY(mti)->active = 1;
+
+ /* There is a chance we moved something in a
+ * different bucket, update index table
+ */
+ prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti);
+ }
+
+ if (empty_slot_idx == mti - 1) {
+ /* There is space immediately to the left of the open slot,
+ * which means the inserted MAC address must be the
+ * lowest-valued MAC address in the bucket.
+ * Update the bucket pointer accordingly.
+ */
+ bucket_info->bucket_idx = empty_slot_idx;
+
+ /* Claim the entry */
+ FDB_MAC_TBL_ENTRY(empty_slot_idx)->active = 1;
+
+ return FDB_MAC_TBL_ENTRY(empty_slot_idx);
+ }
+
+ /* There is empty space to the left, shift MAC table entries left */
+ left = empty_slot_idx;
+ right = mti - 1;
+ prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Claim the entry */
+ FDB_MAC_TBL_ENTRY(mti - 1)->active = 1;
+
+ /* There is a chance we moved something in a
+ * different bucket, update index table
+ */
+ prueth_sw_fdb_update_index_tbl(prueth, left, right);
+
+ return FDB_MAC_TBL_ENTRY(mti - 1);
+}
+
+static int prueth_sw_insert_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *other_emac;
+ struct fdb_tbl *fdb = prueth->fdb_tbl;
+ struct fdb_index_tbl_entry_t *bucket_info;
+ struct fdb_mac_tbl_entry_t *mac_info;
+ u8 hash_val, mac_tbl_idx;
+ s16 ret;
+
+ other_emac = prueth->emac[other_port_id(emac->port_id) - 1];
+
+ if (fdb->total_entries == FDB_MAC_TBL_MAX_ENTRIES)
+ return -ENOMEM;
+
+ if (mac_cmp(mac, emac->mac_addr) == 0 ||
+ mac_cmp(mac, other_emac->mac_addr) == 0) {
+ /* Don't insert fdb of own mac addr */
+ return -EINVAL;
+ }
+
+ /* Empty mac table entries are available */
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ if (!bucket_info->bucket_entries) {
+ mac_tbl_idx = prueth_sw_fdb_find_open_slot(fdb);
+ bucket_info->bucket_idx = mac_tbl_idx;
+ }
+
+ ret = prueth_sw_fdb_find_bucket_insert_point(fdb, bucket_info, mac,
+ emac->port_id - 1);
+
+ if (ret < 0)
+ /* mac is already in fdb table */
+ return 0;
+
+ mac_tbl_idx = ret;
+
+ prueth_sw_fdb_spin_lock(fdb);
+
+ mac_info = prueth_sw_get_empty_mac_tbl_entry(prueth, bucket_info,
+ mac_tbl_idx, NULL);
+ if (!mac_info) {
+ /* Should not happen: capacity was checked above */
+ prueth_sw_fdb_spin_unlock(fdb);
+ dev_warn(prueth->dev, "no free FDB entry\n");
+ return -ENOMEM;
+ }
+
+ mac_copy(mac_info->mac, mac);
+ mac_info->active = 1;
+ mac_info->age = 0;
+ mac_info->port = emac->port_id - 1;
+ mac_info->is_static = is_static;
+
+ bucket_info->bucket_entries++;
+ fdb->total_entries++;
+
+ prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "added fdb: %pM port=%d total_entries=%u\n",
+ mac, emac->port_id, fdb->total_entries);
+
+ return 0;
+}
+
+static int prueth_sw_delete_fdb_entry(struct prueth_emac *emac,
+ const u8 *mac, u8 is_static)
+{
+ struct prueth *prueth = emac->prueth;
+ struct fdb_tbl *fdb = prueth->fdb_tbl;
+ struct fdb_mac_tbl_array_t *mt = fdb->mac_tbl_a;
+ struct fdb_index_tbl_entry_t *bucket_info;
+ struct fdb_mac_tbl_entry_t *mac_info;
+ u8 hash_val, mac_tbl_idx;
+ s16 ret, left, right;
+
+ if (fdb->total_entries == 0)
+ return 0;
+
+ /* Get the bucket that the mac belongs to */
+ hash_val = prueth_sw_fdb_hash(mac);
+ bucket_info = FDB_IDX_TBL_ENTRY(hash_val);
+
+ ret = prueth_sw_fdb_search(mt, bucket_info, mac);
+ if (ret < 0)
+ return ret;
+
+ mac_tbl_idx = ret;
+
+ prueth_sw_fdb_spin_lock(fdb);
+
+ /* Shift all elements in bucket to the left. No need to
+ * update index table since only shifting within bucket.
+ */
+ left = mac_tbl_idx;
+ right = bucket_info->bucket_idx + bucket_info->bucket_entries - 1;
+ prueth_sw_fdb_move_range_left(prueth, left, right);
+
+ /* Remove end of bucket from table */
+ mac_info = FDB_MAC_TBL_ENTRY(right);
+ mac_info->active = 0;
+ bucket_info->bucket_entries--;
+ fdb->total_entries--;
+
+ prueth_sw_fdb_spin_unlock(fdb);
+
+ dev_dbg(prueth->dev, "del fdb: %pM total_entries=%u\n",
+ mac, fdb->total_entries);
+
+ return 0;
+}
+
+static int prueth_sw_do_purge_fdb(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct fdb_tbl *fdb = prueth->fdb_tbl;
+ s16 i;
+
+ if (fdb->total_entries == 0)
+ return 0;
+
+ prueth_sw_fdb_spin_lock(fdb);
+
+ for (i = 0; i < FDB_INDEX_TBL_MAX_ENTRIES; i++)
+ fdb->index_a->index_tbl_entry[i].bucket_entries = 0;
+
+ for (i = 0; i < FDB_MAC_TBL_MAX_ENTRIES; i++)
+ fdb->mac_tbl_a->mac_tbl_entry[i].active = 0;
+
+ fdb->total_entries = 0;
+
+ prueth_sw_fdb_spin_unlock(fdb);
+ return 0;
+}
+
+static void prueth_sw_fdb_work(struct work_struct *work)
+{
+ struct prueth_sw_fdb_work *fdb_work =
+ container_of(work, struct prueth_sw_fdb_work, work);
+ struct prueth_emac *emac = fdb_work->emac;
+
+ rtnl_lock();
+
+ /* Interface is not up */
+ if (!emac->prueth->fdb_tbl) {
+ rtnl_unlock();
+ return;
+ }
+
+ switch (fdb_work->event) {
+ case FDB_LEARN:
+ prueth_sw_insert_fdb_entry(emac, fdb_work->addr, 0);
+ break;
+ case FDB_PURGE:
+ prueth_sw_do_purge_fdb(emac);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(fdb_work);
+ dev_put(emac->ndev);
+}
+
+int prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac)
+{
+ struct prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_LEARN;
+ fdb_work->emac = emac;
+ ether_addr_copy(fdb_work->addr, src_mac);
+
+ dev_hold(emac->ndev);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
+static int prueth_sw_purge_fdb(struct prueth_emac *emac)
+{
+ struct prueth_sw_fdb_work *fdb_work;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (WARN_ON(!fdb_work))
+ return -ENOMEM;
+
+ INIT_WORK(&fdb_work->work, prueth_sw_fdb_work);
+
+ fdb_work->event = FDB_PURGE;
+ fdb_work->emac = emac;
+
+ dev_hold(emac->ndev);
+ queue_work(system_long_wq, &fdb_work->work);
+ return 0;
+}
+
+int prueth_sw_init_fdb_table(struct prueth *prueth)
+{
+ if (prueth->emac_configured)
+ return 0;
+
+ prueth->fdb_tbl = kmalloc(sizeof(*prueth->fdb_tbl), GFP_KERNEL);
+ if (!prueth->fdb_tbl)
+ return -ENOMEM;
+
+ prueth_sw_fdb_tbl_init(prueth);
+
+ return 0;
+}
+
+int prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ const char *fw_name, *fw_name1;
+ int ret;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU0];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ pru_firmwares = &prueth->fw_data->fw_pru[PRUSS_PRU1];
+ fw_name1 = pru_firmwares->fw_name[prueth->eth_type];
+
+ ret = rproc_set_firmware(prueth->pru0, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU0 firmware %s: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ ret = rproc_boot(prueth->pru0);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU: %d\n", ret);
+ return ret;
+ }
+
+ ret = rproc_set_firmware(prueth->pru1, fw_name1);
+ if (ret) {
+ netdev_err(ndev, "failed to set PRU1 firmware %s: %d\n",
+ fw_name1, ret);
+ goto rproc0_shutdown;
+ }
+ ret = rproc_boot(prueth->pru1);
+ if (ret) {
+ netdev_err(ndev, "failed to boot PRU: %d\n", ret);
+ goto rproc0_shutdown;
+ }
+
+ return 0;
+
+rproc0_shutdown:
+ rproc_shutdown(prueth->pru0);
+ return ret;
+}
+
+int prueth_sw_shutdown_prus(struct prueth_emac *emac, struct net_device *ndev)
+{
+ struct prueth *prueth = emac->prueth;
+
+ if (prueth->emac_configured)
+ return 0;
+
+ rproc_shutdown(prueth->pru0);
+ rproc_shutdown(prueth->pru1);
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err = 0;
+ u8 o_state;
+
+ /* Interface is not up */
+ if (!prueth->fdb_tbl)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ o_state = prueth_sw_port_get_stp_state(prueth, emac->port_id);
+ prueth_sw_port_set_stp_state(prueth, emac->port_id,
+ attr->u.stp_state);
+
+ if (o_state != attr->u.stp_state)
+ prueth_sw_purge_fdb(emac);
+
+ dev_dbg(prueth->dev, "attr set: stp state:%u port:%u\n",
+ attr->u.stp_state, emac->port_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct switchdev_obj_port_mdb *mdb;
+ int ret = 0;
+ u8 hash;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ dev_dbg(prueth->dev, "MDB add: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ hash = emac_get_mc_hash(mdb->addr, emac->mc_filter_mask);
+ emac_mc_filter_bin_allow(emac, hash);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int prueth_switchdev_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct switchdev_obj_port_mdb *mdb;
+ struct netdev_hw_addr *ha;
+ u8 hash, tmp_hash;
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ dev_dbg(prueth->dev, "MDB del: %s: vid %u:%pM port: %x\n",
+ ndev->name, mdb->vid, mdb->addr, emac->port_id);
+ hash = emac_get_mc_hash(mdb->addr, emac->mc_filter_mask);
+ netdev_for_each_mc_addr(ha, prueth->hw_bridge_dev) {
+ tmp_hash = emac_get_mc_hash(ha->addr, emac->mc_filter_mask);
+ /* Another MC address is in the bin. Don't disable. */
+ if (tmp_hash == hash)
+ return 0;
+ }
+ emac_mc_filter_bin_disallow(emac, hash);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/* switchdev notifiers */
+static int prueth_sw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err;
+
+ if (!PRUETH_IS_SWITCH(prueth))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ prueth_sw_port_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(ndev, ptr,
+ prueth_sw_port_dev_check,
+ prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(ndev, ptr,
+ prueth_sw_port_dev_check,
+ prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* switchdev event work */
+struct prueth_sw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
+static void
+prueth_sw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, ndev, &info.info,
+ NULL);
+}
+
+static void prueth_sw_fdb_add(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ prueth_sw_insert_fdb_entry(emac, fdb->addr, 1);
+}
+
+static void prueth_sw_fdb_del(struct prueth_emac *emac,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ prueth_sw_delete_fdb_entry(emac, fdb->addr, 1);
+}
+
+static void prueth_sw_switchdev_event_work(struct work_struct *work)
+{
+ struct prueth_sw_switchdev_event_work *switchdev_work =
+ container_of(work, struct prueth_sw_switchdev_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct prueth *prueth = emac->prueth;
+ int port = emac->port_id;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb add: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (!fdb->added_by_user)
+ break;
+
+ prueth_sw_fdb_add(emac, fdb);
+ prueth_sw_fdb_offload_notify(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+ dev_dbg(prueth->dev,
+ "prueth fdb del: MACID = %pM vid = %u flags = %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user, port);
+
+ if (!fdb->added_by_user)
+ break;
+
+ prueth_sw_fdb_del(emac, fdb);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(emac->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int prueth_sw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct prueth_sw_switchdev_event_work *switchdev_work;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err;
+
+ netdev_dbg(ndev, "switchdev_event: event=%lu", event);
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ prueth_sw_port_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prueth_sw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, prueth_sw_switchdev_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+int prueth_sw_register_notifiers(struct prueth *prueth)
+{
+ struct notifier_block *nb;
+ int ret;
+
+ nb = &prueth->prueth_sw_switchdev_notifier;
+ nb->notifier_call = prueth_sw_switchdev_event;
+ ret = register_switchdev_notifier(nb);
+ if (ret) {
+ dev_err(prueth->dev,
+ "register switchdev notifier failed ret:%d\n", ret);
+ return ret;
+ }
+
+ nb = &prueth->prueth_sw_switchdev_bl_notifier;
+ nb->notifier_call = prueth_sw_switchdev_blocking_event;
+ ret = register_switchdev_blocking_notifier(nb);
+ if (ret) {
+ dev_err(prueth->dev, "register switchdev blocking notifier failed ret:%d\n",
+ ret);
+ nb = &prueth->prueth_sw_switchdev_notifier;
+ unregister_switchdev_notifier(nb);
+ return ret;
+ }
+
+ return 0;
+}
+
+void prueth_sw_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_sw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&prueth->prueth_sw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/prueth_switch.h b/drivers/net/ethernet/ti/prueth_switch.h
new file mode 100644
index 000000000000..8e1d7d3c114b
--- /dev/null
+++ b/drivers/net/ethernet/ti/prueth_switch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_TI_PRUETH_SWITCH_H
+#define __NET_TI_PRUETH_SWITCH_H
+
+#include "prueth.h"
+#include "prueth_fdb_tbl.h"
+
+struct prueth_col_rx_context_info {
+ u16 buffer_offset;
+ u16 buffer_offset2;
+ u16 queue_desc_offset;
+ u16 buffer_desc_offset;
+ u16 buffer_desc_end;
+} __packed;
+
+struct prueth_col_tx_context_info {
+ u16 buffer_offset;
+ u16 buffer_offset2;
+ u16 buffer_offset_end;
+} __packed;
+
+static inline enum prueth_port other_port_id(enum prueth_port port_id)
+{
+ return (port_id == PRUETH_PORT_MII0) ? PRUETH_PORT_MII1 :
+ PRUETH_PORT_MII0;
+}
+
+static inline
+void prueth_sw_port_set_stp_state(struct prueth *prueth,
+ enum prueth_port port, u8 state)
+{
+ struct fdb_tbl *t = prueth->fdb_tbl;
+
+ writeb(state, port - 1 ?
+ &t->port2_stp_cfg->state : &t->port1_stp_cfg->state);
+}
+
+void prueth_sw_hostconfig(struct prueth *prueth);
+int prueth_sw_emac_config(struct prueth_emac *emac);
+void prueth_sw_fdb_tbl_init(struct prueth *prueth);
+int prueth_sw_learn_fdb(struct prueth_emac *emac, u8 *src_mac);
+int prueth_sw_boot_prus(struct prueth *prueth, struct net_device *ndev);
+int prueth_sw_shutdown_prus(struct prueth_emac *emac, struct net_device *ndev);
+int prueth_sw_register_notifiers(struct prueth *prueth);
+void prueth_sw_unregister_notifiers(struct prueth *prueth);
+bool prueth_sw_port_dev_check(const struct net_device *ndev);
+int prueth_sw_init_fdb_table(struct prueth *prueth);
+void prueth_sw_free_fdb_table(struct prueth *prueth);
+
+extern const struct prueth_queue_info sw_queue_infos[][NUM_QUEUES];
+
+#endif /* __NET_TI_PRUETH_SWITCH_H */
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 99588192cc78..0f457c436335 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
return dev_addr;
}
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
+EXPORT_SYMBOL(mdiobb_read);
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
+EXPORT_SYMBOL(mdiobb_write);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 5bae47f3da40..285a0d9703da 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -348,16 +348,7 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct mdio_device *of_mdio_find_device(struct device_node *np)
{
- struct device *d;
-
- if (!np)
- return NULL;
-
- d = bus_find_device_by_of_node(&mdio_bus_type, np);
- if (!d)
- return NULL;
-
- return to_mdio_device(d);
+ return fwnode_mdio_find_device(of_fwnode_handle(np));
}
EXPORT_SYMBOL(of_mdio_find_device);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c8031e297faf..cf867362831f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -41,6 +41,7 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_DSP_FFE_CFG 0x012C
#define DP83867_RXFCFG 0x0134
#define DP83867_RXFPMD1 0x0136
#define DP83867_RXFPMD2 0x0137
@@ -807,8 +808,20 @@ static int dp83867_phy_reset(struct phy_device *phydev)
usleep_range(10, 20);
- return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ err = phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+ if (err < 0)
+ return err;
+
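+ /* Per TI guidance, 0x0E81 in the DSP feedforward equalizer config
+ * improves short-cable performance; the soft restart below lets it
+ * take effect.
+ */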
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG, 0x0E81);
+
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ return 0;
}
static void dp83867_link_change_notify(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index a9daff88006b..84c7895df25f 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -659,8 +659,11 @@ static int dp83869_configure_mode(struct phy_device *phydev,
/* Below init sequence for each operational mode is defined in
* section 9.4.8 of the datasheet.
*/
+ phy_ctrl_val = dp83869->mode;
+ if (phydev->interface == PHY_INTERFACE_MODE_MII)
+ phy_ctrl_val |= DP83869_OP_MODE_MII;
ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
- dp83869->mode);
+ phy_ctrl_val);
if (ret)
return ret;
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index c2023f93c0b2..8091f79afe78 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -191,6 +191,8 @@ enum rgmii_clock_delay {
#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_PCS_CTRL 16
+#define MSCC_PHY_SERDES_ANEG BIT(7)
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
#define MSCC_PHY_SERDES_RX_VALID_CNT 28
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index e14fa72791b0..c1a212c5cccf 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1516,6 +1516,21 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
vsc8531->addr = addr;
}
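+/* Enable/disable in-band auto-negotiation on the SerDes side of the
+ * PHY via the SerDes PCS control register on extended page 3.
+ */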
+static int vsc85xx_config_inband_aneg(struct phy_device *phydev, bool enabled)
+{
+ int rc;
+ u16 reg_val = 0;
+
+ if (enabled)
+ reg_val = MSCC_PHY_SERDES_ANEG;
+
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_3,
+ MSCC_PHY_SERDES_PCS_CTRL, MSCC_PHY_SERDES_ANEG,
+ reg_val);
+
+ return rc;
+}
+
static int vsc8584_config_init(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
@@ -2011,6 +2026,11 @@ static int vsc8514_config_init(struct phy_device *phydev)
if (ret)
return ret;
+ ret = vsc85xx_config_inband_aneg(phydev, true);
+ if (ret)
+ return ret;
+
for (i = 0; i < vsc8531->nleds; i++) {
ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
if (ret)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3ef5aa6b72a7..861b01a58c6c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -816,6 +817,27 @@ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id)
return 0;
}
+/* Extract the phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB.
+ */
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ unsigned int upper, lower;
+ const char *cp;
+ int ret;
+
+ ret = fwnode_property_read_string(fwnode, "compatible", &cp);
+ if (ret)
+ return ret;
+
+ if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2)
+ return -EINVAL;
+
+ *phy_id = ((upper & GENMASK(15, 0)) << 16) | (lower & GENMASK(15, 0));
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_get_phy_id);
+
/**
* get_phy_device - reads the specified PHY device and returns its @phy_device
* struct
@@ -2808,6 +2830,90 @@ static bool phy_drv_supports_irq(struct phy_driver *phydrv)
}
/**
+ * fwnode_mdio_find_device - Given a fwnode, find the mdio_device
+ * @fwnode: pointer to the mdio_device's fwnode
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device after its use.
+ */
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+ struct device *d;
+
+ if (!fwnode)
+ return NULL;
+
+ d = bus_find_device_by_fwnode(&mdio_bus_type, fwnode);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(fwnode_mdio_find_device);
+
+/**
+ * fwnode_phy_find_device - For provided phy_fwnode, find phy_device.
+ *
+ * @phy_fwnode: Pointer to the phy's fwnode.
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ */
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ struct mdio_device *mdiodev;
+
+ mdiodev = fwnode_mdio_find_device(phy_fwnode);
+ if (!mdiodev)
+ return NULL;
+
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fwnode_phy_find_device);
+
+/**
+ * device_phy_find_device - For the given device, get the phy_device
+ * @dev: Pointer to the given device
+ *
+ * Refer return conditions of fwnode_phy_find_device().
+ */
+struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return fwnode_phy_find_device(dev_fwnode(dev));
+}
+EXPORT_SYMBOL_GPL(device_phy_find_device);
+
+/**
+ * fwnode_get_phy_node - Get the phy_node using the named reference.
+ * @fwnode: Pointer to fwnode from which phy_node has to be obtained.
+ *
+ * Refer return conditions of fwnode_find_reference().
+ * For ACPI, only "phy-handle" is supported. Legacy DT properties "phy"
+ * and "phy-device" are not supported in ACPI. DT supports all the three
+ * named references to the phy node.
+ */
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *phy_node;
+
+ /* Only phy-handle is used for ACPI */
+ phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
+ if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ return phy_node;
+ phy_node = fwnode_find_reference(fwnode, "phy", 0);
+ if (IS_ERR(phy_node))
+ phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
+ return phy_node;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
+
+/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
*
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 57b1b138522e..ad94e6324ed0 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2015 Russell King
*/
+#include <linux/acpi.h>
#include <linux/ethtool.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
@@ -32,6 +33,7 @@
enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
+ PHYLINK_DISABLE_MAC_WOL,
};
/**
@@ -153,6 +155,259 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
+ unsigned long caps)
+{
+ if (caps & MAC_SYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
+
+ if (caps & MAC_ASYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
+
+ if (caps & MAC_10HD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_10FD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_100HD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_1000HD)
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_1000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_2500FD) {
+ __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_5000FD)
+ __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_10000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_25000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_40000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_50000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_56000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_200000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_400000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
+ }
+}
+
+/**
+ * phylink_get_linkmodes() - get acceptable link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ * @mac_capabilities: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that
+ * are supported by the @interface mode and @mac_capabilities. @linkmodes
+ * must have been initialised previously.
+ */
+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
+ unsigned long mac_capabilities)
+{
+ unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ caps |= MAC_1000HD | MAC_1000FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ caps |= MAC_10HD | MAC_10FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ caps |= MAC_100HD | MAC_100FD;
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ caps |= MAC_1000HD;
+ fallthrough;
+ case PHY_INTERFACE_MODE_TRGMII:
+ caps |= MAC_1000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ caps |= MAC_2500FD;
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ caps |= MAC_5000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ caps |= MAC_10000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ caps |= MAC_25000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ caps |= MAC_40000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ caps |= ~0;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+}
+EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+
+/**
+ * phylink_generic_validate() - generic validate() callback implementation
+ * @config: a pointer to a &struct phylink_config.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Generic implementation of the validate() callback that MAC drivers can
+ * use when they pass the range of supported interfaces and MAC capabilities.
+ * This makes use of phylink_get_linkmodes().
+ */
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+EXPORT_SYMBOL_GPL(phylink_generic_validate);
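For illustration, a MAC driver adopting this generic implementation fills in mac_capabilities and points its validate method at the helper; a sketch with a hypothetical "priv" driver:

	/* During phylink setup in the driver: */
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
						MAC_ASYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	static const struct phylink_mac_ops priv_phylink_mac_ops = {
		.validate = phylink_generic_validate,
		/* ... mac_config, mac_link_up, mac_link_down ... */
	};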
+
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
@@ -769,6 +1024,12 @@ static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
}
}
+static void phylink_enable_and_run_resolve(struct phylink *pl, int bit)
+{
+ clear_bit(bit, &pl->phylink_disable_state);
+ phylink_run_resolve(pl);
+}
+
static void phylink_fixed_poll(struct timer_list *t)
{
struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -1027,7 +1288,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_unlock(&phy->lock);
phylink_dbg(pl,
- "phy: setting supported %*pb advertising %*pb\n",
+ "phy: %s setting supported %*pb advertising %*pb\n",
+ phy_modes(interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
@@ -1103,7 +1365,26 @@ EXPORT_SYMBOL_GPL(phylink_connect_phy);
int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
u32 flags)
{
- struct device_node *phy_node;
+ return phylink_fwnode_phy_connect(pl, of_fwnode_handle(dn), flags);
+}
+EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+
+/**
+ * phylink_fwnode_phy_connect() - connect the PHY specified in the fwnode.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @fwnode: a pointer to a &struct fwnode_handle.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the PHY specified in @fwnode to the phylink instance specified
+ * by @pl.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags)
+{
+ struct fwnode_handle *phy_fwnode;
struct phy_device *phy_dev;
int ret;
@@ -1113,28 +1394,31 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
- phy_node = of_parse_phandle(dn, "phy-handle", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy-device", 0);
-
- if (!phy_node) {
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ if (IS_ERR(phy_fwnode)) {
if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_find_device(phy_node);
+ phy_dev = fwnode_phy_find_device(phy_fwnode);
/* We're done with the phy_node handle */
- of_node_put(phy_node);
+ fwnode_handle_put(phy_fwnode);
if (!phy_dev)
return -ENODEV;
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy_dev->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret)
+ if (ret) {
+ phy_device_free(phy_dev);
return ret;
+ }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
@@ -1142,7 +1426,7 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
return ret;
}
-EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+EXPORT_SYMBOL_GPL(phylink_fwnode_phy_connect);
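A usage sketch, assuming a hypothetical driver probe with the phylink instance already created; dev_fwnode() makes the same call work for both DT and ACPI firmware nodes:

	ret = phylink_fwnode_phy_connect(priv->phylink,
					 dev_fwnode(&pdev->dev), 0);
	if (ret)
		return ret;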
/**
* phylink_disconnect_phy() - disconnect any PHY attached to the phylink
@@ -1229,8 +1513,7 @@ void phylink_start(struct phylink *pl)
*/
phylink_mac_initial_config(pl, true);
- clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
- phylink_run_resolve(pl);
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED);
if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
int irq = gpiod_to_irq(pl->link_gpio);
@@ -1275,6 +1558,9 @@ EXPORT_SYMBOL_GPL(phylink_start);
* network device driver's &struct net_device_ops ndo_stop() method. The
* network device's carrier state should not be changed prior to calling this
* function.
+ *
+ * This will synchronously bring down the link if the link is not already
+ * down (in other words, it will trigger a mac_link_down() method call).
*/
void phylink_stop(struct phylink *pl)
{
@@ -1295,6 +1581,83 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_suspend() - handle a network device suspend event
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @mac_wol: true if the MAC needs to receive packets for Wake-on-LAN
+ *
+ * Handle a network device suspend event. There are several cases:
+ * - If Wake-on-LAN is not active, we can bring down the link between
+ *   the MAC and PHY by calling phylink_stop().
+ * - If Wake-on-LAN is active, and being handled only by the PHY, we
+ *   can also bring down the link between the MAC and PHY.
+ * - If Wake-on-LAN is active, but being handled by the MAC, the MAC
+ *   still needs to receive packets, so we cannot bring the link down.
+ */
+void phylink_suspend(struct phylink *pl, bool mac_wol)
+{
+ ASSERT_RTNL();
+
+ if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
+ /* Wake-on-LAN enabled, MAC handling */
+ mutex_lock(&pl->state_mutex);
+
+ /* Stop the resolver bringing the link up */
+ __set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
+
+ /* Disable the carrier, to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ netif_carrier_off(pl->netdev);
+
+ /* We do not call mac_link_down() here as we want the
+ * link to remain up to receive the WoL packets.
+ */
+ mutex_unlock(&pl->state_mutex);
+ } else {
+ phylink_stop(pl);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_suspend);
+
+/**
+ * phylink_resume() - handle a network device resume event
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Undo the effects of phylink_suspend(), returning the link to an
+ * operational state.
+ */
+void phylink_resume(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
+ /* Wake-on-LAN enabled, MAC handling */
+
+ /* Call mac_link_down() so we keep the overall state balanced.
+ * Do this under the state_mutex lock for consistency. This
+ * will cause a "Link Down" message to be printed during
+ * resume, which is harmless - the true link state will be
+ * printed when we run a resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+
+ /* Re-apply the link parameters so that all the settings get
+ * restored to the MAC.
+ */
+ phylink_mac_initial_config(pl, true);
+
+ /* Re-enable and re-resolve the link parameters */
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_MAC_WOL);
+ } else {
+ phylink_start(pl);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_resume);
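A sketch of the intended pairing in a hypothetical driver's PM hooks; phylink_suspend() and phylink_resume() assert the rtnl lock, so the caller must hold it:

static int priv_suspend(struct device *dev)
{
	struct priv *p = dev_get_drvdata(dev);

	rtnl_lock();
	/* Keep the MAC link up if the MAC is the WoL wake-up source */
	phylink_suspend(p->phylink, p->wol_via_mac);
	rtnl_unlock();

	return 0;
}

static int priv_resume(struct device *dev)
{
	struct priv *p = dev_get_drvdata(dev);

	rtnl_lock();
	phylink_resume(p->phylink);
	rtnl_unlock();

	return 0;
}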
+
+/**
* phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
@@ -2215,8 +2578,7 @@ static void phylink_sfp_link_up(void *upstream)
ASSERT_RTNL();
- clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
- phylink_run_resolve(pl);
+ phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index e77c587060ff..c325be526b80 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -2,4 +2,5 @@
source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/epf/Kconfig"
source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 4714d6238845..223ca592b5f9 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_EPF) += epf/
obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/epf/Kconfig b/drivers/ntb/hw/epf/Kconfig
new file mode 100644
index 000000000000..6197d1aab344
--- /dev/null
+++ b/drivers/ntb/hw/epf/Kconfig
@@ -0,0 +1,6 @@
+config NTB_EPF
+ tristate "Generic EPF Non-Transparent Bridge support"
+ depends on m
+ help
+ This driver supports the Non-Transparent Bridge (NTB) function
+ implemented on a configurable PCIe endpoint.
+ If unsure, say N.
diff --git a/drivers/ntb/hw/epf/Makefile b/drivers/ntb/hw/epf/Makefile
new file mode 100644
index 000000000000..2f560a422bc6
--- /dev/null
+++ b/drivers/ntb/hw/epf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_EPF) += ntb_hw_epf.o
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
new file mode 100644
index 000000000000..b019755e4e21
--- /dev/null
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Host side endpoint driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#define NTB_EPF_COMMAND 0x0
+#define CMD_CONFIGURE_DOORBELL 1
+#define CMD_TEARDOWN_DOORBELL 2
+#define CMD_CONFIGURE_MW 3
+#define CMD_TEARDOWN_MW 4
+#define CMD_LINK_UP 5
+#define CMD_LINK_DOWN 6
+
+#define NTB_EPF_ARGUMENT 0x4
+#define MSIX_ENABLE BIT(16)
+
+#define NTB_EPF_CMD_STATUS 0x8
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define NTB_EPF_LINK_STATUS 0x0A
+#define LINK_STATUS_UP BIT(0)
+
+#define NTB_EPF_TOPOLOGY 0x0C
+#define NTB_EPF_LOWER_ADDR 0x10
+#define NTB_EPF_UPPER_ADDR 0x14
+#define NTB_EPF_LOWER_SIZE 0x18
+#define NTB_EPF_UPPER_SIZE 0x1C
+#define NTB_EPF_MW_COUNT 0x20
+#define NTB_EPF_MW1_OFFSET 0x24
+#define NTB_EPF_SPAD_OFFSET 0x28
+#define NTB_EPF_SPAD_COUNT 0x2C
+#define NTB_EPF_DB_ENTRY_SIZE 0x30
+#define NTB_EPF_DB_DATA(n) (0x34 + (n) * 4)
+#define NTB_EPF_DB_OFFSET(n) (0xB4 + (n) * 4)
+
+#define NTB_EPF_MIN_DB_COUNT 3
+#define NTB_EPF_MAX_DB_COUNT 31
+#define NTB_EPF_MW_OFFSET 2
+
+#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+struct ntb_epf_dev {
+ struct ntb_dev ntb;
+ struct device *dev;
+ /* Serializes command submission to the NTB EPF device */
+ struct mutex cmd_lock;
+
+ enum pci_barno ctrl_reg_bar;
+ enum pci_barno peer_spad_reg_bar;
+ enum pci_barno db_reg_bar;
+
+ unsigned int mw_count;
+ unsigned int spad_count;
+ unsigned int db_count;
+
+ void __iomem *ctrl_reg;
+ void __iomem *db_reg;
+ void __iomem *peer_spad_reg;
+
+ unsigned int self_spad;
+ unsigned int peer_spad;
+
+ int db_val;
+ u64 db_valid_mask;
+};
+
+#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
+
+struct ntb_epf_data {
+ /* BAR that contains both control region and self spad region */
+ enum pci_barno ctrl_reg_bar;
+ /* BAR that contains peer spad region */
+ enum pci_barno peer_spad_reg_bar;
+ /* BAR that contains Doorbell region and Memory window '1' */
+ enum pci_barno db_reg_bar;
+};
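Putting the comments above together, the host-visible BAR layout this driver assumes (memory window n lands in BAR n + NTB_EPF_MW_OFFSET; the j721e defaults later in this file follow the same scheme):

	/* BAR0: control registers + self scratchpad region
	 * BAR1: peer scratchpad region
	 * BAR2: doorbell region + memory window 1
	 * BAR3..BAR5: further memory windows
	 */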
+
+static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
+ u32 argument)
+{
+ ktime_t timeout;
+ bool timedout;
+ int ret = 0;
+ u32 status;
+
+ mutex_lock(&ndev->cmd_lock);
+ writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT);
+ writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND);
+
+ timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT);
+ while (1) {
+ timedout = ktime_after(ktime_get(), timeout);
+ status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
+
+ if (status == COMMAND_STATUS_ERROR) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (status == COMMAND_STATUS_OK)
+ break;
+
+ if (WARN_ON(timedout)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ usleep_range(5, 10);
+ }
+
+ writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS);
+ mutex_unlock(&ndev->cmd_lock);
+
+ return ret;
+}
+
+static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
+{
+ struct device *dev = ndev->dev;
+
+ if (idx < 0 || idx >= ndev->mw_count) {
+ dev_err(dev, "Unsupported Memory Window index %d\n", idx);
+ return -EINVAL;
+ }
+
+ return idx + 2;
+}
+
+static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ return ndev->mw_count;
+}
+
+static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = pci_resource_len(ndev->ntb.pdev, bar);
+
+ return 0;
+}
+
+static u64 ntb_epf_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 status;
+
+ status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS);
+
+ return status & LINK_STATUS_UP;
+}
+
+static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx);
+ return 0;
+ }
+
+ offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ offset += (idx << 2);
+
+ return readl(ndev->ctrl_reg + offset);
+}
+
+static int ntb_epf_spad_write(struct ntb_dev *ntb,
+ int idx, u32 val)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ offset += (idx << 2);
+ writel(val, ndev->ctrl_reg + offset);
+
+ return 0;
+}
+
+static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = (idx << 2);
+ return readl(ndev->peer_spad_reg + offset);
+}
+
+static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int idx, u32 val)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ u32 offset;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ if (idx < 0 || idx >= ndev->spad_count) {
+ dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx);
+ return -EINVAL;
+ }
+
+ offset = (idx << 2);
+ writel(val, ndev->peer_spad_reg + offset);
+
+ return 0;
+}
+
+static int ntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int ret;
+
+ ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0);
+ if (ret) {
+ dev_err(dev, "Fail to enable link\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int ret;
+
+ ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0);
+ if (ret) {
+ dev_err(dev, "Fail to disable link\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ntb_epf_vec_isr(int irq, void *dev)
+{
+ struct ntb_epf_dev *ndev = dev;
+ int irq_no;
+
+ irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0);
+ ndev->db_val = irq_no + 1;
+
+ if (irq_no == 0)
+ ntb_link_event(&ndev->ntb);
+ else
+ ntb_db_event(&ndev->ntb, irq_no);
+
+ return IRQ_HANDLED;
+}
+
+static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ struct device *dev = ndev->dev;
+ u32 argument = MSIX_ENABLE;
+ int irq;
+ int ret;
+ int i;
+
+ irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX);
+ if (irq < 0) {
+ dev_dbg(dev, "Failed to get MSIX interrupts\n");
+ irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max,
+ PCI_IRQ_MSI);
+ if (irq < 0) {
+ dev_err(dev, "Failed to get MSI interrupts\n");
+ return irq;
+ }
+ argument &= ~MSIX_ENABLE;
+ }
+
+ for (i = 0; i < irq; i++) {
+ ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr,
+ 0, "ntb_epf", ndev);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ goto err_request_irq;
+ }
+ }
+
+ ndev->db_count = irq - 1;
+
+ ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL,
+ argument | irq);
+ if (ret) {
+ dev_err(dev, "Failed to configure doorbell\n");
+ goto err_configure_db;
+ }
+
+ return 0;
+
+err_configure_db:
+ for (i = 0; i < ndev->db_count + 1; i++)
+ free_irq(pci_irq_vector(pdev, i), ndev);
+
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+
+ return ret;
+}
+
+static int ntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->mw_count;
+}
+
+static int ntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->db_valid_mask;
+}
+
+static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ resource_size_t mw_size;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX) {
+ dev_err(dev, "Unsupported Peer ID %d\n", pidx);
+ return -EINVAL;
+ }
+
+ bar = idx + NTB_EPF_MW_OFFSET;
+
+ mw_size = pci_resource_len(ntb->pdev, bar);
+
+ if (size > mw_size) {
+ dev_err(dev, "Size:%pa is greater than the MW size %pa\n",
+ &size, &mw_size);
+ return -EINVAL;
+ }
+
+ writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR);
+ writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR);
+ writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE);
+ writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE);
+ ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx);
+
+ return 0;
+}
+
+static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ struct device *dev = ndev->dev;
+ int ret;
+
+ ret = ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx);
+ if (ret)
+ dev_err(dev, "Failed to teardown memory window\n");
+
+ return ret;
+}
+
+static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 offset = 0;
+ int bar;
+
+ if (idx == 0)
+ offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
+
+ bar = idx + NTB_EPF_MW_OFFSET;
+
+ if (base)
+ *base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
+
+ if (size)
+ *size = pci_resource_len(ndev->ntb.pdev, bar) - offset;
+
+ return 0;
+}
+
+static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct device *dev = ndev->dev;
+ u32 db_entry_size;
+ u32 db_offset;
+ u32 db_data;
+
+ if (interrupt_num > ndev->db_count) {
+ dev_err(dev, "DB interrupt %d greater than Max Supported %d\n",
+ interrupt_num, ndev->db_count);
+ return -EINVAL;
+ }
+
+ db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE);
+
+ db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num));
+ db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num));
+ writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) +
+ db_offset);
+
+ return 0;
+}
+
+static u64 ntb_epf_db_read(struct ntb_dev *ntb)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+ return ndev->db_val;
+}
+
+static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct ntb_epf_dev *ndev = ntb_ndev(ntb);
+
+ ndev->db_val = 0;
+
+ return 0;
+}
+
+static const struct ntb_dev_ops ntb_epf_ops = {
+ .mw_count = ntb_epf_mw_count,
+ .spad_count = ntb_epf_spad_count,
+ .peer_mw_count = ntb_epf_peer_mw_count,
+ .db_valid_mask = ntb_epf_db_valid_mask,
+ .db_set_mask = ntb_epf_db_set_mask,
+ .mw_set_trans = ntb_epf_mw_set_trans,
+ .mw_clear_trans = ntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = ntb_epf_peer_mw_get_addr,
+ .link_enable = ntb_epf_link_enable,
+ .spad_read = ntb_epf_spad_read,
+ .spad_write = ntb_epf_spad_write,
+ .peer_spad_read = ntb_epf_peer_spad_read,
+ .peer_spad_write = ntb_epf_peer_spad_write,
+ .peer_db_set = ntb_epf_peer_db_set,
+ .db_read = ntb_epf_db_read,
+ .mw_get_align = ntb_epf_mw_get_align,
+ .link_is_up = ntb_epf_link_is_up,
+ .db_clear_mask = ntb_epf_db_clear_mask,
+ .db_clear = ntb_epf_db_clear,
+ .link_disable = ntb_epf_link_disable,
+};
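These ops are reached through the generic NTB client API; a sketch of a client (with a bound struct ntb_dev *ntb) exercising two of them:

	/* Dispatches to ntb_epf_peer_spad_write() */
	ntb_peer_spad_write(ntb, NTB_DEF_PEER_IDX, 0, 0xdeadbeef);

	/* Dispatches to ntb_epf_peer_db_set() */
	ntb_peer_db_set(ntb, BIT(0));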
+
+static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev,
+ struct pci_dev *pdev)
+{
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &ntb_epf_ops;
+}
+
+static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
+{
+ struct device *dev = ndev->dev;
+ int ret;
+
+ /* One link interrupt; the remaining vectors are doorbell interrupts */
+ ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1,
+ NTB_EPF_MAX_DB_COUNT + 1);
+ if (ret) {
+ dev_err(dev, "Failed to init ISR\n");
+ return ret;
+ }
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+ ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
+ ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+
+ return 0;
+}
+
+static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
+ struct pci_dev *pdev)
+{
+ struct device *dev = ndev->dev;
+ int ret;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, "ntb");
+ if (ret) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_pci_regions;
+ }
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ goto err_dma_mask;
+ }
+ dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+ }
+
+ ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
+ if (!ndev->ctrl_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
+ if (!ndev->db_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+
+ return 0;
+
+err_dma_mask:
+ pci_clear_master(pdev);
+
+err_pci_regions:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+
+ pci_iounmap(pdev, ndev->ctrl_reg);
+ pci_iounmap(pdev, ndev->peer_spad_reg);
+ pci_iounmap(pdev, ndev->db_reg);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ int i;
+
+ ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1);
+
+ for (i = 0; i < ndev->db_count + 1; i++)
+ free_irq(pci_irq_vector(pdev, i), ndev);
+ pci_free_irq_vectors(pdev);
+}
+
+static int ntb_epf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum pci_barno peer_spad_reg_bar = BAR_1;
+ enum pci_barno ctrl_reg_bar = BAR_0;
+ enum pci_barno db_reg_bar = BAR_2;
+ struct device *dev = &pdev->dev;
+ struct ntb_epf_data *data;
+ struct ntb_epf_dev *ndev;
+ int ret;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ data = (struct ntb_epf_data *)id->driver_data;
+ if (data) {
+ if (data->peer_spad_reg_bar)
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ if (data->ctrl_reg_bar)
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ if (data->db_reg_bar)
+ db_reg_bar = data->db_reg_bar;
+ }
+
+ ndev->peer_spad_reg_bar = peer_spad_reg_bar;
+ ndev->ctrl_reg_bar = ctrl_reg_bar;
+ ndev->db_reg_bar = db_reg_bar;
+ ndev->dev = dev;
+
+ ntb_epf_init_struct(ndev, pdev);
+ mutex_init(&ndev->cmd_lock);
+
+ ret = ntb_epf_init_pci(ndev, pdev);
+ if (ret) {
+ dev_err(dev, "Failed to init PCI\n");
+ return ret;
+ }
+
+ ret = ntb_epf_init_dev(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to init device\n");
+ goto err_init_dev;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ return 0;
+
+err_register_dev:
+ ntb_epf_cleanup_isr(ndev);
+
+err_init_dev:
+ ntb_epf_deinit_pci(ndev);
+
+ return ret;
+}
+
+static void ntb_epf_pci_remove(struct pci_dev *pdev)
+{
+ struct ntb_epf_dev *ndev = pci_get_drvdata(pdev);
+
+ ntb_unregister_device(&ndev->ntb);
+ ntb_epf_cleanup_isr(ndev);
+ ntb_epf_deinit_pci(ndev);
+}
+
+static const struct ntb_epf_data j721e_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_1,
+ .db_reg_bar = BAR_2,
+};
+
+static const struct pci_device_id ntb_epf_pci_tbl[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { },
+};
+
+static struct pci_driver ntb_epf_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ntb_epf_pci_tbl,
+ .probe = ntb_epf_pci_probe,
+ .remove = ntb_epf_pci_remove,
+};
+module_pci_driver(ntb_epf_pci_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 25d17b8a1a1a..41512243ec0f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -675,6 +675,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
return NULL;
}
+EXPORT_SYMBOL_GPL(of_msi_get_domain);
/**
* of_msi_configure - Set the msi_domain field of a device
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 5d30564190e1..54464c9b3b29 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,16 +4,16 @@ menu "Cadence PCIe controllers support"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
@@ -43,10 +43,10 @@ config PCIE_CADENCE_PLAT_EP
different vendors SoCs.
config PCI_J721E
- bool
+ tristate
config PCI_J721E_HOST
- bool "TI J721E PCIe platform host controller"
+ tristate "TI J721E PCIe platform host controller"
depends on OF
select PCIE_CADENCE_HOST
select PCI_J721E
@@ -56,7 +56,7 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe platform endpoint controller"
+ tristate "TI J721E PCIe platform endpoint controller"
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE_EP
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 8a6d68e13f30..8949f11fed31 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -6,12 +6,15 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
@@ -27,6 +30,17 @@
#define LINK_DOWN BIT(1)
#define J7200_LINK_DOWN BIT(10)
+#define EOI_REG 0x10
+
+#define ENABLE_REG_SYS_0 0x100
+#define STATUS_REG_SYS_0 0x500
+#define STATUS_CLR_REG_SYS_0 0x700
+#define INTx_EN(num) (1 << (num))
+
+#define ENABLE_REG_SYS_1 0x104
+#define STATUS_REG_SYS_1 0x504
+#define SYS1_INTx_EN(num) (1 << (22 + (num)))
+
#define J721E_PCIE_USER_CMD_STATUS 0x4
#define LINK_TRAINING_ENABLE BIT(0)
@@ -40,21 +54,30 @@ enum link_status {
LINK_UP_DL_COMPLETED,
};
+#define USER_EOI_REG 0xC8
+enum eoi_reg {
+ EOI_DOWNSTREAM_INTERRUPT,
+ EOI_FLR_INTERRUPT,
+ EOI_LEGACY_INTERRUPT,
+ EOI_POWER_STATE_INTERRUPT,
+};
+
#define J721E_MODE_RC BIT(7)
-#define LANE_COUNT_MASK BIT(8)
#define LANE_COUNT(n) ((n) << 8)
#define GENERATION_SEL_MASK GENMASK(1, 0)
-#define MAX_LANES 2
-
struct j721e_pcie {
struct device *dev;
+ struct clk *refclk;
u32 mode;
+ u32 max_lanes;
u32 num_lanes;
struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
+ struct irq_domain *legacy_irq_domain;
+ bool is_intc_v1;
u32 linkdown_irq_regfield;
};
@@ -65,10 +88,12 @@ enum j721e_pcie_mode {
struct j721e_pcie_data {
enum j721e_pcie_mode mode;
+ bool is_intc_v1;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
+ unsigned int max_lanes;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -118,6 +143,117 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}
+static void j721e_pcie_legacy_irq_handler(struct irq_desc *desc)
+{
+ struct j721e_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int virq;
+ u32 reg;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_1);
+ if (!(reg & SYS1_INTx_EN(i)))
+ continue;
+
+ virq = irq_find_mapping(pcie->legacy_irq_domain, i);
+ generic_handle_irq(virq);
+ j721e_pcie_user_writel(pcie, USER_EOI_REG,
+ EOI_LEGACY_INTERRUPT);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void j721e_pcie_v1_legacy_irq_handler(struct irq_desc *desc)
+{
+ struct j721e_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int virq, i;
+ u32 reg;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_0);
+ if (!(reg & INTx_EN(i)))
+ continue;
+
+ virq = irq_find_mapping(pcie->legacy_irq_domain, 3 - i);
+ generic_handle_irq(virq);
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_0, INTx_EN(i));
+ j721e_pcie_intd_writel(pcie, EOI_REG, 3 - i);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int j721e_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops j721e_pcie_intx_domain_ops = {
+ .map = j721e_pcie_intx_map,
+};
+
+static int j721e_pcie_config_legacy_irq(struct j721e_pcie *pcie)
+{
+ struct irq_domain *legacy_irq_domain;
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *intc_node;
+ int irq, i;
+ u32 reg;
+
+ intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!intc_node) {
+ dev_WARN(dev, "interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq = irq_of_parse_and_map(intc_node, 0);
+ if (!irq) {
+ dev_err(dev, "Failed to parse and map legacy irq\n");
+ return -EINVAL;
+ }
+
+ if (pcie->is_intc_v1)
+ irq_set_chained_handler_and_data(irq, j721e_pcie_v1_legacy_irq_handler, pcie);
+ else
+ irq_set_chained_handler_and_data(irq, j721e_pcie_legacy_irq_handler, pcie);
+
+ legacy_irq_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &j721e_pcie_intx_domain_ops, NULL);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+ pcie->legacy_irq_domain = legacy_irq_domain;
+
+ if (pcie->is_intc_v1) {
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_0);
+ reg |= INTx_EN(i);
+ j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_0, reg);
+ }
+ } else {
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_1);
+ reg |= SYS1_INTx_EN(i);
+ j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_1, reg);
+ }
+ }
+
+ return 0;
+}
+
static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie)
{
struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
@@ -159,7 +295,14 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
.link_up = j721e_pcie_link_up,
};
-static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
+static const struct cdns_pcie_ops j7200_pcie_ops = {
+ .start_link = j721e_pcie_start_link,
+ .stop_link = j721e_pcie_stop_link,
+ .link_up = j721e_pcie_link_up,
+};
+
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
+ unsigned int offset)
{
struct device *dev = pcie->dev;
u32 mask = J721E_MODE_RC;
@@ -170,7 +313,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
if (mode == PCI_MODE_RC)
val = J721E_MODE_RC;
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set pcie mode\n");
@@ -178,7 +321,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
}
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
- struct regmap *syscon)
+ struct regmap *syscon, unsigned int offset)
{
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
@@ -191,7 +334,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
link_speed = 2;
val = link_speed - 1;
- ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val);
+ ret = regmap_update_bits(syscon, offset, GENERATION_SEL_MASK, val);
if (ret)
dev_err(dev, "failed to set link speed\n");
@@ -199,15 +342,19 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
}
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
- struct regmap *syscon)
+ struct regmap *syscon, unsigned int offset)
{
struct device *dev = pcie->dev;
u32 lanes = pcie->num_lanes;
+ u32 mask = GENMASK(8, 8);
u32 val = 0;
int ret;
+ if (pcie->max_lanes == 4)
+ mask = GENMASK(9, 8);
+
val = LANE_COUNT(lanes - 1);
- ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set link count\n");
@@ -218,6 +365,8 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
int ret;
@@ -227,19 +376,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return PTR_ERR(syscon);
}
- ret = j721e_pcie_set_mode(pcie, syscon);
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-pcie-ctrl", 1,
+ 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
return ret;
}
- ret = j721e_pcie_set_link_speed(pcie, syscon);
+ ret = j721e_pcie_set_link_speed(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set link speed\n");
return ret;
}
- ret = j721e_pcie_set_lane_count(pcie, syscon);
+ ret = j721e_pcie_set_lane_count(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set num-lanes\n");
return ret;
@@ -277,36 +432,59 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
+ .is_intc_v1 = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_detect_quiet_flag = true,
- .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .is_intc_v1 = false,
.byte_access_allowed = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
.mode = PCI_MODE_RC,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct j721e_pcie_data am64_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 1,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
+ .is_intc_v1 = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 4,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -334,6 +512,14 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,am64-pcie-ep",
.data = &am64_pcie_ep_data,
},
+ {
+ .compatible = "ti,j784s4-pcie-host",
+ .data = &j784s4_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j784s4-pcie-ep",
+ .data = &j784s4_pcie_ep_data,
+ },
{},
};
@@ -349,6 +535,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct cdns_pcie_ep *ep;
struct gpio_desc *gpiod;
void __iomem *base;
+ struct clk *clk;
u32 num_lanes;
u32 mode;
int ret;
@@ -366,6 +553,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
pcie->mode = mode;
+ pcie->is_intc_v1 = data->is_intc_v1;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
@@ -379,8 +567,18 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->user_cfg_base = base;
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > MAX_LANES)
+ if (ret) {
+ dev_warn(dev, "no num-lanes defined, defaulting to 1\n");
+ num_lanes = 1;
+ }
+
+ if (num_lanes > data->max_lanes) {
+ dev_warn(dev, "defined num-lanes %d is greater than the "
+ "allowed maximum of %d, defaulting to 1\n",
+ num_lanes, data->max_lanes);
num_lanes = 1;
+ }
+ pcie->max_lanes = data->max_lanes;
pcie->num_lanes = num_lanes;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
@@ -420,6 +618,10 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ ret = j721e_pcie_config_legacy_irq(pcie);
+ if (ret < 0)
+ goto err_get_sync;
+
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
if (!bridge) {
ret = -ENOMEM;
@@ -451,6 +653,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
+ clk = devm_clk_get_optional(dev, "pcie_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get pcie_refclk\n");
+ ret = PTR_ERR(clk);
+ goto err_pcie_setup;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pcie_refclk\n");
+ goto err_get_sync;
+ }
+ pcie->refclk = clk;
+
/*
* "Power Sequencing and Reset Signal Timings" table in
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
@@ -465,8 +681,10 @@ static int j721e_pcie_probe(struct platform_device *pdev)
}
ret = cdns_pcie_host_setup(rc);
- if (ret < 0)
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
goto err_pcie_setup;
+ }
break;
case PCI_MODE_EP:
@@ -520,6 +738,10 @@ static int j721e_pcie_remove(struct platform_device *pdev)
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ if (pcie->legacy_irq_domain)
+ irq_domain_remove(pcie->legacy_irq_domain);
+
+ clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -536,4 +758,7 @@ static struct platform_driver j721e_pcie_driver = {
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 4c5e6349d78c..5370a717045a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
@@ -16,11 +17,22 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_dbg(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return 0;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -47,16 +59,18 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
dma_addr_t bar_phys = epf_bar->phys_addr;
enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u32 first_vf_offset, stride;
u64 sz;
/* BAR size is 2^(aperture + 7) */
@@ -92,19 +106,39 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1) {
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ } else {
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ }
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
+ }
+
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
cfg = cdns_pcie_readl(pcie, reg);
cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
@@ -117,21 +151,42 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
enum pci_barno bar = epf_bar->barno;
+ u32 first_vf_offset, stride;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
+ if (vfn == 1) {
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
} else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ }
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
}
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
@@ -147,13 +202,23 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 r;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
@@ -168,7 +233,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -188,13 +253,23 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u16 flags;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
@@ -208,13 +283,23 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u16 flags, mme;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -229,13 +314,23 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return mme;
}
-static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 val, reg;
+ if (vfunc_no > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap
+ + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ func_no = func_no + first_vf_offset + ((vfunc_no - 1) * stride);
+ }
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
@@ -246,14 +341,25 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 val, reg;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
@@ -273,8 +379,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
return 0;
}
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
unsigned long flags;
@@ -316,7 +422,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -333,14 +440,24 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u32 first_vf_offset, stride;
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -381,19 +498,93 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
+{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ struct cdns_pcie *pcie = &ep->pcie;
+ u64 pci_addr, pci_addr_mask = 0xff;
+ u16 flags, mme, data, data_mask;
+ u32 first_vf_offset, stride;
+ u8 msi_count;
+ int ret;
+ int i;
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = data & ~data_mask;
+
+ /* Get the PCI address where to write the data into. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ for (i = 0; i < interrupt_num; i++) {
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
+ pci_addr & ~pci_addr_mask,
+ entry_size);
+ if (ret)
+ return ret;
+ addr = addr + entry_size;
+ }
+
+ *msi_data = data;
+ *msi_addr_offset = pci_addr & pci_addr_mask;
+
+ return 0;
+}
+
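The vfn remapping repeated in these callbacks follows the SR-IOV Routing ID algebra; a worked example with illustrative capability values (the real code reads them from the PF's SR-IOV capability):

	/* PF fn = 0, First VF Offset = 4, VF Stride = 1:
	 *   VF1: 0 + 4 + (1 - 1) * 1 = function 4
	 *   VF2: 0 + 4 + (2 - 1) * 1 = function 5
	 * The VF's registers are then programmed through the same
	 * per-function register window as a physical function.
	 */
	fn = fn + first_vf_offset + ((vfn - 1) * stride);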
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
+ u32 first_vf_offset, stride;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
u16 flags;
u8 bir;
+ epf = &ep->epf[fn];
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
+ }
+
/* Check whether the MSI-X feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
if (!(flags & PCI_MSIX_FLAGS_ENABLE))
@@ -404,7 +595,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
bir = tbl_offset & PCI_MSIX_TABLE_BIR;
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf = &ep->epf[fn];
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
@@ -426,21 +616,27 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -454,18 +650,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- struct pci_epf *epf;
- u32 cfg;
int ret;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
- cfg = BIT(0);
- list_for_each_entry(epf, &epc->pci_epf, list)
- cfg |= BIT(epf->func_no);
- cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
ret = cdns_pcie_start_link(pcie);
if (ret) {
@@ -476,16 +667,27 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
+ .align = 256,
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
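
The align field added above (64 KiB for VFs, 256 bytes for PFs, reflecting the controller's BAR granularity) is what endpoint function drivers are expected to consult when sizing BARs. A sketch of the usual pattern, assuming the generic pci_epc_get_features() accessor:

/* Sketch: round a requested BAR size up to the controller's alignment. */
static size_t example_bar_size(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       size_t requested)
{
	const struct pci_epc_features *features;

	features = pci_epc_get_features(epc, func_no, vfunc_no);
	if (features && features->align)
		requested = ALIGN(requested, features->align);

	return requested;
}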
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -499,6 +701,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.set_msix = cdns_pcie_ep_set_msix,
.get_msix = cdns_pcie_ep_get_msix,
.raise_irq = cdns_pcie_ep_raise_irq,
+ .map_msi_irq = cdns_pcie_ep_map_msi_irq,
.start = cdns_pcie_ep_start,
.get_features = cdns_pcie_ep_get_features,
};
@@ -510,9 +713,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
@@ -529,12 +734,9 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
}
pcie->mem_res = res;
- ret = of_property_read_u32(np, "cdns,max-outbound-regions",
- &ep->max_regions);
- if (ret < 0) {
- dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
- return ret;
- }
+ ep->max_regions = CDNS_PCIE_MAX_OB;
+ of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);
+
ep->ob_addr = devm_kcalloc(dev,
ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
@@ -560,6 +762,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (!ep->epf)
return -ENOMEM;
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
@@ -590,3 +811,6 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index fb96d37a135c..0a79e553e226 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -70,6 +71,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -532,3 +534,6 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 52767f26048f..ae3deb8293b8 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,6 +4,7 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
#include "pcie-cadence.h"
@@ -22,6 +23,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -99,6 +101,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -133,6 +136,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -145,6 +149,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -155,6 +160,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -242,6 +248,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
@@ -272,3 +279,5 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index e0b59730bffb..56fe4f843bcd 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -50,6 +50,10 @@
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
@@ -114,6 +118,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
* Root Port Registers (PCI configuration space for the root port function)
@@ -205,6 +210,7 @@ enum cdns_pcie_rp_bar {
};
#define CDNS_PCIE_RP_MAX_IB 0x3
+#define CDNS_PCIE_MAX_OB 32
struct cdns_pcie_rp_ib_bar {
u64 size;
@@ -314,9 +320,11 @@ struct cdns_pcie_rc {
/**
* struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
* @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
*/
struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -491,7 +499,7 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
@@ -508,7 +516,7 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
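
The same #ifdef-to-IS_ENABLED() conversion is applied to the endpoint conditional just below; it matters once the core can be built as a module, because a plain #ifdef only sees the built-in case. A minimal illustration with a hypothetical CONFIG_FOO:

/*
 * With CONFIG_FOO=m the preprocessor defines CONFIG_FOO_MODULE, not
 * CONFIG_FOO, so the two tests differ:
 */
#ifdef CONFIG_FOO		/* true only for CONFIG_FOO=y */
#endif

#if IS_ENABLED(CONFIG_FOO)	/* true for CONFIG_FOO=y and CONFIG_FOO=m */
#endif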
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 90482d5246ff..7d56c0608d5e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -35,6 +35,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -69,6 +74,7 @@
#define IRQ_STATUS(n) (0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define IRQ_ENABLE_CLR(n) (0x18c + ((n) << 4))
#define INTx_EN BIT(0)
#define ERR_IRQ_STATUS 0x1c4
@@ -105,6 +111,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -116,15 +124,14 @@ struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int legacy_host_irqs[PCI_NUM_INTX];
struct device_node *legacy_intc_np;
+ struct irq_domain *legacy_irq_domain;
int msi_host_irq;
int num_lanes;
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
struct device_node *np;
/* Application register space */
@@ -252,26 +259,6 @@ static int ks_pcie_msi_host_init(struct pcie_port *pp)
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
- int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
- u32 pending;
- int virq;
-
- pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
-
- if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
- }
-
- /* EOI the INTx interrupt */
- ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
-}
-
/*
* Dummy function so that DW core doesn't configure MSI
*/
@@ -317,39 +304,143 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
return IRQ_HANDLED;
}
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_am654_legacy_irq_handler(struct irq_desc *desc)
+{
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int virq, i;
+ u32 reg;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ reg = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(i));
+ if (!(reg & INTx_EN))
+ continue;
+
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, i);
+ generic_handle_irq(virq);
+ ks_pcie_app_writel(ks_pcie, IRQ_STATUS(i), INTx_EN);
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, i);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ks_pcie_irq_eoi(struct irq_data *data)
{
+ struct keystone_pcie *ks_pcie = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = data->hwirq;
+
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, hwirq);
+ irq_chip_eoi_parent(data);
}
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_irq_enable(struct irq_data *data)
{
+ struct keystone_pcie *ks_pcie = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = data->hwirq;
+
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(hwirq), INTx_EN);
+ irq_chip_enable_parent(data);
}
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_irq_disable(struct irq_data *data)
{
+ struct keystone_pcie *ks_pcie = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = data->hwirq;
+
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_CLR(hwirq), INTx_EN);
+ irq_chip_disable_parent(data);
}
static struct irq_chip ks_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_pcie_ack_legacy_irq,
- .irq_mask = ks_pcie_mask_legacy_irq,
- .irq_unmask = ks_pcie_unmask_legacy_irq,
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_enable = ks_pcie_irq_enable,
+ .irq_disable = ks_pcie_irq_disable,
+ .irq_eoi = ks_pcie_irq_eoi,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
};
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq,
- irq_hw_number_t hw_irq)
+static int ks_pcie_legacy_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
{
- irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, d->host_data);
+ struct keystone_pcie *ks_pcie = domain->host_data;
+ struct device_node *np = ks_pcie->legacy_intc_np;
+ struct irq_fwspec parent_fwspec, *fwspec = data;
+ struct of_phandle_args out_irq;
+ int ret;
+
+ if (nr_irqs != 1)
+ return -EINVAL;
+
+ /*
+ * Get the parent interrupt for INTA/INTB/INTC/INTD (passed in
+ * fwspec->param[0]) by applying the mapping specified in the
+ * "interrupt-map" property of the legacy-interrupt-controller node.
+ * For example, with interrupt-map = <0 0 0 1 &pcie_intc0 0>, INTA
+ * (the 4th cell) maps to the parent interrupt specifier given in the
+ * remaining cells after the &pcie_intc0 phandle.
+ */
+ ret = of_irq_parse_one(np, fwspec->param[0], &out_irq);
+ if (ret < 0) {
+ pr_err("Failed to parse interrupt node\n");
+ return ret;
+ }
+
+ of_phandle_args_to_fwspec(np, out_irq.args, out_irq.args_count, &parent_fwspec);
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+ if (ret < 0) {
+ pr_err("Failed to allocate parent IRQ %u: %d\n",
+ parent_fwspec.param[0], ret);
+ return ret;
+ }
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
+ &ks_pcie_legacy_irq_chip, ks_pcie);
+ if (ret < 0) {
+ pr_err("Failed to set hwirq and chip\n");
+ goto err_set_hwirq_and_chip;
+ }
return 0;
+
+err_set_hwirq_and_chip:
+ irq_domain_free_irqs_parent(domain, virq, 1);
+
+ return ret;
+}
+
+static int ks_pcie_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ if (fwspec->param[0] >= PCI_NUM_INTX)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ return 0;
+ }
+
+ return -EINVAL;
}
static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
- .map = ks_pcie_init_legacy_irq_map,
- .xlate = irq_domain_xlate_onetwocell,
+ .alloc = ks_pcie_legacy_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = ks_pcie_irq_domain_translate,
};
/**
@@ -439,6 +530,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
if (!pci_is_root_bus(bus->parent))
@@ -537,7 +639,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
@@ -549,9 +655,20 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
- if (pci_is_root_bus(bus))
+ if (pci_is_root_bus(bus)) {
bridge = dev;
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ struct resource *r = &dev->resource[0];
+
+ r->flags |= IORESOURCE_UNSET;
+ }
+ }
/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
@@ -574,6 +691,32 @@ static void ks_pcie_quirk(struct pci_dev *dev)
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 Bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
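
Since RTL is already defined as a GENMASK(), the open-coded mask-and-shift in the quirk could equally be written with FIELD_GET() from <linux/bitfield.h>; a sketch of the equivalent idiom (the helper is illustrative, not part of this patch):

#include <linux/bitfield.h>

/* Equivalent to "val &= RTL; val >>= RTL_SHIFT;" in the quirk above. */
static bool example_is_am6_pg1(struct keystone_pcie *ks_pcie)
{
	u32 val = ks_pcie_app_readl(ks_pcie, PID);

	return FIELD_GET(RTL, val) == AM6_PCI_PG1_RTL_VER;
}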
@@ -616,35 +759,6 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-/**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
- * @irq: IRQ line for legacy interrupts
- * @desc: Pointer to irq descriptor
- *
- * Traverse through pending legacy interrupts and invoke handler for each. Also
- * takes care of interrupt controller level mask/ack operation.
- */
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
-{
- unsigned int irq = irq_desc_get_irq(desc);
- struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
- u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- dev_dbg(dev, ": Handling legacy irq %d\n", irq);
-
- /*
- * The chained irq handler installation would have replaced normal
- * interrupt driver handler so we need to take care of mask/unmask and
- * ack operation.
- */
- chained_irq_enter(chip, desc);
- ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
- chained_irq_exit(chip, desc);
-}
-
static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
struct device *dev = ks_pcie->pci->dev;
@@ -699,25 +813,86 @@ err:
return ret;
}
+static int ks_pcie_am654_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_am654_irq_domain_ops = {
+ .map = ks_pcie_am654_intx_map,
+};
+
+static int ks_pcie_am654_config_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int ret = 0;
+ int irq;
+ int i;
+
+ intc_np = of_get_child_by_name(np, "interrupt-controller");
+ if (!intc_np) {
+ dev_warn(dev, "legacy interrupt-controller node is absent\n");
+ return -EINVAL;
+ }
+
+ irq = irq_of_parse_and_map(intc_np, 0);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_chained_handler_and_data(irq, ks_pcie_am654_legacy_irq_handler, ks_pcie);
+ legacy_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_am654_irq_domain_ops, ks_pcie);
+ if (!legacy_irq_domain) {
+ dev_err(dev, "Failed to add IRQ domain for legacy IRQS\n");
+ return -EINVAL;
+ }
+ ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+ return ret;
+}
+
static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
struct device *dev = ks_pcie->pci->dev;
struct irq_domain *legacy_irq_domain;
struct device_node *np = ks_pcie->np;
+ struct irq_domain *parent_domain;
+ struct device_node *parent_node;
struct device_node *intc_np;
- int irq_count, irq, ret = 0, i;
+ int irq_count, ret = 0;
- intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ intc_np = of_get_child_by_name(np, "interrupt-controller");
if (!intc_np) {
- /*
- * Since legacy interrupts are modeled as edge-interrupts in
- * AM6, keep it disabled for now.
- */
- if (ks_pcie->is_am6)
- return 0;
dev_warn(dev, "legacy-interrupt-controller node is absent\n");
return -EINVAL;
}
+ ks_pcie->legacy_intc_np = intc_np;
+
+ parent_node = of_irq_find_parent(intc_np);
+ if (!parent_node) {
+ dev_err(dev, "Unable to obtain parent node\n");
+ ret = -ENXIO;
+ goto err;
+ }
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain) {
+ dev_err(dev, "Unable to obtain parent domain\n");
+ ret = -ENXIO;
+ goto err;
+ }
+
+ of_node_put(parent_node);
irq_count = of_irq_count(intc_np);
if (!irq_count) {
@@ -726,31 +901,13 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
goto err;
}
- for (i = 0; i < irq_count; i++) {
- irq = irq_of_parse_and_map(intc_np, i);
- if (!irq) {
- ret = -EINVAL;
- goto err;
- }
- ks_pcie->legacy_host_irqs[i] = irq;
-
- irq_set_chained_handler_and_data(irq,
- ks_pcie_legacy_irq_handler,
- ks_pcie);
- }
-
- legacy_irq_domain =
- irq_domain_add_linear(intc_np, PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops, NULL);
+ legacy_irq_domain = irq_domain_add_hierarchy(parent_domain, 0, PCI_NUM_INTX, intc_np,
+ &ks_pcie_legacy_irq_domain_ops, ks_pcie);
if (!legacy_irq_domain) {
dev_err(dev, "Failed to add irq domain for legacy irqs\n");
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_irq_domain = legacy_irq_domain;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
err:
of_node_put(intc_np);
@@ -811,10 +968,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
int ret;
pp->bridge->ops = &ks_pcie_ops;
- if (!ks_pcie->is_am6)
+
+ if (!ks_pcie->is_am6) {
pp->bridge->child_ops = &ks_child_pcie_ops;
+ ret = ks_pcie_config_legacy_irq(ks_pcie);
+ } else {
+ ret = ks_pcie_am654_config_legacy_irq(ks_pcie);
+ }
- ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
return ret;
@@ -1110,6 +1271,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
.version = 0x365A,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 95ed719402d7..fb7f3bf673d9 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -124,7 +124,7 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -201,7 +201,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -216,7 +216,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -274,7 +274,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -290,9 +290,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -307,7 +306,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -331,7 +330,8 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -356,7 +356,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -380,8 +380,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -416,7 +416,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -450,7 +450,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -525,14 +525,14 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -593,14 +593,14 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
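
With every pci_epc_ops callback now taking vfunc_no next to func_no, endpoint function drivers pass both through the pci_epc_* wrappers, using 0 for a physical function. A sketch of the call shape (assuming, as elsewhere in this series, that struct pci_epf carries a vfunc_no; the buffer parameters are illustrative):

/* Sketch: map a host buffer from an endpoint function driver. */
static int example_epf_map(struct pci_epf *epf, phys_addr_t phys,
			   u64 pci_addr, size_t size)
{
	/* vfunc_no is 0 for a physical function, 1..N for its VFs */
	return pci_epc_map_addr(epf->epc, epf->func_no, epf->vfunc_no,
				phys, pci_addr, size);
}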
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index c91d85b15129..aa1cf24a5a72 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -159,7 +159,7 @@ static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
return 0;
}
-static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -195,7 +195,7 @@ static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
@@ -246,7 +246,7 @@ static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
return 0;
}
-static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -259,7 +259,8 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -272,7 +273,7 @@ static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
return 0;
}
-static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -285,7 +286,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
-static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr, size_t size)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -322,7 +323,7 @@ static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -403,7 +404,7 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
return 0;
}
-static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -451,7 +452,7 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
};
static const struct pci_epc_features*
-rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rcar_pcie_epc_features;
}
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 379cde59988c..d1a200b93b2b 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -122,7 +122,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
-static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -159,7 +159,7 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -227,7 +227,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -256,7 +256,7 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
-static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
@@ -283,7 +283,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -307,7 +307,7 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
u8 multi_msg_cap)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -328,7 +328,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -470,7 +470,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -509,7 +509,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
};
static const struct pci_epc_features*
-rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rockchip_pcie_epc_features;
}
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 8820d0f7ec77..5f1242ca2f4e 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -12,3 +12,16 @@ config PCI_EPF_TEST
for PCI Endpoint.
If in doubt, say "N" to disable Endpoint test driver.
+
+config PCI_EPF_NTB
+ tristate "PCI Endpoint NTB driver"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCI Endpoint. NTB driver implements NTB
+ controller functionality using multiple PCIe endpoint instances.
+ It can support NTB endpoint function devices created using
+ device tree.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index d6fafff080e2..96ab932a537a 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
+obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
new file mode 100644
index 000000000000..73714080a166
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -0,0 +1,2145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * The PCI NTB function driver configures the SoC with multiple PCIe Endpoint
+ * (EP) controller instances (see diagram below) in such a way that
+ * transactions from one EP controller are routed to the other EP controller.
+ * Once PCI NTB function driver configures the SoC with multiple EP instances,
+ * HOST1 and HOST2 can communicate with each other using SoC as a bridge.
+ *
+ * +-------------+ +-------------+
+ * | | | |
+ * | HOST1 | | HOST2 |
+ * | | | |
+ * +------^------+ +------^------+
+ * | |
+ * | |
+ * +---------|-------------------------------------------------|---------+
+ * | +------v------+ +------v------+ |
+ * | | | | | |
+ * | | EP | | EP | |
+ * | | CONTROLLER1 | | CONTROLLER2 | |
+ * | | <-----------------------------------> | |
+ * | | | | | |
+ * | | | | | |
+ * | | | SoC With Multiple EP Instances | | |
+ * | | | (Configured using NTB Function) | | |
+ * | +-------------+ +-------------+ |
+ * +---------------------------------------------------------------------+
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+};
+
+struct epf_ntb {
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ struct pci_epf *epf;
+ u64 mws_size[MAX_MW];
+ struct config_group group;
+ struct epf_ntb_epc *epc[2];
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+
+struct epf_ntb_epc {
+ u8 func_no;
+ u8 vfunc_no;
+ bool linkup;
+ bool is_msix;
+ int msix_bar;
+ u32 spad_size;
+ struct pci_epc *epc;
+ struct epf_ntb *epf_ntb;
+ void __iomem *mw_addr[6];
+ size_t msix_table_offset;
+ struct epf_ntb_ctrl *reg;
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno epf_ntb_bar[6];
+ struct delayed_work cmd_handler;
+ enum pci_epc_interface_type type;
+ const struct pci_epc_features *epc_features;
+};
+
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 mw1_offset;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
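
struct epf_ntb_ctrl is effectively the wire protocol between the host-side NTB driver and this function driver: the host programs the inputs (addr, size, argument), writes command, and polls command_status. A hedged sketch of the host's side of one COMMAND_CONFIGURE_MW transaction (the helper and its polling budget are assumptions; ctrl is the host's mapping of the BAR_CONFIG region):

static int example_host_configure_mw(struct epf_ntb_ctrl __iomem *ctrl,
				     u32 mw, u64 addr, u64 size)
{
	int timeout = 100;

	/* writeq assumes a 64-bit host; use lo_hi_writeq() otherwise */
	writeq(addr, &ctrl->addr);	/* PCI address of the window */
	writeq(size, &ctrl->size);
	writel(mw, &ctrl->argument);
	writel(COMMAND_CONFIGURE_MW, &ctrl->command);

	/* The endpoint's cmd_handler polls every 5 ms (see below) */
	while (timeout--) {
		if (readw(&ctrl->command_status) == COMMAND_STATUS_OK)
			return 0;
		usleep_range(5000, 6000);
	}

	return -ETIMEDOUT;
}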
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to both the hosts
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function in HOST1 and the NTB function in HOST2 invoke
+ * ntb_link_enable(), this NTB function driver will trigger a link event to
+ * the NTB clients in both hosts.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ enum pci_epc_interface_type type;
+ enum pci_epc_irq_type irq_type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ struct pci_epc *epc;
+ u8 func_no, vfunc_no;
+ bool is_msix;
+ int ret;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ is_msix = ntb_epc->is_msix;
+ ctrl = ntb_epc->reg;
+ if (link_up)
+ ctrl->link_status |= LINK_STATUS_UP;
+ else
+ ctrl->link_status &= ~LINK_STATUS_UP;
+ irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to raise Link Up IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
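
On the host side, the interrupt raised here is typically received by the counterpart NTB hardware driver, which forwards it to the NTB core via ntb_link_event() so that clients re-read the link state. A sketch of such a handler (names are illustrative; ntb_link_event() is the NTB core API):

static irqreturn_t example_host_link_irq(int irq, void *cookie)
{
	struct ntb_dev *ntb = cookie;

	/* Propagate the endpoint's link up/down event to NTB clients */
	ntb_link_event(ntb);

	return IRQ_HANDLED;
}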
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
+ * to access the memory window of the other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see MW1), i.e., maps
+ * the OB address space of the memory window to the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address in the outbound address space
+ * 2) Address in the PCI Address space
+ * 3) Size of the address region to be mapped
+ *
+ * The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
+ * stored in the epf_bar corresponding to BAR_DB_MW1 for MW1, and to BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for the remaining windows, of the epf_ntb_epc that is
+ * connected to HOST1. This
+ * is populated in epf_ntb_alloc_peer_mem() in this driver.
+ *
+ * The address and size of the PCI address region that has to be mapped would
+ * be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
+ * connected to HOST2.
+ *
+ * Please note that Memory Window 1 (MW1) and the Doorbell registers are
+ * mapped together to a single BAR (BAR2 above) for 32-bit BARs. The exact
+ * BAR used for a Memory Window (MW) can be obtained from
+ * epf_ntb_bar[BAR_DB_MW1], epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3] and
+ * epf_ntb_bar[BAR_MW4].
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ u64 addr, size;
+ int ret = 0;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ addr = ctrl->addr;
+ size = ctrl->size;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+
+ if (size > ntb->mws_size[mw]) {
+ dev_err(&epc->dev,
+ "%s intf: MW: %d Req Sz:%llxx > Supported Sz:%llx\n",
+ pci_epc_interface_string(type), mw, size,
+ ntb->mws_size[mw]);
+ ret = -EINVAL;
+ goto err_invalid_size;
+ }
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&epc->dev,
+ "%s intf: Failed to map memory window %d address\n",
+ pci_epc_interface_string(type), mw);
+
+err_invalid_size:
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
+}
+
+/**
+ * epf_ntb_configure_msi() - Map OB address space to MSI address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +---+-------> MSI ADDRESS |
+ *+-----------------+ | +----------------+ | +-----------------+
+ *| BAR1 | | | Doorbell 2 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR3 | | | Doorbell 4 +---+ | |
+ *+-----------------+ | |----------------+ | |
+ *| BAR4 | | | | | |
+ *+-----------------+ | | MW1 | | |
+ *| BAR5 | | | | | |
+ *+-----------------+ +----->-----------------+ | |
+ * EP CONTROLLER 1 | | | |
+ * | | | |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
+ * corresponding to the doorbell to the MSI address in the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for the doorbell in the outbound address space
+ * 2) MSI address in the PCI address space
+ * 3) Number of MSI interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * pci_epc_map_msi_irq() takes the MSI address from MSI capability register
+ * and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI
+ * address.
+ *
+ * epf_ntb_configure_msi() also stores the MSI data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msi(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u16 db_count)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ u32 db_entry_size, db_data, db_offset;
+ struct pci_epf_bar *peer_epf_bar;
+ struct epf_ntb_ctrl *peer_ctrl;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ peer_ctrl = peer_ntb_epc->reg;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
+ db_entry_size, &db_data, &db_offset);
+ if (ret) {
+ dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ for (i = 0; i < db_count; i++) {
+ peer_ctrl->db_data[i] = db_data | i;
+ peer_ctrl->db_offset[i] = db_offset;
+ }
+
+ return 0;
+}
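
Once db_data[] and db_offset[] are published in the peer's control region, ringing doorbell i from the host is a single MMIO write into the doorbell BAR, one db_entry_size slot per doorbell. A sketch of the host side (pointer names are illustrative):

static void example_host_ring_db(void __iomem *db_base,
				 struct epf_ntb_ctrl __iomem *ctrl, u32 i)
{
	u32 entry_size = readl(&ctrl->db_entry_size);
	u32 data = readl(&ctrl->db_data[i]);
	u32 offset = readl(&ctrl->db_offset[i]);

	/* Slot i of the doorbell region is mapped to MSI/MSI-X vector i */
	writel(data, db_base + i * entry_size + offset);
}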
+
+/**
+ * epf_ntb_configure_msix() - Map OB address space to MSI-X address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
+ *+-----------------+ | +----------------+ +-----------------+
+ *| BAR1 | | | Doorbell 2 +---------+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ *+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
+ *| BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ *+-----------------+ | |----------------+ | | | |
+ *| BAR4 | | | | | | +-----------------+
+ *+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3||
+ *| BAR5 | | | | | +-----------------+
+ *+-----------------+ +----->-----------------+ | | |
+ * EP CONTROLLER 1 | | | +-----------------+
+ * | | +---->+ MSI-X ADDRESS 4 |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
+ * corresponding to the doorbell to the MSI-X address in the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for the doorbell in the outbound address space
+ * 2) MSI-X address in the PCI address space
+ * 3) Number of MSI-X interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and the doorbell
+ * count is in ctrl->argument of the epf_ntb_epc that is connected to HOST2.
+ * The MSI-X table is memory mapped in the BAR indicated by ntb_epc->msix_bar,
+ * at the offset in ntb_epc->msix_table_offset; from there
+ * epf_ntb_configure_msix() gets the MSI-X address and data.
+ *
+ * epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msix(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct epf_ntb_ctrl *peer_ctrl;
+ u32 db_entry_size, msg_data;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ size_t align;
+ u64 msg_addr;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar];
+ msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ peer_ctrl = peer_ntb_epc->reg;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ for (i = 0; i < db_count; i++) {
+ msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
+ msg_data = msix_tbl[i].msg_data;
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
+ db_entry_size);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to configure MSI-X IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ phys_addr = phys_addr + db_entry_size;
+ peer_ctrl->db_data[i] = msg_data;
+ peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1);
+ }
+ ntb_epc->is_msix = true;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_db() - Configure the Outbound Address Space for one host
+ * to ring the doorbell of the other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbells that have to be configured
+ * @msix: Indicates whether MSI-X or MSI should be used
+ *
+ * Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi() as required for
+ * one HOST to ring the doorbell of the other HOST.
+ */
+static int epf_ntb_configure_db(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count, bool msix)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ int ret;
+
+ if (db_count > MAX_DB_COUNT)
+ return -EINVAL;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ if (msix)
+ ret = epf_ntb_configure_msix(ntb, type, db_count);
+ else
+ ret = epf_ntb_configure_msi(ntb, type, db_count);
+
+ if (ret)
+ dev_err(&epc->dev, "%s intf: Failed to configure DB\n",
+ pci_epc_interface_string(type));
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_db() - Unmap the OB address space mapped to the MSI/MSI-X
+ * address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Invoke pci_epc_unmap_addr() to unmap the OB address previously mapped to
+ * the MSI/MSI-X address by epf_ntb_configure_db().
+ */
+static void
+epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY)
+ *
+ * Workqueue function that gets invoked for the two epf_ntb_epc
+ * periodically (once every 5 ms) to see if it has received any commands
+ * from the NTB host. The host can send commands to configure a doorbell,
+ * configure a memory window, or update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ u16 db_count;
+ bool is_msix;
+ int ret;
+
+ ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
+ ctrl = ntb_epc->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ ctrl = ntb_epc->reg;
+ type = ntb_epc->type;
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ db_count = argument & DB_COUNT_MASK;
+ is_msix = argument & MSIX_ENABLE;
+ ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ epf_ntb_teardown_db(ntb, type);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, type, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, type, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb_epc->linkup = true;
+ if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
+ ntb->epc[SECONDARY_INTERFACE]->linkup) {
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ }
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_DOWN:
+ ntb_epc->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "%s intf UNKNOWN command: %d\n",
+ pci_epc_interface_string(type), command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
+ msecs_to_jiffies(5));
+}
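
Because the handler re-queues itself, it only has to be armed once per interface when the function device is set up, and cancelled on teardown; the expected lifecycle, sketched (the surrounding bind/unbind hooks are assumptions here):

static void example_start_cmd_handler(struct epf_ntb_epc *ntb_epc)
{
	INIT_DELAYED_WORK(&ntb_epc->cmd_handler, epf_ntb_cmd_handler);
	queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
			   msecs_to_jiffies(5));
}

static void example_stop_cmd_handler(struct epf_ntb_epc *ntb_epc)
{
	cancel_delayed_work_sync(&ntb_epc->cmd_handler);
}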
+
+/**
+ * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
+ * @ntb_epc: EPC associated with the host whose peer scratchpad BAR is cleared
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Set BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ enum pci_barno peer_barno, barno;
+ u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ epc = ntb_epc->epc;
+
+ peer_spad_offset = peer_ntb_epc->reg->spad_offset;
+ epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
+ epf_bar->size = peer_ntb_epc->spad_size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ return 0;
+}
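+
+/*
+ * Editor's worked example (not part of this patch): if HOST2's config/SPAD
+ * allocation starts at physical address P and its reg->spad_offset is 1024,
+ * the peer SPAD BAR set above for HOST1 starts at P + 1024. A write by
+ * HOST1 to offset 8 of that BAR therefore lands in HOST2's self scratchpad
+ * word 2 (with 4-byte scratchpads).
+ */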
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb_epc: NTB EPC interface whose config + self scratchpad BAR has to be
+ * cleared
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR. Also note HOST2's
+ * peer scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb_epc: NTB EPC interface whose config + self scratchpad BAR has to be
+ * set
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region. While BAR0 is the default self scratchpad BAR, an
+ * NTB could have other BARs for self scratchpad (because of reserved BARs).
+ * This function can get the exact BAR used for self scratchpad from
+ * epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR. Also note HOST2's
+ * peer scratchpad is HOST1's self scratchpad.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct epf_ntb *ntb;
+ struct pci_epc *epc;
+ struct device *dev;
+ int ret;
+
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
+ pci_epc_interface_string(ntb_epc->type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Free the Local Memory mentioned in the above diagram. After invoking this
+ * function, any of config + self scratchpad region of HOST1 or peer scratchpad
+ * region of HOST2 should not be accessed.
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ struct pci_epf *epf;
+
+ epf = ntb->epf;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ if (ntb_epc->reg)
+ pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+ }
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ *
+ * The sizes of both the config region and the scratchpad region have to be
+ * aligned, since the scratchpad region will also be mapped as the PEER
+ * SCRATCHPAD of the other host using a separate BAR.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *peer_epc_features, *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ size_t msix_table_size, pba_size, align;
+ enum pci_barno peer_barno, barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size, peer_size;
+ struct pci_epf *epf;
+ struct device *dev;
+ bool msix_capable;
+ u32 spad_count;
+ void *base;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ ntb_epc = ntb->epc[type];
+
+ epc_features = ntb_epc->epc_features;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_epc_features = peer_ntb_epc->epc_features;
+ peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+
+ /* Check if epc_features is populated incorrectly */
+ if ((!IS_ALIGNED(size, align)))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = spad_count * 4;
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count;
+ ctrl_size = ALIGN(ctrl_size, 8);
+ ntb_epc->msix_table_offset = ctrl_size;
+ ntb_epc->msix_bar = barno;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8);
+ ctrl_size = ctrl_size + msix_table_size + pba_size;
+ }
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (peer_size) {
+ if (peer_size < spad_size)
+ spad_count = peer_size / 4;
+ spad_size = peer_size;
+ }
+
+ /*
+ * In order to make sure SPAD offset is aligned to its size,
+ * expand control region size to the size of SPAD if SPAD size
+ * is greater than control region size.
+ */
+ if (spad_size > ctrl_size)
+ ctrl_size = spad_size;
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, type);
+ if (!base) {
+ dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
+ pci_epc_interface_string(type));
+ return -ENOMEM;
+ }
+
+ ntb_epc->reg = base;
+
+ ctrl = ntb_epc->reg;
+ ctrl->spad_offset = ctrl_size;
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ctrl->db_entry_size = align ? align : 4;
+ ntb_epc->spad_size = spad_size;
+
+ return 0;
+}
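+
+/*
+ * Editor's worked example (not part of this patch): with spad_count = 64,
+ * db_count = 32, an MSI-X capable EPC and align == 0, the math above gives
+ * (assuming sizeof(struct epf_ntb_ctrl) rounds to 64 bytes):
+ *
+ *   ctrl_size = 64 + 32 * PCI_MSIX_ENTRY_SIZE (16) + 8 (PBA) = 584
+ *             -> roundup_pow_of_two(584) = 1024
+ *   spad_size = 64 * 4 = 256 -> roundup_pow_of_two(256) = 256
+ *
+ * so the BAR is sized to 1024 + 256 = 1280 bytes, with the scratchpads
+ * starting at ctrl->spad_offset == 1024.
+ */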
+
+/**
+ * epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config +
+ * scratchpad region for each of PRIMARY and SECONDARY interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_config_spad_bar_alloc() that allocates the config +
+ * scratchpad region for each of the PRIMARY and SECONDARY interfaces.
+ */
+static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_config_spad_bar_alloc(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_free_peer_mem() - Free memory allocated in peer's outbound address
+ * space
+ * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
+ * outbound address regions
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram.
+ * It'll free Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3,
+ * MW4).
+ */
+static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ phys_addr_t phys_addr;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ size_t size;
+
+ epc = ntb_epc->epc;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ mw_addr = ntb_epc->mw_addr[barno];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ phys_addr = epf_bar->phys_addr;
+ size = epf_bar->size;
+ if (mw_addr) {
+ pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
+ ntb_epc->mw_addr[barno] = NULL;
+ }
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_clear() - Clear doorbell and memory BAR
+ * @ntb_epc: EPC associated with one of the HOSTs which holds the peer's
+ * outbound address
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Clear doorbell and memory BARs (remove inbound ATU configuration). In the
+ * above diagram it clears BAR2 to BAR5 of EP CONTROLLER 1 (Doorbell BAR,
+ * MW1 BAR, MW2 BAR, MW3 BAR and MW4 BAR).
+ */
+static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory BAR and free memory
+ * allocated in peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_db_mw_bar_clear() to clear HOST1's BARs and for
+ * epf_ntb_free_peer_mem() which frees up HOST2's outbound memory.
+ */
+static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ epf_ntb_db_mw_bar_clear(ntb_epc);
+ epf_ntb_free_peer_mem(peer_ntb_epc);
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ bool msix_capable, msi_capable;
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ dev = &ntb->epf->dev;
+
+ epc_features = ntb_epc->epc_features;
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+
+ if (!(msix_capable || msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+ epc = ntb_epc->epc;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
+ if (ret) {
+ dev_err(dev, "%s intf: MSI configuration failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
+ ntb_epc->msix_bar,
+ ntb_epc->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
+ * @dev: PCI EPF device, used here for error logging
+ * @ntb_epc: EPC associated with one of the HOSTs whose BAR holds peer's
+ * outbound address
+ * @bar: BAR of @ntb_epc for which memory has to be allocated (could be
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4)
+ * @peer_ntb_epc: EPC associated with the HOST whose outbound address space is
+ * used by @ntb_epc
+ * @size: Size of the address region that has to be allocated in peer's
+ * OB SPACE
+ *
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Allocate memory in OB space of EP CONTROLLER 2 in the above diagram. Allocate
+ * for Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3, MW4).
+ */
+static int epf_ntb_alloc_peer_mem(struct device *dev,
+ struct epf_ntb_epc *ntb_epc,
+ enum epf_ntb_bar bar,
+ struct epf_ntb_epc *peer_ntb_epc,
+ size_t size)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *peer_epc;
+ phys_addr_t phys_addr;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t align;
+
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ peer_epc = peer_ntb_epc->epc;
+ mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
+ if (!mw_addr) {
+ dev_err(dev, "%s intf: Failed to allocate OB address\n",
+ pci_epc_interface_string(peer_ntb_epc->type));
+ return -ENOMEM;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ ntb_epc->mw_addr[barno] = mw_addr;
+
+ epf_bar->phys_addr = phys_addr;
+ epf_bar->size = size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return 0;
+}
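+
+/*
+ * Editor's note (not part of this patch): the rounding above means even a
+ * 4-byte doorbell region consumes a 128-byte outbound window:
+ *
+ *   size = 4   -> max(4, 128) = 128 -> roundup_pow_of_two(128) = 128
+ *   size = 200 -> roundup_pow_of_two(200) = 256     (align == 0)
+ *   size = 200 -> ALIGN(200, 64)          = 256     (align == 64)
+ */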
+
+/**
+ * epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates
+ * memory in OB address space of HOST2 and configures BAR of HOST1
+ */
+static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_ctrl *ctrl;
+ u32 num_mws, db_count;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ size_t align;
+ int ret, i;
+ u64 size;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ epc = ntb_epc->epc;
+ num_mws = ntb->num_mws;
+ db_count = ntb->db_count;
+
+ for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
+ if (bar == BAR_DB_MW1) {
+ align = align ? align : 4;
+ size = db_count * align;
+ size = ALIGN(size, ntb->mws_size[i]);
+ ctrl = ntb_epc->reg;
+ ctrl->mw1_offset = size;
+ size += ntb->mws_size[i];
+ } else {
+ size = ntb->mws_size[i];
+ }
+
+ ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
+ peer_ntb_epc, size);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell mem alloc failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell BAR set failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+ }
+
+ return 0;
+
+err_alloc_peer_mem:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+ return ret;
+}
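+
+/*
+ * Editor's worked example (not part of this patch): for db_count = 4,
+ * align == 0 (so 4-byte doorbell entries) and mws_size[0] = 1 MiB, the
+ * shared BAR_DB_MW1 region built above is laid out as:
+ *
+ *   doorbells : 4 * 4 = 16 bytes, padded to ALIGN(16, 1 MiB) = 1 MiB
+ *   ctrl->mw1_offset = 1 MiB
+ *   total size       = 2 MiB (1 MiB doorbell padding + 1 MiB MW1)
+ *
+ * i.e. MW1 always starts at an mws_size[0]-aligned offset inside the BAR.
+ */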
+
+/**
+ * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Unbind the NTB function device from the EPC and relinquish the reference
+ * to pci_epc for each interface.
+ */
+static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ if (type < 0)
+ return;
+
+ epf = ntb->epf;
+ ntb_epc = ntb->epc[type];
+ if (!ntb_epc)
+ return;
+ epc = ntb_epc->epc;
+ pci_epc_remove_epf(epc, epf, type);
+ pci_epc_put(epc);
+}
+
+/**
+ * epf_ntb_epc_destroy() - Clean up all NTB EPC interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_epc_destroy_interface() to clean up all the NTB
+ * interfaces.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_destroy_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @epc: struct pci_epc to which a particular NTB interface should be associated
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Allocate memory for NTB EPC interface and initialize it.
+ */
+static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
+ struct pci_epc *epc,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epf *epf;
+ struct device *dev;
+
+ dev = &ntb->epf->dev;
+
+ ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
+ if (!ntb_epc)
+ return -ENOMEM;
+
+ epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ epf_bar = epf->bar;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ ntb_epc->linkup = false;
+ ntb_epc->epc = epc;
+ ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
+ ntb_epc->type = type;
+ ntb_epc->epf_bar = epf_bar;
+ ntb_epc->epf_ntb = ntb;
+
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+ ntb_epc->epc_features = epc_features;
+
+ ntb->epc[type] = ntb_epc;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_create() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Get a reference to the EPC device and bind the NTB function device to it
+ * for each interface. Wraps epf_ntb_epc_create_interface() to allocate and
+ * initialize the per-interface NTB EPC data.
+ */
+static int epf_ntb_epc_create(struct epf_ntb *ntb)
+{
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE);
+ if (ret) {
+ dev_err(dev, "PRIMARY intf: Fail to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
+ SECONDARY_INTERFACE);
+ if (ret) {
+ dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n");
+ goto err_epc_create;
+ }
+
+ return 0;
+
+err_epc_create:
+ epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of
+ * the NTB constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD,
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4.
+ */
+static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ ntb_epc = ntb->epc[type];
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "%s intf: Fail to get NTB function BAR\n",
+ pci_epc_interface_string(type));
+ return barno;
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ /* Stop scanning; memory windows beyond this point are dropped */
+ break;
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
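+
+/*
+ * Editor's worked example (not part of this patch): on an EPC whose
+ * epc_features reserve BAR_1 (and with num_mws = 4), the scan above yields
+ *
+ *   BAR_CONFIG    -> BAR_0
+ *   BAR_PEER_SPAD -> BAR_2
+ *   BAR_DB_MW1    -> BAR_3
+ *   BAR_MW2       -> BAR_4
+ *   BAR_MW3       -> BAR_5
+ *   BAR_MW4       -> no BAR left, so num_mws is clamped to 3.
+ */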
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs
+ * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for all the interfaces.
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_init_epc_bar_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "Fail to init EPC bar for %s interface\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init_interface() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * that checks for commands from the host. This function writes to the
+ * EP controller HW in order to configure it.
+ */
+static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ ret = epf_ntb_peer_spad_bar_set(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Interrupt configuration failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_db_mw_bar_init(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: DB/MW BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_db_mw_bar_init;
+ }
+
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
+
+ INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+err_db_mw_bar_init:
+ epf_ntb_peer_spad_bar_clear(ntb->epc[type]);
+
+err_peer_spad_bar_init:
+ epf_ntb_config_sspad_bar_clear(ntb->epc[type]);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup_interface() - Cleanup NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to clean up a particular NTB interface.
+ */
+static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+
+ if (type < 0)
+ return;
+
+ ntb_epc = ntb->epc[type];
+ cancel_delayed_work(&ntb_epc->cmd_handler);
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+ epf_ntb_peer_spad_bar_clear(ntb_epc);
+ epf_ntb_config_sspad_bar_clear(ntb_epc);
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to clean up all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_cleanup_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to initialize all NTB interfaces and start the workqueue
+ * that checks for commands from the host.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_epc_init_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Failed to initialize\n",
+ pci_epc_interface_string(type));
+ goto err_init_type;
+ }
+ }
+
+ return 0;
+
+err_init_type:
+ epf_ntb_epc_cleanup_interface(ntb, type - 1);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize both endpoint controllers associated with the NTB function
+ * device. Invoked when a primary or secondary interface is bound to an EPC
+ * device. This function succeeds only once an EPC is bound to both
+ * interfaces.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ if (!epf->sec_epc) {
+ dev_dbg(dev, "SECONDARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_epc_create(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int win_no; \
+ \
+ sscanf(#_name, "mw%d", &win_no); \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (ntb->num_mws < win_no) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
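+
+/*
+ * Editor's note (not part of this patch): the macros above stamp out one
+ * accessor per memory window; e.g. EPF_NTB_MW_W(mw2) expands to
+ * epf_ntb_mw2_store(), whose sscanf("mw2", "mw%d", &win_no) yields
+ * win_no = 2 so that the written value lands in ntb->mws_size[1].
+ */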
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: Parent configfs group under which the NTB directory is added
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count and num_mws.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
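+
+/*
+ * Editor's usage sketch (not part of this patch): once an NTB function is
+ * created, the attributes registered above appear as configfs files; the
+ * exact paths depend on the configfs mount point and function name, e.g.:
+ *
+ *   # echo 64 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/spad_count
+ *   # echo 4  > functions/pci_epf_ntb/func1/pci_epf_ntb.0/num_mws
+ *   # echo 0x100000 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/mw1
+ */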
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe the NTB function driver when the endpoint function bus detects an
+ * NTB endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+}
+
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_ntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_ntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ if (!kpcintb_workqueue) {
+ pr_err("Failed to allocate kpcintb workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index ef52f5097eb3..5f478f59b31f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -247,8 +247,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -263,8 +263,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
- ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -301,13 +301,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -324,12 +324,12 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
u32 crc32;
bool use_dma;
phys_addr_t phys_addr;
+ struct device *dma_dev;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -341,8 +341,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -363,6 +363,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err_dma_map;
}
+ dma_dev = dmaengine_get_dma_device(epf_test->dma_chan);
dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dst_phys_addr)) {
@@ -396,7 +397,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -412,12 +413,12 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
void *buf;
bool use_dma;
phys_addr_t phys_addr;
+ struct device *dma_dev;
phys_addr_t src_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -429,8 +430,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -454,6 +455,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err_dma_map;
}
+ dma_dev = dmaengine_get_dma_device(epf_test->dma_chan);
src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, src_phys_addr)) {
@@ -489,7 +491,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -511,13 +513,16 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
switch (irq_type) {
case IRQ_TYPE_LEGACY:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -552,7 +557,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@@ -590,22 +596,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- count = pci_epc_get_msi(epc, epf->func_no);
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
- count = pci_epc_get_msix(epc, epf->func_no);
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
goto reset_handler;
}
@@ -627,8 +633,10 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
}
}
}
@@ -658,9 +666,11 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
if (ret) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -681,13 +691,13 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
- ret = pci_epc_write_header(epc, epf->func_no, header);
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
if (ret) {
dev_err(dev, "Configuration header write failed\n");
return ret;
@@ -698,7 +708,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return ret;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
@@ -706,7 +717,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
@@ -780,7 +792,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align);
+ epc_features->align, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
@@ -798,7 +810,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align);
+ epc_features->align,
+ PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -838,7 +851,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 3710adf51912..999911801877 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -21,6 +21,9 @@ static struct config_group *controllers_group;
struct pci_epf_group {
struct config_group group;
+ struct config_group primary_epc_group;
+ struct config_group secondary_epc_group;
+ struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
};
@@ -41,6 +44,127 @@ static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
return container_of(to_config_group(item), struct pci_epc_group, group);
}
+static int pci_secondary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, SECONDARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_secondary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_secondary_epc_item_ops = {
+ .allow_link = pci_secondary_epc_epf_link,
+ .drop_link = pci_secondary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_secondary_epc_type = {
+ .ct_item_ops = &pci_secondary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_secondary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *secondary_epc_group;
+
+ secondary_epc_group = &epf_group->secondary_epc_group;
+ config_group_init_type_name(secondary_epc_group, "secondary",
+ &pci_secondary_epc_type);
+ configfs_register_group(&epf_group->group, secondary_epc_group);
+
+ return secondary_epc_group;
+}
+
+static int pci_primary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_primary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_primary_epc_item_ops = {
+ .allow_link = pci_primary_epc_epf_link,
+ .drop_link = pci_primary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_primary_epc_type = {
+ .ct_item_ops = &pci_primary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_primary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *primary_epc_group = &epf_group->primary_epc_group;
+
+ config_group_init_type_name(primary_epc_group, "primary",
+ &pci_primary_epc_type);
+ configfs_register_group(&epf_group->group, primary_epc_group);
+
+ return primary_epc_group;
+}
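+
+/*
+ * Editor's usage sketch (not part of this patch): with the "primary" and
+ * "secondary" sub-directories registered above, user space wires an NTB
+ * function to its two controllers roughly as (names assumed):
+ *
+ *   # ln -s controllers/<epc1> functions/pci_epf_ntb/func1/primary
+ *   # ln -s controllers/<epc2> functions/pci_epf_ntb/func1/secondary
+ */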
+
static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
size_t len)
{
@@ -94,13 +218,13 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- ret = pci_epc_add_epf(epc, epf);
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
if (ret)
return ret;
ret = pci_epf_bind(epf);
if (ret) {
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
return ret;
}
@@ -120,7 +244,7 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
pci_epf_unbind(epf);
- pci_epc_remove_epf(epc, epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
}
static struct configfs_item_operations pci_epc_item_ops = {
@@ -351,6 +475,28 @@ static struct configfs_attribute *pci_epf_attrs[] = {
NULL,
};
+static int pci_epf_vepf_link(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ return pci_epf_add_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_vepf_unlink(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ pci_epf_remove_vepf(epf_pf, epf_vf);
+}
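+
+/*
+ * Editor's usage sketch (not part of this patch): per the allow_link hook
+ * above, a virtual function is associated with its physical function by
+ * symlinking the virtual function's EPF directory into the physical
+ * function's directory (names assumed):
+ *
+ *   # ln -s functions/pci_epf_ntb/vfunc1 functions/pci_epf_ntb/pfunc1/
+ */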
+
static void pci_epf_release(struct config_item *item)
{
struct pci_epf_group *epf_group = to_pci_epf_group(item);
@@ -363,15 +509,58 @@ static void pci_epf_release(struct config_item *item)
}
static struct configfs_item_operations pci_epf_ops = {
+ .allow_link = pci_epf_vepf_link,
+ .drop_link = pci_epf_vepf_unlink,
.release = pci_epf_release,
};
+static struct config_group *pci_epf_type_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(&group->cg_item);
+ struct config_group *epf_type_group;
+
+ epf_type_group = pci_epf_type_add_cfs(epf_group->epf, group);
+ return epf_type_group;
+}
+
+static void pci_epf_type_drop(struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_type_group_ops = {
+ .make_group = &pci_epf_type_make,
+ .drop_item = &pci_epf_type_drop,
+};
+
static const struct config_item_type pci_epf_type = {
+ .ct_group_ops = &pci_epf_type_group_ops,
.ct_item_ops = &pci_epf_ops,
.ct_attrs = pci_epf_attrs,
.ct_owner = THIS_MODULE,
};
+static void pci_epf_cfs_work(struct work_struct *work)
+{
+ struct pci_epf_group *epf_group;
+ struct config_group *group;
+
+ epf_group = container_of(work, struct pci_epf_group, cfs_work.work);
+ group = pci_ep_cfs_add_primary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'primary' EPC interface\n");
+ return;
+ }
+
+ group = pci_ep_cfs_add_secondary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'secondary' EPC interface\n");
+ return;
+ }
+}
+
static struct config_group *pci_epf_make(struct config_group *group,
const char *name)
{
@@ -410,10 +599,15 @@ static struct config_group *pci_epf_make(struct config_group *group,
goto free_name;
}
+ epf->group = &epf_group->group;
epf_group->epf = epf;
kfree(epf_name);
+ INIT_DELAYED_WORK(&epf_group->cfs_work, pci_epf_cfs_work);
+ queue_delayed_work(system_wq, &epf_group->cfs_work,
+ msecs_to_jiffies(1));
+
return &epf_group->group;
free_name:
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index ea7e7465ce7a..23594165fa68 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -137,24 +137,29 @@ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
* @epc: the features supported by *this* EPC device will be returned
* @func_no: the features supported by the EPC device specific to the
* endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
*
* Invoke to get the features provided by the EPC which may be
* specific to an endpoint function. Returns pci_epc_features on success
* and NULL for any failures.
*/
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no)
+ u8 func_no, u8 vfunc_no)
{
const struct pci_epc_features *epc_features;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
if (!epc->ops->get_features)
return NULL;
mutex_lock(&epc->lock);
- epc_features = epc->ops->get_features(epc, func_no);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
return epc_features;
@@ -205,13 +210,14 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
 * Invoke to raise a legacy, MSI or MSI-X interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
@@ -219,11 +225,14 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->raise_irq)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
mutex_unlock(&epc->lock);
return ret;
@@ -231,24 +240,74 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
/**
+ * pci_epc_map_msi_irq() - Map physical address to MSI address and return
+ * MSI data
+ * @epc: the EPC device which has the MSI capability
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @phys_addr: the physical address of the outbound region
+ * @interrupt_num: the MSI interrupt number
+ * @entry_size: Size of the outbound address region for each interrupt
+ * @msi_data: the data that should be written in order to raise the MSI
+ *	       interrupt with interrupt number 'interrupt_num'
+ * @msi_addr_offset: Offset of MSI address from the aligned outbound address
+ * to which the MSI address is mapped
+ *
+ * Invoke to map a physical address to the MSI address and return the MSI
+ * data. The physical address should be an address in the outbound region.
+ * This is required to implement the doorbell functionality of NTB, wherein
+ * the EPC on either side of the interface (primary and secondary) can
+ * directly write to the physical address (in the outbound region) of the
+ * other interface to ring its doorbell.
+ */
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->map_msi_irq)
+ return -EINVAL;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
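
As an illustration of how an NTB-style function driver might consume this helper, the sketch below maps an outbound window onto the peer's MSI address and rings a doorbell by writing the returned data at the returned offset. Everything except pci_epc_map_msi_irq() itself (including the helper name and the entry_size of 0) is hypothetical, not part of this patch:

/*
 * Hypothetical doorbell helper: db_base is the CPU mapping of the outbound
 * window whose bus address is db_phys; both are assumed to have been set
 * up by the caller through the EPC outbound-window API.
 */
static int ntb_ring_peer_doorbell(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  void __iomem *db_base, phys_addr_t db_phys,
				  u8 irq)
{
	u32 msi_data, msi_addr_offset;
	int ret;

	ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, db_phys, irq,
				  0, &msi_data, &msi_addr_offset);
	if (ret)
		return ret;

	/* Writing msi_data at the mapped MSI address raises MSI 'irq'. */
	writel(msi_data, db_base + msi_addr_offset);

	return 0;
}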
+
+/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msi)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msi(epc, func_no);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -263,12 +322,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
int ret;
u8 encode_int;
@@ -277,13 +337,16 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
interrupts > 32)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msi)
return 0;
encode_int = order_base_2(interrupts);
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
mutex_unlock(&epc->lock);
return ret;
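
The MSI capability advertises the vector count as a power of two, which is why the helper hands order_base_2(interrupts) to the controller driver rather than the raw count. As a worked example (illustration, not patch text), pci_epc_set_msi(epc, fn, vfn, 16) passes down encode_int = 4, meaning 2^4 = 16 vectors.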
@@ -293,22 +356,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msix)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msix(epc, func_no);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -321,15 +388,16 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
* @epc: the EPC device on which MSI-X has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
int ret;
@@ -337,11 +405,15 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
interrupts < 1 || interrupts > 2048)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
mutex_unlock(&epc->lock);
return ret;
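
MSI-X, by contrast, encodes the table size as N - 1, hence the interrupts - 1 passed to the controller driver above: requesting 8 vectors, for instance, programs 7 into the Table Size field (a worked example for illustration).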
@@ -351,22 +423,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->unmap_addr)
return;
mutex_lock(&epc->lock);
- epc->ops->unmap_addr(epc, func_no, phys_addr);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -374,14 +450,15 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map CPU address with PCI address.
*/
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
@@ -389,11 +466,15 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_addr)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
mutex_unlock(&epc->lock);
return ret;
@@ -403,12 +484,13 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -416,11 +498,14 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->clear_bar)
return;
mutex_lock(&epc->lock);
- epc->ops->clear_bar(epc, func_no, epf_bar);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -428,12 +513,13 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -448,11 +534,14 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_bar)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
return ret;
@@ -462,7 +551,8 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -470,7 +560,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *header)
{
int ret;
@@ -478,11 +568,14 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->write_header)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->write_header(epc, func_no, header);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
mutex_unlock(&epc->lock);
return ret;
@@ -493,21 +586,28 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
* pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
* @epc: the EPC device to which the endpoint function should be added
* @epf: the endpoint function to be added
+ * @type: Identifies if the EPC is connected to the primary or secondary
+ *	   interface of the EPF
*
* A PCI endpoint device can have one or more functions. In the case of PCIe,
* the specification allows up to 8 PCIe endpoint functions. Invoke
* pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
*/
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
u32 func_no;
int ret = 0;
- if (epf->epc)
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
+ return -EINVAL;
+
+ if (type == PRIMARY_INTERFACE && epf->epc)
return -EBUSY;
- if (IS_ERR(epc))
- return -EINVAL;
+ if (type == SECONDARY_INTERFACE && epf->sec_epc)
+ return -EBUSY;
mutex_lock(&epc->lock);
func_no = find_first_zero_bit(&epc->function_num_map,
@@ -524,11 +624,17 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
}
set_bit(func_no, &epc->function_num_map);
- epf->func_no = func_no;
- epf->epc = epc;
-
- list_add_tail(&epf->list, &epc->pci_epf);
+ if (type == PRIMARY_INTERFACE) {
+ epf->func_no = func_no;
+ epf->epc = epc;
+ list = &epf->list;
+ } else {
+ epf->sec_epc_func_no = func_no;
+ epf->sec_epc = epc;
+ list = &epf->sec_epc_list;
+ }
+ list_add_tail(list, &epc->pci_epf);
ret:
mutex_unlock(&epc->lock);
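
A minimal sketch of how the primary and secondary links come together for one EPF, assuming both controllers were already obtained (for example via pci_epc_get(); the helper name is hypothetical, not from this patch):

static int attach_epf_both(struct pci_epf *epf, struct pci_epc *epc_pri,
			   struct pci_epc *epc_sec)
{
	int ret;

	ret = pci_epc_add_epf(epc_pri, epf, PRIMARY_INTERFACE);
	if (ret)
		return ret;

	ret = pci_epc_add_epf(epc_sec, epf, SECONDARY_INTERFACE);
	if (ret)
		/* Roll back the primary attachment on failure. */
		pci_epc_remove_epf(epc_pri, epf, PRIMARY_INTERFACE);

	return ret;
}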
@@ -543,14 +649,26 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*
* Invoke to remove PCI endpoint function from the endpoint controller.
*/
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
{
+ struct list_head *list;
+ u32 func_no = 0;
+
if (!epc || IS_ERR(epc) || !epf)
return;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ list = &epf->list;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ list = &epf->sec_epc_list;
+ }
+
mutex_lock(&epc->lock);
- clear_bit(epf->func_no, &epc->function_num_map);
- list_del(&epf->list);
+ clear_bit(func_no, &epc->function_num_map);
+ list_del(list);
epf->epc = NULL;
mutex_unlock(&epc->lock);
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index c977cf9dce56..ff39b54e5d05 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,6 +21,38 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
+ * pci_epf_type_add_cfs() - Help function drivers to expose function-specific
+ * attributes in configfs
+ * @epf: the EPF device that has to be configured using configfs
+ * @group: the parent configfs group (corresponding to entries in
+ * pci_epf_device_id)
+ *
+ * Invoke to expose function-specific attributes in configfs. If the function
+ * driver has no user-configurable attributes to expose, this returns NULL.
+ */
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct config_group *epf_type_group;
+
+ if (!epf->driver) {
+ dev_err(&epf->dev, "epf device not bound to driver\n");
+ return NULL;
+ }
+
+ if (!epf->driver->ops->add_cfs)
+ return NULL;
+
+ mutex_lock(&epf->lock);
+ epf_type_group = epf->driver->ops->add_cfs(epf, group);
+ mutex_unlock(&epf->lock);
+
+ return epf_type_group;
+}
+EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
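
A function driver opts into this hook by implementing ->add_cfs in its struct pci_epf_ops. A minimal sketch, where epf_ntb, its group member, ntb_group_type and the drvdata layout are assumed names rather than code from this patch:

static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
					    struct config_group *group)
{
	struct epf_ntb *ntb = epf_get_drvdata(epf);

	/* Expose the driver's own attribute group under the EPF directory. */
	config_group_init_type_name(&ntb->group, "epf_ntb_attrs",
				    &ntb_group_type);

	return &ntb->group;
}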
+
+/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -30,13 +62,20 @@ static const struct device_type pci_epf_type;
*/
void pci_epf_unbind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return;
}
mutex_lock(&epf->lock);
- epf->driver->ops->unbind(epf);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
@@ -51,10 +90,14 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct device *dev = &epf->dev;
+ struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
int ret;
if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ dev_WARN(dev, "epf device not bound to driver\n");
return -EINVAL;
}
@@ -62,36 +105,174 @@ int pci_epf_bind(struct pci_epf *epf)
return -EAGAIN;
mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
+ mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+	if (vfunc_no >= BITS_PER_LONG) {
+		mutex_unlock(&epf_pf->lock);
+		return -EINVAL;
+	}
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
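
In kernel code the association boils down to one call per virtual function; a sketch under assumed names (the strings passed to pci_epf_create() and the helper itself are illustrative):

static int epf_make_vf_pair(void)
{
	struct pci_epf *pf, *vf;

	pf = pci_epf_create("pci_epf_test.0");
	if (IS_ERR(pf))
		return PTR_ERR(pf);

	vf = pci_epf_create("pci_epf_test.1");
	if (IS_ERR(vf))
		return PTR_ERR(vf);

	/* vfunc_no is drawn from vfunction_num_map and starts at 1. */
	return pci_epf_add_vepf(pf, vf);
}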
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
+/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
* @bar: the BAR number corresponding to the register space
+ * @type: Identifies if the allocated space is for primary EPC or secondary EPC
*
* Invoke to free the allocated PCI EPF register space.
*/
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type)
{
struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc;
if (!addr)
return;
- dma_free_coherent(dev, epf->bar[bar].size, addr,
- epf->bar[bar].phys_addr);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
+ dma_free_coherent(dev, epf_bar[bar].size, addr,
+ epf_bar[bar].phys_addr);
- epf->bar[bar].phys_addr = 0;
- epf->bar[bar].addr = NULL;
- epf->bar[bar].size = 0;
- epf->bar[bar].barno = 0;
- epf->bar[bar].flags = 0;
+ epf_bar[bar].phys_addr = 0;
+ epf_bar[bar].addr = NULL;
+ epf_bar[bar].size = 0;
+ epf_bar[bar].barno = 0;
+ epf_bar[bar].flags = 0;
}
EXPORT_SYMBOL_GPL(pci_epf_free_space);
@@ -101,15 +282,18 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
* @align: alignment size for the allocation region
+ * @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align)
+ size_t align, enum pci_epc_interface_type type)
{
- void *space;
- struct device *dev = epf->epc->dev.parent;
+ struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
+ struct pci_epc *epc;
+ struct device *dev;
+ void *space;
if (size < 128)
size = 128;
@@ -119,17 +303,26 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
else
size = roundup_pow_of_two(size);
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
}
- epf->bar[bar].phys_addr = phys_addr;
- epf->bar[bar].addr = space;
- epf->bar[bar].size = size;
- epf->bar[bar].barno = bar;
- epf->bar[bar].flags |= upper_32_bits(size) ?
+ epf_bar[bar].phys_addr = phys_addr;
+ epf_bar[bar].addr = space;
+ epf_bar[bar].size = size;
+ epf_bar[bar].barno = bar;
+ epf_bar[bar].flags |= upper_32_bits(size) ?
PCI_BASE_ADDRESS_MEM_TYPE_64 :
PCI_BASE_ADDRESS_MEM_TYPE_32;
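
With the added type parameter a function driver can back BARs on either side; a minimal sketch assuming a 1 MiB BAR0 on the secondary interface (size and helper name are illustrative):

static int sec_bar0_setup(struct pci_epf *epf, size_t align)
{
	void *base;

	base = pci_epf_alloc_space(epf, SZ_1M, BAR_0, align,
				   SECONDARY_INTERFACE);
	if (!base)
		return -ENOMEM;

	/*
	 * The backing details land in epf->sec_epc_bar[BAR_0]; release with
	 * pci_epf_free_space(epf, base, BAR_0, SECONDARY_INTERFACE) on
	 * teardown.
	 */
	return 0;
}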
@@ -260,6 +453,10 @@ struct pci_epf *pci_epf_create(const char *name)
return ERR_PTR(-ENOMEM);
}
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
dev = &epf->dev;
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
@@ -282,22 +479,6 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf)
-{
- if (!id || !epf)
- return NULL;
-
- while (*id->name) {
- if (strcmp(epf->name, id->name) == 0)
- return id;
- id++;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_epf_match_device);
-
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index e01d53f5b32f..afa50b446567 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -23,6 +23,7 @@ struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
return to_pci_host_bridge(root_bus->bridge);
}
+EXPORT_SYMBOL_GPL(pci_find_host_bridge);
struct device *pci_get_host_bridge_device(struct pci_dev *dev)
{
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 9ed5f167a9f3..371d9fdf5494 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -61,6 +61,16 @@ config USB_LGM_PHY
interface to interact with USB GEN-II and USB 3.x PHY that is part
of the Intel network SOC.
+config PHY_CAN_TRANSCEIVER
+ tristate "CAN transceiver PHY"
+ select GENERIC_PHY
+ select MULTIPLEXER
+ help
+	  This option enables support for CAN transceivers as a PHY. The
+	  driver provides functions for putting the transceivers into various
+	  functional modes using GPIOs and sets the max-link-rate attribute
+	  for CAN drivers.
+
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 6eb2916773c5..2ccb73df22cb 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
+obj-$(CONFIG_PHY_CAN_TRANSCEIVER) += phy-can-transceiver.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index 432832bdbd16..a62910ff5591 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -7,6 +7,7 @@ config PHY_CADENCE_TORRENT
tristate "Cadence Torrent PHY driver"
depends on OF
depends on HAS_IOMEM
+ depends on COMMON_CLK
select GENERIC_PHY
help
Support for Cadence Torrent PHY.
@@ -24,6 +25,7 @@ config PHY_CADENCE_DPHY
config PHY_CADENCE_SIERRA
tristate "Cadence Sierra PHY Driver"
depends on OF && HAS_IOMEM && RESET_CONTROLLER
+ depends on COMMON_CLK
select GENERIC_PHY
help
Enable this to support the Cadence Sierra PHY driver
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index 90c4e9b5aac8..68b871111e9d 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -1,14 +1,18 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright: 2017-2018 Cadence Design Systems, Inc.
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -17,6 +21,7 @@
#define REG_WAKEUP_TIME_NS 800
#define DPHY_PLL_RATE_HZ 108000000
+#define POLL_TIMEOUT_US 1000
/* DPHY registers */
#define DPHY_PMA_CMN(reg) (reg)
@@ -25,10 +30,15 @@
#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
#define DPHY_PCS(reg) (0xb00 + (reg))
+#define DPHY_ISO(reg) (0xc00 + (reg))
+#define DPHY_WRAP(reg) (0x1000 + (reg))
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_RX_BANDGAP_TIMER_MASK GENMASK(8, 1)
#define DPHY_CMN_TX_MODE_EN BIT(9)
+#define DPHY_CMN_RX_MODE_EN BIT(10)
+#define DPHY_CMN_RX_BANDGAP_TIMER 0x14
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
@@ -45,10 +55,30 @@
#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
#define DPHY_CMN_OPDIV(x) ((x) << 7)
+#define DPHY_BAND_CFG DPHY_PCS(0x0)
+#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
+#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
+
#define DPHY_PSM_CFG DPHY_PCS(0x4)
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
+#define DPHY_POWER_ISLAND_EN_DATA DPHY_PCS(0x8)
+#define DPHY_POWER_ISLAND_EN_DATA_VAL 0xaaaaaaaa
+#define DPHY_POWER_ISLAND_EN_CLK DPHY_PCS(0xc)
+#define DPHY_POWER_ISLAND_EN_CLK_VAL 0xaa
+
+#define DPHY_LANE DPHY_WRAP(0x0)
+#define DPHY_LANE_RESET_CMN_EN BIT(23)
+
+#define DPHY_ISO_CL_CTRL_L DPHY_ISO(0x10)
+#define DPHY_ISO_DL_CTRL_L0 DPHY_ISO(0x14)
+#define DPHY_ISO_DL_CTRL_L1 DPHY_ISO(0x20)
+#define DPHY_ISO_DL_CTRL_L2 DPHY_ISO(0x30)
+#define DPHY_ISO_DL_CTRL_L3 DPHY_ISO(0x3c)
+#define DPHY_ISO_LANE_READY_BIT 0
+#define DPHY_ISO_LANE_READY_TIMEOUT_MS 100UL
+
#define DSI_HBP_FRAME_OVERHEAD 12
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
@@ -57,6 +87,21 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+#define DPHY_LANES_MIN 1
+#define DPHY_LANES_MAX 4
+
+#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
+#define DPHY_TX_J721E_WIZ_STATUS 0xF08
+#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
+#define DPHY_TX_J721E_WIZ_PSM_FREQ 0xF10
+
+#define DPHY_TX_J721E_WIZ_IPDIV GENMASK(4, 0)
+#define DPHY_TX_J721E_WIZ_OPDIV GENMASK(13, 8)
+#define DPHY_TX_J721E_WIZ_FBDIV GENMASK(25, 16)
+#define DPHY_TX_J721E_WIZ_LANE_RSTB BIT(31)
+#define DPHY_TX_WIZ_PLL_LOCK BIT(31)
+#define DPHY_TX_WIZ_O_CMN_READY BIT(31)
+
struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
@@ -75,6 +120,11 @@ struct cdns_dphy;
struct cdns_dphy_ops {
int (*probe)(struct cdns_dphy *dphy);
void (*remove)(struct cdns_dphy *dphy);
+ int (*power_on)(struct cdns_dphy *dphy);
+ int (*power_off)(struct cdns_dphy *dphy);
+ int (*validate)(struct cdns_dphy *dphy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts);
+ int (*configure)(struct cdns_dphy *dphy, union phy_configure_opts *opts);
void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
enum cdns_dphy_clk_lane_cfg cfg);
@@ -83,15 +133,51 @@ struct cdns_dphy_ops {
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
};
+struct cdns_dphy_soc_data {
+ bool has_hw_cmn_rstb;
+};
+
struct cdns_dphy {
struct cdns_dphy_cfg cfg;
void __iomem *regs;
+ struct device *dev;
struct clk *psm_clk;
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
};
+struct cdns_dphy_driver_data {
+ const struct cdns_dphy_ops *tx;
+ const struct cdns_dphy_ops *rx;
+};
+
+struct cdns_dphy_band {
+ unsigned int min_rate;
+ unsigned int max_rate;
+};
+
+/* Order of bands is important since the index is the band number. */
+static const struct cdns_dphy_band rx_bands[] = {
+ {80, 100}, {100, 120}, {120, 160}, {160, 200}, {200, 240},
+ {240, 280}, {280, 320}, {320, 360}, {360, 400}, {400, 480},
+ {480, 560}, {560, 640}, {640, 720}, {720, 800}, {800, 880},
+ {880, 1040}, {1040, 1200}, {1200, 1350}, {1350, 1500}, {1500, 1750},
+ {1750, 2000}, {2000, 2250}, {2250, 2500}
+};
+
+static const int num_rx_bands = ARRAY_SIZE(rx_bands);
+
+static const struct cdns_dphy_band tx_bands[] = {
+ {80, 100}, {100, 120}, {120, 160}, {160, 200}, {200, 240},
+ {240, 320}, {320, 390}, {390, 450}, {450, 510}, {510, 560},
+ {560, 640}, {640, 690}, {690, 770}, {770, 870}, {870, 950},
+ {950, 1000}, {1000, 1200}, {1200, 1400}, {1400, 1600}, {1600, 1800},
+ {1800, 2000}, {2000, 2200}, {2200, 2500}
+};
+
+static const int num_tx_bands = ARRAY_SIZE(tx_bands);
+
static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
struct cdns_dphy_cfg *cfg,
struct phy_configure_opts_mipi_dphy *opts,
@@ -199,20 +285,9 @@ static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
dphy->regs + DPHY_PSM_CFG);
}
-/*
- * This is the reference implementation of DPHY hooks. Specific integration of
- * this IP may have to re-implement some of them depending on how they decided
- * to wire things in the SoC.
- */
-static const struct cdns_dphy_ops ref_dphy_ops = {
- .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
- .set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
- .set_psm_div = cdns_dphy_ref_set_psm_div,
-};
-
-static int cdns_dphy_config_from_opts(struct phy *phy,
- struct phy_configure_opts_mipi_dphy *opts,
- struct cdns_dphy_cfg *cfg)
+static int cdns_dphy_tx_config_from_opts(struct phy *phy,
+ struct phy_configure_opts_mipi_dphy *opts,
+ struct cdns_dphy_cfg *cfg)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
unsigned int dsi_hfp_ext = 0;
@@ -232,24 +307,34 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
return 0;
}
-static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
- union phy_configure_opts *opts)
+static int cdns_dphy_tx_get_band_ctrl(unsigned long hs_clk_rate)
{
- struct cdns_dphy_cfg cfg = { 0 };
+ unsigned int rate;
+ int i;
- if (mode != PHY_MODE_MIPI_DPHY)
- return -EINVAL;
+ rate = hs_clk_rate / 1000000UL;
+
+ if (rate < tx_bands[0].min_rate || rate >= tx_bands[num_tx_bands - 1].max_rate)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num_tx_bands; i++) {
+ if (rate >= tx_bands[i].min_rate && rate < tx_bands[i].max_rate)
+ return i;
+ }
- return cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ /* Unreachable. */
+ WARN(1, "Reached unreachable code.");
+ return -EINVAL;
}
-static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+static int cdns_dphy_tx_configure(struct cdns_dphy *dphy,
+ union phy_configure_opts *opts)
{
- struct cdns_dphy *dphy = phy_get_drvdata(phy);
struct cdns_dphy_cfg cfg = { 0 };
- int ret;
+ int ret, band_ctrl;
+ unsigned int reg;
- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ ret = cdns_dphy_tx_config_from_opts(dphy->phy, &opts->mipi_dphy, &cfg);
if (ret)
return ret;
@@ -276,12 +361,32 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
*/
cdns_dphy_set_pll_cfg(dphy, &cfg);
+ band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (band_ctrl < 0)
+ return band_ctrl;
+
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
return 0;
}
-static int cdns_dphy_power_on(struct phy *phy)
+static int cdns_dphy_tx_validate(struct cdns_dphy *dphy, enum phy_mode mode,
+ int submode, union phy_configure_opts *opts)
{
- struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ struct cdns_dphy_cfg cfg = { 0 };
+
+ if (submode != PHY_MIPI_DPHY_SUBMODE_TX)
+ return -EINVAL;
+
+ return cdns_dphy_tx_config_from_opts(dphy->phy, &opts->mipi_dphy, &cfg);
+}
+
+static int cdns_dphy_tx_power_on(struct cdns_dphy *dphy)
+{
+ if (!dphy->psm_clk || !dphy->pll_ref_clk)
+ return -EINVAL;
clk_prepare_enable(dphy->psm_clk);
clk_prepare_enable(dphy->pll_ref_clk);
@@ -293,12 +398,305 @@ static int cdns_dphy_power_on(struct phy *phy)
return 0;
}
+static int cdns_dphy_tx_power_off(struct cdns_dphy *dphy)
+{
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return 0;
+}
+
+static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ return 1000000;
+}
+
+static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ u32 status;
+
+ writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+ DPHY_CMN_PWM_DIV(0x8),
+ dphy->regs + DPHY_CMN_PWM);
+
+ writel((FIELD_PREP(DPHY_TX_J721E_WIZ_IPDIV, cfg->pll_ipdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_OPDIV, cfg->pll_opdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_FBDIV, cfg->pll_fbdiv)),
+ dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL);
+
+ writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
+ dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
+
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
+
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ (status & DPHY_TX_WIZ_O_CMN_READY), 0,
+ POLL_TIMEOUT_US);
+}
+
+static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+ writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
+}
+
+static const struct cdns_dphy_ops tx_ref_dphy_ops = {
+ .power_on = cdns_dphy_tx_power_on,
+ .power_off = cdns_dphy_tx_power_off,
+ .validate = cdns_dphy_tx_validate,
+ .configure = cdns_dphy_tx_configure,
+ .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
+ .set_psm_div = cdns_dphy_ref_set_psm_div,
+};
+
+static const struct cdns_dphy_ops tx_j721e_dphy_ops = {
+ .power_on = cdns_dphy_tx_power_on,
+ .power_off = cdns_dphy_tx_power_off,
+ .validate = cdns_dphy_tx_validate,
+ .configure = cdns_dphy_tx_configure,
+ .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
+ .set_psm_div = cdns_dphy_j721e_set_psm_div,
+};
+
+static int cdns_dphy_rx_power_on(struct cdns_dphy *dphy)
+{
+ /* Start RX state machine. */
+ writel(DPHY_CMN_SSM_EN | DPHY_CMN_RX_MODE_EN |
+ FIELD_PREP(DPHY_CMN_RX_BANDGAP_TIMER_MASK,
+ DPHY_CMN_RX_BANDGAP_TIMER),
+ dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_rx_power_off(struct cdns_dphy *dphy)
+{
+ writel(0, dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_rx_get_band_ctrl(unsigned long hs_clk_rate)
+{
+ unsigned int rate;
+ int i;
+
+ rate = hs_clk_rate / 1000000UL;
+ /* Since CSI-2 clock is DDR, the bit rate is twice the clock rate. */
+ rate *= 2;
+
+ if (rate < rx_bands[0].min_rate || rate >= rx_bands[num_rx_bands - 1].max_rate)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num_rx_bands; i++) {
+ if (rate >= rx_bands[i].min_rate && rate < rx_bands[i].max_rate)
+ return i;
+ }
+
+ /* Unreachable. */
+ WARN(1, "Reached unreachable code.");
+ return -EINVAL;
+}
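
A worked example of the lookup above (illustration, not patch text): a 400 MHz CSI-2 HS clock gives 400 * 2 = 800 Mbps, which satisfies rx_bands[14].min_rate <= 800 < rx_bands[14].max_rate for the {800, 880} entry, so band 14 is returned and cdns_dphy_rx_configure() later writes it into DPHY_BAND_CFG.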
+
+static int cdns_dphy_rx_wait_for_bit(void __iomem *addr, unsigned int bit)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(addr, val, val & BIT(bit), 10,
+ DPHY_ISO_LANE_READY_TIMEOUT_MS * 1000);
+}
+
+static int cdns_dphy_rx_wait_lane_ready(struct cdns_dphy *dphy, int lanes)
+{
+ void __iomem *reg = dphy->regs;
+ u32 data_lane_ctrl[] = {DPHY_ISO_DL_CTRL_L0, DPHY_ISO_DL_CTRL_L1,
+ DPHY_ISO_DL_CTRL_L2, DPHY_ISO_DL_CTRL_L3};
+ int ret, i;
+
+ /* Data lanes. Minimum one lane is mandatory. */
+ if (lanes < DPHY_LANES_MIN || lanes > DPHY_LANES_MAX)
+ return -EINVAL;
+
+ /* Clock lane */
+ ret = cdns_dphy_rx_wait_for_bit(reg + DPHY_ISO_CL_CTRL_L,
+ DPHY_ISO_LANE_READY_BIT);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ ret = cdns_dphy_rx_wait_for_bit(reg + data_lane_ctrl[i],
+ DPHY_ISO_LANE_READY_BIT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct cdns_dphy_soc_data j721e_soc_data = {
+ .has_hw_cmn_rstb = true,
+};
+
+static const struct soc_device_attribute cdns_dphy_socinfo[] = {
+ {
+ .family = "J721E",
+ .revision = "SR1.0",
+ .data = &j721e_soc_data,
+ },
+ {/* sentinel */}
+};
+
+static int cdns_dphy_rx_configure(struct cdns_dphy *dphy,
+ union phy_configure_opts *opts)
+{
+ const struct soc_device_attribute *soc;
+ const struct cdns_dphy_soc_data *soc_data = NULL;
+ unsigned int reg;
+ int band_ctrl, ret;
+
+ soc = soc_device_match(cdns_dphy_socinfo);
+ if (soc && soc->data)
+ soc_data = soc->data;
+ if (!soc || (soc_data && !soc_data->has_hw_cmn_rstb)) {
+ reg = DPHY_LANE_RESET_CMN_EN;
+ writel(reg, dphy->regs + DPHY_LANE);
+ }
+
+ band_ctrl = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (band_ctrl < 0)
+ return band_ctrl;
+
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
+ /*
+ * Set the required power island phase 2 time. This is mandated by DPHY
+ * specs.
+ */
+ reg = DPHY_POWER_ISLAND_EN_DATA_VAL;
+ writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_DATA);
+ reg = DPHY_POWER_ISLAND_EN_CLK_VAL;
+ writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_CLK);
+
+ ret = cdns_dphy_rx_wait_lane_ready(dphy, opts->mipi_dphy.lanes);
+ if (ret) {
+ dev_err(dphy->dev, "DPHY wait for lane ready timeout\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_dphy_rx_validate(struct cdns_dphy *dphy, enum phy_mode mode,
+ int submode, union phy_configure_opts *opts)
+{
+ int ret;
+
+ if (submode != PHY_MIPI_DPHY_SUBMODE_RX)
+ return -EINVAL;
+
+ ret = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ return phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+}
+
+static const struct cdns_dphy_ops rx_ref_dphy_ops = {
+ .power_on = cdns_dphy_rx_power_on,
+ .power_off = cdns_dphy_rx_power_off,
+ .configure = cdns_dphy_rx_configure,
+ .validate = cdns_dphy_rx_validate,
+};
+
+/*
+ * This is the reference implementation of DPHY hooks. Specific integrations
+ * of this IP may have to re-implement some of them depending on how things
+ * are wired up in the SoC.
+ */
+static const struct cdns_dphy_driver_data ref_dphy_ops = {
+ .tx = &tx_ref_dphy_ops,
+ .rx = &rx_ref_dphy_ops,
+};
+
+static const struct cdns_dphy_driver_data j721e_dphy_ops = {
+ .tx = &tx_j721e_dphy_ops,
+ .rx = &rx_ref_dphy_ops,
+};
+
+static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ if (dphy->ops->validate)
+ return dphy->ops->validate(dphy, mode, submode, opts);
+
+ return 0;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ if (dphy->ops->power_on)
+ return dphy->ops->power_on(dphy);
+
+ return 0;
+}
+
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- clk_disable_unprepare(dphy->pll_ref_clk);
- clk_disable_unprepare(dphy->psm_clk);
+ if (dphy->ops->power_off)
+ return dphy->ops->power_off(dphy);
+
+ return 0;
+}
+
+static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ if (dphy->ops->configure)
+ return dphy->ops->configure(dphy, opts);
+
+ return 0;
+}
+
+static int cdns_dphy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ const struct cdns_dphy_driver_data *ddata;
+
+ ddata = of_device_get_match_data(dphy->dev);
+ if (!ddata)
+ return -EINVAL;
+
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ if (submode == PHY_MIPI_DPHY_SUBMODE_TX) {
+ if (!ddata->tx)
+ return -EOPNOTSUPP;
+
+ dphy->ops = ddata->tx;
+ } else if (submode == PHY_MIPI_DPHY_SUBMODE_RX) {
+ if (!ddata->rx)
+ return -EOPNOTSUPP;
+
+ dphy->ops = ddata->rx;
+ } else {
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -308,34 +706,39 @@ static const struct phy_ops cdns_dphy_ops = {
.validate = cdns_dphy_validate,
.power_on = cdns_dphy_power_on,
.power_off = cdns_dphy_power_off,
+ .set_mode = cdns_dphy_set_mode,
};
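
On the consumer side the TX or RX personality is selected before power-on through the generic PHY API; a minimal sketch (the function name is hypothetical):

static int csi_rx_dphy_start(struct phy *dphy)
{
	int ret;

	/* Routes all subsequent ops through the RX hook table. */
	ret = phy_set_mode_ext(dphy, PHY_MODE_MIPI_DPHY,
			       PHY_MIPI_DPHY_SUBMODE_RX);
	if (ret)
		return ret;

	return phy_power_on(dphy);
}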
static int cdns_dphy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct cdns_dphy *dphy;
- struct resource *res;
+ const struct cdns_dphy_driver_data *ddata;
int ret;
dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
if (!dphy)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, dphy);
+ dphy->dev = &pdev->dev;
+
+ ddata = of_device_get_match_data(&pdev->dev);
+ if (!ddata)
+ return -EINVAL;
- dphy->ops = of_device_get_match_data(&pdev->dev);
+ dphy->ops = ddata->tx;
if (!dphy->ops)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dphy->regs = devm_ioremap_resource(&pdev->dev, res);
+ dphy->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dphy->regs))
return PTR_ERR(dphy->regs);
- dphy->psm_clk = devm_clk_get(&pdev->dev, "psm");
+ dphy->psm_clk = devm_clk_get_optional(dphy->dev, "psm");
if (IS_ERR(dphy->psm_clk))
return PTR_ERR(dphy->psm_clk);
- dphy->pll_ref_clk = devm_clk_get(&pdev->dev, "pll_ref");
+ dphy->pll_ref_clk = devm_clk_get_optional(dphy->dev, "pll_ref");
if (IS_ERR(dphy->pll_ref_clk))
return PTR_ERR(dphy->pll_ref_clk);
@@ -372,6 +775,7 @@ static int cdns_dphy_remove(struct platform_device *pdev)
static const struct of_device_id cdns_dphy_of_match[] = {
{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+ { .compatible = "ti,j721e-dphy", .data = &j721e_dphy_ops },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cdns_dphy_of_match);
@@ -387,5 +791,6 @@ static struct platform_driver cdns_dphy_platform_driver = {
module_platform_driver(cdns_dphy_platform_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
MODULE_DESCRIPTION("Cadence MIPI D-PHY Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index 88e239adc3b8..51c0b98f5fd7 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -263,7 +263,6 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct cdns_salvo_phy *salvo_phy;
- struct resource *res;
const struct of_device_id *match;
struct cdns_salvo_data *data;
@@ -281,8 +280,7 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
if (IS_ERR(salvo_phy->clk))
return PTR_ERR(salvo_phy->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- salvo_phy->base = devm_ioremap_resource(dev, res);
+ salvo_phy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(salvo_phy->base))
return PTR_ERR(salvo_phy->base);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 7d990613ce83..588a23b83b93 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -7,6 +7,7 @@
*
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -20,17 +21,36 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
+
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 5
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
+#define SIERRA_CMN_PLLLC_SS_PREG 0x52
+#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
+#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
+#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_REFRCV1_PREG 0xB8
+#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
+#define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
+#define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
+#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
@@ -43,7 +63,11 @@
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_LN_A3_PREG 0x023
+#define SIERRA_PSC_LN_A4_PREG 0x024
+#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
@@ -52,19 +76,25 @@
#define SIERRA_PSC_RX_A1_PREG 0x031
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
+#define SIERRA_PLLCTRL_FBDIV_MODE01_PREG 0x039
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
@@ -114,15 +144,28 @@
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DEQ_DFETAP0 0x129
+#define SIERRA_DEQ_DFETAP1 0x12B
+#define SIERRA_DEQ_DFETAP2 0x12D
+#define SIERRA_DEQ_DFETAP3 0x12F
+#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_PRECUR_PREG 0x138
+#define SIERRA_DEQ_POSTCUR_PREG 0x140
+#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
+#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
@@ -136,27 +179,145 @@
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
-#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
- (0xc000 << (block_offset))
+/* PHY PCS common registers */
+#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe
+/* PHY PCS lane registers */
+#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_ISO_LINK_CTRL 0xB
+
+/* PHY PMA common registers */
+#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+#define SIERRA_PHY_PMA_CMN_CTRL 0x000
+
+/* PHY PMA lane registers */
+#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xF000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
+
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
+#define CDNS_SIERRA_OUTPUT_CLOCKS 3
+#define CDNS_SIERRA_INPUT_CLOCKS 5
+enum cdns_sierra_clock_input {
+ PHY_CLK,
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+};
+
+#define SIERRA_NUM_CMN_PLLC 2
+#define SIERRA_NUM_CMN_PLLC_PARENTS 2
+
static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pma_cmn_ready =
+ REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const struct reg_field phy_iso_link_ctrl_1 =
+ REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
+static const struct reg_field cmn_plllc_clk1outdiv_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6);
+static const struct reg_field cmn_plllc_clk1_en_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12);
+
+static const char * const clk_names[] = {
+ [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
+ [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+ [CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der",
+};
+
+enum cdns_sierra_cmn_plllc {
+ CMN_PLLLC,
+ CMN_PLLLC1,
+};
+
+struct cdns_sierra_pll_mux_reg_fields {
+ struct reg_field pfdclk_sel_preg;
+ struct reg_field plllc1en_field;
+ struct reg_field termen_field;
+};
+
+static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
+ [CMN_PLLLC] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
+ },
+ [CMN_PLLLC1] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
+ },
+};
+
+struct cdns_sierra_pll_mux {
+ struct clk_hw hw;
+ struct regmap_field *pfdclk_sel_preg;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_pll_mux(_hw) \
+ container_of(_hw, struct cdns_sierra_pll_mux, hw)
+
+static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { PLL0_REFCLK, PLL1_REFCLK },
+ [CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
+};
+
+static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { 0, 1 },
+ [CMN_PLLLC1] = { 1, 0 },
+};
+
+struct cdns_sierra_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *cmn_plllc_clk1outdiv_preg;
+ struct regmap_field *cmn_plllc_clk1_en_preg;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_sierra_derived_refclk, hw)
+
+enum cdns_sierra_phy_type {
+ TYPE_NONE,
+ TYPE_PCIE,
+ TYPE_USB,
+ TYPE_SGMII,
+ TYPE_QSGMII
+};
+
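+/* SSC (spread-spectrum clocking) modes, matching the "cdns,ssc-mode" DT property values */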
+enum cdns_sierra_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
struct cdns_sierra_inst {
struct phy *phy;
- u32 phy_type;
+ enum cdns_sierra_phy_type phy_type;
u32 num_lanes;
u32 mlane;
struct reset_control *lnk_rst;
+ enum cdns_sierra_ssc_mode ssc_mode;
};
struct cdns_reg_pairs {
@@ -164,18 +325,23 @@ struct cdns_reg_pairs {
u32 off;
};
+struct cdns_sierra_vals {
+ const struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
struct cdns_sierra_data {
- u32 id_value;
- u8 block_offset_shift;
- u8 reg_offset_shift;
- u32 pcie_cmn_regs;
- u32 pcie_ln_regs;
- u32 usb_cmn_regs;
- u32 usb_ln_regs;
- const struct cdns_reg_pairs *pcie_cmn_vals;
- const struct cdns_reg_pairs *pcie_ln_vals;
- const struct cdns_reg_pairs *usb_cmn_vals;
- const struct cdns_reg_pairs *usb_ln_vals;
+ u32 id_value;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
@@ -187,22 +353,31 @@ struct cdns_regmap_cdb_context {
struct cdns_sierra_phy {
struct device *dev;
struct regmap *regmap;
- struct cdns_sierra_data *init_data;
+ const struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
- struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pma_cmn_ready;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
- struct clk *clk;
- struct clk *cmn_refclk_dig_div;
- struct clk *cmn_refclk1_dig_div;
+ struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
+ struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
+ struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
int nsubnodes;
u32 num_lanes;
bool autoconf;
+ int already_configured;
+ struct clk_onecell_data clk_data;
+ struct clk *output_clks[CDNS_SIERRA_OUTPUT_CLOCKS];
};
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
@@ -260,51 +435,141 @@ static const struct regmap_config cdns_sierra_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static const struct regmap_config cdns_sierra_phy_config_ctrl_config = {
- .name = "sierra_phy_config_ctrl",
+static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = {
+ .name = "sierra_phy_pcs_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = {
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
+ .name = "sierra_phy_pma_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
+#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pma_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
+};
+
static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ const struct cdns_sierra_data *init_data = phy->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
+ u32 num_regs;
int i, j;
- const struct cdns_reg_pairs *cmn_vals, *ln_vals;
- u32 num_cmn_regs, num_ln_regs;
/* Initialise the PHY registers, unless auto configured */
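+	/* Multilink setup is done at probe time; pre-configured PHYs are left as-is */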
- if (phy->autoconf)
+ if (phy->autoconf || phy->already_configured || phy->nsubnodes > 1)
return 0;
- clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
- clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
- if (ins->phy_type == PHY_TYPE_PCIE) {
- num_cmn_regs = phy->init_data->pcie_cmn_regs;
- num_ln_regs = phy->init_data->pcie_ln_regs;
- cmn_vals = phy->init_data->pcie_cmn_vals;
- ln_vals = phy->init_data->pcie_ln_vals;
- } else if (ins->phy_type == PHY_TYPE_USB3) {
- num_cmn_regs = phy->init_data->usb_cmn_regs;
- num_ln_regs = phy->init_data->usb_ln_regs;
- cmn_vals = phy->init_data->usb_cmn_vals;
- ln_vals = phy->init_data->usb_ln_vals;
- } else {
- return -EINVAL;
+ clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+	/* PHY PCS common register configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+	/* PHY PMA lane register configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
}
- regmap = phy->regmap_common_cdb;
- for (j = 0; j < num_cmn_regs ; j++)
- regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+	/* PMA common register configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
- for (i = 0; i < ins->num_lanes; i++) {
- for (j = 0; j < num_ln_regs ; j++) {
+	/* PMA lane register configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_lane_cdb[i + ins->mlane];
- regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
@@ -319,10 +584,13 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;
- ret = reset_control_deassert(sp->phy_rst);
- if (ret) {
- dev_err(dev, "Failed to take the PHY out of reset\n");
- return ret;
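+	/* In multilink mode, the PHY reset is deasserted at probe time instead */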
+ if (sp->nsubnodes == 1) {
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
}
/* Take the PHY lane group out of reset */
@@ -332,6 +600,26 @@ static int cdns_sierra_phy_on(struct phy *gphy)
return ret;
}
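+	/* For PCIe and USB, wait for the lane's ISO link control to deassert */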
+ if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
+ val, !val, 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
+ 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
val, val, 1000, PLL_LOCK_TIME);
if (ret < 0)
@@ -364,23 +652,311 @@ static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
+static int cdns_sierra_noop_phy_on(struct phy *gphy)
+{
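+	/* The PHY has already been configured (e.g. by a bootloader); just let it settle */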
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static const struct phy_ops noop_ops = {
+ .power_on = cdns_sierra_noop_phy_on,
+ .owner = THIS_MODULE,
+};
+
+static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ unsigned int val;
+ int index;
+
+ regmap_field_read(field, &val);
+
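+	/*
+	 * If PLL_CMNLC1 is currently sourced from its alternate parent, make
+	 * sure the refclk receiver and its termination are enabled too.
+	 */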
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val);
+ if (index == 1) {
+ regmap_field_write(plllc1en_field, 1);
+ regmap_field_write(termen_field, 1);
+ }
+ } else {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val);
+ }
+
+ return index;
+}
+
+static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ int val, ret;
+
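+	/*
+	 * Disable the refclk receiver and termination by default; re-enable
+	 * them only when the alternate parent (index 1) is selected.
+	 */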
+ ret = regmap_field_write(plllc1en_field, 0);
+ ret |= regmap_field_write(termen_field, 0);
+ if (index == 1) {
+ ret |= regmap_field_write(plllc1en_field, 1);
+ ret |= regmap_field_write(termen_field, 1);
+ }
+
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
+ else
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
+
+ ret |= regmap_field_write(field, val);
+
+ return ret;
+}
+
+static const struct clk_ops cdns_sierra_pll_mux_ops = {
+ .set_parent = cdns_sierra_pll_mux_set_parent,
+ .get_parent = cdns_sierra_pll_mux_get_parent,
+};
+
+static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
+ struct regmap_field *pfdclk1_sel_field,
+ struct regmap_field *plllc1en_field,
+ struct regmap_field *termen_field,
+ int clk_index)
+{
+ struct cdns_sierra_pll_mux *mux;
+ struct device *dev = sp->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ char clk_name[100];
+ struct clk *clk;
+ int i;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = SIERRA_NUM_CMN_PLLC_PARENTS;
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(dev, "No parent clock for PLL mux clocks\n");
+ return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
+ }
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), clk_names[clk_index]);
+
+ init = &mux->clk_data;
+
+ init->ops = &cdns_sierra_pll_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->pfdclk_sel_preg = pfdclk1_sel_field;
+ mux->plllc1en_field = plllc1en_field;
+ mux->termen_field = termen_field;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[clk_index] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
+{
+ struct regmap_field *pfdclk1_sel_field;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct device *dev = sp->dev;
+ int ret = 0, i, clk_index;
+
+ clk_index = CDNS_SIERRA_PLL_CMNLC;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++, clk_index++) {
+ pfdclk1_sel_field = sp->cmn_plllc_pfdclk1_sel_preg[i];
+ plllc1en_field = sp->cmn_refrcv_refclk_plllc1en_preg[i];
+ termen_field = sp->cmn_refrcv_refclk_termen_preg[i];
+
+ ret = cdns_sierra_pll_mux_register(sp, pfdclk1_sel_field, plllc1en_field,
+ termen_field, clk_index);
+ if (ret) {
+			dev_err(dev, "Failed to register cmn plllc mux\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
+
+	/* Program the output divider to get 100 MHz on ref_der_clk_out (5 GHz VCO / 50 = 100 MHz) */
+ regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
+
+ return 0;
+}
+
+static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
+}
+
+static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_sierra_derived_refclk_ops = {
+ .enable = cdns_sierra_derived_refclk_enable,
+ .disable = cdns_sierra_derived_refclk_disable,
+ .is_enabled = cdns_sierra_derived_refclk_is_enabled,
+};
+
+static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk;
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
+
+ init = &derived_refclk->clk_data;
+
+ init->ops = &cdns_sierra_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ regmap = sp->regmap_common_cdb;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1outdiv_preg = field;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1_en_preg = field;
+
+ derived_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[CDNS_SIERRA_DERIVED_REFCLK] = clk;
+
+ return 0;
+}
+
+static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+
+ of_clk_del_provider(node);
+}
+
+static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = cdns_sierra_phy_register_pll_mux(sp);
+ if (ret) {
+		dev_err(dev, "Failed to register pll mux clocks\n");
+ return ret;
+ }
+
+ ret = cdns_sierra_derived_refclk_register(sp);
+ if (ret) {
+ dev_err(dev, "Failed to register derived refclk\n");
+ return ret;
+ }
+
+ sp->clk_data.clks = sp->output_clks;
+ sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+ return ret;
+}
+
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
struct device_node *child)
{
+ u32 phy_type;
+
if (of_property_read_u32(child, "reg", &inst->mlane))
return -EINVAL;
if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
return -EINVAL;
- if (of_property_read_u32(child, "cdns,phy-type", &inst->phy_type))
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type))
return -EINVAL;
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ inst->phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_USB3:
+ inst->phy_type = TYPE_USB;
+ break;
+ case PHY_TYPE_SGMII:
+ inst->phy_type = TYPE_SGMII;
+ break;
+ case PHY_TYPE_QSGMII:
+ inst->phy_type = TYPE_QSGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+
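+	/* Default to external SSC; "cdns,ssc-mode" may override it */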
+ inst->ssc_mode = EXTERNAL_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
+
return 0;
}
-static const struct of_device_id cdns_sierra_id_table[];
-
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
u32 block_offset, u8 reg_offset_shift,
const struct regmap_config *config)
@@ -402,6 +978,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
struct regmap_field *field;
+ struct reg_field reg_field;
struct regmap *regmap;
int i;
@@ -413,7 +990,33 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->macro_id_type = field;
- regmap = sp->regmap_phy_config_ctrl;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_termen_preg[i] = field;
+ }
+
+ regmap = sp->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
@@ -421,6 +1024,14 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->phy_pll_cfg_1 = field;
+ regmap = sp->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->pma_cmn_ready = field;
+
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
@@ -428,7 +1039,17 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(field);
}
- sp->pllctrl_lock[i] = field;
+ sp->pllctrl_lock[i] = field;
+ }
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->phy_iso_link_ctrl_1[i] = field;
}
return 0;
@@ -465,14 +1086,275 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
}
sp->regmap_common_cdb = regmap;
- block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_pcs_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
- &cdns_sierra_phy_config_ctrl_config);
+ &cdns_sierra_phy_pma_cmn_cdb_config);
if (IS_ERR(regmap)) {
- dev_err(dev, "Failed to init PHY config and control regmap\n");
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
return PTR_ERR(regmap);
}
- sp->regmap_phy_config_ctrl = regmap;
+ sp->regmap_phy_pma_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pma_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_lane_cdb[i] = regmap;
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll0_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll0_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL0_REFCLK] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll1_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll1_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL1_REFCLK] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_clk(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "phy_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock phy_clk\n");
+ return PTR_ERR(clk);
+ }
+ sp->input_clks[PHY_CLK] = clk;
+
+ ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_enable_clocks(struct cdns_sierra_phy *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ if (ret)
+ goto err_pll_cmnlc1;
+
+ return 0;
+
+err_pll_cmnlc1:
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+
+ return ret;
+}
+
+static void cdns_sierra_phy_disable_clocks(struct cdns_sierra_phy *sp)
+{
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
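+	/* phy_clk is only requested and enabled when the PHY was not pre-configured */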
+ if (!sp->already_configured)
+ clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+}
+
+static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct reset_control *rst;
+
+ rst = devm_reset_control_get_exclusive(dev, "sierra_reset");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->phy_rst = rst;
+
+ rst = devm_reset_control_get_optional_exclusive(dev, "sierra_apb");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get apb reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->apb_rst = rst;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
+{
+ const struct cdns_sierra_data *init_data = sp->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_t1, phy_t2;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ enum cdns_sierra_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+	/* A maximum of 2 links (subnodes) is supported */
+ if (sp->nsubnodes != 2)
+ return -EINVAL;
+
+ clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY configured to use both PLL LC and LC1 */
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+
+ phy_t1 = sp->phys[0].phy_type;
+ phy_t2 = sp->phys[1].phy_type;
+
+	/*
+	 * PHY configuration for multi-link operation is done in two steps.
+	 * e.g. consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using
+	 * the other 2 lanes. The Sierra PHY has two PLLs, PLLLC and PLLLC1; in
+	 * this case PLLLC is used for PCIe and PLLLC1 for QSGMII. The PHY is
+	 * configured in the two steps described below.
+	 *
+	 * [1] In the first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII,
+	 * so the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
+	 * This configures the PHY registers associated with PCIe (i.e. the
+	 * first protocol), involving the PLLLC registers and the registers for
+	 * the first 2 lanes of the PHY.
+	 * [2] In the second step, phy_t1 and phy_t2 are swapped, so
+	 * phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE, and the register values
+	 * are selected as [TYPE_QSGMII][TYPE_PCIE][ssc].
+	 * This configures the PHY registers associated with QSGMII (i.e. the
+	 * second protocol), involving the PLLLC1 registers and the registers
+	 * for the other 2 lanes of the PHY.
+	 *
+	 * This completes the PHY configuration for multilink operation and
+	 * allows the large number of PHY register configurations to be divided
+	 * into smaller, protocol-specific groups.
+	 */
+ for (node = 0; node < sp->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+			 * Once the first link (phy_t1) is configured, configure the
+			 * PHY for the second link (phy_t2); the array values are
+			 * then selected as [phy_t2][phy_t1][ssc].
+ */
+ swap(phy_t1, phy_t2);
+ }
+
+ mlane = sp->phys[node].mlane;
+ ssc = sp->phys[node].ssc_mode;
+ num_lanes = sp->phys[node].num_lanes;
+
+		/* PHY PCS common register configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+		/* PHY PMA lane register configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+		/* PMA common register configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = sp->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+		/* PMA lane register configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ if (phy_t1 == TYPE_SGMII || phy_t1 == TYPE_QSGMII)
+ reset_control_deassert(sp->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret)
+ return ret;
return 0;
}
@@ -482,25 +1364,20 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
- struct cdns_sierra_data *data;
+ const struct cdns_sierra_data *data;
unsigned int id_value;
- struct resource *res;
- int i, ret, node = 0;
+ int ret, node = 0;
void __iomem *base;
- struct clk *clk;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
return -ENODEV;
/* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_sierra_data *)match->data;
-
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
@@ -508,8 +1385,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->dev = dev;
sp->init_data = data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "missing \"reg\"\n");
return PTR_ERR(base);
@@ -526,52 +1402,38 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sp);
- sp->clk = devm_clk_get_optional(dev, "phy_clk");
- if (IS_ERR(sp->clk)) {
- dev_err(dev, "failed to get clock phy_clk\n");
- return PTR_ERR(sp->clk);
- }
-
- sp->phy_rst = devm_reset_control_get(dev, "sierra_reset");
- if (IS_ERR(sp->phy_rst)) {
- dev_err(dev, "failed to get reset\n");
- return PTR_ERR(sp->phy_rst);
- }
-
- sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
- if (IS_ERR(sp->apb_rst)) {
- dev_err(dev, "failed to get apb reset\n");
- return PTR_ERR(sp->apb_rst);
- }
-
- clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk_dig_div clock not found\n");
- ret = PTR_ERR(clk);
+ ret = cdns_sierra_phy_get_clocks(sp, dev);
+ if (ret)
return ret;
- }
- sp->cmn_refclk_dig_div = clk;
- clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
- ret = PTR_ERR(clk);
+ ret = cdns_sierra_clk_register(sp);
+ if (ret)
return ret;
- }
- sp->cmn_refclk1_dig_div = clk;
- ret = clk_prepare_enable(sp->clk);
+ ret = cdns_sierra_phy_enable_clocks(sp);
if (ret)
- return ret;
+ goto unregister_clk;
+
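+	/* If the PMA is already up, the PHY was configured earlier (e.g. by a bootloader) */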
+ regmap_field_read(sp->pma_cmn_ready, &sp->already_configured);
+
+ if (!sp->already_configured) {
+ ret = cdns_sierra_phy_clk(sp);
+ if (ret)
+ goto clk_disable;
- /* Enable APB */
- reset_control_deassert(sp->apb_rst);
+ ret = cdns_sierra_phy_get_resets(sp, dev);
+ if (ret)
+ goto clk_disable;
+
+ /* Enable APB */
+ reset_control_deassert(sp->apb_rst);
+ }
/* Check that PHY is present */
regmap_field_read(sp->macro_id_type, &id_value);
if (sp->init_data->id_value != id_value) {
ret = -EINVAL;
- goto clk_disable;
+ goto ctrl_assert;
}
sp->autoconf = of_property_read_bool(dn, "cdns,autoconf");
@@ -579,6 +1441,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
for_each_available_child_of_node(dn, child) {
struct phy *gphy;
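+		/* Only "phy" and "link" subnodes describe PHY instances */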
+ if (!(of_node_name_eq(child, "phy") ||
+ of_node_name_eq(child, "link")))
+ continue;
+
sp->phys[node].lnk_rst =
of_reset_control_array_get_exclusive(child);
@@ -586,7 +1452,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- goto put_child2;
+ of_node_put(child);
+ goto put_control;
}
if (!sp->autoconf) {
@@ -594,17 +1461,23 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
}
sp->num_lanes += sp->phys[node].num_lanes;
- gphy = devm_phy_create(dev, child, &ops);
-
+ if (!sp->already_configured)
+ gphy = devm_phy_create(dev, child, &ops);
+ else
+ gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
@@ -616,26 +1489,35 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
- goto put_child2;
+ goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
- if (!sp->autoconf && sp->nsubnodes > 1)
- regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+ if (!sp->already_configured && !sp->autoconf && sp->nsubnodes > 1) {
+ ret = cdns_sierra_phy_configure_multilink(sp);
+ if (ret)
+ goto put_control;
+ }
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
- node++;
-put_child2:
- for (i = 0; i < node; i++)
- reset_control_put(sp->phys[i].lnk_rst);
- of_node_put(child);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
+ctrl_assert:
+ if (!sp->already_configured)
+ reset_control_assert(sp->apb_rst);
clk_disable:
- clk_disable_unprepare(sp->clk);
- reset_control_assert(sp->apb_rst);
+ cdns_sierra_phy_disable_clocks(sp);
+unregister_clk:
+ cdns_sierra_clk_unregister(sp);
return ret;
}
@@ -648,6 +1530,7 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
reset_control_assert(phy->apb_rst);
pm_runtime_disable(&pdev->dev);
+ cdns_sierra_phy_disable_clocks(phy);
/*
* The device level resets will be put automatically.
* Need to put the subnode resets here though.
@@ -656,9 +1539,706 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
reset_control_assert(phy->phys[i].lnk_rst);
reset_control_put(phy->phys[i].lnk_rst);
}
+
+ cdns_sierra_clk_unregister(phy);
+
return 0;
}
+/* SGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
+ .reg_pairs = sgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs),
+};
+
+/* SGMII refclk 100MHz, no ssc, opt3 and GE1 links using PLL LC1 */
+static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_cmn_regs[] = {
+ {0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG},
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0800, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_ln_regs[] = {
+ {0x688E, SIERRA_DET_STANDEC_D_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0106, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
+ {0x0013, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0003, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x0106, SIERRA_PLLCTRL_GEN_D_PREG},
+	{0x5231, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x9702, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x0051, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C0E, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3220, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0000, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
+ .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs),
+};
+
+static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
+ .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs),
+};
+
+/* QSGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+ .reg_pairs = qsgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
+};
+
+/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x0252, SIERRA_DET_STANDEC_E_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8422, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
+};
+
+/* PCIE PHY PCS common configuration */
+static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+ {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
+};
+
+static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+ .reg_pairs = pcie_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -670,13 +2250,62 @@ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
- {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
@@ -786,32 +2415,232 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
+static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_usb_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_usb_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
- SIERRA_MACRO_ID,
- 0x2,
- 0x2,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
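The three-level [own link type][companion link type][SSC mode] layout above lets the init path index straight into the table set. An illustrative lookup (the "data" pointer stands in for the matched cdns_sierra_data):

	/* e.g. PCIe lanes sharing the PHY with QSGMII, external SSC */
	struct cdns_sierra_vals *cmn_vals =
		data->pma_cmn_vals[TYPE_PCIE][TYPE_QSGMII][EXTERNAL_SSC];
	struct cdns_sierra_vals *ln_vals =
		data->pma_ln_vals[TYPE_PCIE][TYPE_QSGMII][EXTERNAL_SSC];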
static const struct cdns_sierra_data cdns_ti_map_sierra = {
- SIERRA_MACRO_ID,
- 0x0,
- 0x1,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .phy_pma_ln_vals = {
+ [TYPE_SGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &sgmii_phy_pma_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
static const struct of_device_id cdns_sierra_id_table[] = {
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 591a15834b48..f04619634c30 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -7,7 +7,9 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -22,18 +24,24 @@
#include <linux/reset.h>
#include <linux/regmap.h>
-#define REF_CLK_19_2MHz 19200000
-#define REF_CLK_25MHz 25000000
+#define REF_CLK_19_2MHZ 19200000
+#define REF_CLK_25MHZ 25000000
+#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
#define MAX_NUM_LANES 4
#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
#define NUM_SSC_MODE 3
-#define NUM_PHY_TYPE 6
+#define NUM_REF_CLK 4
+#define NUM_PHY_TYPE 7
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
+#define DP_PLL0 BIT(0)
+#define DP_PLL1 BIT(1)
+
#define TORRENT_COMMON_CDB_OFFSET 0x0
#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
@@ -47,6 +55,10 @@
#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \
(0xC000 << (block_offset))
+#define TORRENT_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \
(0xE000 << (block_offset))
@@ -58,16 +70,11 @@
*/
#define PHY_AUX_CTRL 0x04
#define PHY_RESET 0x20
-#define PMA_TX_ELEC_IDLE_MASK 0xF0U
#define PMA_TX_ELEC_IDLE_SHIFT 4
-#define PHY_L00_RESET_N_MASK 0x01U
#define PHY_PMA_XCVR_PLLCLK_EN 0x24
#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
-#define PHY_POWER_STATE_LN_0 0x0000
-#define PHY_POWER_STATE_LN_1 0x0008
-#define PHY_POWER_STATE_LN_2 0x0010
-#define PHY_POWER_STATE_LN_3 0x0018
+#define PHY_POWER_STATE_LN(ln) ((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
#define PHY_PMA_CMN_READY 0x34
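As a quick check that the new PHY_POWER_STATE_LN() macro preserves the per-lane offsets of the constants it replaces:

	PHY_POWER_STATE_LN(0) = 0x0000, PHY_POWER_STATE_LN(1) = 0x0008,
	PHY_POWER_STATE_LN(2) = 0x0010, PHY_POWER_STATE_LN(3) = 0x0018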
@@ -84,6 +91,8 @@
#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U
#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U
+#define CMN_CDIAG_REFCLK_OVRD 0x004CU
+#define CMN_CDIAG_REFCLK_DRV0_CTRL 0x0050U
#define CMN_BGCAL_INIT_TMR 0x0064U
#define CMN_BGCAL_ITER_TMR 0x0065U
#define CMN_IBCAL_INIT_TMR 0x0074U
@@ -97,6 +106,8 @@
#define CMN_PLL0_FRACDIVH_M0 0x0092U
#define CMN_PLL0_HIGH_THR_M0 0x0093U
#define CMN_PLL0_DSM_DIAG_M0 0x0094U
+#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U
+#define CMN_PLL0_DSM_FBL_OVRD_M0 0x0096U
#define CMN_PLL0_SS_CTRL1_M0 0x0098U
#define CMN_PLL0_SS_CTRL2_M0 0x0099U
#define CMN_PLL0_SS_CTRL3_M0 0x009AU
@@ -122,6 +133,8 @@
#define CMN_PLL1_FRACDIVH_M0 0x00D2U
#define CMN_PLL1_HIGH_THR_M0 0x00D3U
#define CMN_PLL1_DSM_DIAG_M0 0x00D4U
+#define CMN_PLL1_DSM_FBH_OVRD_M0 0x00D5U
+#define CMN_PLL1_DSM_FBL_OVRD_M0 0x00D6U
#define CMN_PLL1_SS_CTRL1_M0 0x00D8U
#define CMN_PLL1_SS_CTRL2_M0 0x00D9U
#define CMN_PLL1_SS_CTRL3_M0 0x00DAU
@@ -163,10 +176,12 @@
#define TX_TXCC_CPOST_MULT_00 0x004CU
#define TX_TXCC_CPOST_MULT_01 0x004DU
#define TX_TXCC_MGNFS_MULT_000 0x0050U
+#define TX_TXCC_MGNFS_MULT_100 0x0054U
#define DRV_DIAG_TX_DRV 0x00C6U
#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
#define XCVR_DIAG_HSCLK_SEL 0x00E6U
#define XCVR_DIAG_HSCLK_DIV 0x00E7U
+#define XCVR_DIAG_RXCLK_CTRL 0x00E9U
#define XCVR_DIAG_BIDI_CTRL 0x00EAU
#define XCVR_DIAG_PSC_OVRD 0x00EBU
#define TX_PSC_A0 0x0100U
@@ -183,6 +198,10 @@
#define RX_PSC_A2 0x0002U
#define RX_PSC_A3 0x0003U
#define RX_PSC_CAL 0x0006U
+#define RX_SDCAL0_INIT_TMR 0x0044U
+#define RX_SDCAL0_ITER_TMR 0x0045U
+#define RX_SDCAL1_INIT_TMR 0x004CU
+#define RX_SDCAL1_ITER_TMR 0x004DU
#define RX_CDRLF_CNFG 0x0080U
#define RX_CDRLF_CNFG3 0x0082U
#define RX_SIGDET_HL_FILT_TMR 0x0090U
@@ -206,16 +225,24 @@
#define RX_DIAG_ACYA 0x01FFU
/* PHY PCS common registers */
+#define PHY_PIPE_CMN_CTRL1 0x0000U
#define PHY_PLL_CFG 0x000EU
#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U
#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U
#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U
+/* PHY PCS lane registers */
+#define PHY_PCS_ISO_LINK_CTRL 0x000BU
+
/* PHY PMA common registers */
#define PHY_PMA_CMN_CTRL1 0x0000U
#define PHY_PMA_CMN_CTRL2 0x0001U
#define PHY_PMA_PLL_RAW_CTRL 0x0003U
+static const char * const clk_names[] = {
+ [CDNS_TORRENT_REFCLK_DRIVER] = "refclk-driver",
+};
+
static const struct reg_field phy_pll_cfg =
REG_FIELD(PHY_PLL_CFG, 0, 1);
@@ -231,6 +258,29 @@ static const struct reg_field phy_pma_pll_raw_ctrl =
static const struct reg_field phy_reset_ctrl =
REG_FIELD(PHY_RESET, 8, 8);
+static const struct reg_field phy_pcs_iso_link_ctrl_1 =
+ REG_FIELD(PHY_PCS_ISO_LINK_CTRL, 1, 1);
+
+static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0);
+
+#define REFCLK_OUT_NUM_CMN_CONFIG 5
+
+enum cdns_torrent_refclk_out_cmn {
+ CMN_CDIAG_REFCLK_OVRD_4,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_1,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_4,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_5,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_6,
+};
+
+static const struct reg_field refclk_out_cmn_cfg[] = {
+ [CMN_CDIAG_REFCLK_OVRD_4] = REG_FIELD(CMN_CDIAG_REFCLK_OVRD, 4, 4),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_1] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 1, 1),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_4] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 4, 4),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_5] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 5, 5),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_6] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 6, 6),
+};
+
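These REG_FIELD descriptors are consumed through the kernel regmap-field API; a minimal sketch of materializing and setting one of them ("dev" and "regmap" are illustrative placeholders):

	struct regmap_field *fld;

	fld = devm_regmap_field_alloc(dev, regmap,
				      refclk_out_cmn_cfg[CMN_CDIAG_REFCLK_OVRD_4]);
	if (IS_ERR(fld))
		return PTR_ERR(fld);

	regmap_field_write(fld, 1);	/* set the single-bit override field */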
enum cdns_torrent_phy_type {
TYPE_NONE,
TYPE_DP,
@@ -238,14 +288,49 @@ enum cdns_torrent_phy_type {
TYPE_SGMII,
TYPE_QSGMII,
TYPE_USB,
+ TYPE_USXGMII,
+};
+
+enum cdns_torrent_ref_clk {
+ CLK_19_2_MHZ,
+ CLK_25_MHZ,
+ CLK_100_MHZ,
+ CLK_156_25_MHZ,
+ CLK_ANY,
};
enum cdns_torrent_ssc_mode {
NO_SSC,
EXTERNAL_SSC,
- INTERNAL_SSC
+ INTERNAL_SSC,
+ ANY_SSC,
};
+/*
+ * Unique key id for vals table entry
+ * REFCLK0_RATE | REFCLK1_RATE | LINK0_TYPE | LINK1_TYPE | SSC_TYPE
+ */
+#define REFCLK0_SHIFT 12
+#define REFCLK0_MASK GENMASK(14, 12)
+#define REFCLK1_SHIFT 9
+#define REFCLK1_MASK GENMASK(11, 9)
+#define LINK0_SHIFT 6
+#define LINK0_MASK GENMASK(8, 6)
+#define LINK1_SHIFT 3
+#define LINK1_MASK GENMASK(5, 3)
+#define SSC_SHIFT 0
+#define SSC_MASK GENMASK(2, 0)
+
+#define CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc) \
+ ((((refclk0) << REFCLK0_SHIFT) & REFCLK0_MASK) | \
+ (((refclk1) << REFCLK1_SHIFT) & REFCLK1_MASK) | \
+ (((link0) << LINK0_SHIFT) & LINK0_MASK) | \
+ (((link1) << LINK1_SHIFT) & LINK1_MASK) | \
+ (((ssc) << SSC_SHIFT) & SSC_MASK))
+
+#define CDNS_TORRENT_KEY_ANYCLK(link0, link1) \
+ CDNS_TORRENT_KEY(CLK_ANY, CLK_ANY, \
+ (link0), (link1), ANY_SSC)
+
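A worked example of the key packing, using the enum values above (CLK_100_MHZ = 2, TYPE_PCIE = 2, TYPE_NONE = 0, NO_SSC = 0):

	CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC)
		= (2 << 12) | (2 << 9) | (2 << 6) | (0 << 3) | 0
		= 0x2480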
struct cdns_torrent_inst {
struct phy *phy;
u32 mlane;
@@ -259,11 +344,14 @@ struct cdns_torrent_phy {
void __iomem *base; /* DPTX registers base */
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ u32 dp_pll;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
struct clk *clk;
- unsigned long ref_clk_rate;
+ struct clk *clk1;
+ enum cdns_torrent_ref_clk ref_clk_rate;
+ enum cdns_torrent_ref_clk ref_clk1_rate;
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
const struct cdns_torrent_data *init_data;
@@ -273,12 +361,16 @@ struct cdns_torrent_phy {
struct regmap *regmap_phy_pma_common_cdb;
struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_phy_pcs_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_dptx_phy_reg;
struct regmap_field *phy_pll_cfg;
struct regmap_field *phy_pma_cmn_ctrl_1;
struct regmap_field *phy_pma_cmn_ctrl_2;
struct regmap_field *phy_pma_pll_raw_ctrl;
struct regmap_field *phy_reset_ctrl;
+ struct regmap_field *phy_pcs_iso_link_ctrl_1[MAX_NUM_LANES];
+ struct clk *clks[CDNS_TORRENT_REFCLK_DRIVER + 1];
+ struct clk_onecell_data clk_data;
};
enum phy_powerstate {
@@ -288,44 +380,16 @@ enum phy_powerstate {
POWERSTATE_A3 = 3,
};
-static int cdns_torrent_phy_init(struct phy *phy);
-static int cdns_torrent_dp_init(struct phy *phy);
-static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
- u32 num_lanes);
-static
-int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy);
-static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy,
- struct cdns_torrent_inst *inst);
-static
-void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy);
-static
-void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
- u32 rate, bool ssc);
-static
-void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy);
-static
-void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
- u32 rate, bool ssc);
-static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
- unsigned int lane);
-static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
- u32 rate, u32 num_lanes);
-static int cdns_torrent_dp_configure(struct phy *phy,
- union phy_configure_opts *opts);
-static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
- u32 num_lanes,
- enum phy_powerstate powerstate);
-static int cdns_torrent_phy_on(struct phy *phy);
-static int cdns_torrent_phy_off(struct phy *phy);
-
-static const struct phy_ops cdns_torrent_phy_ops = {
- .init = cdns_torrent_phy_init,
- .configure = cdns_torrent_dp_configure,
- .power_on = cdns_torrent_phy_on,
- .power_off = cdns_torrent_phy_off,
- .owner = THIS_MODULE,
+struct cdns_torrent_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_pipe_cmn_ctrl1_0;
+ struct regmap_field *cmn_fields[REFCLK_OUT_NUM_CMN_CONFIG];
+ struct clk_init_data clk_data;
};
+#define to_cdns_torrent_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_torrent_derived_refclk, hw)
+
struct cdns_reg_pairs {
u32 val;
u32 off;
@@ -336,21 +400,26 @@ struct cdns_torrent_vals {
u32 num_regs;
};
+struct cdns_torrent_vals_entry {
+ u32 key;
+ struct cdns_torrent_vals *vals;
+};
+
+struct cdns_torrent_vals_table {
+ struct cdns_torrent_vals_entry *entries;
+ u32 num_entries;
+};
+
struct cdns_torrent_data {
u8 block_offset_shift;
u8 reg_offset_shift;
- struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *tx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *rx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
+ struct cdns_torrent_vals_table link_cmn_vals_tbl;
+ struct cdns_torrent_vals_table xcvr_diag_vals_tbl;
+ struct cdns_torrent_vals_table pcs_cmn_vals_tbl;
+ struct cdns_torrent_vals_table phy_pma_cmn_vals_tbl;
+ struct cdns_torrent_vals_table cmn_vals_tbl;
+ struct cdns_torrent_vals_table tx_ln_vals_tbl;
+ struct cdns_torrent_vals_table rx_ln_vals_tbl;
};
struct cdns_regmap_cdb_context {
@@ -359,6 +428,24 @@ struct cdns_regmap_cdb_context {
u8 reg_offset_shift;
};
+static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
+ enum cdns_torrent_ref_clk refclk0,
+ enum cdns_torrent_ref_clk refclk1,
+ enum cdns_torrent_phy_type link0,
+ enum cdns_torrent_phy_type link1,
+ enum cdns_torrent_ssc_mode ssc)
+{
+ int i;
+ u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
+
+ for (i = 0; i < tbl->num_entries; i++) {
+ if (tbl->entries[i].key == key)
+ return tbl->entries[i].vals;
+ }
+
+ return NULL;
+}
+
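An illustrative call site for the lookup above (variable names are assumptions, not from this hunk); entries registered with CDNS_TORRENT_KEY_ANYCLK are matched by passing CLK_ANY and ANY_SSC:

	struct cdns_torrent_vals *vals;

	vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
					 CLK_ANY, CLK_ANY,
					 TYPE_PCIE, TYPE_NONE, ANY_SSC);
	if (!vals)
		return -EINVAL;	/* no values for this link configuration */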
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
@@ -439,6 +526,22 @@ static const struct regmap_config cdns_torrent_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
+#define TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_torrent_phy_pcs_lane_cdb_config[] = {
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+};
+
static const struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
.name = "torrent_phy_pcs_cmn_cdb",
.reg_stride = 1,
@@ -557,47 +660,428 @@ static const struct coefficients vltg_coeff[4][4] = {
}
};
+static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type)
+{
+ switch (phy_type) {
+ case TYPE_DP:
+ return "DisplayPort";
+ case TYPE_PCIE:
+ return "PCIe";
+ case TYPE_SGMII:
+ return "SGMII";
+ case TYPE_QSGMII:
+ return "QSGMII";
+ case TYPE_USB:
+ return "USB";
+ case TYPE_USXGMII:
+ return "USXGMII";
+ default:
+ return "None";
+ }
+}
+
+/*
+ * Set registers responsible for enabling and configuring SSC, with second and
+ * third register values provided by parameters.
+ */
+static
+void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val, u32 ctrl3_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 19.2 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0012);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A);
+ break;
+ }
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
+}
+
+/*
+ * Set registers responsible for enabling and configuring SSC, with second
+ * register value provided by a parameter.
+ */
+static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 25 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
+ break;
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 100 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
+
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ }
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
+ break;
+ }
+}
+
+/* Set PLL used for DP configuration */
+static int cdns_torrent_dp_get_pll(struct cdns_torrent_phy *cdns_phy,
+ enum cdns_torrent_phy_type phy_t2)
+{
+ switch (phy_t2) {
+ case TYPE_PCIE:
+ case TYPE_USB:
+ cdns_phy->dp_pll = DP_PLL1;
+ break;
+ case TYPE_SGMII:
+ case TYPE_QSGMII:
+ cdns_phy->dp_pll = DP_PLL0;
+ break;
+ case TYPE_NONE:
+ cdns_phy->dp_pll = DP_PLL0 | DP_PLL1;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported PHY configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Enable or disable PLL for selected lanes.
*/
static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp,
bool enable)
{
- u32 rd_val;
- u32 ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 rd_val, i, pll_ack_val;
+ int ret;
/*
* Used to determine, which bits to check for or enable in
* PHY_PMA_XCVR_PLLCLK_EN register.
*/
- u32 pll_bits;
+ u32 pll_bits = 0;
/* Used to enable or disable lanes. */
u32 pll_val;
+ u32 clane = 0;
- /* Select values of registers and mask, depending on enabled lane
- * count.
- */
- switch (dp->lanes) {
- /* lane 0 */
- case (1):
- pll_bits = 0x00000001;
- break;
- /* lanes 0-1 */
- case (2):
- pll_bits = 0x00000003;
- break;
- /* lanes 0-3, all */
- default:
- pll_bits = 0x0000000F;
- break;
- }
+ /* Select register values and mask depending on the enabled lane count. */
+ pll_val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
- if (enable)
- pll_val = pll_bits;
- else
- pll_val = 0x00000000;
+ if (enable) {
+ for (i = 0; i < dp->lanes; i++)
+ pll_bits |= (0x01U << (clane + i));
+ pll_val |= pll_bits;
+ pll_ack_val = pll_bits;
+ } else {
+ for (i = 0; i < inst->num_lanes; i++)
+ pll_bits |= (0x01U << (clane + i));
+ pll_val &= (~pll_bits);
+ pll_ack_val = 0;
+ }
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
@@ -605,62 +1089,243 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
ret = regmap_read_poll_timeout(regmap,
PHY_PMA_XCVR_PLLCLK_EN_ACK,
rd_val,
- (rd_val & pll_bits) == pll_val,
+ (rd_val & pll_bits) == pll_ack_val,
0, POLL_TIMEOUT_US);
ndelay(100);
return ret;
}
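For instance, with clane = 0 and dp->lanes = 2 on enable, the loop builds pll_bits = 0x3; the write sets lanes 0-1 in PHY_PMA_XCVR_PLLCLK_EN, and the poll completes once (rd_val & 0x3) == 0x3 in the ACK register.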
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ u32 num_lanes,
+ enum phy_powerstate powerstate)
+{
+ /* Register value for power state for a single byte. */
+ u32 value_part;
+ u32 value = 0;
+ u32 mask = 0;
+ u32 read_val;
+ int ret;
+ u32 i;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 clane = 0;
+
+ switch (powerstate) {
+ case (POWERSTATE_A0):
+ value_part = 0x01U;
+ break;
+ case (POWERSTATE_A2):
+ value_part = 0x04U;
+ break;
+ default:
+ /* Powerstate A3 */
+ value_part = 0x08U;
+ break;
+ }
+
+ /* Select register values and mask depending on the enabled lane count. */
+
+ for (i = 0; i < num_lanes; i++) {
+ value |= (value_part << PHY_POWER_STATE_LN(clane + i));
+ mask |= (PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(clane + i));
+ }
+
+ /* Set power state A<n>. */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value);
+ /* Wait until the PHY acknowledges power state completion. */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == value, 0,
+ POLL_TIMEOUT_US);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
+ ndelay(100);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst, u32 num_lanes)
+{
+ unsigned int read_val;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 clane = 0;
+
+ /*
+ * Wait for ACK of pma_xcvr_pllclk_en_ln_*, only for the
+ * master lane.
+ */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, (read_val & (1 << clane)),
+ 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+ return ret;
+ }
+
+ ndelay(100);
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
+ POWERSTATE_A2);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
+ POWERSTATE_A0);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ u32 rate, u32 num_lanes)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+ switch (rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
+
+ if (cdns_phy->dp_pll & DP_PLL1)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < num_lanes; i++)
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + i],
+ XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
+}
+
/*
* Perform register operations related to setting link rate, once powerstate is
* set and PLL disable request was processed.
*/
static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 ret;
- u32 read_val;
+ u32 read_val, field_val;
+ int ret;
- /* Disable the cmn_pll0_en before re-programming the new data rate. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);
+ /*
+ * Disable the associated PLL (cmn_pll0_en or cmn_pll1_en) before
+ * re-programming the new data rate.
+ */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val &= ~(cdns_phy->dp_pll);
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready de-assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[3] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- ((read_val >> 2) & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
- if (ret)
- return ret;
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 2) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 3) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
ndelay(200);
/* DP Rate Change - VCO Output settings. */
- if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz) {
+ if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
/* PMA common configuration 19.2MHz */
- cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate,
- dp->ssc);
- cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
- } else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) {
+ cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
+ else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
/* PMA common configuration 25MHz */
- cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate,
- dp->ssc);
- cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
- }
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+ cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
+ else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
+ /* PMA common configuration 100MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);
- /* Enable the cmn_pll0_en. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3);
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, dp->link_rate, dp->lanes);
+
+ /* Enable the associated PLL (cmn_pll0_en or cmn_pll1_en) */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val |= cdns_phy->dp_pll;
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[1] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- (read_val & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ (read_val & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1)
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 1) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+
return ret;
}
@@ -728,6 +1393,7 @@ static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 num_lanes)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
@@ -735,27 +1401,13 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
PHY_PMA_XCVR_POWER_STATE_REQ);
u32 pll_clk_en = cdns_torrent_dp_read(regmap,
PHY_PMA_XCVR_PLLCLK_EN);
+ u32 i;
- /* Lane 0 is always enabled. */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_0);
- pll_clk_en &= ~0x01U;
-
- if (num_lanes > 1) {
- /* lane 1 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_1);
- pll_clk_en &= ~(0x01U << 1);
- }
+ for (i = 0; i < num_lanes; i++) {
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK
+ << PHY_POWER_STATE_LN(inst->mlane + i));
- if (num_lanes > 2) {
- /* lanes 2 and 3 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_2);
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_3);
- pll_clk_en &= ~(0x01U << 2);
- pll_clk_en &= ~(0x01U << 3);
+ pll_clk_en &= ~(0x01U << (inst->mlane + i));
}
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
@@ -764,36 +1416,58 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
/* Configure lane count as required. */
static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 value;
- u32 ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u8 lane_mask = (1 << dp->lanes) - 1;
+ u8 pma_tx_elec_idle_mask = 0;
+ u32 value, i;
+ int ret;
+ u32 clane = inst->mlane;
+
+ lane_mask <<= clane;
value = cdns_torrent_dp_read(regmap, PHY_RESET);
+
/* clear pma_tx_elec_idle_ln_* bits. */
- value &= ~PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask = ((1 << inst->num_lanes) - 1) << clane;
+
+ pma_tx_elec_idle_mask <<= PMA_TX_ELEC_IDLE_SHIFT;
+
+ value &= ~pma_tx_elec_idle_mask;
+
/* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
- PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask;
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- /* reset the link by asserting phy_l00_reset_n low */
+ /* reset the link by asserting master lane phy_l0*_reset_n low */
cdns_torrent_dp_write(regmap, PHY_RESET,
- value & (~PHY_L00_RESET_N_MASK));
+ value & (~(1 << clane)));
/*
- * Assert lane reset on unused lanes and lane 0 so they remain in reset
+ * Assert lane reset on unused lanes and master lane so they remain in reset
* and powered down when re-enabling the link
*/
- value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 1; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, dp->lanes);
/* release phy_l0*_reset_n based on used laneCount */
- value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 0; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
/* Wait, until PHY gets ready after releasing PHY reset signal. */
@@ -804,41 +1478,44 @@ static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
ndelay(100);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ value = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ value |= (1 << clane);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, value);
- ret = cdns_torrent_dp_run(cdns_phy, dp->lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, dp->lanes);
return ret;
}
/* Configure link rate as required. */
static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 ret;
+ int ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A3);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, false);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
+ ret = cdns_torrent_dp_configure_rate(cdns_phy, inst, dp);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, true);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A2);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A0);
if (ret)
return ret;
@@ -849,44 +1526,45 @@ static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
/* Configure voltage swing and pre-emphasis for all enabled lanes. */
static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u8 lane;
u16 val;
for (lane = 0; lane < dp->lanes; lane++) {
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
* current state of the analog TX driver.
*/
val |= TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CTRL, 0x08A4);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
DRV_DIAG_TX_DRV, val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_MGNFS_MULT_000,
val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CPOST_MULT_00,
val);
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of
* analog TX driver to reflect the new programmed one.
*/
val &= ~TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
}
};
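
The per-lane writes above follow a freeze/program/unfreeze pattern around TX_DIAG_ACYA[0]: the analog TX driver state is held while the swing and pre-emphasis coefficients change, then released so the new values take effect. A condensed sketch of that pattern, using this driver's register names (the helper itself is hypothetical):

#include <linux/regmap.h>

static void example_program_lane(struct regmap *lane_regmap,
				 u16 drv_diag, u16 mgnfs, u16 cpost)
{
	unsigned int val;

	/* Freeze the current state of the analog TX driver. */
	regmap_read(lane_regmap, TX_DIAG_ACYA, &val);
	regmap_write(lane_regmap, TX_DIAG_ACYA, val | TX_DIAG_ACYA_HBDC_MASK);

	/* Program voltage swing and pre-emphasis while frozen. */
	regmap_write(lane_regmap, DRV_DIAG_TX_DRV, drv_diag);
	regmap_write(lane_regmap, TX_TXCC_MGNFS_MULT_000, mgnfs);
	regmap_write(lane_regmap, TX_TXCC_CPOST_MULT_00, cpost);

	/* Unfreeze so the TX driver reflects the new values. */
	regmap_write(lane_regmap, TX_DIAG_ACYA, val & ~TX_DIAG_ACYA_HBDC_MASK);
}
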
@@ -905,7 +1583,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_lanes) {
- ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_lanes(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
return ret;
@@ -913,7 +1591,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_rate) {
- ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_rate(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
return ret;
@@ -921,737 +1599,282 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_voltages)
- cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
+ cdns_torrent_dp_set_voltages(cdns_phy, inst, &opts->dp);
return ret;
}
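
cdns_torrent_dp_configure() is reached through the generic PHY API's .configure hook. A minimal consumer-side sketch, assuming a DP controller driver that already holds a struct phy reference; the values chosen are illustrative:

#include <linux/phy/phy.h>

static int example_dp_reconfigure(struct phy *dp_phy)
{
	union phy_configure_opts opts = { };

	opts.dp.lanes = 4;
	opts.dp.link_rate = 2700;	/* HBR, in Mb/s */
	opts.dp.set_lanes = 1;		/* exercises the set_lanes path above */
	opts.dp.set_rate = 1;		/* and the set_rate path */
	opts.dp.set_voltages = 0;

	return phy_configure(dp_phy, &opts);
}
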
-static int cdns_torrent_dp_init(struct phy *phy)
+static int cdns_torrent_phy_on(struct phy *phy)
{
- unsigned char lane_bits;
- int ret;
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
- struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 read_val;
+ int ret;
- switch (cdns_phy->ref_clk_rate) {
- case REF_CLK_19_2MHz:
- case REF_CLK_25MHz:
- /* Valid Ref Clock Rate */
- break;
- default:
- dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
- return -EINVAL;
+ if (cdns_phy->nsubnodes == 1) {
+ /* Take the PHY lane group out of reset */
+ reset_control_deassert(inst->lnk_rst);
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
}
- cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL1[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1,
+ read_val, read_val, 1000,
+ PLL_LOCK_TIMEOUT);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
+ if (inst->phy_type == TYPE_PCIE || inst->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pcs_iso_link_ctrl_1[inst->mlane],
+ read_val, !read_val, 1000,
+ PLL_LOCK_TIMEOUT);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
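
The cmn_ready wait above relies on regmap_field_read_poll_timeout(), which re-reads a register field until a condition holds or the time budget expires. A usage sketch with the same 1 ms poll interval (the helper and timeout parameter are illustrative):

#include <linux/regmap.h>

static int example_wait_field_set(struct regmap_field *field,
				  unsigned long timeout_us)
{
	u32 val;

	/* Arguments: field, value var, break condition, sleep_us, timeout_us. */
	return regmap_field_read_poll_timeout(field, val, val, 1000, timeout_us);
}
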
+
+static int cdns_torrent_phy_off(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ if (cdns_phy->nsubnodes != 1)
+ return 0;
+
+ ret = reset_control_assert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+
+ return reset_control_assert(inst->lnk_rst);
+}
- /* PHY PMA registers configuration function */
- cdns_torrent_dp_pma_cfg(cdns_phy, inst);
+static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst)
+{
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ unsigned char lane_bits;
+ u32 val;
+ u32 clane = 0;
+
+ cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
/*
* Set lines power state to A0
* Set lines pll clk enable to 0
*/
- cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, inst->num_lanes);
/*
* release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
* used lanes
*/
lane_bits = (1 << inst->num_lanes) - 1;
- cdns_torrent_dp_write(regmap, PHY_RESET,
- ((0xF & ~lane_bits) << 4) | (0xF & lane_bits));
+ lane_bits <<= clane;
+
+ val = cdns_torrent_dp_read(regmap, PHY_RESET);
+ val |= (0xF & lane_bits);
+ val &= ~(lane_bits << 4);
+ cdns_torrent_dp_write(regmap, PHY_RESET, val);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ val |= (1 << clane);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, val);
- /* PHY PMA registers configuration functions */
- /* Initialize PHY with max supported link rate, without SSC. */
- if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
+ /*
+ * PHY PMA registers configuration functions
+ * Initialize PHY with max supported link rate, without SSC.
+ */
+ if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy,
cdns_phy->max_bit_rate,
false);
- else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+ else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy,
cdns_phy->max_bit_rate,
false);
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate,
+ else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
+ cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, cdns_phy->max_bit_rate,
inst->num_lanes);
/* take out of reset */
regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1);
+}
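
Note that PHY_RESET and PHY_PMA_XCVR_PLLCLK_EN are now updated read-modify-write instead of with absolute values, so initializing one link no longer clobbers bits owned by the other link's lanes. The pattern, as a generic sketch (hypothetical helper):

#include <linux/regmap.h>

static void example_rmw(struct regmap *regmap, unsigned int reg,
			unsigned int clr, unsigned int set)
{
	unsigned int val;

	regmap_read(regmap, reg, &val);
	regmap_write(regmap, reg, (val & ~clr) | set);
}

Where applicable, regmap_update_bits() expresses the same clear-and-set in one call; the DPTX block here is accessed through the driver's cdns_torrent_dp_read/write helpers, hence the explicit form above.
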
- cdns_torrent_phy_on(phy);
+static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ struct phy *phy)
+{
+ int ret;
+
+ ret = cdns_torrent_phy_on(phy);
+ if (ret)
+ return ret;
ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
if (ret)
return ret;
- ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, inst->num_lanes);
return ret;
}
-static
-int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
+static int cdns_torrent_dp_init(struct phy *phy)
{
- unsigned int reg;
- int ret;
- struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
- ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg,
- reg & 1, 0, POLL_TIMEOUT_US);
- if (ret == -ETIMEDOUT) {
- dev_err(cdns_phy->dev,
- "timeout waiting for PMA common ready\n");
- return -ETIMEDOUT;
+ switch (cdns_phy->ref_clk_rate) {
+ case CLK_19_2_MHZ:
+ case CLK_25_MHZ:
+ case CLK_100_MHZ:
+ /* Valid Ref Clock Rate */
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+ return -EINVAL;
}
- return 0;
-}
-
-static void cdns_torrent_dp_pma_cfg(struct cdns_torrent_phy *cdns_phy,
- struct cdns_torrent_inst *inst)
-{
- unsigned int i;
-
- if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
- /* PMA common configuration 19.2MHz */
- cdns_torrent_dp_pma_cmn_cfg_19_2mhz(cdns_phy);
- else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
- /* PMA common configuration 25MHz */
- cdns_torrent_dp_pma_cmn_cfg_25mhz(cdns_phy);
-
- /* PMA lane configuration to deal with multi-link operation */
- for (i = 0; i < inst->num_lanes; i++)
- cdns_torrent_dp_pma_lane_cfg(cdns_phy, i);
-}
+ cdns_torrent_dp_get_pll(cdns_phy, TYPE_NONE);
-static
-void cdns_torrent_dp_pma_cmn_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy)
-{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
+ cdns_torrent_dp_common_init(cdns_phy, inst);
- /* refclock registers - assumes 19.2 MHz refclock */
- cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0014);
- cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0027);
- cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00A1);
- cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0027);
- cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00A1);
- cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x0060);
- cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x0060);
- cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0014);
- cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x0018);
- cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0005);
- cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x0018);
- cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0005);
- cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x0240);
- cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0005);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000B);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x0137);
-
- /* PLL registers */
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00C0);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00C0);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0260);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0260);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003);
+ return cdns_torrent_dp_start(cdns_phy, inst, phy);
}
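
cdns_torrent_dp_init() is the .init hook for a single-link DP instance. From a consumer's point of view the bring-up is the usual init/power_on pair; a minimal sketch, assuming the consumer's DT already references this PHY (the lookup name is illustrative):

#include <linux/phy/phy.h>

static int example_phy_bringup(struct device *dev)
{
	struct phy *p;
	int ret;

	p = devm_phy_get(dev, "dp-phy");	/* illustrative con_id */
	if (IS_ERR(p))
		return PTR_ERR(p);

	ret = phy_init(p);		/* lands in cdns_torrent_phy_init() */
	if (ret)
		return ret;

	return phy_power_on(p);		/* lands in cdns_torrent_phy_on() */
}
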
-/*
- * Set registers responsible for enabling and configuring SSC, with second and
- * third register values provided by parameters.
- */
-static
-void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
- u32 ctrl2_val, u32 ctrl3_val)
+static int cdns_torrent_dp_multilink_init(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ struct phy *phy)
{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
-
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
-}
-
-static
-void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
- u32 rate, bool ssc)
-{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
-
- /* Assumes 19.2 MHz refclock */
- switch (rate) {
- /* Setting VCO for 10.8GHz */
- case 2700:
- case 5400:
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_INTDIV_M0, 0x0119);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVL_M0, 0x4000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_HIGH_THR_M0, 0x00BC);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL0_CTRL_M0, 0x0012);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_INTDIV_M0, 0x0119);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVL_M0, 0x4000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_HIGH_THR_M0, 0x00BC);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
- if (ssc)
- cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A,
- 0x006A);
- break;
- /* Setting VCO for 9.72GHz */
- case 1620:
- case 2430:
- case 3240:
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_INTDIV_M0, 0x01FA);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVL_M0, 0x4000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_HIGH_THR_M0, 0x0152);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_INTDIV_M0, 0x01FA);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVL_M0, 0x4000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_HIGH_THR_M0, 0x0152);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
- if (ssc)
- cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD,
- 0x0069);
- break;
- /* Setting VCO for 8.64GHz */
- case 2160:
- case 4320:
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_INTDIV_M0, 0x01C2);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_HIGH_THR_M0, 0x012C);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_INTDIV_M0, 0x01C2);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_HIGH_THR_M0, 0x012C);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
- if (ssc)
- cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536,
- 0x0069);
- break;
- /* Setting VCO for 8.1GHz */
- case 8100:
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_INTDIV_M0, 0x01A5);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVL_M0, 0xE000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_HIGH_THR_M0, 0x011A);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_INTDIV_M0, 0x01A5);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVL_M0, 0xE000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_HIGH_THR_M0, 0x011A);
- cdns_torrent_phy_write(regmap,
- CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
- if (ssc)
- cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7,
- 0x006A);
- break;
+ if (cdns_phy->ref_clk_rate != CLK_100_MHZ) {
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+ return -EINVAL;
}
- if (ssc) {
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
- } else {
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260);
- /* Set reset register values to disable SSC */
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_SS_CTRL1_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_SS_CTRL2_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_SS_CTRL3_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_SS_CTRL4_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_SS_CTRL1_M0, 0x0002);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_SS_CTRL2_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_SS_CTRL3_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_SS_CTRL4_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
- }
+ cdns_torrent_dp_common_init(cdns_phy, inst);
- cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099);
- cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099);
- cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099);
- cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
+ return cdns_torrent_dp_start(cdns_phy, inst, phy);
}
-static
-void cdns_torrent_dp_pma_cmn_cfg_25mhz(struct cdns_torrent_phy *cdns_phy)
+static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw)
{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
- /* refclock registers - assumes 25 MHz refclock */
- cdns_torrent_phy_write(regmap, CMN_SSM_BIAS_TMR, 0x0019);
- cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLPRE_TMR, 0x0032);
- cdns_torrent_phy_write(regmap, CMN_PLLSM0_PLLLOCK_TMR, 0x00D1);
- cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLPRE_TMR, 0x0032);
- cdns_torrent_phy_write(regmap, CMN_PLLSM1_PLLLOCK_TMR, 0x00D1);
- cdns_torrent_phy_write(regmap, CMN_BGCAL_INIT_TMR, 0x007D);
- cdns_torrent_phy_write(regmap, CMN_BGCAL_ITER_TMR, 0x007D);
- cdns_torrent_phy_write(regmap, CMN_IBCAL_INIT_TMR, 0x0019);
- cdns_torrent_phy_write(regmap, CMN_TXPUCAL_INIT_TMR, 0x001E);
- cdns_torrent_phy_write(regmap, CMN_TXPUCAL_ITER_TMR, 0x0006);
- cdns_torrent_phy_write(regmap, CMN_TXPDCAL_INIT_TMR, 0x001E);
- cdns_torrent_phy_write(regmap, CMN_TXPDCAL_ITER_TMR, 0x0006);
- cdns_torrent_phy_write(regmap, CMN_RXCAL_INIT_TMR, 0x02EE);
- cdns_torrent_phy_write(regmap, CMN_RXCAL_ITER_TMR, 0x0006);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_INIT_TMR, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_ITER_TMR, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_REFTIM_START, 0x000E);
- cdns_torrent_phy_write(regmap, CMN_SD_CAL_PLLCNT_START, 0x012B);
-
- /* PLL registers */
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_INIT_TMR, 0x00FA);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_ITER_TMR, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_INIT_TMR, 0x00FA);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_ITER_TMR, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_REFTIM_START, 0x0317);
- cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_TCTRL, 0x0003);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_REFTIM_START, 0x0317);
- cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_TCTRL, 0x0003);
-}
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_6], 0);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], 1);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_5], 1);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 0);
+ regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_OVRD_4], 1);
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 1);
-/*
- * Set registers responsible for enabling and configuring SSC, with second
- * register value provided by a parameter.
- */
-static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy,
- u32 ctrl2_val)
-{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
-
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x007F);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+ return 0;
}
-static
-void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
- u32 rate, bool ssc)
+static void cdns_torrent_derived_refclk_disable(struct clk_hw *hw)
{
- struct regmap *regmap = cdns_phy->regmap_common_cdb;
-
- /* Assumes 25 MHz refclock */
- switch (rate) {
- /* Setting VCO for 10.8GHz */
- case 2700:
- case 5400:
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120);
- if (ssc)
- cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
- break;
- /* Setting VCO for 9.72GHz */
- case 1620:
- case 2430:
- case 3240:
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104);
- if (ssc)
- cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
- break;
- /* Setting VCO for 8.64GHz */
- case 2160:
- case 4320:
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7);
- if (ssc)
- cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
- break;
- /* Setting VCO for 8.1GHz */
- case 8100:
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8);
- if (ssc)
- cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
- break;
- }
-
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
-
- if (ssc) {
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
- } else {
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317);
- /* Set reset register values to disable SSC */
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
- cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
- cdns_torrent_phy_write(regmap,
- CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
- }
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
- cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7);
- cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7);
- cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7);
- cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 0);
}
-static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
- u32 rate, u32 num_lanes)
+static int cdns_torrent_derived_refclk_is_enabled(struct clk_hw *hw)
{
- unsigned int clk_sel_val = 0;
- unsigned int hsclk_div_val = 0;
- unsigned int i;
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+ int val;
- /* 16'h0000 for single DP link configuration */
- regmap_field_write(cdns_phy->phy_pll_cfg, 0x0);
+ regmap_field_read(derived_refclk->phy_pipe_cmn_ctrl1_0, &val);
- switch (rate) {
- case 1620:
- clk_sel_val = 0x0f01;
- hsclk_div_val = 2;
- break;
- case 2160:
- case 2430:
- case 2700:
- clk_sel_val = 0x0701;
- hsclk_div_val = 1;
- break;
- case 3240:
- clk_sel_val = 0x0b00;
- hsclk_div_val = 2;
- break;
- case 4320:
- case 5400:
- clk_sel_val = 0x0301;
- hsclk_div_val = 0;
- break;
- case 8100:
- clk_sel_val = 0x0200;
- hsclk_div_val = 0;
- break;
- }
-
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
-
- /* PMA lane configuration to deal with multi-link operation */
- for (i = 0; i < num_lanes; i++)
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i],
- XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
+ return !!val;
}
-static void cdns_torrent_dp_pma_lane_cfg(struct cdns_torrent_phy *cdns_phy,
- unsigned int lane)
-{
- /* Per lane, refclock-dependent receiver detection setting */
- if (cdns_phy->ref_clk_rate == REF_CLK_19_2MHz)
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- TX_RCVDET_ST_TMR, 0x0780);
- else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- TX_RCVDET_ST_TMR, 0x09C4);
-
- /* Writing Tx/Rx Power State Controllers registers */
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- TX_PSC_A0, 0x00FB);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- TX_PSC_A2, 0x04AA);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- TX_PSC_A3, 0x04AA);
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_PSC_A0, 0x0000);
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_PSC_A2, 0x0000);
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_PSC_A3, 0x0000);
-
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_PSC_CAL, 0x0000);
-
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_REE_GCSM1_CTRL, 0x0000);
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_REE_GCSM2_CTRL, 0x0000);
- cdns_torrent_phy_write(cdns_phy->regmap_rx_lane_cdb[lane],
- RX_REE_PERGCSM_CTRL, 0x0000);
-
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- XCVR_DIAG_BIDI_CTRL, 0x000F);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- XCVR_DIAG_PLLDRC_CTRL, 0x0001);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
- XCVR_DIAG_HSCLK_SEL, 0x0000);
-}
+static const struct clk_ops cdns_torrent_derived_refclk_ops = {
+ .enable = cdns_torrent_derived_refclk_enable,
+ .disable = cdns_torrent_derived_refclk_disable,
+ .is_enabled = cdns_torrent_derived_refclk_is_enabled,
+};
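
The derived refclk is modeled as a simple gate: enable/disable toggle the phy_pipe_cmn_ctrl1_0 field (plus the one-time CMN_CDIAG_* routing on enable), and is_enabled reads it back. Stripped to its essentials, the same clk_hw pattern looks like this (all names hypothetical; only the wiring mirrors the driver):

#include <linux/clk-provider.h>
#include <linux/regmap.h>

struct example_gate_clk {
	struct clk_hw hw;
	struct regmap_field *en;	/* 1-bit enable field */
};

#define to_example_gate_clk(_hw) container_of(_hw, struct example_gate_clk, hw)

static int example_gate_enable(struct clk_hw *hw)
{
	return regmap_field_write(to_example_gate_clk(hw)->en, 1);
}

static void example_gate_disable(struct clk_hw *hw)
{
	regmap_field_write(to_example_gate_clk(hw)->en, 0);
}

static int example_gate_is_enabled(struct clk_hw *hw)
{
	unsigned int val = 0;

	regmap_field_read(to_example_gate_clk(hw)->en, &val);
	return !!val;
}

static const struct clk_ops example_gate_ops = {
	.enable = example_gate_enable,
	.disable = example_gate_disable,
	.is_enabled = example_gate_is_enabled,
};
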
-static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
- u32 num_lanes,
- enum phy_powerstate powerstate)
+static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_phy)
{
- /* Register value for power state for a single byte. */
- u32 value_part;
- u32 value;
- u32 mask;
- u32 read_val;
- u32 ret;
- struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
-
- switch (powerstate) {
- case (POWERSTATE_A0):
- value_part = 0x01U;
- break;
- case (POWERSTATE_A2):
- value_part = 0x04U;
- break;
- default:
- /* Powerstate A3 */
- value_part = 0x08U;
- break;
- }
-
- /* Select values of registers and mask, depending on enabled
- * lane count.
- */
- switch (num_lanes) {
- /* lane 0 */
- case (1):
- value = value_part;
- mask = 0x0000003FU;
- break;
- /* lanes 0-1 */
- case (2):
- value = (value_part
- | (value_part << 8));
- mask = 0x00003F3FU;
- break;
- /* lanes 0-3, all */
- default:
- value = (value_part
- | (value_part << 8)
- | (value_part << 16)
- | (value_part << 24));
- mask = 0x3F3F3F3FU;
- break;
- }
-
- /* Set power state A<n>. */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value);
- /* Wait, until PHY acknowledges power state completion. */
- ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
- read_val, (read_val & mask) == value, 0,
- POLL_TIMEOUT_US);
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
- ndelay(100);
+ struct cdns_torrent_derived_refclk *derived_refclk;
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ const char *parent_name;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+ int i;
- return ret;
-}
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
-static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
-{
- unsigned int read_val;
- int ret;
- struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_TORRENT_REFCLK_DRIVER]);
- /*
- * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
- * master lane
- */
- ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK,
- read_val, read_val & 1,
- 0, POLL_TIMEOUT_US);
- if (ret == -ETIMEDOUT) {
- dev_err(cdns_phy->dev,
- "timeout waiting for link PLL clock enable ack\n");
- return ret;
+ clk = devm_clk_get_optional(dev, "phy_en_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No parent clock for derived_refclk\n");
+ return PTR_ERR(clk);
}
- ndelay(100);
-
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
- POWERSTATE_A2);
- if (ret)
- return ret;
+ init = &derived_refclk->clk_data;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
- POWERSTATE_A0);
-
- return ret;
-}
-
-static int cdns_torrent_phy_on(struct phy *phy)
-{
- struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
- struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
- u32 read_val;
- int ret;
-
- if (cdns_phy->nsubnodes == 1) {
- /* Take the PHY lane group out of reset */
- reset_control_deassert(inst->lnk_rst);
-
- /* Take the PHY out of reset */
- ret = reset_control_deassert(cdns_phy->phy_rst);
- if (ret)
- return ret;
+ if (clk) {
+ parent_name = __clk_get_name(clk);
+ init->parent_names = &parent_name;
+ init->num_parents = 1;
}
+ init->ops = &cdns_torrent_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
- /*
- * Wait for cmn_ready assertion
- * PHY_PMA_CMN_CTRL1[0] == 1
- */
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1,
- read_val, read_val, 1000,
- PLL_LOCK_TIMEOUT);
- if (ret) {
- dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n");
- return ret;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pipe_cmn_ctrl1_0);
+ if (IS_ERR(field)) {
+ dev_err(dev, "phy_pipe_cmn_ctrl1_0 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->phy_pipe_cmn_ctrl1_0 = field;
+
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < REFCLK_OUT_NUM_CMN_CONFIG; i++) {
+ field = devm_regmap_field_alloc(dev, regmap, refclk_out_cmn_cfg[i]);
+ if (IS_ERR(field)) {
+ dev_err(dev, "CMN reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_fields[i] = field;
}
- mdelay(10);
+ derived_refclk->hw.init = init;
- return 0;
-}
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
-static int cdns_torrent_phy_off(struct phy *phy)
-{
- struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
- struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
- int ret;
+ cdns_phy->clks[CDNS_TORRENT_REFCLK_DRIVER] = clk;
- if (cdns_phy->nsubnodes != 1)
- return 0;
-
- ret = reset_control_assert(cdns_phy->phy_rst);
- if (ret)
- return ret;
-
- return reset_control_assert(inst->lnk_rst);
+ return 0;
}
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
@@ -1694,6 +1917,7 @@ static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy)
struct device *dev = cdns_phy->dev;
struct regmap_field *field;
struct regmap *regmap;
+ int i;
regmap = cdns_phy->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg);
@@ -1727,6 +1951,16 @@ static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy)
}
cdns_phy->phy_pma_pll_raw_ctrl = field;
+ for (i = 0; i < MAX_NUM_LANES; i++) {
+ regmap = cdns_phy->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_pcs_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PCS_ISO_LINK_CTRL reg field init for ln %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pcs_iso_link_ctrl_1[i] = field;
+ }
+
return 0;
}
@@ -1787,6 +2021,17 @@ static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
return PTR_ERR(regmap);
}
cdns_phy->regmap_rx_lane_cdb[i] = regmap;
+
+ block_offset = TORRENT_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pcs_lane_cdb[i] = regmap;
}
block_offset = TORRENT_COMMON_CDB_OFFSET;
@@ -1827,31 +2072,36 @@ static int cdns_torrent_phy_init(struct phy *phy)
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
+ struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
u32 num_regs;
int i, j;
- if (cdns_phy->nsubnodes > 1)
+ if (cdns_phy->nsubnodes > 1) {
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy);
return 0;
-
- if (phy_type == TYPE_DP)
- return cdns_torrent_dp_init(phy);
+ }
/**
* Spread spectrum generation is not required or supported
- * for SGMII/QSGMII
+ * for SGMII/QSGMII/USXGMII
*/
- if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII)
+ if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII || phy_type == TYPE_USXGMII)
ssc = NO_SSC;
/* PHY configuration specific registers for single link */
- link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -1868,7 +2118,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -1881,7 +2134,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -1891,8 +2147,25 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[phy_type][TYPE_NONE][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -1903,7 +2176,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[phy_type][TYPE_NONE][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -1916,7 +2192,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[phy_type][TYPE_NONE][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -1928,16 +2207,43 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
}
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_init(phy);
+
+ return 0;
+}
+
+static const struct phy_ops cdns_torrent_phy_ops = {
+ .init = cdns_torrent_phy_init,
+ .configure = cdns_torrent_dp_configure,
+ .power_on = cdns_torrent_phy_on,
+ .power_off = cdns_torrent_phy_off,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_torrent_noop_phy_on(struct phy *phy)
+{
+	/* Allow a 5 ms to 10 ms delay for the PIPE clock to stabilize */
+ usleep_range(5000, 10000);
+
return 0;
}
+static const struct phy_ops noop_ops = {
+ .power_on = cdns_torrent_noop_phy_on,
+ .owner = THIS_MODULE,
+};
+
static
int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate;
+ enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
- enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type;
+ enum cdns_torrent_phy_type phy_t1, phy_t2;
+ struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
struct cdns_reg_pairs *reg_pairs;
@@ -1963,9 +2269,8 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* configure the PHY for second link with phy_t2.
* Get the array values as [phy_t2][phy_t1][ssc].
*/
- tmp_phy_type = phy_t1;
- phy_t1 = phy_t2;
- phy_t2 = tmp_phy_type;
+ swap(phy_t1, phy_t2);
+ swap(ref_clk, ref_clk1);
}
mlane = cdns_phy->phys[node].mlane;
@@ -1982,7 +2287,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* being configured, but these can be different for particular
* PHY type and are per lane.
*/
- link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -2000,7 +2307,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -2013,7 +2322,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -2023,8 +2334,23 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
reg_pairs[i].val);
}
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[phy_t1][phy_t2][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -2035,7 +2361,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[phy_t1][phy_t2][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -2048,7 +2376,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[phy_t1][phy_t2][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -2060,6 +2390,12 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
}
+ if (phy_t1 == TYPE_DP) {
+ ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
+ if (ret)
+ return ret;
+ }
+
reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
@@ -2071,6 +2407,154 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
return 0;
}
+static void cdns_torrent_clk_cleanup(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ of_clk_del_provider(dev->of_node);
+}
+
+static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = cdns_torrent_derived_refclk_register(cdns_phy);
+ if (ret) {
+ dev_err(dev, "failed to register derived refclk\n");
+ return ret;
+ }
+
+ cdns_phy->clk_data.clks = cdns_phy->clks;
+ cdns_phy->clk_data.clk_num = CDNS_TORRENT_REFCLK_DRIVER + 1;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &cdns_phy->clk_data);
+ if (ret) {
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+ return ret;
+ }
+
+ return 0;
+}
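
With of_clk_add_provider() and of_clk_src_onecell_get in place, a DT consumer can reference the derived refclk through this node. A consumer-side sketch (the clock-name and usage are illustrative):

#include <linux/clk.h>

static int example_use_derived_refclk(struct device *dev)
{
	struct clk *refclk;
	int ret;

	refclk = devm_clk_get(dev, "refclk-dig");	/* assumed clock-name */
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	ret = clk_prepare_enable(refclk);	/* drives the enable path above */
	if (ret)
		return ret;

	clk_disable_unprepare(refclk);
	return 0;
}
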
+
+static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(cdns_phy->phy_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->phy_rst);
+ }
+
+ cdns_phy->apb_rst = devm_reset_control_get_optional_exclusive(dev, "torrent_apb");
+ if (IS_ERR(cdns_phy->apb_rst)) {
+ dev_err(dev, "%s: failed to get apb reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->apb_rst);
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ unsigned long ref_clk1_rate;
+ unsigned long ref_clk_rate;
+ int ret;
+
+ /* refclk */
+ cdns_phy->clk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(cdns_phy->clk)) {
+ dev_err(dev, "phy ref clock not found\n");
+ return PTR_ERR(cdns_phy->clk);
+ }
+
+ ret = clk_prepare_enable(cdns_phy->clk);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ return ret;
+ }
+
+ ref_clk_rate = clk_get_rate(cdns_phy->clk);
+ if (!ref_clk_rate) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ switch (ref_clk_rate) {
+ case REF_CLK_19_2MHZ:
+ cdns_phy->ref_clk_rate = CLK_19_2_MHZ;
+ break;
+ case REF_CLK_25MHZ:
+ cdns_phy->ref_clk_rate = CLK_25_MHZ;
+ break;
+ case REF_CLK_100MHZ:
+ cdns_phy->ref_clk_rate = CLK_100_MHZ;
+ break;
+ case REF_CLK_156_25MHZ:
+ cdns_phy->ref_clk_rate = CLK_156_25_MHZ;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ /* refclk1 */
+ cdns_phy->clk1 = devm_clk_get_optional(dev, "refclk1");
+ if (IS_ERR(cdns_phy->clk1)) {
+ dev_err(dev, "phy ref clock1 not found\n");
+ return PTR_ERR(cdns_phy->clk1);
+ }
+
+ if (cdns_phy->clk1) {
+ ret = clk_prepare_enable(cdns_phy->clk1);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock1\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return ret;
+ }
+
+ ref_clk1_rate = clk_get_rate(cdns_phy->clk1);
+ if (!ref_clk1_rate) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock1 rate\n");
+ goto refclk1_err;
+ }
+
+ switch (ref_clk1_rate) {
+ case REF_CLK_19_2MHZ:
+ cdns_phy->ref_clk1_rate = CLK_19_2_MHZ;
+ break;
+ case REF_CLK_25MHZ:
+ cdns_phy->ref_clk1_rate = CLK_25_MHZ;
+ break;
+ case REF_CLK_100MHZ:
+ cdns_phy->ref_clk1_rate = CLK_100_MHZ;
+ break;
+ case REF_CLK_156_25MHZ:
+ cdns_phy->ref_clk1_rate = CLK_156_25_MHZ;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Invalid Ref Clock1 Rate\n");
+ goto refclk1_err;
+ }
+ } else {
+ cdns_phy->ref_clk1_rate = cdns_phy->ref_clk_rate;
+ }
+
+ return 0;
+
+refclk1_err:
+ clk_disable_unprepare(cdns_phy->clk1);
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+}
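
The two switch statements above perform the same Hz-to-enum translation for refclk and refclk1. A factored-out sketch of that mapping (hypothetical helper; the REF_CLK_* rates and the cdns_torrent_ref_clk enum are this driver's):

static int example_map_ref_clk_rate(unsigned long rate,
				    enum cdns_torrent_ref_clk *out)
{
	switch (rate) {
	case REF_CLK_19_2MHZ:
		*out = CLK_19_2_MHZ;
		break;
	case REF_CLK_25MHZ:
		*out = CLK_25_MHZ;
		break;
	case REF_CLK_100MHZ:
		*out = CLK_100_MHZ;
		break;
	case REF_CLK_156_25MHZ:
		*out = CLK_156_25_MHZ;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

Using one helper for both clocks would also keep the two error paths from drifting apart.
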
+
static int cdns_torrent_phy_probe(struct platform_device *pdev)
{
struct cdns_torrent_phy *cdns_phy;
@@ -2080,6 +2564,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
struct device_node *child;
int ret, subnodes, node = 0, i;
u32 total_num_lanes = 0;
+	unsigned int already_configured;
u8 init_dp_regmap = 0;
u32 phy_type;
@@ -2096,26 +2581,6 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
cdns_phy->dev = dev;
cdns_phy->init_data = data;
- cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
- if (IS_ERR(cdns_phy->phy_rst)) {
- dev_err(dev, "%s: failed to get reset\n",
- dev->of_node->full_name);
- return PTR_ERR(cdns_phy->phy_rst);
- }
-
- cdns_phy->apb_rst = devm_reset_control_get_optional(dev, "torrent_apb");
- if (IS_ERR(cdns_phy->apb_rst)) {
- dev_err(dev, "%s: failed to get apb reset\n",
- dev->of_node->full_name);
- return PTR_ERR(cdns_phy->apb_rst);
- }
-
- cdns_phy->clk = devm_clk_get(dev, "refclk");
- if (IS_ERR(cdns_phy->clk)) {
- dev_err(dev, "phy ref clock not found\n");
- return PTR_ERR(cdns_phy->clk);
- }
-
cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
return PTR_ERR(cdns_phy->sd_base);
@@ -2134,21 +2599,24 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(cdns_phy->clk);
- if (ret) {
- dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ ret = cdns_torrent_clk_register(cdns_phy);
+ if (ret)
return ret;
- }
- cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
- if (!(cdns_phy->ref_clk_rate)) {
- dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
- clk_disable_unprepare(cdns_phy->clk);
- return -EINVAL;
- }
+ regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
+
+ if (!already_configured) {
+ ret = cdns_torrent_reset(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
- /* Enable APB */
- reset_control_deassert(cdns_phy->apb_rst);
+ ret = cdns_torrent_clk(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ /* Enable APB */
+ reset_control_deassert(cdns_phy->apb_rst);
+ }
for_each_available_child_of_node(dev->of_node, child) {
struct phy *gphy;
@@ -2197,6 +2665,9 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
case PHY_TYPE_USB3:
cdns_phy->phys[node].phy_type = TYPE_USB;
break;
+ case PHY_TYPE_USXGMII:
+ cdns_phy->phys[node].phy_type = TYPE_USXGMII;
+ break;
default:
dev_err(dev, "Unsupported protocol\n");
ret = -EINVAL;
@@ -2218,7 +2689,10 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
of_property_read_u32(child, "cdns,ssc-mode",
&cdns_phy->phys[node].ssc_mode);
- gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ if (!already_configured)
+ gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ else
+ gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
goto put_child;
@@ -2279,10 +2753,9 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
init_dp_regmap++;
}
- dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
- cdns_phy->phys[node].num_lanes,
- cdns_phy->max_bit_rate / 1000,
- cdns_phy->max_bit_rate % 1000);
+ dev_dbg(dev, "DP max bit rate %d.%03d Gbps\n",
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
gphy->attrs.max_link_rate = cdns_phy->max_bit_rate;
@@ -2302,7 +2775,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
goto put_lnk_rst;
}
- if (cdns_phy->nsubnodes > 1) {
+ if (cdns_phy->nsubnodes > 1 && !already_configured) {
ret = cdns_torrent_phy_configure_multilink(cdns_phy);
if (ret)
goto put_lnk_rst;
@@ -2314,6 +2787,17 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
goto put_lnk_rst;
}
+ if (cdns_phy->nsubnodes > 1)
+		dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)\n",
+ cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
+ cdns_phy->phys[0].num_lanes,
+ cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type),
+ cdns_phy->phys[1].num_lanes);
+ else
+		dev_dbg(dev, "Single link: %s (%d lanes)\n",
+ cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
+ cdns_phy->phys[0].num_lanes);
+
return 0;
put_child:
@@ -2324,6 +2808,9 @@ put_lnk_rst:
of_node_put(child);
reset_control_assert(cdns_phy->apb_rst);
clk_disable_unprepare(cdns_phy->clk);
+ clk_disable_unprepare(cdns_phy->clk1);
+clk_cleanup:
+ cdns_torrent_clk_cleanup(cdns_phy);
return ret;
}
@@ -2340,10 +2827,642 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev)
}
clk_disable_unprepare(cdns_phy->clk);
+ clk_disable_unprepare(cdns_phy->clk1);
+ cdns_torrent_clk_cleanup(cdns_phy);
return 0;
}
+/* USB and DP link configuration */
+static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
+ .reg_pairs = usb_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
+};
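
These cdns_torrent_vals tables are consumed by the init paths above, which simply walk the reg_pairs array and write each value to the matching regmap. The pattern, as a sketch (hypothetical helper around this driver's types):

#include <linux/regmap.h>

static void example_apply_vals(struct regmap *regmap,
			       const struct cdns_torrent_vals *vals)
{
	int i;

	if (!vals)	/* tables are optional per configuration */
		return;

	for (i = 0; i < vals->num_regs; i++)
		regmap_write(regmap, vals->reg_pairs[i].off,
			     vals->reg_pairs[i].val);
}
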
+
+/* USXGMII and SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0111, XCVR_DIAG_HSCLK_SEL},
+ {0x0103, XCVR_DIAG_HSCLK_DIV},
+ {0x0A9B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
+ .reg_pairs = usxgmii_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs),
+};
+
+/* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START}
+};
+
+static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
+};
+
+/* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+ .reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs),
+};
+
+/* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0028, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0062, CMN_BGCAL_INIT_TMR},
+ {0x0062, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x024A, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0132, CMN_SD_CAL_PLLCNT_START},
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START}
+};
+
+static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
+};
+
+/* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+ .reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs),
+};
+
+/* PCIe and USXGMII link configuration */
+static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0089, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
+ .reg_pairs = pcie_usxgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs),
+};
+
+/* Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL1_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0000, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x0030, RX_REE_SMGM_CTRL1},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x00B9, RX_DIAG_NQST_CTRL},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0033, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs),
+};
+
+/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
+static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
+ {0x0040, PHY_PMA_CMN_CTRL1},
+};
+
+static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
+ .reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
+ .num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
+};
+
+/* Single USXGMII link configuration */
+static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
+ .reg_pairs = sl_usxgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
+};
+
+/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0028, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0028, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0062, CMN_BGCAL_INIT_TMR},
+ {0x0062, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x024A, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0132, CMN_SD_CAL_PLLCNT_START},
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL1_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x0138, CMN_PLL1_LOCK_PLLCNT_START}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0000, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x0030, RX_REE_SMGM_CTRL1},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x00B9, RX_DIAG_NQST_CTRL},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0033, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
+};
+
+/* PCIe and DP link configuration */
+static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
+};
+
+static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
+ .reg_pairs = pcie_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
+};
+
+/* DP Multilink, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
+ .reg_pairs = dp_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
+};
+
+/* Single DisplayPort (DP) link configuration */
+static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+};
+
+static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_link_cmn_vals = {
+ .reg_pairs = sl_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs),
+};
+
+/* Single DP, 19.2 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0027, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A1, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0027, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A1, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0060, CMN_BGCAL_INIT_TMR},
+ {0x0060, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x0240, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x0002, CMN_SD_CAL_INIT_TMR},
+ {0x0002, CMN_SD_CAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0137, CMN_SD_CAL_PLLCNT_START},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x00C0, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00C0, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x0260, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0260, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
+ {0x0780, TX_RCVDET_ST_TMR},
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs),
+};
+
+/* Single DP, 25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
+ {0x0019, CMN_SSM_BIAS_TMR},
+ {0x0032, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00D1, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0032, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00D1, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x007D, CMN_BGCAL_INIT_TMR},
+ {0x007D, CMN_BGCAL_ITER_TMR},
+ {0x0019, CMN_IBCAL_INIT_TMR},
+ {0x001E, CMN_TXPUCAL_INIT_TMR},
+ {0x0006, CMN_TXPUCAL_ITER_TMR},
+ {0x001E, CMN_TXPDCAL_INIT_TMR},
+ {0x0006, CMN_TXPDCAL_ITER_TMR},
+ {0x02EE, CMN_RXCAL_INIT_TMR},
+ {0x0006, CMN_RXCAL_ITER_TMR},
+ {0x0002, CMN_SD_CAL_INIT_TMR},
+ {0x0002, CMN_SD_CAL_ITER_TMR},
+ {0x000E, CMN_SD_CAL_REFTIM_START},
+ {0x012B, CMN_SD_CAL_PLLCNT_START},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x00FA, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00FA, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x0317, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0317, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
+ {0x09C4, TX_RCVDET_ST_TMR},
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs),
+};
+
+/* Single DP, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs),
+};
+
/* USB and SGMII/QSGMII link configuration */
static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
@@ -2455,8 +3574,6 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2464,7 +3581,9 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
- {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};

static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
@@ -2507,13 +3626,28 @@ static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
};

/* USB 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};

+static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usb_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x02FF, TX_PSC_A0},
{0x06AF, TX_PSC_A1},
@@ -2645,12 +3779,22 @@ static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
};

/* SGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
- {0x3700, CMN_DIAG_BIAS_OVRD1},
- {0x0008, CMN_TXPUCAL_TUNE},
- {0x0008, CMN_TXPDCAL_TUNE}
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};

static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
@@ -2661,6 +3805,15 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00B3, DRV_DIAG_TX_DRV}
};

+static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
@@ -2689,11 +3842,60 @@ static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
};

+static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
+};
+
static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
};

+/* TI J7200, multilink SGMII */
+static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
+};
+
+static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs),
+};
+
/* SGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
@@ -2736,17 +3938,14 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
- {0x3700, CMN_DIAG_BIAS_OVRD1},
- {0x0008, CMN_TXPUCAL_TUNE},
- {0x0008, CMN_TXPDCAL_TUNE}
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};

static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
@@ -2755,19 +3954,43 @@ static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
};

/* QSGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};

+static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
{0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
{0x0003, DRV_DIAG_TX_DRV}
};

+static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
@@ -2796,11 +4019,61 @@ static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
};

+static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
};

+/* TI J7200, multilink QSGMII */
+static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x0002, XCVR_DIAG_PSC_OVRD},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL}
+};
+
+static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs),
+};
+
/* QSGMII 100 MHz Ref clk, internal SSC */
static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
@@ -2843,14 +4116,14 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
{0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
{0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
- {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
};

static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
@@ -2922,8 +4195,6 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2979,8 +4250,6 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
{0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
{0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL},
{0x00C7, CMN_PLL0_LOCK_REFCNT_START},
{0x00C7, CMN_PLL1_LOCK_REFCNT_START},
{0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2996,8 +4265,9 @@ static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
- {0x0003, CMN_PLL0_VCOCAL_TCTRL},
- {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
};

static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
@@ -3016,821 +4286,728 @@ static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};

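+/*
+ * The entry tables below replace the old [type0][type1][ssc] lookup
+ * arrays with flat key:value pairs.  CDNS_TORRENT_KEY_ANYCLK() keys on
+ * the two link types alone, while CDNS_TORRENT_KEY() also encodes both
+ * reference clocks and the SSC mode.  A sketch of the lookup, assuming
+ * a plain linear search over an entry table:
+ *
+ *	key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
+ *	for (i = 0; i < tbl->num_entries; i++)
+ *		if (tbl->entries[i].key == key)
+ *			return tbl->entries[i].vals;
+ *
+ * An entry with a NULL vals pointer marks a configuration that needs
+ * no extra register writes.
+ */
+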
+static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &pcie_usxgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &usxgmii_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &usxgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_pll1_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+};
+
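+/*
+ * cdns_map_torrent below only references the entry tables; a sketch of
+ * how a configuration would be resolved at init time (helper name
+ * assumed for illustration):
+ *
+ *	cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ *					     refclk0, refclk1,
+ *					     phy_t1, phy_t2, ssc);
+ */
+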
static const struct cdns_torrent_data cdns_map_torrent = {
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
- .link_cmn_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .cmn_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = cdns_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
+static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals},
+};
+
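+/*
+ * TX lane values keyed on (refclk0, refclk1, link0 type, link1 type, SSC);
+ * a NULL value means that combination needs no TX lane overrides.
+ */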
+static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
static const struct cdns_torrent_data ti_j721e_map_torrent = {
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
- .link_cmn_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
+ },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .phy_pma_cmn_vals_tbl = {
+ .entries = j721e_phy_pma_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
},
- .cmn_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = ti_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
+ },
+};
+
+/* TI J7200 (Torrent SD0805) */
+static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_pll1_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+
+ /* Dual refclk */
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_PCIE, TYPE_USXGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_SGMII, TYPE_USXGMII, NO_SSC), &j7200_sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_156_25_MHZ, TYPE_QSGMII, TYPE_USXGMII, NO_SSC), &j7200_qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_PCIE, NO_SSC), &ml_usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_SGMII, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+};
+
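+/*
+ * J7200 reuses the common link/diag/PCS and J721E PMA tables; only the PLL
+ * common and lane tables are SoC-specific.
+ */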
+static const struct cdns_torrent_data ti_j7200_map_torrent = {
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
+ },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
+ },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
+ },
+ .phy_pma_cmn_vals_tbl = {
+ .entries = j721e_phy_pma_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
+ },
+ .cmn_vals_tbl = {
+ .entries = ti_j7200_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_cmn_vals_entries),
+ },
+ .tx_ln_vals_tbl = {
+ .entries = ti_j7200_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_tx_ln_vals_entries),
+ },
+ .rx_ln_vals_tbl = {
+ .entries = ti_j7200_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_j7200_rx_ln_vals_entries),
},
};
@@ -3843,6 +5020,10 @@ static const struct of_device_id cdns_torrent_phy_of_match[] = {
.compatible = "ti,j721e-serdes-10g",
.data = &ti_j721e_map_torrent,
},
+ {
+ .compatible = "ti,j7200-serdes-10g",
+ .data = &ti_j7200_map_torrent,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
new file mode 100644
index 000000000000..95c6dbb52da7
--- /dev/null
+++ b/drivers/phy/phy-can-transceiver.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-can-transceiver.c - phy driver for CAN transceivers
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ */
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
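+/* Per-model flags describing which control GPIOs the transceiver provides */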
+struct can_transceiver_data {
+ u32 flags;
+#define CAN_TRANSCEIVER_STB_PRESENT BIT(0)
+#define CAN_TRANSCEIVER_EN_PRESENT BIT(1)
+};
+
+struct can_transceiver_phy {
+ struct phy *generic_phy;
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *enable_gpio;
+ struct mux_state *mux_state;
+};
+
+/* Power on: leave standby and assert the enable line, where present */
+static int can_transceiver_phy_power_on(struct phy *phy)
+{
+ struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
+ int ret;
+
+ if (can_transceiver_phy->mux_state) {
+ ret = mux_state_select(can_transceiver_phy->mux_state);
+ if (ret) {
+ dev_err(&phy->dev, "Failed to select CAN mux: %d\n", ret);
+ return ret;
+ }
+ }
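+	/* Deassert standby and assert enable; gpiod applies any active-low polarity */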
+ if (can_transceiver_phy->standby_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 0);
+ if (can_transceiver_phy->enable_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 1);
+
+ return 0;
+}
+
+/* Power off: return the transceiver to its standby/disabled state */
+static int can_transceiver_phy_power_off(struct phy *phy)
+{
+ struct can_transceiver_phy *can_transceiver_phy = phy_get_drvdata(phy);
+
+ if (can_transceiver_phy->standby_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->standby_gpio, 1);
+ if (can_transceiver_phy->enable_gpio)
+ gpiod_set_value_cansleep(can_transceiver_phy->enable_gpio, 0);
+ if (can_transceiver_phy->mux_state)
+ mux_state_deselect(can_transceiver_phy->mux_state);
+
+ return 0;
+}
+
+static const struct phy_ops can_transceiver_phy_ops = {
+ .power_on = can_transceiver_phy_power_on,
+ .power_off = can_transceiver_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct can_transceiver_data tcan1042_drvdata = {
+ .flags = CAN_TRANSCEIVER_STB_PRESENT,
+};
+
+static const struct can_transceiver_data tcan1043_drvdata = {
+ .flags = CAN_TRANSCEIVER_STB_PRESENT | CAN_TRANSCEIVER_EN_PRESENT,
+};
+
+static const struct of_device_id can_transceiver_phy_ids[] = {
+ {
+ .compatible = "ti,tcan1042",
+ .data = &tcan1042_drvdata
+ },
+ {
+ .compatible = "ti,tcan1043",
+ .data = &tcan1043_drvdata
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids);
+
+static int can_transceiver_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct can_transceiver_phy *can_transceiver_phy;
+ const struct can_transceiver_data *drvdata;
+ const struct of_device_id *match;
+ struct phy *phy;
+ struct gpio_desc *standby_gpio;
+ struct gpio_desc *enable_gpio;
+ u32 max_bitrate = 0;
+
+ can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
+ if (!can_transceiver_phy)
+ return -ENOMEM;
+
+ match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
+ drvdata = match->data;
+
+ if (of_property_read_bool(dev->of_node, "mux-states")) {
+ struct mux_state *mux_state;
+
+ mux_state = devm_mux_state_get(dev, NULL);
+ if (IS_ERR(mux_state))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mux_state),
+ "failed to get mux\n");
+ can_transceiver_phy->mux_state = mux_state;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node,
+ &can_transceiver_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create can transceiver phy\n");
+ return PTR_ERR(phy);
+ }
+
+ device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+ if (!max_bitrate)
+ dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
+ phy->attrs.max_link_rate = max_bitrate;
+
+ can_transceiver_phy->generic_phy = phy;
+
+ if (drvdata->flags & CAN_TRANSCEIVER_STB_PRESENT) {
+ standby_gpio = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_HIGH);
+ if (IS_ERR(standby_gpio))
+ return PTR_ERR(standby_gpio);
+ can_transceiver_phy->standby_gpio = standby_gpio;
+ }
+
+ if (drvdata->flags & CAN_TRANSCEIVER_EN_PRESENT) {
+ enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(enable_gpio))
+ return PTR_ERR(enable_gpio);
+ can_transceiver_phy->enable_gpio = enable_gpio;
+ }
+
+ phy_set_drvdata(can_transceiver_phy->generic_phy, can_transceiver_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver can_transceiver_phy_driver = {
+ .probe = can_transceiver_phy_probe,
+ .driver = {
+ .name = "can-transceiver-phy",
+ .of_match_table = can_transceiver_phy_ids,
+ },
+};
+
+module_platform_driver(can_transceiver_phy_driver);
+
+MODULE_AUTHOR("Faiz Abbas <faiz_abbas@ti.com>");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_DESCRIPTION("CAN TRANSCEIVER PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 71cb10826326..43e2b2aaeef9 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -667,16 +667,18 @@ struct phy *phy_get(struct device *dev, const char *string)
struct phy *phy;
struct device_link *link;
- if (string == NULL) {
- dev_WARN(dev, "missing string\n");
- return ERR_PTR(-EINVAL);
- }
-
if (dev->of_node) {
- index = of_property_match_string(dev->of_node, "phy-names",
- string);
+ if (string)
+ index = of_property_match_string(dev->of_node, "phy-names",
+ string);
+ else
+ index = 0;
phy = _of_phy_get(dev->of_node, index);
} else {
+ if (!string) {
+ dev_WARN(dev, "missing string\n");
+ return ERR_PTR(-EINVAL);
+ }
phy = phy_find(dev, string);
}
if (IS_ERR(phy))
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index d0ab69750c6b..a0cf83b45aac 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -22,6 +22,14 @@
#define AM33XX_GMII_SEL_MODE_RMII 1
#define AM33XX_GMII_SEL_MODE_RGMII 2
+/* J72xx SoC specific definitions for the CONTROL port */
+#define J72XX_GMII_SEL_MODE_SGMII 3
+#define J72XX_GMII_SEL_MODE_QSGMII 4
+#define J72XX_GMII_SEL_MODE_XFI 5
+#define J72XX_GMII_SEL_MODE_QSGMII_SUB 6
+
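+/* Ports are numbered from 1, so port n maps to bit (n - 1) of a port mask */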
+#define PHY_GMII_PORT(n) BIT((n) - 1)
+
enum {
PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
@@ -43,6 +51,7 @@ struct phy_gmii_sel_soc_data {
u32 features;
const struct reg_field (*regfields)[PHY_GMII_SEL_LAST];
bool use_of_data;
+ u64 extra_modes;
};
struct phy_gmii_sel_priv {
@@ -53,6 +62,7 @@ struct phy_gmii_sel_priv {
struct phy_gmii_sel_phy_priv *if_phys;
u32 num_ports;
u32 reg_offset;
+ u32 qsgmii_main_ports;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -88,10 +98,36 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_QSGMII)))
+ goto unsupported;
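+		/* Only the designated main port runs full QSGMII; others are QSGMII-SUB */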
+ if (if_phy->priv->qsgmii_main_ports & BIT(if_phy->id - 1))
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII;
+ else
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII_SUB;
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_SGMII)))
+ goto unsupported;
+		gmii_sel_mode = J72XX_GMII_SEL_MODE_SGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_XAUI:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_XAUI)))
+ goto unsupported;
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_XFI;
+ break;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_USXGMII)))
+ goto unsupported;
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_XFI;
+ break;
+
default:
- dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
- if_phy->id, phy_modes(submode));
- return -EINVAL;
+ goto unsupported;
}
if_phy->phy_if_mode = submode;
@@ -123,6 +159,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
}
return 0;
+
+unsupported:
+ dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
+ if_phy->id, phy_modes(submode));
+ return -EINVAL;
}
static const
@@ -172,14 +213,38 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = {
static const
struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = {
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2), },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x0, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x4, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x8, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0xC, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x10, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x14, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x18, 4, 4),
+ },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x1C, 4, 4),
+ },
};
static const
@@ -188,6 +253,33 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
.regfields = phy_gmii_sel_fields_am654,
};
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
+ BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_XAUI) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
+};
+
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+};
+
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
+};
+
static const struct of_device_id phy_gmii_sel_id_table[] = {
{
.compatible = "ti,am3352-phy-gmii-sel",
@@ -209,6 +301,18 @@ static const struct of_device_id phy_gmii_sel_id_table[] = {
.compatible = "ti,am654-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am654,
},
+ {
+ .compatible = "ti,j7200-cpsw5g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw5g_soc_j7200,
+ },
+ {
+ .compatible = "ti,j721e-cpsw9g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw9g_soc_j721e,
+ },
+ {
+ .compatible = "ti,j784s4-cpsw9g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw9g_soc_j784s4,
+ },
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
@@ -350,6 +454,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
const struct of_device_id *of_id;
struct phy_gmii_sel_priv *priv;
+ u32 main_ports[2] = {1, 1};
int ret;
of_id = of_match_node(phy_gmii_sel_id_table, pdev->dev.of_node);
@@ -363,6 +468,16 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->soc_data = of_id->data;
priv->num_ports = priv->soc_data->num_ports;
+	/* Differentiate between J7200 CPSW5G, J721E CPSW9G and J784S4 CPSW9G */
+ if (of_device_is_compatible(node, "ti,j7200-cpsw5g-phy-gmii-sel") > 0) {
+ of_property_read_u32_array(node, "ti,qsgmii-main-ports", &main_ports[0], 1);
+ priv->qsgmii_main_ports = PHY_GMII_PORT(main_ports[0]);
+ } else if (of_device_is_compatible(node, "ti,j721e-cpsw9g-phy-gmii-sel") ||
+ of_device_is_compatible(node, "ti,j784s4-cpsw9g-phy-gmii-sel")) {
+ of_property_read_u32_array(node, "ti,qsgmii-main-ports", &main_ports[0], 2);
+ priv->qsgmii_main_ports = PHY_GMII_PORT(main_ports[0]);
+ priv->qsgmii_main_ports |= PHY_GMII_PORT(main_ports[1] + 4);
+ }
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 5536b8f4bfd1..680925a663c5 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -7,12 +7,14 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-ti.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/mux/consumer.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -21,11 +23,25 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#define REF_CLK_19_2MHZ 19200000
+#define REF_CLK_25MHZ 25000000
+#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
+
+/* SCM offsets */
+#define SERDES_SUP_CTRL 0x4400
+
+/* SERDES offsets */
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
#define WIZ_SERDES_RST 0x40c
#define WIZ_SERDES_TYPEC 0x410
#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
+#define WIZ_LANEDIV(n) (0x484 + (0x40 * (n)))
+
+#define WIZ_MAX_INPUT_CLOCKS 4
+/* Sized to cover all mux, divider and gate output clocks */
+#define WIZ_MAX_OUTPUT_CLOCKS 32
#define WIZ_MAX_LANES 4
#define WIZ_MUX_NUM_CLOCKS 3
@@ -41,6 +57,14 @@ enum wiz_lane_standard_mode {
LANE_MODE_GEN4,
};
+/*
+ * List of master lanes used for lane swapping
+ */
+enum wiz_typec_master_lane {
+ LANE0 = 0,
+ LANE2 = 2,
+};
+
enum wiz_refclk_mux_sel {
PLL0_REFCLK,
PLL1_REFCLK,
@@ -52,18 +76,32 @@ enum wiz_refclk_div_sel {
CMN_REFCLK1_DIG_DIV,
};
+enum wiz_clock_input {
+ WIZ_CORE_REFCLK,
+ WIZ_EXT_REFCLK,
+ WIZ_CORE_REFCLK1,
+ WIZ_EXT_REFCLK1,
+};
+
static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field phy_en_refclk = REG_FIELD(WIZ_SERDES_RST, 30, 30);
static const struct reg_field pll1_refclk_mux_sel =
REG_FIELD(WIZ_SERDES_RST, 29, 29);
+static const struct reg_field pll1_refclk_mux_sel_2 =
+ REG_FIELD(WIZ_SERDES_RST, 22, 23);
static const struct reg_field pll0_refclk_mux_sel =
REG_FIELD(WIZ_SERDES_RST, 28, 28);
+static const struct reg_field pll0_refclk_mux_sel_2 =
+ REG_FIELD(WIZ_SERDES_RST, 28, 29);
static const struct reg_field refclk_dig_sel_16g =
REG_FIELD(WIZ_SERDES_RST, 24, 25);
static const struct reg_field refclk_dig_sel_10g =
REG_FIELD(WIZ_SERDES_RST, 24, 24);
static const struct reg_field pma_cmn_refclk_int_mode =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
+static const struct reg_field pma_cmn_refclk1_int_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 20, 21);
static const struct reg_field pma_cmn_refclk_mode =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
static const struct reg_field pma_cmn_refclk_dig_div =
@@ -71,6 +109,24 @@ static const struct reg_field pma_cmn_refclk_dig_div =
static const struct reg_field pma_cmn_refclk1_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+static const struct reg_field sup_pll0_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 0, 1);
+static const struct reg_field sup_pll1_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 2, 3);
+static const struct reg_field sup_pma_cmn_refclk1_int_mode =
+ REG_FIELD(SERDES_SUP_CTRL, 4, 5);
+static const struct reg_field sup_refclk_dig_sel_10g =
+ REG_FIELD(SERDES_SUP_CTRL, 6, 7);
+static const struct reg_field sup_legacy_clk_override =
+ REG_FIELD(SERDES_SUP_CTRL, 8, 8);
+
+static const char * const output_clk_names[] = {
+ [TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
+ [TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
+ [TI_WIZ_REFCLK_DIG] = "refclk-dig",
+ [TI_WIZ_PHY_EN_REFCLK] = "phy-en-refclk",
+};
+
static const struct reg_field p_enable[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 30, 31),
REG_FIELD(WIZ_LANECTL(1), 30, 31),
@@ -101,13 +157,57 @@ static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 24, 25),
};
+static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 22, 23),
+ REG_FIELD(WIZ_LANECTL(1), 22, 23),
+ REG_FIELD(WIZ_LANECTL(2), 22, 23),
+ REG_FIELD(WIZ_LANECTL(3), 22, 23),
+};
+
+static const struct reg_field p0_mac_src_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 20, 21),
+ REG_FIELD(WIZ_LANECTL(1), 20, 21),
+ REG_FIELD(WIZ_LANECTL(2), 20, 21),
+ REG_FIELD(WIZ_LANECTL(3), 20, 21),
+};
+
+static const struct reg_field p0_rxfclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 6, 7),
+ REG_FIELD(WIZ_LANECTL(1), 6, 7),
+ REG_FIELD(WIZ_LANECTL(2), 6, 7),
+ REG_FIELD(WIZ_LANECTL(3), 6, 7),
+};
+
+static const struct reg_field p0_refclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 18, 19),
+ REG_FIELD(WIZ_LANECTL(1), 18, 19),
+ REG_FIELD(WIZ_LANECTL(2), 18, 19),
+ REG_FIELD(WIZ_LANECTL(3), 18, 19),
+};
+static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANEDIV(0), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(1), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(2), 16, 22),
+ REG_FIELD(WIZ_LANEDIV(3), 16, 22),
+};
+
+static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANEDIV(0), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(1), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(2), 0, 8),
+ REG_FIELD(WIZ_LANEDIV(3), 0, 8),
+};
+
static const struct reg_field typec_ln10_swap =
REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+static const struct reg_field typec_ln23_swap =
+ REG_FIELD(WIZ_SERDES_TYPEC, 31, 31);
+
struct wiz_clk_mux {
struct clk_hw hw;
struct regmap_field *field;
- u32 *table;
+ const u32 *table;
struct clk_init_data clk_data;
};
@@ -123,18 +223,26 @@ struct wiz_clk_divider {
#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
struct wiz_clk_mux_sel {
- struct regmap_field *field;
- u32 table[4];
+ u32 table[WIZ_MAX_INPUT_CLOCKS];
const char *node_name;
+ u32 num_parents;
+ u32 parents[WIZ_MAX_INPUT_CLOCKS];
};
struct wiz_clk_div_sel {
- struct regmap_field *field;
- const struct clk_div_table *table;
+ const struct clk_div_table *table;
const char *node_name;
};
-static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+struct wiz_phy_en_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_en_refclk;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_phy_en_refclk(_hw) container_of(_hw, struct wiz_phy_en_refclk, hw)
+
+static const struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
{
/*
* Mux value to be configured for each of the input clocks
@@ -153,25 +261,52 @@ static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
},
};
-static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
{
/*
* Mux value to be configured for each of the input clocks
* in the order populated in device tree
*/
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll0-refclk",
},
{
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "pll1-refclk",
},
{
+ .num_parents = 2,
+ .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
.table = { 1, 0 },
.node_name = "refclk-dig",
},
};
+static const struct wiz_clk_mux_sel clk_mux_sel_10g_2_refclk[] = {
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
static const struct clk_div_table clk_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
@@ -180,7 +315,7 @@ static const struct clk_div_table clk_div_table[] = {
{ /* sentinel */ },
};
-static struct wiz_clk_div_sel clk_div_sel[] = {
+static const struct wiz_clk_div_sel clk_div_sel[] = {
{
.table = clk_div_table,
.node_name = "cmn-refclk-dig-div",
@@ -193,7 +328,21 @@ static struct wiz_clk_div_sel clk_div_sel[] = {
enum wiz_type {
J721E_WIZ_16G,
- J721E_WIZ_10G,
+ J721E_WIZ_10G, /* Also for J7200 SR1.0 */
+ AM64_WIZ_10G,
+ J7200_WIZ_10G, /* J7200 SR2.0 */
+ J784S4_WIZ_10G,
+};
+
+struct wiz_data {
+ enum wiz_type type;
+ const struct reg_field *pll0_refclk_mux_sel;
+ const struct reg_field *pll1_refclk_mux_sel;
+ const struct reg_field *refclk_dig_sel;
+ const struct reg_field *pma_cmn_refclk1_dig_div;
+ const struct reg_field *pma_cmn_refclk1_int_mode;
+ const struct wiz_clk_mux_sel *clk_mux_sel;
+ unsigned int clk_div_sel_num;
};
#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
@@ -201,21 +350,34 @@ enum wiz_type {
struct wiz {
struct regmap *regmap;
+ struct regmap *scm_regmap;
enum wiz_type type;
- struct wiz_clk_mux_sel *clk_mux_sel;
- struct wiz_clk_div_sel *clk_div_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel;
+ const struct wiz_clk_div_sel *clk_div_sel;
unsigned int clk_div_sel_num;
struct regmap_field *por_en;
struct regmap_field *phy_reset_n;
+ struct regmap_field *phy_en_refclk;
struct regmap_field *p_enable[WIZ_MAX_LANES];
struct regmap_field *p_align[WIZ_MAX_LANES];
struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
+ struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES];
+ struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES];
+ struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES];
+ struct regmap_field *p0_mac_src_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_rxfclk_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_refclk_sel[WIZ_MAX_LANES];
struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk1_int_mode;
struct regmap_field *pma_cmn_refclk_mode;
struct regmap_field *pma_cmn_refclk_dig_div;
struct regmap_field *pma_cmn_refclk1_dig_div;
+ struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
+ struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
+ struct regmap_field *typec_ln23_swap;
+ struct regmap_field *sup_legacy_clk_override;
struct device *dev;
u32 num_lanes;
@@ -224,6 +386,11 @@ struct wiz {
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
u32 lane_phy_type[WIZ_MAX_LANES];
+ u32 master_lane_num[WIZ_MAX_LANES];
+ struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS];
+ struct clk *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
+ struct clk_onecell_data clk_data;
+ const struct wiz_data *data;
};
static int wiz_reset(struct wiz *wiz)
@@ -243,6 +410,29 @@ static int wiz_reset(struct wiz *wiz)
return 0;
}
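+
+/*
+ * Set the lane MAC divider values required by the Ethernet protocols
+ * (SGMII/QSGMII/USXGMII); lanes with other PHY types keep their reset
+ * defaults.
+ */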
+static int wiz_p_mac_div_sel(struct wiz *wiz)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int ret;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_SGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_QSGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(wiz->p_mac_div_sel1[i], 2);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wiz_mode_select(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
@@ -253,8 +443,15 @@ static int wiz_mode_select(struct wiz *wiz)
for (i = 0; i < num_lanes; i++) {
if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
mode = LANE_MODE_GEN1;
- else
- mode = LANE_MODE_GEN4;
+		else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
+			mode = LANE_MODE_GEN2;
+		else
+			mode = LANE_MODE_GEN4;
+
+		if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+			ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
+			if (ret)
+				return ret;
+			ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
+			if (ret)
+				return ret;
+			ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
+			if (ret)
+				return ret;
+			mode = LANE_MODE_GEN1;
+		}
ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
@@ -300,6 +497,12 @@ static int wiz_init(struct wiz *wiz)
return ret;
}
+ ret = wiz_p_mac_div_sel(wiz);
+ if (ret) {
+ dev_err(dev, "Configuring P0 MAC DIV SEL failed\n");
+ return ret;
+ }
+
ret = wiz_init_raw_interface(wiz, true);
if (ret) {
dev_err(dev, "WIZ interface initialization failed\n");
@@ -311,11 +514,11 @@ static int wiz_init(struct wiz *wiz)
static int wiz_regfield_init(struct wiz *wiz)
{
- struct wiz_clk_mux_sel *clk_mux_sel;
- struct wiz_clk_div_sel *clk_div_sel;
struct regmap *regmap = wiz->regmap;
+	/* replaced below by wiz->scm_regmap when one is provided */
+	struct regmap *scm_regmap = wiz->regmap;
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
+ const struct wiz_data *data = wiz->data;
int i;
wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
@@ -345,54 +548,61 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->pma_cmn_refclk_mode);
}
- clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
- clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
- pma_cmn_refclk_dig_div);
- if (IS_ERR(clk_div_sel->field)) {
+ wiz->div_sel_field[CMN_REFCLK_DIG_DIV] =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_dig_div);
+ if (IS_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
- return PTR_ERR(clk_div_sel->field);
+ return PTR_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV]);
}
- if (wiz->type == J721E_WIZ_16G) {
- clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
- clk_div_sel->field =
+ if (data->pma_cmn_refclk1_dig_div) {
+ wiz->div_sel_field[CMN_REFCLK1_DIG_DIV] =
devm_regmap_field_alloc(dev, regmap,
- pma_cmn_refclk1_dig_div);
- if (IS_ERR(clk_div_sel->field)) {
+ *data->pma_cmn_refclk1_dig_div);
+ if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV])) {
dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
- return PTR_ERR(clk_div_sel->field);
+ return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV]);
}
}
- clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
- clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
- pll0_refclk_mux_sel);
- if (IS_ERR(clk_mux_sel->field)) {
+ if (wiz->scm_regmap) {
+ scm_regmap = wiz->scm_regmap;
+ wiz->sup_legacy_clk_override =
+ devm_regmap_field_alloc(dev, scm_regmap, sup_legacy_clk_override);
+ if (IS_ERR(wiz->sup_legacy_clk_override)) {
+ dev_err(dev, "SUP_LEGACY_CLK_OVERRIDE reg field init failed\n");
+ return PTR_ERR(wiz->sup_legacy_clk_override);
+ }
+ }
+
+ wiz->mux_sel_field[PLL0_REFCLK] =
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll0_refclk_mux_sel);
+ if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
}
- clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
- clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
- pll1_refclk_mux_sel);
- if (IS_ERR(clk_mux_sel->field)) {
+ wiz->mux_sel_field[PLL1_REFCLK] =
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll1_refclk_mux_sel);
+ if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
- clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
- if (wiz->type == J721E_WIZ_10G)
- clk_mux_sel->field =
- devm_regmap_field_alloc(dev, regmap,
- refclk_dig_sel_10g);
- else
- clk_mux_sel->field =
- devm_regmap_field_alloc(dev, regmap,
- refclk_dig_sel_16g);
-
- if (IS_ERR(clk_mux_sel->field)) {
+ wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, scm_regmap,
+ *data->refclk_dig_sel);
+ if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
- return PTR_ERR(clk_mux_sel->field);
+ return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
+ }
+
+ if (data->pma_cmn_refclk1_int_mode) {
+ wiz->pma_cmn_refclk1_int_mode =
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pma_cmn_refclk1_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk1_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk1_int_mode);
+ }
}
for (i = 0; i < num_lanes; i++) {
@@ -425,6 +635,46 @@ static int wiz_regfield_init(struct wiz *wiz)
i);
return PTR_ERR(wiz->p_standard_mode[i]);
}
+
+ wiz->p0_fullrt_div[i] = devm_regmap_field_alloc(dev, regmap, p0_fullrt_div[i]);
+ if (IS_ERR(wiz->p0_fullrt_div[i])) {
+ dev_err(dev, "P%d_FULLRT_DIV reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_fullrt_div[i]);
+ }
+
+ wiz->p0_mac_src_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_mac_src_sel[i]);
+ if (IS_ERR(wiz->p0_mac_src_sel[i])) {
+ dev_err(dev, "P%d_MAC_SRC_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_mac_src_sel[i]);
+ }
+
+ wiz->p0_rxfclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_rxfclk_sel[i]);
+ if (IS_ERR(wiz->p0_rxfclk_sel[i])) {
+ dev_err(dev, "P%d_RXFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_rxfclk_sel[i]);
+ }
+
+ wiz->p0_refclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_refclk_sel[i]);
+ if (IS_ERR(wiz->p0_refclk_sel[i])) {
+ dev_err(dev, "P%d_REFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_refclk_sel[i]);
+ }
+
+ wiz->p_mac_div_sel0[i] =
+ devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
+ if (IS_ERR(wiz->p_mac_div_sel0[i])) {
+ dev_err(dev, "P%d_MAC_DIV_SEL0 reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_mac_div_sel0[i]);
+ }
+
+ wiz->p_mac_div_sel1[i] =
+ devm_regmap_field_alloc(dev, regmap, p_mac_div_sel1[i]);
+ if (IS_ERR(wiz->p_mac_div_sel1[i])) {
+ dev_err(dev, "P%d_MAC_DIV_SEL1 reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_mac_div_sel1[i]);
+ }
}
wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
@@ -434,6 +684,96 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->typec_ln10_swap);
}
+ wiz->typec_ln23_swap = devm_regmap_field_alloc(dev, regmap,
+ typec_ln23_swap);
+ if (IS_ERR(wiz->typec_ln23_swap)) {
+ dev_err(dev, "LN23_SWAP reg field init failed\n");
+ return PTR_ERR(wiz->typec_ln23_swap);
+ }
+
+ wiz->phy_en_refclk = devm_regmap_field_alloc(dev, regmap, phy_en_refclk);
+ if (IS_ERR(wiz->phy_en_refclk)) {
+ dev_err(dev, "PHY_EN_REFCLK reg field init failed\n");
+ return PTR_ERR(wiz->phy_en_refclk);
+ }
+
+ return 0;
+}
+
+static int wiz_phy_en_refclk_enable(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+ regmap_field_write(phy_en_refclk, 1);
+
+ return 0;
+}
+
+static void wiz_phy_en_refclk_disable(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+ regmap_field_write(phy_en_refclk, 0);
+}
+
+static int wiz_phy_en_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+ struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+	unsigned int val;
+
+ regmap_field_read(phy_en_refclk, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops wiz_phy_en_refclk_ops = {
+ .enable = wiz_phy_en_refclk_enable,
+ .disable = wiz_phy_en_refclk_disable,
+ .is_enabled = wiz_phy_en_refclk_is_enabled,
+};
+
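+/*
+ * Register a gate clock for the PHY reference clock output; consumers
+ * request it through the TI_WIZ_PHY_EN_REFCLK index of the provider.
+ */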
+static int wiz_phy_en_refclk_register(struct wiz *wiz)
+{
+ struct wiz_phy_en_refclk *wiz_phy_en_refclk;
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ struct clk *clk;
+ char *clk_name;
+ unsigned int sz;
+
+ wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
+ if (!wiz_phy_en_refclk)
+ return -ENOMEM;
+
+ init = &wiz_phy_en_refclk->clk_data;
+
+ init->ops = &wiz_phy_en_refclk_ops;
+ init->flags = 0;
+
+ sz = strlen(dev_name(dev)) + strlen(output_clk_names[TI_WIZ_PHY_EN_REFCLK]) + 2;
+
+ clk_name = kzalloc(sz, GFP_KERNEL);
+ if (!clk_name)
+ return -ENOMEM;
+
+ snprintf(clk_name, sz, "%s_%s", dev_name(dev), output_clk_names[TI_WIZ_PHY_EN_REFCLK]);
+ init->name = clk_name;
+
+ wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
+ wiz_phy_en_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
+
+ kfree(clk_name);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ wiz->output_clks[TI_WIZ_PHY_EN_REFCLK] = clk;
+
return 0;
}
@@ -444,7 +784,7 @@ static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
unsigned int val;
regmap_field_read(field, &val);
- return clk_mux_val_to_index(hw, mux->table, 0, val);
+ return clk_mux_val_to_index(hw, (u32 *)mux->table, 0, val);
}
static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -462,8 +802,69 @@ static const struct clk_ops wiz_clk_mux_ops = {
.get_parent = wiz_clk_mux_get_parent,
};
-static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
- struct regmap_field *field, u32 *table)
+static int wiz_mux_clk_register(struct wiz *wiz, struct regmap_field *field,
+ const struct wiz_clk_mux_sel *mux_sel, int clk_index)
+{
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct wiz_clk_mux *mux;
+ char clk_name[100];
+ struct clk *clk;
+ int ret = 0, i;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = mux_sel->num_parents;
+
+	parent_names = kcalloc(num_parents, sizeof(*parent_names), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ clk = wiz->input_clks[mux_sel->parents[i]];
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(dev, "Failed to get parent clk for %s\n",
+ output_clk_names[clk_index]);
+ ret = -EINVAL;
+ goto err;
+ }
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), output_clk_names[clk_index]);
+
+ init = &mux->clk_data;
+
+ init->ops = &wiz_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->field = field;
+ mux->table = mux_sel->table;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ wiz->output_clks[clk_index] = clk;
+
+err:
+ kfree(parent_names);
+
+ return ret;
+}
+
+static int wiz_mux_of_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field, const u32 *table)
{
struct device *dev = wiz->dev;
struct clk_init_data *init;
@@ -607,10 +1008,21 @@ static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
{
- struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
struct device_node *clk_node;
int i;
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
+ case J784S4_WIZ_10G:
+ of_clk_del_provider(dev->of_node);
+ return;
+ default:
+ break;
+ }
+
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
of_clk_del_provider(clk_node);
@@ -622,11 +1034,46 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
of_clk_del_provider(clk_node);
of_node_put(clk_node);
}
+
+ of_clk_del_provider(wiz->dev->of_node);
+}
+
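+/*
+ * Register the WIZ output clocks by fixed index and publish them through
+ * a onecell provider on the device node itself; used on SoCs whose
+ * bindings have no per-clock child nodes.
+ */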
+static int wiz_clock_register(struct wiz *wiz)
+{
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *node = dev->of_node;
+ int clk_index;
+ int ret;
+ int i;
+
+ clk_index = TI_WIZ_PLL0_REFCLK;
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
+ ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
+ if (ret) {
+ dev_err(dev, "Failed to register clk: %s\n", output_clk_names[clk_index]);
+ return ret;
+ }
+ }
+
+ ret = wiz_phy_en_refclk_register(wiz);
+ if (ret) {
+ dev_err(dev, "Failed to add phy-en-refclk\n");
+ return ret;
+ }
+
+ wiz->clk_data.clks = wiz->output_clks;
+ wiz->clk_data.clk_num = WIZ_MAX_OUTPUT_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &wiz->clk_data);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+ return ret;
}
static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
{
- struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
struct device *dev = wiz->dev;
struct device_node *clk_node;
const char *node_name;
@@ -641,6 +1088,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
ret = PTR_ERR(clk);
return ret;
}
+ wiz->input_clks[WIZ_CORE_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
@@ -648,12 +1096,48 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
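+
+	/* pick a refclk-dig divider that matches the core refclk rate */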
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
+ switch (rate) {
+ case REF_CLK_100MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x2);
+ break;
+ case REF_CLK_156_25MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x3);
+ break;
+ default:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (wiz->data->pma_cmn_refclk1_int_mode) {
+ clk = devm_clk_get(dev, "core_ref1_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref1_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3);
+ }
+
clk = devm_clk_get(dev, "ext_ref_clk");
if (IS_ERR(clk)) {
dev_err(dev, "ext_ref_clk clock not found\n");
ret = PTR_ERR(clk);
return ret;
}
+ wiz->input_clks[WIZ_EXT_REFCLK] = clk;
rate = clk_get_rate(clk);
if (rate >= 100000000)
@@ -661,6 +1145,18 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
+ case J784S4_WIZ_10G:
+ ret = wiz_clock_register(wiz);
+ if (ret)
+ dev_err(dev, "Failed to register wiz clocks\n");
+ return ret;
+ default:
+ break;
+ }
+
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
node_name = clk_mux_sel[i].node_name;
clk_node = of_get_child_by_name(node, node_name);
@@ -670,8 +1166,8 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
goto err;
}
- ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
- clk_mux_sel[i].table);
+ ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
+ clk_mux_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
node_name);
@@ -691,7 +1187,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
goto err;
}
- ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+ ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
if (ret) {
dev_err(dev, "Failed to register %s clock\n",
@@ -726,6 +1222,25 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
return ret;
}
+static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
+{
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
+ break;
+ case J721E_WIZ_16G:
+ case J721E_WIZ_10G:
+ case J7200_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -733,15 +1248,39 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
struct wiz *wiz = dev_get_drvdata(dev);
int ret;
- /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
- if (id == 0 && wiz->gpio_typec_dir) {
- if (wiz->typec_dir_delay)
- msleep_interruptible(wiz->typec_dir_delay);
-
- if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
- regmap_field_write(wiz->typec_ln10_swap, 1);
- else
- regmap_field_write(wiz->typec_ln10_swap, 0);
+ if (id == 0) {
+ /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
+ if (wiz->gpio_typec_dir) {
+ if (wiz->typec_dir_delay)
+ msleep_interruptible(wiz->typec_dir_delay);
+
+ if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ else
+ regmap_field_write(wiz->typec_ln10_swap, 0);
+ } else {
+			/*
+			 * If no typec-dir GPIO is specified and the PHY type
+			 * is USB3 with master lane number '0' or '2', set the
+			 * LN10 or LN23 SWAP bit to '1' respectively.
+			 */
+ u32 num_lanes = wiz->num_lanes;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_USB3) {
+ switch (wiz->master_lane_num[i]) {
+ case LANE0:
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ break;
+ case LANE2:
+ regmap_field_write(wiz->typec_ln23_swap, 1);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
}
if (id == 0) {
@@ -749,6 +1288,10 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
return ret;
}
+ ret = wiz_phy_fullrt_div(wiz, id - 1);
+ if (ret)
+ return ret;
+
if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
else
@@ -769,12 +1312,69 @@ static const struct regmap_config wiz_regmap_config = {
.fast_io = true,
};
+static struct wiz_data j721e_16g_data = {
+ .type = J721E_WIZ_16G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
+ .refclk_dig_sel = &refclk_dig_sel_16g,
+ .pma_cmn_refclk1_dig_div = &pma_cmn_refclk1_dig_div,
+ .clk_mux_sel = clk_mux_sel_16g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G,
+};
+
+static struct wiz_data j721e_10g_data = {
+ .type = J721E_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
+ .refclk_dig_sel = &refclk_dig_sel_10g,
+ .clk_mux_sel = clk_mux_sel_10g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
+static struct wiz_data am64_10g_data = {
+ .type = AM64_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
+ .refclk_dig_sel = &refclk_dig_sel_10g,
+ .clk_mux_sel = clk_mux_sel_10g,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
+static struct wiz_data j7200_pg2_10g_data = {
+ .type = J7200_WIZ_10G,
+ .pll0_refclk_mux_sel = &sup_pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &sup_pll1_refclk_mux_sel,
+ .refclk_dig_sel = &sup_refclk_dig_sel_10g,
+ .pma_cmn_refclk1_int_mode = &sup_pma_cmn_refclk1_int_mode,
+ .clk_mux_sel = clk_mux_sel_10g_2_refclk,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
+static struct wiz_data j784s4_10g_data = {
+ .type = J784S4_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel_2,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel_2,
+ .refclk_dig_sel = &refclk_dig_sel_16g,
+ .pma_cmn_refclk1_int_mode = &pma_cmn_refclk1_int_mode,
+ .clk_mux_sel = clk_mux_sel_10g_2_refclk,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
static const struct of_device_id wiz_id_table[] = {
{
- .compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+ .compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data,
+ },
+ {
+ .compatible = "ti,j721e-wiz-10g", .data = &j721e_10g_data,
+ },
+ {
+ .compatible = "ti,am64-wiz-10g", .data = &am64_10g_data,
},
{
- .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+ .compatible = "ti,j7200-wiz-10g", .data = &j7200_pg2_10g_data,
+ },
+ {
+ .compatible = "ti,j784s4-wiz-10g", .data = &j784s4_10g_data,
},
{}
};
@@ -794,6 +1394,10 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
u32 reg, num_lanes = 1, phy_type = PHY_NONE;
int ret, i;
+ if (!(of_node_name_eq(subnode, "phy") ||
+ of_node_name_eq(subnode, "link")))
+ continue;
+
ret = of_property_read_u32(subnode, "reg", &reg);
if (ret) {
dev_err(dev,
@@ -807,8 +1411,10 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__,
reg, reg + num_lanes - 1, phy_type);
- for (i = reg; i < reg + num_lanes; i++)
+ for (i = reg; i < reg + num_lanes; i++) {
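+		/* the first lane of a multi-lane PHY acts as its master lane */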
+ wiz->master_lane_num[i] = reg;
wiz->lane_phy_type[i] = phy_type;
+ }
}
return 0;
@@ -820,19 +1426,28 @@ static int wiz_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct platform_device *serdes_pdev;
+ bool already_configured = false;
struct device_node *child_node;
struct regmap *regmap;
struct resource res;
void __iomem *base;
struct wiz *wiz;
+	unsigned int val;
+	int ret, i;
u32 num_lanes;
- int ret;
+ const struct wiz_data *data;
wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
if (!wiz)
return -ENOMEM;
- wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "NULL device data\n");
+ return -EINVAL;
+ }
+
+ wiz->data = data;
+ wiz->type = data->type;
child_node = of_get_child_by_name(node, "serdes");
if (!child_node) {
@@ -859,6 +1474,16 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ wiz->scm_regmap = syscon_regmap_lookup_by_phandle(node, "ti,scm");
+ if (IS_ERR(wiz->scm_regmap)) {
+ if (wiz->type == J7200_WIZ_10G) {
+ dev_err(dev, "Couldn't get ti,scm regmap\n");
+ return -ENODEV;
+ }
+
+ wiz->scm_regmap = NULL;
+ }
+
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
if (ret) {
dev_err(dev, "Failed to read num-lanes property\n");
@@ -908,17 +1533,9 @@ static int wiz_probe(struct platform_device *pdev)
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
- if (wiz->type == J721E_WIZ_10G)
- wiz->clk_mux_sel = clk_mux_sel_10g;
- else
- wiz->clk_mux_sel = clk_mux_sel_16g;
-
+ wiz->clk_mux_sel = data->clk_mux_sel;
wiz->clk_div_sel = clk_div_sel;
-
- if (wiz->type == J721E_WIZ_10G)
- wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
- else
- wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+ wiz->clk_div_sel_num = data->clk_div_sel_num;
platform_set_drvdata(pdev, wiz);
@@ -928,6 +1545,10 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ /* Enable supplemental Control override if available */
+ if (wiz->scm_regmap)
+ regmap_field_write(wiz->sup_legacy_clk_override, 1);
+
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
	phy_reset_dev->ops = &wiz_phy_reset_ops;
@@ -955,10 +1576,20 @@ static int wiz_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = wiz_init(wiz);
- if (ret) {
- dev_err(dev, "WIZ initialization failed\n");
- goto err_wiz_init;
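+	/* skip WIZ initialization if a bootloader already enabled any lane */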
+ for (i = 0; i < wiz->num_lanes; i++) {
+ regmap_field_read(wiz->p_enable[i], &val);
+ if (val & (P_ENABLE | P_ENABLE_FORCE)) {
+ already_configured = true;
+ break;
+ }
+ }
+
+ if (!already_configured) {
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
}
serdes_pdev = of_platform_device_create(child_node, NULL, dev);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 020a00d6696b..9b08145ec15d 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1167,6 +1167,15 @@ config REGULATOR_TPS65218
voltage regulators. It supports software based voltage control
for different voltage domains
+config REGULATOR_TPS65219
+ tristate "TI TPS65219 Power regulators"
+ depends on MFD_TPS65219 && OF
+ help
+ This driver supports TPS65219 voltage regulator chips.
+	  The TPS65219 series of PMICs has three single-phase bucks and
+	  four LDO voltage regulators. It supports software-based voltage
+	  control for the different voltage domains.
+
config REGULATOR_TPS6524X
tristate "TI TPS6524X Power regulators"
depends on SPI
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 6ebae516258e..32a569ac6088 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -140,6 +140,7 @@ obj-$(CONFIG_REGULATOR_TPS65086) += tps65086-regulator.o
obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65219) += tps65219-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 337dd614695e..eee35363cdea 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1016,6 +1016,7 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
struct palmas_reg_init *reg_init;
struct palmas_regs_info *rinfo;
struct regulator_desc *desc;
+ unsigned int reg;
for (id = ddata->ldo_begin; id < ddata->max_reg; id++) {
if (pdata && pdata->reg_init[id])
@@ -1064,6 +1065,31 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
TPS65917_LDO1_CTRL_BYPASS_EN;
desc->bypass_mask =
TPS65917_LDO1_CTRL_BYPASS_EN;
+
+				/*
+				 * The OTP defaults enable bypass. Switch
+				 * bypass off here so that the use count does
+				 * not go negative when bypass is disabled
+				 * explicitly later on.
+				 */
+ ret = palmas_ldo_read(pmic->palmas,
+ rinfo->ctrl_addr, &reg);
+ if (ret) {
+ dev_err(pmic->dev,
+ "Error reading %s ctrl_addr reg, ret = %d\n",
+ rinfo->name, ret);
+ return ret;
+ }
+ reg &= ~TPS65917_LDO1_CTRL_BYPASS_EN;
+ ret = palmas_ldo_write(pmic->palmas,
+ rinfo->ctrl_addr, reg);
+ if (ret) {
+ dev_err(pmic->dev,
+ "Error disabling bypass mode for %s, ret = %d\n",
+ rinfo->name, ret);
+ return ret;
+ }
}
} else {
desc->n_voltages = 1;
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
new file mode 100644
index 000000000000..a35c86c4df68
--- /dev/null
+++ b/drivers/regulator/tps65219-regulator.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tps65219-regulator.c
+ *
+ * Regulator driver for TPS65219 PMIC
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+/* This implementation is derived from the tps65218 driver authored by "J Keerthy <j-keerthy@ti.com>" */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65219.h>
+
+struct tps65219_regulator_irq_type {
+ const char *irq_name;
+ const char *regulator_name;
+ const char *event_name;
+};
+
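+/*
+ * One entry per fault interrupt; the TIMEOUT entry carries empty strings
+ * and is special-cased in the interrupt handler.
+ */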
+static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
+ { "LDO3_SCG", "LDO3", "short circuit to ground" },
+ { "LDO3_OC", "LDO3", "overcurrent" },
+ { "LDO3_UV", "LDO3", "undervoltage" },
+ { "LDO4_SCG", "LDO4", "short circuit to ground" },
+ { "LDO4_OC", "LDO4", "overcurrent" },
+ { "LDO4_UV", "LDO4", "undervoltage" },
+ { "LDO1_SCG", "LDO1", "short circuit to ground" },
+ { "LDO1_OC", "LDO1", "overcurrent" },
+ { "LDO1_UV", "LDO1", "undervoltage" },
+ { "LDO2_SCG", "LDO2", "short circuit to ground" },
+ { "LDO2_OC", "LDO2", "overcurrent" },
+ { "LDO2_UV", "LDO2", "undervoltage" },
+ { "BUCK3_SCG", "BUCK3", "short circuit to ground" },
+ { "BUCK3_OC", "BUCK3", "overcurrent" },
+ { "BUCK3_NEG_OC", "BUCK3", "negative overcurrent" },
+ { "BUCK3_UV", "BUCK3", "undervoltage" },
+ { "BUCK1_SCG", "BUCK1", "short circuit to ground" },
+ { "BUCK1_OC", "BUCK1", "overcurrent" },
+ { "BUCK1_NEG_OC", "BUCK1", "negative overcurrent" },
+ { "BUCK1_UV", "BUCK1", "undervoltage" },
+ { "BUCK2_SCG", "BUCK2", "short circuit to ground" },
+ { "BUCK2_OC", "BUCK2", "overcurrent" },
+ { "BUCK2_NEG_OC", "BUCK2", "negative overcurrent" },
+ { "BUCK2_UV", "BUCK2", "undervoltage" },
+ { "BUCK1_RV", "BUCK1", "residual voltage" },
+ { "BUCK2_RV", "BUCK2", "residual voltage" },
+ { "BUCK3_RV", "BUCK3", "residual voltage" },
+ { "LDO1_RV", "LDO1", "residual voltage" },
+ { "LDO2_RV", "LDO2", "residual voltage" },
+ { "LDO3_RV", "LDO3", "residual voltage" },
+ { "LDO4_RV", "LDO4", "residual voltage" },
+ { "BUCK1_RV_SD", "BUCK1", "residual voltage on shutdown" },
+ { "BUCK2_RV_SD", "BUCK2", "residual voltage on shutdown" },
+ { "BUCK3_RV_SD", "BUCK3", "residual voltage on shutdown" },
+ { "LDO1_RV_SD", "LDO1", "residual voltage on shutdown" },
+ { "LDO2_RV_SD", "LDO2", "residual voltage on shutdown" },
+ { "LDO3_RV_SD", "LDO3", "residual voltage on shutdown" },
+ { "LDO4_RV_SD", "LDO4", "residual voltage on shutdown" },
+ { "SENSOR_3_WARM", "SENSOR3", "warm temperature" },
+ { "SENSOR_2_WARM", "SENSOR2", "warm temperature" },
+ { "SENSOR_1_WARM", "SENSOR1", "warm temperature" },
+ { "SENSOR_0_WARM", "SENSOR0", "warm temperature" },
+ { "SENSOR_3_HOT", "SENSOR3", "hot temperature" },
+ { "SENSOR_2_HOT", "SENSOR2", "hot temperature" },
+ { "SENSOR_1_HOT", "SENSOR1", "hot temperature" },
+ { "SENSOR_0_HOT", "SENSOR0", "hot temperature" },
+ { "TIMEOUT", "", "" },
+};
+
+struct tps65219_regulator_irq_data {
+ struct device *dev;
+ struct tps65219_regulator_irq_type *type;
+};
+
+#define TPS65219_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, \
+ _ct, _ncl, _bpm) \
+ { \
+ .name = _name, \
+ .of_match = _of, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .supply_name = _of, \
+ .id = _id, \
+ .ops = &(_ops), \
+ .n_voltages = _n, \
+ .type = _type, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .csel_reg = _cr, \
+ .csel_mask = _cm, \
+ .curr_table = _ct, \
+ .n_current_limits = _ncl, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .volt_table = NULL, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ .ramp_delay = _delay, \
+ .fixed_uV = _fuv, \
+ .bypass_reg = _vr, \
+ .bypass_mask = _bpm, \
+ .bypass_val_on = 1, \
+ } \
+
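+/*
+ * Buck VSET ranges: 0.6-1.375 V in 25 mV steps, 1.4-3.3 V in 100 mV
+ * steps, then a fixed 3.4 V plateau at the top of the selector range.
+ */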
+static const struct linear_range bucks_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x20, 0x33, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x34, 0x3f, 0),
+};
+
+static const struct linear_range ldos_1_2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x37, 50000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x38, 0x3f, 0),
+};
+
+static const struct linear_range ldos_3_4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x0, 0xC, 0),
+ REGULATOR_LINEAR_RANGE(1250000, 0xD, 0x35, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x36, 0x3F, 0),
+};
+
+static int tps65219_pmic_set_voltage_sel(struct regulator_dev *dev,
+ unsigned int selector)
+{
+ int ret;
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
+ /* Set the voltage based on vsel value */
+ ret = regmap_update_bits(tps->regmap, dev->desc->vsel_reg,
+ dev->desc->vsel_mask, selector);
+ if (ret) {
+ dev_dbg(tps->dev, "%s failed for regulator %s: %d ",
+ __func__, dev->desc->name, ret);
+ }
+ return ret;
+}
+
+static int tps65219_pmic_enable(struct regulator_dev *dev)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
+ return regmap_set_bits(tps->regmap, dev->desc->enable_reg,
+ dev->desc->enable_mask);
+}
+
+static int tps65219_pmic_disable(struct regulator_dev *dev)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
+ return regmap_clear_bits(tps->regmap, dev->desc->enable_reg,
+ dev->desc->enable_mask);
+}
+
+static int tps65219_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
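+	/* STBY_1_CONFIG reuses the per-regulator bit layout of ENABLE_CTRL */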
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ return regmap_set_bits(tps->regmap, TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+
+ case REGULATOR_MODE_STANDBY:
+ return regmap_clear_bits(tps->regmap,
+ TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+ }
+
+ return -EINVAL;
+}
+
+static unsigned int tps65219_get_mode(struct regulator_dev *dev)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+ int ret, value = 0;
+
+ ret = regmap_read(tps->regmap, TPS65219_REG_STBY_1_CONFIG, &value);
+ if (ret) {
+ dev_dbg(tps->dev, "%s failed for regulator %s: %d ",
+ __func__, dev->desc->name, ret);
+ return ret;
+ }
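+	/* one standby-config bit per regulator, indexed by regulator ID */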
+ value = (value & BIT(rid)) >> rid;
+ if (value)
+ return REGULATOR_MODE_STANDBY;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+/*
+ * The generic regulator_set_bypass_regmap() does not fully match the
+ * requirements: the TPS65219 explicitly requires the regulator to be
+ * disabled before switching to or from bypass.
+ */
+static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+ int ret = 0;
+
+	if (dev->desc->ops->is_enabled(dev)) {
+		dev_err(tps->dev,
+			"%s LDO%d enabled, must be shut down to set bypass\n",
+			__func__, rid);
+ return -EBUSY;
+ }
+ ret = regulator_set_bypass_regmap(dev, enable);
+ return ret;
+}
+
+/* Operations permitted on BUCK1/2/3 */
+static const struct regulator_ops tps65219_bucks_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65219_pmic_enable,
+ .disable = tps65219_pmic_disable,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65219_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+/* Operations permitted on LDO1/2 */
+static const struct regulator_ops tps65219_ldos_1_2_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65219_pmic_enable,
+ .disable = tps65219_pmic_disable,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65219_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_bypass = tps65219_set_bypass,
+ .get_bypass = regulator_get_bypass_regmap,
+};
+
+/* Operations permitted on LDO3/4 */
+static const struct regulator_ops tps65219_ldos_3_4_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = tps65219_pmic_enable,
+ .disable = tps65219_pmic_disable,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = tps65219_pmic_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65219_REGULATOR("BUCK1", "buck1", TPS65219_BUCK_1,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK1_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK2", "buck2", TPS65219_BUCK_2,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK2_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK3", "buck3", TPS65219_BUCK_3,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK3_EN_MASK, 0, 0, bucks_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO1", "ldo1", TPS65219_LDO_1,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO2", "ldo2", TPS65219_LDO_2,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO3", "ldo3", TPS65219_LDO_3,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO4", "ldo4", TPS65219_LDO_4,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO4_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO4_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+};
+
+static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
+{
+ struct tps65219_regulator_irq_data *irq_data = data;
+
+ if (irq_data->type->event_name[0] == '\0') {
+ /* This is the timeout interrupt */
+ dev_err(irq_data->dev, "System was put in shutdown during an active or standby transition.\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_err(irq_data->dev, "Registered %s for %s\n",
+ irq_data->type->event_name, irq_data->type->regulator_name);
+ return IRQ_HANDLED;
+}
+
+static int tps65219_regulator_probe(struct platform_device *pdev)
+{
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int i;
+ int error;
+ int irq;
+ struct tps65219_regulator_irq_data *irq_data;
+ struct tps65219_regulator_irq_type *irq_type;
+
+ config.dev = tps->dev;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ dev_dbg(tps->dev, "%s regul i= %d START", __func__, i);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+				regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+
+ dev_dbg(tps->dev, "%s regul i= %d COMPLETED", __func__, i);
+ }
+
+	irq_data = devm_kmalloc_array(tps->dev,
+				      ARRAY_SIZE(tps65219_regulator_irq_types),
+				      sizeof(struct tps65219_regulator_irq_data),
+				      GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(tps65219_regulator_irq_types); ++i) {
+ irq_type = &tps65219_regulator_irq_types[i];
+
+ irq = platform_get_irq_byname(pdev, irq_type->irq_name);
+ if (irq < 0) {
+ dev_err(tps->dev, "Failed to get IRQ %s: %d\n",
+ irq_type->irq_name, irq);
+			return irq;
+ }
+ irq_data[i].dev = tps->dev;
+ irq_data[i].type = irq_type;
+
+ error = devm_request_threaded_irq(tps->dev, irq, NULL,
+ tps65219_regulator_irq_handler,
+ IRQF_ONESHOT,
+ irq_type->irq_name,
+ &irq_data[i]);
+ if (error) {
+ dev_err(tps->dev, "failed to request %s IRQ %d: %d\n",
+ irq_type->irq_name, irq, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id tps65219_regulator_id_table[] = {
+ { "tps65219-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
+
+static struct platform_driver tps65219_regulator_driver = {
+ .driver = {
+ .name = "tps65219-pmic",
+ },
+ .probe = tps65219_regulator_probe,
+ .id_table = tps65219_regulator_id_table,
+};
+
+module_platform_driver(tps65219_regulator_driver);
+
+MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
+MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
+MODULE_ALIAS("platform:tps65219-pmic");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index d99548fb5dde..1e8725863f7e 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -118,6 +118,7 @@ config DA8XX_REMOTEPROC
config KEYSTONE_REMOTEPROC
tristate "Keystone Remoteproc support"
depends on ARCH_KEYSTONE
+ depends on UIO
help
	  Say Y here to support Keystone remote processors (DSP)
via the remote processor framework.
@@ -125,6 +126,18 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
+config PRU_REMOTEPROC
+ tristate "TI PRU remoteproc support"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ Support for TI PRU remote processors present within a PRU-ICSS
+ subsystem via the remote processor framework.
+
+	  Say Y or M here to support the Programmable Real-Time Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
config QCOM_PIL_INFO
tristate
@@ -288,6 +301,19 @@ config TI_K3_R5_REMOTEPROC
It's safe to say N here if you're not interested in utilizing
a slave processor.
+config TI_K3_M4_REMOTEPROC
+ tristate "TI K3 M4 remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's M4 remote processor subsystems
+	  on various SoCs of the TI K3 family through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a slave processor.
+
endif # REMOTEPROC
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index da2ace4ec86c..ab492b36ebe9 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_PRU_REMOTEPROC) += pru_rproc.o
obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
@@ -34,3 +35,4 @@ obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 98e0be9476a4..e6ca59aa9b8c 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -223,7 +223,7 @@ static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK;
drproc->mem[i].size = resource_size(res);
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
mem_names[i], &drproc->mem[i].bus_addr,
drproc->mem[i].size, drproc->mem[i].cpu_addr,
drproc->mem[i].dev_addr);
@@ -347,6 +347,9 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
goto free_rproc;
}
+ if (rproc_get_id(rproc) < 0)
+ dev_warn(dev, "device does not have an alias id or platform device id\n");
+
return 0;
free_rproc:
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index cd266163a65f..93a5a0b5d7b8 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -2,13 +2,15 @@
/*
* TI Keystone DSP remoteproc driver
*
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
@@ -18,24 +20,40 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/remoteproc.h>
+#include <linux/miscdevice.h>
+#include <linux/uio_driver.h>
#include <linux/reset.h>
+#include <uapi/linux/keystone_remoteproc.h>
+
#include "remoteproc_internal.h"
+#define DRIVER_UIO_VERSION "0.1"
+
+#define KEYSTONE_RPROC_MAX_RSC_TABLE SZ_1K
#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+/*
+ * XXX: evaluate if this param needs to be enhanced so that the switch between
+ * userspace and remoteproc core loaders can be controlled per device.
+ */
+static bool use_rproc_core_loader;
+module_param(use_rproc_core_loader, bool, 0444);
+
/**
* struct keystone_rproc_mem - internal memory structure
* @cpu_addr: MPU virtual address of the memory region
* @bus_addr: Bus address used to access the memory region
* @dev_addr: Device address of the memory region from DSP view
* @size: Size of the memory region
+ * @kobj: kobject for the memory region sysfs directory
*/
struct keystone_rproc_mem {
void __iomem *cpu_addr;
phys_addr_t bus_addr;
u32 dev_addr;
size_t size;
+ struct kobject kobj;
};
/**
@@ -51,6 +69,18 @@ struct keystone_rproc_mem {
* @irq_fault: irq entry for exception
* @kick_gpio: gpio used for virtio kicks
* @workqueue: workqueue for processing virtio interrupts
+ * @misc: misc device structure used to expose fops to user-space
+ * @uio: uio device information
+ * @mlock: lock to protect resources in fops
+ * @lock: lock to protect shared resources within UIO interrupt handlers
+ * @flags: flags to keep track of UIO interrupt occurrence
+ * @rsc_table: resource table pointer copied from userspace
+ * @rsc_table_size: size of resource table
+ * @loaded_rsc_table: kernel pointer of loaded resource table
+ * @boot_addr: remote processor boot address used with userspace loader
+ * @open_count: fops open reference counter
+ * @use_userspace_loader: flag to denote if driver is configured for userspace
+ * loader
*/
struct keystone_rproc {
struct device *dev;
@@ -64,8 +94,281 @@ struct keystone_rproc {
int irq_fault;
int kick_gpio;
struct work_struct workqueue;
+ struct miscdevice misc;
+ struct uio_info uio;
+ struct mutex mlock; /* fops lock */
+ spinlock_t lock; /* uio handler lock */
+ unsigned long flags;
+ struct resource_table *rsc_table;
+ int rsc_table_size;
+ void *loaded_rsc_table;
+ u32 boot_addr;
+ int open_count;
+ unsigned int use_userspace_loader : 1;
+};
+
+struct mem_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct keystone_rproc_mem *mem, char *buf);
+ ssize_t (*store)(struct keystone_rproc_mem *mem, const char *buf,
+ size_t len);
+};
+
+static ssize_t mem_addr_show(struct keystone_rproc_mem *mem, char *buf)
+{
+ return sprintf(buf, "%pa\n", &mem->bus_addr);
+}
+
+static ssize_t mem_size_show(struct keystone_rproc_mem *mem, char *buf)
+{
+ return sprintf(buf, "0x%016zx\n", mem->size);
+}
+
+static struct mem_sysfs_entry addr_attribute =
+ __ATTR(addr, 0444, mem_addr_show, NULL);
+static struct mem_sysfs_entry size_attribute =
+ __ATTR(size, 0444, mem_size_show, NULL);
+
+static struct attribute *attrs[] = {
+ &addr_attribute.attr,
+ &size_attribute.attr,
+ NULL, /* sentinel */
};
+#define to_dsp_mem(m) container_of(m, struct keystone_rproc_mem, kobj)
+
+static ssize_t mem_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct keystone_rproc_mem *mem = to_dsp_mem(kobj);
+ struct mem_sysfs_entry *entry;
+
+ entry = container_of(attr, struct mem_sysfs_entry, attr);
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(mem, buf);
+}
+
+static const struct sysfs_ops mem_sysfs_ops = {
+ .show = mem_type_show,
+};
+
+static struct kobj_type mem_attr_type = {
+ .sysfs_ops = &mem_sysfs_ops,
+ .default_attrs = attrs,
+};
+
+static int keystone_rproc_mem_add_attrs(struct keystone_rproc *ksproc)
+{
+ int i, ret;
+ struct keystone_rproc_mem *mem;
+ struct kobject *kobj_parent = &ksproc->misc.this_device->kobj;
+
+ for (i = 0; i < ksproc->num_mems; i++) {
+ mem = &ksproc->mem[i];
+ kobject_init(&mem->kobj, &mem_attr_type);
+ ret = kobject_add(&mem->kobj, kobj_parent, "memory%d", i);
+ if (ret)
+ goto err_kobj;
+ ret = kobject_uevent(&mem->kobj, KOBJ_ADD);
+ if (ret)
+ goto err_kobj;
+ }
+
+ return 0;
+
+err_kobj:
+ for (; i >= 0; i--) {
+ mem = &ksproc->mem[i];
+ kobject_put(&mem->kobj);
+ }
+ return ret;
+}
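+
+/*
+ * Resulting sysfs layout (illustrative; assumes a DSP misc device named
+ * "dsp0"): /sys/class/misc/dsp0/memory0/addr, .../memory0/size, and so
+ * on for each exported memory region.
+ */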
+
+static void keystone_rproc_mem_del_attrs(struct keystone_rproc *ksproc)
+{
+ int i;
+ struct keystone_rproc_mem *mem;
+
+ for (i = 0; i < ksproc->num_mems; i++) {
+ mem = &ksproc->mem[i];
+ kobject_put(&mem->kobj);
+ }
+}
+
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
+
+/* uio handler dealing with userspace controlled exception interrupt */
+static irqreturn_t keystone_rproc_uio_handler(int irq, struct uio_info *uio)
+{
+ struct keystone_rproc *ksproc = uio->priv;
+
+ spin_lock(&ksproc->lock);
+ if (!__test_and_set_bit(0, &ksproc->flags))
+ disable_irq_nosync(irq);
+ spin_unlock(&ksproc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* uio driver interrupt control dealing with exception interrupt */
+static int keystone_rproc_uio_irqcontrol(struct uio_info *uio, s32 irq_on)
+{
+ struct keystone_rproc *ksproc = uio->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ksproc->lock, flags);
+ if (irq_on) {
+ if (__test_and_clear_bit(0, &ksproc->flags))
+ enable_irq(uio->irq);
+ } else {
+ if (!__test_and_set_bit(0, &ksproc->flags))
+ disable_irq(uio->irq);
+ }
+ spin_unlock_irqrestore(&ksproc->lock, flags);
+
+ return 0;
+}
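+
+/*
+ * Illustrative client usage (a sketch, not part of this driver): a UIO
+ * application re-enables the exception interrupt by writing a 32-bit 1 to
+ * its /dev/uioX node, and disables it by writing a 0:
+ *
+ *   s32 irq_on = 1;
+ *   write(uio_fd, &irq_on, sizeof(irq_on));
+ */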
+
+/* Reset previously set rsc table variables */
+static void keystone_rproc_reset_rsc_table(struct keystone_rproc *ksproc)
+{
+ kfree(ksproc->rsc_table);
+ ksproc->rsc_table = NULL;
+ ksproc->loaded_rsc_table = NULL;
+ ksproc->rsc_table_size = 0;
+}
+
+/*
+ * Create/delete the virtio devices in kernel once the user-space loading is
+ * complete, configure the remoteproc states appropriately, and boot or reset
+ * the remote processor. The resource table should have been published through
+ * KEYSTONE_RPROC_IOC_SET_RSC_TABLE & KEYSTONE_RPROC_IOC_SET_LOADED_RSC_TABLE
+ * ioctls before invoking this. The boot address is passed through the
+ * KEYSTONE_RPROC_IOC_SET_STATE ioctl when setting the KEYSTONE_RPROC_RUNNING
+ * state.
+ *
+ * NOTE:
+ * The ioctls KEYSTONE_RPROC_IOC_DSP_RESET and KEYSTONE_RPROC_IOC_DSP_BOOT
 + * are restricted to booting or resetting the DSP devices, and only for
 + * firmware images without any resource table.
+ */
+static int keystone_rproc_set_state(struct keystone_rproc *ksproc,
+ void __user *argp)
+{
+ struct rproc *rproc = ksproc->rproc;
+ struct keystone_rproc_set_state_params set_state_params;
+ int ret = 0;
+
+ if (copy_from_user(&set_state_params, argp, sizeof(set_state_params)))
+ return -EFAULT;
+
+ switch (set_state_params.state) {
+ case KEYSTONE_RPROC_RUNNING:
+ if (!ksproc->rsc_table || !ksproc->loaded_rsc_table)
+ return -EINVAL;
+
+ /*
+ * store boot address for .get_boot_addr() rproc fw ops
+ * XXX: validate the boot address so it is not set to a
+ * random address
+ */
+ ksproc->boot_addr = set_state_params.boot_addr;
+
+ /*
+ * invoke rproc_boot to trigger the boot, the resource table
+ * is parsed during the process and is agnostic of the presence
+ * or absence of virtio devices
+ */
+ ret = rproc_boot(rproc);
+ break;
+
+ case KEYSTONE_RPROC_OFFLINE:
+ if (rproc->state != RPROC_RUNNING)
+ return -EINVAL;
+
+ /* invoke rproc_shutdown to match rproc_boot */
+ rproc_shutdown(rproc);
+
+ mutex_lock(&ksproc->mlock);
+ keystone_rproc_reset_rsc_table(ksproc);
+ mutex_unlock(&ksproc->mlock);
+
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
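+
+/*
+ * Example loader sequence (a sketch; the field names follow the
+ * keystone_rproc_set_state_params usage above, and fd is assumed to be an
+ * open handle to the DSP misc device):
+ *
+ *   params.state = KEYSTONE_RPROC_RUNNING;
+ *   params.boot_addr = entry_point;
+ *   ioctl(fd, KEYSTONE_RPROC_IOC_SET_STATE, &params);
+ */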
+
+/* Copy the resource table from userspace into kernel */
+static int keystone_rproc_set_rsc_table(struct keystone_rproc *ksproc,
+ void __user *data)
+{
+ unsigned long len = 0;
+ void *rsc_table = NULL;
+
+ if (!data)
+ return -EFAULT;
+
+ if (copy_from_user(&len, data, sizeof(len)))
+ return -EFAULT;
+
+ if (len >= KEYSTONE_RPROC_MAX_RSC_TABLE)
+ return -EOVERFLOW;
+
+ data += sizeof(len);
+
+ rsc_table = kzalloc(len, GFP_KERNEL);
+ if (!rsc_table)
+ return -ENOMEM;
+
+ if (copy_from_user(rsc_table, data, len))
+ goto error_return;
+
+ mutex_lock(&ksproc->mlock);
+
+ kfree(ksproc->rsc_table);
+
+ ksproc->rsc_table = rsc_table;
+ ksproc->rsc_table_size = len;
+ ksproc->loaded_rsc_table = NULL;
+
+ mutex_unlock(&ksproc->mlock);
+
+ return 0;
+
+error_return:
+ kfree(rsc_table);
+ return -EFAULT;
+}
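+
+/*
+ * The ioctl argument parsed above is a length-prefixed blob; a sketch of
+ * the layout userspace is assumed to pass:
+ *
+ *   struct { unsigned long len; u8 table[]; } *arg;
+ *   ioctl(fd, KEYSTONE_RPROC_IOC_SET_RSC_TABLE, arg);
+ */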
+
+/*
+ * Store the kernel virtual address equivalent of the loaded resource table,
+ * which resides in device memory. Userspace publishes the device address of
+ * the loaded resource table.
+ */
+static int keystone_rproc_set_loaded_rsc_table(struct keystone_rproc *ksproc,
+ unsigned int dma_addr)
+{
+ struct rproc *rproc = ksproc->rproc;
+ void *ptr;
+
+ if (!ksproc->rsc_table_size || !ksproc->rsc_table)
+ return -EINVAL;
+
+ ptr = keystone_rproc_da_to_va(rproc, dma_addr, ksproc->rsc_table_size);
+ if (!ptr)
+ return -EINVAL;
+
+ ksproc->loaded_rsc_table = ptr;
+
+ return 0;
+}
+
/* Put the DSP processor into reset */
static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc)
{
@@ -73,7 +376,8 @@ static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc)
}
/* Configure the boot address and boot the DSP processor */
-static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
+static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc,
+ uint32_t boot_addr)
{
int ret;
@@ -95,6 +399,235 @@ static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
return 0;
}
+static long
+keystone_rproc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct miscdevice *misc = filp->private_data;
+ struct keystone_rproc *ksproc =
+ container_of(misc, struct keystone_rproc, misc);
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ dev_dbg(ksproc->dev, "%s: cmd 0x%.8x (%d), arg 0x%lx\n",
+ __func__, cmd, _IOC_NR(cmd), arg);
+
+ if (_IOC_TYPE(cmd) != KEYSTONE_RPROC_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_NR(cmd) >= KEYSTONE_RPROC_IOC_MAXNR)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case KEYSTONE_RPROC_IOC_SET_STATE:
+ ret = keystone_rproc_set_state(ksproc, argp);
+ break;
+
+ case KEYSTONE_RPROC_IOC_SET_RSC_TABLE:
+ ret = keystone_rproc_set_rsc_table(ksproc, argp);
+ break;
+
+ case KEYSTONE_RPROC_IOC_SET_LOADED_RSC_TABLE:
+ ret = keystone_rproc_set_loaded_rsc_table(ksproc, arg);
+ break;
+
+ case KEYSTONE_RPROC_IOC_DSP_RESET:
+ if (ksproc->rsc_table) {
+ ret = -EINVAL;
+ break;
+ }
+
+ keystone_rproc_dsp_reset(ksproc);
+ break;
+
+ case KEYSTONE_RPROC_IOC_DSP_BOOT:
+ if (ksproc->rsc_table) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = keystone_rproc_dsp_boot(ksproc, arg);
+ break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ if (ret) {
+ dev_err(ksproc->dev, "error in ioctl call: cmd 0x%.8x (%d), ret %d\n",
+ cmd, _IOC_NR(cmd), ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Map DSP memories into userspace to support userspace loading.
+ *
+ * This is a custom mmap function following semantics based on the UIO
+ * mmap implementation. The vm_pgoff passed in the vma structure is a
+ * combination of the memory region index and the actual page offset in
+ * that region. The handler checks that the user request is within a valid
+ * range before providing mmap access.
+ *
+ * XXX: Evaluate this approach, as the internal memories can be mapped in
+ * whole into userspace as they are not super-large, or switch to using
+ * direct addresses to look more like a traditional implementation.
+ */
+static int keystone_rproc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct miscdevice *misc = file->private_data;
+ struct keystone_rproc *ksproc =
+ container_of(misc, struct keystone_rproc, misc);
+ size_t size = vma->vm_end - vma->vm_start;
+ size_t req_offset;
+ u32 idx;
+
+ idx = vma->vm_pgoff & KEYSTONE_RPROC_UIO_MAP_INDEX_MASK;
+
+ if (idx >= ksproc->num_mems) {
+ dev_err(ksproc->dev, "invalid mmap region index %d\n", idx);
+ return -EINVAL;
+ }
+
+ req_offset = (vma->vm_pgoff - idx) << PAGE_SHIFT;
+ if (req_offset + size < req_offset) {
+ dev_err(ksproc->dev, "invalid request - overflow, mmap offset = 0x%zx size 0x%zx region %d\n",
+ req_offset, size, idx);
+ return -EINVAL;
+ }
+
+ if ((req_offset + size) > ksproc->mem[idx].size) {
+ dev_err(ksproc->dev, "invalid request - out of range, mmap offset 0x%zx size 0x%zx region %d\n",
+ req_offset, size, idx);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot =
+ phys_mem_access_prot(file,
+ (ksproc->mem[idx].bus_addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - idx), size,
+ vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (ksproc->mem[idx].bus_addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - idx), size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
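+
+/*
+ * Illustrative userspace call (a sketch): to map memory region @idx from
+ * its start, encode the region index in the page offset, assuming idx fits
+ * within KEYSTONE_RPROC_UIO_MAP_INDEX_MASK:
+ *
+ *   va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
+ *             (off_t)idx << PAGE_SHIFT);
+ *
+ * vm_pgoff then equals idx and req_offset computes to 0.
+ */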
+
+static int keystone_rproc_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *misc = file->private_data;
+ struct keystone_rproc *ksproc =
+ container_of(misc, struct keystone_rproc, misc);
+
+ mutex_lock(&ksproc->mlock);
+ ksproc->open_count++;
+ mutex_unlock(&ksproc->mlock);
+
+ return 0;
+}
+
+static int keystone_rproc_release(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *misc = filp->private_data;
+ struct keystone_rproc *ksproc =
+ container_of(misc, struct keystone_rproc, misc);
+ struct rproc *rproc = ksproc->rproc;
+
+ mutex_lock(&ksproc->mlock);
+
+ if ((WARN_ON(ksproc->open_count == 0)))
+ goto end;
+
+ if (--ksproc->open_count > 0)
+ goto end;
+
+ if (rproc->state != RPROC_OFFLINE) {
+ rproc_shutdown(rproc);
+ WARN_ON(rproc->state != RPROC_OFFLINE);
+ }
+
+ keystone_rproc_reset_rsc_table(ksproc);
+
+end:
+ mutex_unlock(&ksproc->mlock);
+ return 0;
+}
+
+/*
+ * File operations exposed through a miscdevice for supporting
+ * the userspace loader/boot mechanism.
+ */
+static const struct file_operations keystone_rproc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = keystone_rproc_ioctl,
+ .mmap = keystone_rproc_mmap,
+ .open = keystone_rproc_open,
+ .release = keystone_rproc_release,
+};
+
+/*
+ * Used only with userspace loader/boot mechanism, the parsing of the firmware
+ * is done in userspace, and a copy of the resource table is provided for
+ * kernel-level access through an ioctl. Create the remoteproc cached table
+ * using this resource table and configure the table pointer and table size
+ * accordingly to allow the remoteproc core to process the resource table for
+ * creating the vrings and traces.
+ */
+static int keystone_rproc_load_rsc_table(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ rproc->cached_table = kmemdup(ksproc->rsc_table, ksproc->rsc_table_size,
+ GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ rproc->table_ptr = rproc->cached_table;
+ rproc->table_sz = ksproc->rsc_table_size;
+
+ return 0;
+}
+
+/*
+ * Used only with userspace loader/boot mechanism, the device address of the
+ * loaded resource table is published to the kernel through an ioctl,
+ * at which point the equivalent kernel virtual pointer is stored in a local
+ * variable in the keystone_rproc device structure. Return this kernel pointer
+ * to the remoteproc core for runtime publishing/modification of the resource
+ * table entries.
+ *
+ * NOTE: Only loaded resource tables in the DSP internal memories are supported
+ * at present.
+ */
+static struct resource_table *
+keystone_rproc_find_loaded_rsc_table(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ return ksproc->loaded_rsc_table;
+}
+
+/*
+ * Used only with userspace loader/boot mechanism, the boot address
+ * is published to the kernel through an ioctl call and is
+ * stored in a local variable in the keystone_rproc device structure.
+ * Return this address to the remoteproc core through the .get_boot_addr()
+ * remoteproc firmware ops.
+ */
+static u64 keystone_rproc_get_boot_addr(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ return ksproc->boot_addr;
+}
+
/*
* Process the remoteproc exceptions
*
@@ -104,7 +637,9 @@ static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
* generation register.
*
* This function just invokes the rproc_report_crash to report the exception
- * to the remoteproc driver core, to trigger a recovery.
 + * to the remoteproc driver core, to trigger a recovery. This is the case
 + * only when using the in-kernel remoteproc core loader/boot mechanism;
 + * otherwise the exception is handled through a UIO interrupt.
*/
static irqreturn_t keystone_rproc_exception_interrupt(int irq, void *dev_id)
{
@@ -164,7 +699,11 @@ static irqreturn_t keystone_rproc_vring_interrupt(int irq, void *dev_id)
*
* This function will be invoked only after the firmware for this rproc
* was loaded, parsed successfully, and all of its resource requirements
- * were met.
 + * were met. The function skips releasing the processor from reset and
 + * registering for the exception interrupt when using the userspace-controlled
 + * load/boot mechanism. The processor will be started through an ioctl when
 + * controlled from userspace, but the virtio interrupt is still handled at
 + * the kernel layer.
*/
static int keystone_rproc_start(struct rproc *rproc)
{
@@ -181,12 +720,15 @@ static int keystone_rproc_start(struct rproc *rproc)
goto out;
}
- ret = request_irq(ksproc->irq_fault, keystone_rproc_exception_interrupt,
- 0, dev_name(ksproc->dev), ksproc);
- if (ret) {
- dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n",
- ret);
- goto free_vring_irq;
+ if (!ksproc->use_userspace_loader) {
+ ret = request_irq(ksproc->irq_fault,
+ keystone_rproc_exception_interrupt, 0,
+ dev_name(ksproc->dev), ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n",
+ ret);
+ goto free_vring_irq;
+ }
}
ret = keystone_rproc_dsp_boot(ksproc, rproc->bootaddr);
@@ -208,24 +750,29 @@ out:
* Stop the DSP remote processor.
*
* This function puts the DSP processor into reset, and finishes processing
- * of any pending messages.
 + * of any pending messages. The reset procedure is completed here only when
 + * using the kernel-mode remoteproc loading/booting mechanism; with the
 + * userspace load/boot mechanism it is handled outside this function, either
 + * through an ioctl, or on the final close of the device handle if userspace
 + * has not already triggered a reset.
*/
static int keystone_rproc_stop(struct rproc *rproc)
{
struct keystone_rproc *ksproc = rproc->priv;
- keystone_rproc_dsp_reset(ksproc);
- free_irq(ksproc->irq_fault, ksproc);
+ if (!ksproc->use_userspace_loader) {
+ keystone_rproc_dsp_reset(ksproc);
+ free_irq(ksproc->irq_fault, ksproc);
+ }
+
free_irq(ksproc->irq_ring, ksproc);
flush_work(&ksproc->workqueue);
-
return 0;
}
/*
* Kick the remote processor to notify about pending unprocessed messages.
* The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
 + * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
* the remote processor is expected to process both its Tx and Rx virtqueues.
*/
static void keystone_rproc_kick(struct rproc *rproc, int vqid)
@@ -363,8 +910,11 @@ static int keystone_rproc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct keystone_rproc *ksproc;
+ struct miscdevice *misc;
+ struct uio_info *uio;
struct rproc *rproc;
int dsp_id;
+ char *uio_name = NULL;
char *fw_name = NULL;
char *template = "keystone-dsp%d-fw";
int name_len = 0;
@@ -381,6 +931,13 @@ static int keystone_rproc_probe(struct platform_device *pdev)
return dsp_id;
}
+ /* construct a name for uio devices - assuming a single digit alias */
+ name_len = strlen("dsp%d");
+ uio_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ if (!uio_name)
+ return -ENOMEM;
+ snprintf(uio_name, name_len, "dsp%d", dsp_id);
+
/* construct a custom default fw name - subject to change in future */
name_len = strlen(template); /* assuming a single digit alias */
fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
@@ -394,9 +951,32 @@ static int keystone_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
rproc->has_iommu = false;
+
ksproc = rproc->priv;
ksproc->rproc = rproc;
ksproc->dev = dev;
+ ksproc->use_userspace_loader = !use_rproc_core_loader;
+
+ /*
+ * customize the remoteproc core config flags and ELF fw ops for
+ * userspace loader/boot mechanism
+ */
+ if (ksproc->use_userspace_loader) {
+ rproc->recovery_disabled = true;
+ rproc->auto_boot = false;
+ rproc->skip_firmware_load = true;
+ rproc->deny_sysfs_ops = true;
+
+ rproc->ops->parse_fw = keystone_rproc_load_rsc_table;
+ rproc->ops->find_loaded_rsc_table =
+ keystone_rproc_find_loaded_rsc_table;
+ rproc->ops->get_boot_addr = keystone_rproc_get_boot_addr;
+ rproc->ops->sanity_check = NULL;
+ rproc->ops->load = NULL;
+ }
+
+ mutex_init(&ksproc->mlock);
+ spin_lock_init(&ksproc->lock);
ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
if (ret)
@@ -463,8 +1043,52 @@ static int keystone_rproc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ksproc);
+ if (ksproc->use_userspace_loader) {
+ uio = &ksproc->uio;
+ uio->name = uio_name;
+ uio->version = DRIVER_UIO_VERSION;
+ uio->irq = ksproc->irq_fault;
+ uio->priv = ksproc;
+ uio->handler = keystone_rproc_uio_handler;
+ uio->irqcontrol = keystone_rproc_uio_irqcontrol;
+ ret = uio_register_device(dev, uio);
+ if (ret) {
+ dev_err(dev, "failed to register uio device, status = %d\n",
+ ret);
+ goto del_rproc;
+ }
+ dev_dbg(dev, "registered uio device %s\n", uio->name);
+
+ misc = &ksproc->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = uio->name;
+ misc->fops = &keystone_rproc_fops;
+ misc->parent = dev;
+ ret = misc_register(misc);
+ if (ret) {
+ dev_err(dev, "failed to register misc device, status = %d\n",
+ ret);
+ goto unregister_uio;
+ }
+
+ ret = keystone_rproc_mem_add_attrs(ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "error creating sysfs files (%d)\n",
+ ret);
+ goto unregister_misc;
+ }
+
+ dev_dbg(dev, "registered misc device %s\n", misc->name);
+ }
+
return 0;
+unregister_misc:
+ misc_deregister(misc);
+unregister_uio:
+ uio_unregister_device(uio);
+del_rproc:
+ rproc_del(rproc);
release_mem:
of_reserved_mem_device_release(dev);
disable_clk:
@@ -480,6 +1104,11 @@ static int keystone_rproc_remove(struct platform_device *pdev)
{
struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
+ if (ksproc->use_userspace_loader) {
+ keystone_rproc_mem_del_attrs(ksproc);
+ misc_deregister(&ksproc->misc);
+ uio_unregister_device(&ksproc->uio);
+ }
rproc_del(ksproc->rproc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -507,8 +1136,22 @@ static struct platform_driver keystone_rproc_driver = {
},
};
-module_platform_driver(keystone_rproc_driver);
+static int __init keystone_rproc_init(void)
+{
+ keystone_rproc_driver.driver.suppress_bind_attrs =
+ !use_rproc_core_loader;
+
+ return platform_driver_register(&keystone_rproc_driver);
+}
+module_init(keystone_rproc_init);
+
+static void __exit keystone_rproc_exit(void)
+{
+ platform_driver_unregister(&keystone_rproc_driver);
+}
+module_exit(keystone_rproc_exit);
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Sam Nelson <sam.nelson@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone DSP Remoteproc driver");
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index d94b7391bf9d..c67fd9f50141 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -703,6 +703,19 @@ static int omap_rproc_stop(struct rproc *rproc)
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
+ /*
 + * If the remoteproc has crashed, the IOMMU must be reset as well.
 + * Otherwise stale data left in the IOMMU may cause the remoteproc to
 + * hang / die again. A sleep in between is needed to make sure the
 + * reset actually gets triggered, otherwise the runtime PM framework
 + * may consider this sequence a NOP.
+ */
+ if (rproc->state == RPROC_CRASHED) {
+ omap_iommu_domain_deactivate(rproc->domain);
+ msleep(5);
+ omap_iommu_domain_activate(rproc->domain);
+ }
+
return 0;
enable_device:
@@ -1356,6 +1369,9 @@ static int omap_rproc_probe(struct platform_device *pdev)
if (ret)
goto release_mem;
+ if (rproc_get_id(rproc) < 0)
+ dev_warn(&pdev->dev, "device does not have an alias id\n");
+
return 0;
release_mem:
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
index 828e13256c02..131483fe08e7 100644
--- a/drivers/remoteproc/omap_remoteproc.h
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -59,7 +59,9 @@ enum omap_rp_mbox_messages {
RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11,
RP_MBOX_SUSPEND_ACK = 0xFFFFFF12,
RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13,
- RP_MBOX_END_MSG = 0xFFFFFF14,
+ RP_MBOX_SHUTDOWN = 0xFFFFFF14,
+ RP_MBOX_SHUTDOWN_ACK = 0xFFFFFF15,
+ RP_MBOX_END_MSG = 0xFFFFFF16,
};
#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
new file mode 100644
index 000000000000..1cf71a932249
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS remoteproc driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pruss.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+#include "pru_rproc.h"
+
+/* PRU_ICSS_PRU_CTRL registers */
+#define PRU_CTRL_CTRL 0x0000
+#define PRU_CTRL_STS 0x0004
+#define PRU_CTRL_WAKEUP_EN 0x0008
+#define PRU_CTRL_CYCLE 0x000C
+#define PRU_CTRL_STALL 0x0010
+#define PRU_CTRL_CTBIR0 0x0020
+#define PRU_CTRL_CTBIR1 0x0024
+#define PRU_CTRL_CTPPR0 0x0028
+#define PRU_CTRL_CTPPR1 0x002C
+
+/* CTRL register bit-fields */
+#define CTRL_CTRL_SOFT_RST_N BIT(0)
+#define CTRL_CTRL_EN BIT(1)
+#define CTRL_CTRL_SLEEPING BIT(2)
+#define CTRL_CTRL_CTR_EN BIT(3)
+#define CTRL_CTRL_SINGLE_STEP BIT(8)
+#define CTRL_CTRL_RUNSTATE BIT(15)
+
+/* PRU_ICSS_PRU_DEBUG registers */
+#define PRU_DEBUG_GPREG(x) (0x0000 + (x) * 4)
+#define PRU_DEBUG_CT_REG(x) (0x0080 + (x) * 4)
+
+/* PRU/RTU/Tx_PRU Core IRAM address masks */
+#define PRU_IRAM_ADDR_MASK 0x3ffff
+#define PRU0_IRAM_ADDR_MASK 0x34000
+#define PRU1_IRAM_ADDR_MASK 0x38000
+#define RTU0_IRAM_ADDR_MASK 0x4000
+#define RTU1_IRAM_ADDR_MASK 0x6000
+#define TX_PRU0_IRAM_ADDR_MASK 0xa000
+#define TX_PRU1_IRAM_ADDR_MASK 0xc000
+
+/* PRU device addresses for various type of PRU RAMs */
+#define PRU_IRAM_DA 0 /* Instruction RAM */
+#define PRU_PDRAM_DA 0 /* Primary Data RAM */
+#define PRU_SDRAM_DA 0x2000 /* Secondary Data RAM */
+#define PRU_SHRDRAM_DA 0x10000 /* Shared Data RAM */
+
+#define MAX_PRU_SYS_EVENTS 160
+
+/**
+ * enum pru_iomem - PRU core memory/register range identifiers
+ *
+ * @PRU_IOMEM_IRAM: PRU Instruction RAM range
+ * @PRU_IOMEM_CTRL: PRU Control register range
+ * @PRU_IOMEM_DEBUG: PRU Debug register range
+ * @PRU_IOMEM_MAX: just keep this one at the end
+ */
+enum pru_iomem {
+ PRU_IOMEM_IRAM = 0,
+ PRU_IOMEM_CTRL,
+ PRU_IOMEM_DEBUG,
+ PRU_IOMEM_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU = 0,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
+
+/**
+ * struct pru_private_data - device data for a PRU core
+ * @type: type of the PRU core (PRU, RTU, Tx_PRU)
+ * @is_k3: flag used to identify the need for special load handling
+ */
+struct pru_private_data {
+ enum pru_type type;
+ unsigned int is_k3 : 1;
+};
+
+/**
+ * struct pru_rproc - PRU remoteproc structure
+ * @id: id of the PRU core within the PRUSS
+ * @dev: PRU core device pointer
+ * @pruss: back-reference to parent PRUSS structure
+ * @rproc: remoteproc pointer for this PRU core
+ * @data: PRU core specific data
+ * @mem_regions: data for each of the PRU memory regions
+ * @client_np: client device node
+ * @lock: mutex to protect client usage
+ * @fw_name: name of firmware image used during loading
+ * @mapped_irq: virtual interrupt numbers of the created fw-specific mappings
+ * @pru_interrupt_map: pointer to interrupt mapping description (firmware)
+ * @pru_interrupt_map_sz: pru_interrupt_map size
+ * @rmw_lock: lock for read, modify, write operations on registers
+ * @irq_vring: IRQ number to use for processing vring buffers
+ * @dbg_single_step: debug state variable to set PRU into single step mode
+ * @dbg_continuous: debug state variable to restore PRU execution mode
+ * @evt_count: number of mapped events
+ * @gpmux_save: saved value for gpmux config
+ */
+struct pru_rproc {
+ int id;
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *rproc;
+ const struct pru_private_data *data;
+ struct pruss_mem_region mem_regions[PRU_IOMEM_MAX];
+ struct device_node *client_np;
+ struct mutex lock; /* client access lock */
+ const char *fw_name;
+ unsigned int *mapped_irq;
+ struct pru_irq_rsc *pru_interrupt_map;
+ size_t pru_interrupt_map_sz;
+ spinlock_t rmw_lock; /* register access lock */
+ int irq_vring;
+ u32 dbg_single_step;
+ u32 dbg_continuous;
+ u8 evt_count;
+ u8 gpmux_save;
+};
+
+static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline
+void pru_control_write_reg(struct pru_rproc *pru, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline
+void pru_control_set_reg(struct pru_rproc *pru, unsigned int reg,
+ u32 mask, u32 set)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pru->rmw_lock, flags);
+
+ val = pru_control_read_reg(pru, reg);
+ val &= ~mask;
+ val |= (set & mask);
+ pru_control_write_reg(pru, reg, val);
+
+ spin_unlock_irqrestore(&pru->rmw_lock, flags);
+}
+
+/**
+ * pru_rproc_set_firmware() - set firmware for a pru core
+ * @rproc: the rproc instance of the PRU
+ * @fw_name: the new firmware name, or NULL if default is desired
+ *
+ * Return: 0 on success, or errno in error case.
+ */
+static int pru_rproc_set_firmware(struct rproc *rproc, const char *fw_name)
+{
+ struct pru_rproc *pru = rproc->priv;
+
+ if (!fw_name)
+ fw_name = pru->fw_name;
+
+ return rproc_set_firmware(rproc, fw_name);
+}
+
+static struct rproc *__pru_rproc_get(struct device_node *np, int index)
+{
+ struct device_node *rproc_np = NULL;
+ struct platform_device *pdev;
+ struct rproc *rproc;
+
+ rproc_np = of_parse_phandle(np, "ti,prus", index);
+ if (!rproc_np || !of_device_is_available(rproc_np))
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(rproc_np);
+ of_node_put(rproc_np);
+
+ if (!pdev)
+ /* probably PRU not yet probed */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* make sure it is PRU rproc */
+ if (!is_pru_rproc(&pdev->dev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ rproc = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
+ if (!rproc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&rproc->dev);
+
+ return rproc;
+}
+
+/**
+ * pru_rproc_get() - get the PRU rproc instance from a device node
+ * @np: the user/client device node
+ * @index: index to use for the ti,prus property
+ * @pru_id: optional pointer to return the PRU remoteproc processor id
+ *
+ * This function looks through a client device node's "ti,prus" property at
+ * index @index and returns the rproc handle for a valid PRU remote processor if
+ * found. The function allows only one user to own the PRU rproc resource at a
+ * time. The caller must call pru_rproc_put() when done using the rproc; this
+ * is not required if the function returns a failure.
+ *
+ * When optional @pru_id pointer is passed the PRU remoteproc processor id is
+ * returned.
+ *
+ * Return: rproc handle on success, and an ERR_PTR on failure using one
+ * of the following error values
+ * -ENODEV if device is not found
+ * -EBUSY if PRU is already acquired by anyone
+ * -EPROBE_DEFER if PRU device is not probed yet
+ */
+struct rproc *pru_rproc_get(struct device_node *np, int index,
+ enum pruss_pru_id *pru_id)
+{
+ struct rproc *rproc;
+ struct pru_rproc *pru;
+ const char *fw_name;
+ struct device *dev;
+ int ret;
+ u32 mux;
+
+ rproc = __pru_rproc_get(np, index);
+ if (IS_ERR(rproc))
+ return rproc;
+
+ pru = rproc->priv;
+ dev = &rproc->dev;
+
+ mutex_lock(&pru->lock);
+
+ if (pru->client_np) {
+ mutex_unlock(&pru->lock);
+ put_device(dev);
+ return ERR_PTR(-EBUSY);
+ }
+
+ pru->client_np = np;
+ rproc->deny_sysfs_ops = true;
+
+ mutex_unlock(&pru->lock);
+
+ ret = pruss_cfg_get_gpmux(pru->pruss, pru->id, &pru->gpmux_save);
+ if (ret) {
+ dev_err(dev, "failed to get cfg gpmux: %d\n", ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32_index(np, "ti,pruss-gp-mux-sel", index,
+ &mux);
+ if (!ret) {
+ ret = pruss_cfg_set_gpmux(pru->pruss, pru->id, mux);
+ if (ret) {
+ dev_err(dev, "failed to set cfg gpmux: %d\n", ret);
+ goto err;
+ }
+ }
+
+ ret = of_property_read_string_index(np, "firmware-name", index,
+ &fw_name);
+ if (!ret) {
+ ret = pru_rproc_set_firmware(rproc, fw_name);
+ if (ret) {
+ dev_err(dev, "failed to set firmware: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (pru_id)
+ *pru_id = pru->id;
+
+ return rproc;
+
+err:
+ pru_rproc_put(rproc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pru_rproc_get);
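+
+/*
+ * Typical client driver usage (a sketch, error handling elided):
+ *
+ *   rproc = pru_rproc_get(np, 0, &pru_id);
+ *   if (IS_ERR(rproc))
+ *           return PTR_ERR(rproc);
+ *   ret = rproc_boot(rproc);
+ *   ...
+ *   rproc_shutdown(rproc);
+ *   pru_rproc_put(rproc);
+ */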
+
+/**
+ * pru_rproc_put() - release the PRU rproc resource
+ * @rproc: the rproc resource to release
+ *
+ * Releases the PRU rproc resource and makes it available to other
+ * users.
+ */
+void pru_rproc_put(struct rproc *rproc)
+{
+ struct pru_rproc *pru;
+
+ if (IS_ERR_OR_NULL(rproc) || !is_pru_rproc(rproc->dev.parent))
+ return;
+
+ pru = rproc->priv;
+ if (!pru->client_np)
+ return;
+
+ pruss_cfg_set_gpmux(pru->pruss, pru->id, pru->gpmux_save);
+
+ pru_rproc_set_firmware(rproc, NULL);
+
+ mutex_lock(&pru->lock);
+ pru->client_np = NULL;
+ rproc->deny_sysfs_ops = false;
+ mutex_unlock(&pru->lock);
+
+ put_device(&rproc->dev);
+}
+EXPORT_SYMBOL_GPL(pru_rproc_put);
+
+/**
+ * pru_rproc_set_ctable() - set the constant table index for the PRU
+ * @rproc: the rproc instance of the PRU
+ * @c: constant table index to set
+ * @addr: physical address to set it to
+ *
+ * Return: 0 on success, or errno in error case.
+ */
+int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
+{
+ struct pru_rproc *pru = rproc->priv;
+ unsigned int reg;
+ u32 mask, set;
+ u16 idx;
+ u16 idx_mask;
+
+ if (IS_ERR_OR_NULL(rproc))
+ return -EINVAL;
+
+ if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
+ return -ENODEV;
+
+ /* pointer is 16-bit and index is 8-bit, so mask out the rest */
+ idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF;
+
+ /* ctable uses bit 8 and upwards only */
+ idx = (addr >> 8) & idx_mask;
+
+ /* configurable ctable (i.e. C24) starts at PRU_CTRL_CTBIR0 */
+ reg = PRU_CTRL_CTBIR0 + 4 * (c >> 1);
+ mask = idx_mask << (16 * (c & 1));
+ set = idx << (16 * (c & 1));
+
+ pru_control_set_reg(pru, reg, mask, set);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pru_rproc_set_ctable);
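+
+/*
+ * Worked example (illustrative; assumes the pru_ctable_idx numbering from
+ * linux/pruss.h where PRU_C24 is 0): for c = PRU_C28 (value 4), idx_mask is
+ * 0xFFFF, reg computes to PRU_CTRL_CTBIR0 + 4 * (4 >> 1) = PRU_CTRL_CTPPR0,
+ * and the 16-bit index (addr >> 8) lands in bits 0-15.
+ */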
+
+static inline u32 pru_debug_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_DEBUG].va + reg);
+}
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct rproc *rproc = s->private;
+ struct pru_rproc *pru = rproc->priv;
+ int i, nregs = 32;
+ u32 pru_sts;
+ int pru_is_running;
+
+ seq_puts(s, "============== Control Registers ==============\n");
+ seq_printf(s, "CTRL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTRL));
+ pru_sts = pru_control_read_reg(pru, PRU_CTRL_STS);
+ seq_printf(s, "STS (PC) := 0x%08x (0x%08x)\n", pru_sts, pru_sts << 2);
+ seq_printf(s, "WAKEUP_EN := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_WAKEUP_EN));
+ seq_printf(s, "CYCLE := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CYCLE));
+ seq_printf(s, "STALL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_STALL));
+ seq_printf(s, "CTBIR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR0));
+ seq_printf(s, "CTBIR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR1));
+ seq_printf(s, "CTPPR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR0));
+ seq_printf(s, "CTPPR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR1));
+
+ seq_puts(s, "=============== Debug Registers ===============\n");
+ pru_is_running = pru_control_read_reg(pru, PRU_CTRL_CTRL) &
+ CTRL_CTRL_RUNSTATE;
+ if (pru_is_running) {
+ seq_puts(s, "PRU is executing, cannot print/access debug registers.\n");
+ return 0;
+ }
+
+ for (i = 0; i < nregs; i++) {
+ seq_printf(s, "GPREG%-2d := 0x%08x\tCT_REG%-2d := 0x%08x\n",
+ i, pru_debug_read_reg(pru, PRU_DEBUG_GPREG(i)),
+ i, pru_debug_read_reg(pru, PRU_DEBUG_CT_REG(i)));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(regs);
+
+/*
+ * Control PRU single-step mode
+ *
+ * This is a debug helper function used for controlling the single-step
+ * mode of the PRU. The PRU Debug registers are not accessible when the
+ * PRU is in RUNNING state.
+ *
+ * Writing a non-zero value sets the PRU into single-step mode irrespective
+ * of its previous state. The PRU mode is saved only on the first set into
+ * a single-step mode. Writing a zero value will restore the PRU into its
+ * original mode.
+ */
+static int pru_rproc_debug_ss_set(void *data, u64 val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+ u32 reg_val;
+
+ val = val ? 1 : 0;
+ if (!val && !pru->dbg_single_step)
+ return 0;
+
+ reg_val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+
+ if (val && !pru->dbg_single_step)
+ pru->dbg_continuous = reg_val;
+
+ if (val)
+ reg_val |= CTRL_CTRL_SINGLE_STEP | CTRL_CTRL_EN;
+ else
+ reg_val = pru->dbg_continuous;
+
+ pru->dbg_single_step = val;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, reg_val);
+
+ return 0;
+}
+
+static int pru_rproc_debug_ss_get(void *data, u64 *val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+
+ *val = pru->dbg_single_step;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
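+
+/*
+ * Illustrative usage (the path assumes this PRU was registered by the core
+ * as remoteproc0):
+ *
+ *   echo 1 > /sys/kernel/debug/remoteproc/remoteproc0/single_step
+ *   echo 0 > /sys/kernel/debug/remoteproc/remoteproc0/single_step
+ */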
+
+/*
+ * Create PRU-specific debugfs entries
+ *
+ * The entries are created only if the parent remoteproc debugfs directory
+ * exists, and will be cleaned up by the remoteproc core.
+ */
+static void pru_rproc_create_debug_entries(struct rproc *rproc)
+{
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_create_file("regs", 0400, rproc->dbg_dir,
+ rproc, &regs_fops);
+ debugfs_create_file("single_step", 0600, rproc->dbg_dir,
+ rproc, &pru_rproc_debug_ss_fops);
+}
+
+static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+{
+ if (!pru->mapped_irq)
+ return;
+
+ while (pru->evt_count) {
+ pru->evt_count--;
+ if (pru->mapped_irq[pru->evt_count] > 0)
+ irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ }
+
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+}
+
+/*
+ * pru_rproc_vring_interrupt() - interrupt handler for processing vrings
+ * @irq: irq number associated with the PRU event MPU is listening on
+ * @data: interrupt handler data, will be a PRU rproc structure
+ *
+ * This handler is used by the PRU remoteproc driver when using PRU system
+ * events for processing the virtqueues. Unlike the mailbox IP, there is
+ * no payload associated with an interrupt, so either a unique event is
+ * used for each virtqueue kick, or both virtqueues are processed on a
+ * single event. The latter is chosen to conserve the usable PRU system
+ * events.
+ */
+static irqreturn_t pru_rproc_vring_interrupt(int irq, void *data)
+{
+ struct pru_rproc *pru = data;
+
+ dev_dbg(&pru->rproc->dev, "got vring irq\n");
+
+ /* process incoming buffers on both the Rx and Tx vrings */
+ rproc_vq_interrupt(pru->rproc, 0);
+ rproc_vq_interrupt(pru->rproc, 1);
+
+ return IRQ_HANDLED;
+}
+
+/* Kick a virtqueue. */
+static void pru_rproc_kick(struct rproc *rproc, int vq_id)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ int ret;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+
+ if (list_empty(&pru->rproc->rvdevs))
+ return;
+
+ dev_dbg(dev, "kicking vqid %d on %s%d\n", vq_id,
+ names[pru->data->type], pru->id);
+
+ ret = irq_set_irqchip_state(pru->mapped_irq[0], IRQCHIP_STATE_PENDING, true);
+ if (ret < 0)
+ dev_err(dev, "pruss_intc_trigger failed: %d\n", ret);
+}
+
+/* Register vring irq handler if needed. */
+static int pru_vring_interrupt_setup(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ struct platform_device *pdev = to_platform_device(pru->dev);
+ int ret;
+
+ if (list_empty(&pru->rproc->rvdevs))
+ return 0;
+
+ /* get vring interrupts for supporting virtio rpmsg */
+ pru->irq_vring = platform_get_irq_byname(pdev, "vring");
+ if (pru->irq_vring <= 0) {
+ ret = pru->irq_vring;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to get vring interrupt, status = %d\n",
+ ret);
+
+ return ret;
+ }
+
+ ret = request_threaded_irq(pru->irq_vring, NULL,
+ pru_rproc_vring_interrupt, IRQF_ONESHOT,
+ dev_name(dev), pru);
+ if (ret) {
+ dev_err(dev, "failed to register vring irq handler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse the custom PRU interrupt map resource and configure the INTC
+ * appropriately.
+ */
+static int pru_handle_intrmap(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct pru_rproc *pru = rproc->priv;
+ struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ struct irq_fwspec fwspec;
+ struct device_node *parent, *irq_parent;
+ int i, ret = 0;
+
+ /* not having pru_interrupt_map is not an error */
+ if (!rsc)
+ return 0;
+
+ /* currently supporting only type 0 */
+ if (rsc->type != 0) {
+ dev_err(dev, "unsupported rsc type: %d\n", rsc->type);
+ return -EINVAL;
+ }
+
+ if (rsc->num_evts > MAX_PRU_SYS_EVENTS)
+ return -EINVAL;
+
+ if (sizeof(*rsc) + rsc->num_evts * sizeof(struct pruss_int_map) !=
+ pru->pru_interrupt_map_sz)
+ return -EINVAL;
+
+ pru->evt_count = rsc->num_evts;
+ pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!pru->mapped_irq) {
+ pru->evt_count = 0;
+ return -ENOMEM;
+ }
+
+ /*
+ * parse and fill in system event to interrupt channel and
+ * channel-to-host mapping. The interrupt controller to be used
+ * for these mappings for a given PRU remoteproc is always its
+ * corresponding sibling PRUSS INTC node.
+ */
+ parent = of_get_parent(dev_of_node(pru->dev));
+ if (!parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ irq_parent = of_get_child_by_name(parent, "interrupt-controller");
+ of_node_put(parent);
+ if (!irq_parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.param_count = 3;
+ for (i = 0; i < pru->evt_count; i++) {
+ fwspec.param[0] = rsc->pru_intc_map[i].event;
+ fwspec.param[1] = rsc->pru_intc_map[i].chnl;
+ fwspec.param[2] = rsc->pru_intc_map[i].host;
+
+ dev_dbg(dev, "mapping%d: event %d, chnl %d, host %d\n",
+ i, fwspec.param[0], fwspec.param[1], fwspec.param[2]);
+
+ pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ if (!pru->mapped_irq[i]) {
+ dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
+ i, fwspec.param[0], fwspec.param[1],
+ fwspec.param[2]);
+ ret = -EINVAL;
+ goto map_fail;
+ }
+ }
+ of_node_put(irq_parent);
+
+ return ret;
+
+map_fail:
+ pru_dispose_irq_mapping(pru);
+ of_node_put(irq_parent);
+
+ return ret;
+}
+
+static int pru_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+ int ret;
+
+ dev_dbg(dev, "starting %s%d: entry-point = 0x%llx\n",
+ names[pru->data->type], pru->id, (rproc->bootaddr >> 2));
+
+ ret = pru_handle_intrmap(rproc);
+ /*
+ * reset references to pru interrupt map - they will stop being valid
+ * after rproc_start returns
+ */
+ pru->pru_interrupt_map = NULL;
+ pru->pru_interrupt_map_sz = 0;
+ if (ret)
+ return ret;
+
+ ret = pru_vring_interrupt_setup(rproc);
+ if (ret)
+ goto fail;
+
+ val = CTRL_CTRL_EN | ((rproc->bootaddr >> 2) << 16);
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ return 0;
+
+fail:
+ /* dispose irq mapping - new firmware can provide new mapping */
+ if (pru->mapped_irq)
+ pru_dispose_irq_mapping(pru);
+
+ return ret;
+}
+
+static int pru_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+
+ dev_dbg(dev, "stopping %s%d\n", names[pru->data->type], pru->id);
+
+ val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+ val &= ~CTRL_CTRL_EN;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ if (!list_empty(&pru->rproc->rvdevs) && pru->irq_vring > 0)
+ free_irq(pru->irq_vring, pru);
+
+ /* dispose irq mapping - new firmware can provide new mapping */
+ pru_dispose_irq_mapping(pru);
+
+ /* dispose vring mapping as well */
+ if (pru->irq_vring > 0)
+ irq_dispose_mapping(pru->irq_vring);
+
+ return 0;
+}
+
+/*
+ * Convert PRU device address (data spaces only) to kernel virtual address.
+ *
+ * Each PRU has access to all data memories within the PRUSS, accessible at
+ * different ranges. So, look through both its primary and secondary Data
+ * RAMs as well as any shared Data RAM to convert a PRU device address to
+ * kernel virtual address. Data RAM0 is primary Data RAM for PRU0 and Data
+ * RAM1 is primary Data RAM for PRU1.
+ */
+static void *pru_d_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ struct pruss_mem_region dram0, dram1, shrd_ram;
+ struct pruss *pruss = pru->pruss;
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ dram0 = pruss->mem_regions[PRUSS_MEM_DRAM0];
+ dram1 = pruss->mem_regions[PRUSS_MEM_DRAM1];
+ /* PRU1 has its local RAM addresses reversed */
+ if (pru->id == 1)
+ swap(dram0, dram1);
+ shrd_ram = pruss->mem_regions[PRUSS_MEM_SHRD_RAM2];
+
+ if (da >= PRU_PDRAM_DA && da + len <= PRU_PDRAM_DA + dram0.size) {
+ offset = da - PRU_PDRAM_DA;
+ va = (__force void *)(dram0.va + offset);
+ } else if (da >= PRU_SDRAM_DA &&
+ da + len <= PRU_SDRAM_DA + dram1.size) {
+ offset = da - PRU_SDRAM_DA;
+ va = (__force void *)(dram1.va + offset);
+ } else if (da >= PRU_SHRDRAM_DA &&
+ da + len <= PRU_SHRDRAM_DA + shrd_ram.size) {
+ offset = da - PRU_SHRDRAM_DA;
+ va = (__force void *)(shrd_ram.va + offset);
+ }
+
+ return va;
+}
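+
+/*
+ * Worked example (illustrative, assuming the shared Data RAM is large
+ * enough): for da = 0x10004 and len = 4, the address falls in the shared
+ * Data RAM window (PRU_SHRDRAM_DA = 0x10000), so the function returns
+ * shrd_ram.va + 0x4.
+ */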
+
+/*
+ * Convert PRU device address (instruction space) to kernel virtual address.
+ *
+ * A PRU does not have a unified address space. Each PRU has its very own
+ * private Instruction RAM, and its device address is identical to that of
+ * its primary Data RAM.
+ */
+static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ /*
+ * GNU binutils do not support multiple address spaces. The GNU
+ * linker's default linker script places IRAM at an arbitrary high
+ * offset, in order to differentiate it from DRAM. Hence we need to
+ * strip the artificial offset in the IRAM addresses coming from the
+ * ELF file.
+ *
+ * The TI proprietary linker would never set those higher IRAM address
+ * bits anyway. PRU architecture limits the program counter to 16-bit
+ * word-address range. This in turn corresponds to 18-bit IRAM
+ * byte-address range for ELF.
+ *
+ * Two more bits are added just in case to make the final 20-bit mask.
+ * The idea is to have a safeguard in case TI decides to add banking
+ * in future SoCs.
+ */
+ da &= 0xfffff;
+
+ if (da >= PRU_IRAM_DA &&
+ da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
+ offset = da - PRU_IRAM_DA;
+ va = (__force void *)(pru->mem_regions[PRU_IOMEM_IRAM].va +
+ offset);
+ }
+
+ return va;
+}
+
+/*
+ * Provide address translations for only PRU Data RAMs through the remoteproc
+ * core for any PRU client drivers. The PRU Instruction RAM access is restricted
+ * only to the PRU loader code.
+ */
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct pru_rproc *pru = rproc->priv;
+
+ return pru_d_da_to_va(pru, da, len);
+}
+
+/* PRU-specific address translator used by PRU loader. */
+static void *pru_da_to_va(struct rproc *rproc, u64 da, size_t len, bool is_iram)
+{
+ struct pru_rproc *pru = rproc->priv;
+ void *va;
+
+ if (is_iram)
+ va = pru_i_da_to_va(pru, da, len);
+ else
+ va = pru_d_da_to_va(pru, da, len);
+
+ return va;
+}
+
+static struct rproc_ops pru_rproc_ops = {
+ .start = pru_rproc_start,
+ .stop = pru_rproc_stop,
+ .kick = pru_rproc_kick,
+ .da_to_va = pru_rproc_da_to_va,
+};
+
+/*
+ * Custom memory copy implementation for ICSSG PRU/RTU/Tx_PRU Cores
+ *
+ * The ICSSG PRU/RTU/Tx_PRU cores have a memory copying issue with IRAM
+ * memories, that is not seen on previous generation SoCs. The data is reflected
+ * properly in the IRAM memories only for integer (4-byte) copies. Any unaligned
+ * copies result in all the other pre-existing bytes zeroed out within that
+ * 4-byte boundary, thereby resulting in wrong text/code in the IRAMs. Also, the
+ * IRAM memory port interface does not allow any 8-byte copies (as commonly used
+ * by ARM64 memcpy implementation) and throws an exception. The DRAM memory
+ * ports do not show this behavior.
+ */
+static int pru_rproc_memcpy(void *dest, const void *src, size_t count)
+{
+ const u32 *s = src;
+ u32 *d = dest;
+ size_t size = count / 4;
+ u32 *tmp_src = NULL;
+
+ /*
+ * TODO: relax limitation of 4-byte aligned dest addresses and copy
+ * sizes
+ */
+ if ((long)dest % 4 || count % 4)
+ return -EINVAL;
+
+ /* src offsets in ELF firmware image can be non-aligned */
+ if ((long)src % 4) {
+ tmp_src = kmemdup(src, count, GFP_KERNEL);
+ if (!tmp_src)
+ return -ENOMEM;
+ s = tmp_src;
+ }
+
+ while (size--)
+ *d++ = *s++;
+
+ kfree(tmp_src);
+
+ return 0;
+}
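+
+/*
+ * For example (illustrative), an aligned 8-byte copy above is performed as
+ * two discrete 32-bit stores, which is the access pattern the ICSSG IRAM
+ * port requires.
+ */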
+
+static int
+pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct pru_rproc *pru = rproc->priv;
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ bool is_iram;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD || !filesz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ is_iram = phdr->p_flags & PF_X;
+ ptr = pru_da_to_va(rproc, da, memsz, is_iram);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pru->data->is_k3) {
+ ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
+ filesz);
+ if (ret) {
+ dev_err(dev, "PRU memory copy failed for da 0x%x memsz 0x%x\n",
+ da, memsz);
+ break;
+ }
+ } else {
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+ /* skip the memzero logic performed by remoteproc ELF loader */
+ }
+
+ return ret;
+}
+
+static const void *
+pru_rproc_find_interrupt_map(struct device *dev, const struct firmware *fw)
+{
+ struct elf32_shdr *shdr, *name_table_shdr;
+ const char *name_table;
+ const u8 *elf_data = fw->data;
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ u16 shnum = ehdr->e_shnum;
+ u16 shstrndx = ehdr->e_shstrndx;
+ int i;
+
+ /* first, get the section header */
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ /* compute name table section header entry in shdr array */
+ name_table_shdr = shdr + shstrndx;
+ /* finally, compute the name table section address in elf */
+ name_table = elf_data + name_table_shdr->sh_offset;
+
+ for (i = 0; i < shnum; i++, shdr++) {
+ u32 size = shdr->sh_size;
+ u32 offset = shdr->sh_offset;
+ u32 name = shdr->sh_name;
+
+ if (strcmp(name_table + name, ".pru_irq_map"))
+ continue;
+
+ /* make sure we have the entire irq map */
+ if (offset + size > fw->size || offset + size < size) {
+ dev_err(dev, ".pru_irq_map section truncated\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* make sure irq map has at least the header */
+ if (sizeof(struct pru_irq_rsc) > size) {
+ dev_err(dev, "header-less .pru_irq_map section\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return shdr;
+ }
+
+ dev_dbg(dev, "no .pru_irq_map section found for this fw\n");
+
+ return NULL;
+}
+
+/*
+ * Use a custom parse_fw callback function for dealing with PRU firmware
+ * specific sections.
+ *
+ * The firmware blob can contain optional ELF sections: .resource_table section
+ * and .pru_irq_map one. The second one contains the PRUSS interrupt mapping
+ * description, which needs to be setup before powering on the PRU core. To
+ * avoid RAM wastage this ELF section is not mapped to any ELF segment (by the
+ * firmware linker) and therefore is not loaded to PRU memory.
+ */
+static int pru_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const u8 *elf_data = fw->data;
+ const void *shdr;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
+ int ret;
+
+ /* load optional rsc table */
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ dev_dbg(&rproc->dev, "no resource table found for this fw\n");
+ else if (ret)
+ return ret;
+
+ /* find the .pru_irq_map section; not having it is not an error */
+ shdr = pru_rproc_find_interrupt_map(dev, fw);
+ if (IS_ERR(shdr))
+ return PTR_ERR(shdr);
+
+ if (!shdr)
+ return 0;
+
+ /* preserve pointer to the PRU interrupt map together with its size */
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ pru->pru_interrupt_map = (struct pru_irq_rsc *)(elf_data + sh_offset);
+ pru->pru_interrupt_map_sz = elf_shdr_get_sh_size(class, shdr);
+
+ return 0;
+}
+
+/*
+ * Compute PRU id based on the IRAM addresses. The PRU IRAMs are
+ * always at a particular offset within the PRUSS address space.
+ */
+static int pru_rproc_set_id(struct pru_rproc *pru)
+{
+ int ret = 0;
+
+ switch (pru->mem_regions[PRU_IOMEM_IRAM].pa & PRU_IRAM_ADDR_MASK) {
+ case TX_PRU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU0_IRAM_ADDR_MASK:
+ pru->id = PRUSS_PRU0;
+ break;
+ case TX_PRU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU1_IRAM_ADDR_MASK:
+ pru->id = PRUSS_PRU1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
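+
+/*
+ * Worked example (illustrative): a PRU IRAM at offset 0x34000 within the
+ * PRUSS matches PRU0_IRAM_ADDR_MASK after masking with PRU_IRAM_ADDR_MASK,
+ * so the core is identified as PRUSS_PRU0.
+ */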
+
+static int pru_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *ppdev = to_platform_device(dev->parent);
+ struct pru_rproc *pru;
+ const char *fw_name;
+ struct rproc *rproc = NULL;
+ struct resource *res;
+ int i, ret;
+ const struct pru_private_data *data;
+ const char *mem_names[PRU_IOMEM_MAX] = { "iram", "control", "debug" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = of_property_read_string(np, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(dev, "unable to retrieve firmware-name %d\n", ret);
+ return ret;
+ }
+
+ rproc = devm_rproc_alloc(dev, pdev->name, &pru_rproc_ops, fw_name,
+ sizeof(*pru));
+ if (!rproc) {
+ dev_err(dev, "rproc_alloc failed\n");
+ return -ENOMEM;
+ }
+ /* use a custom load function to deal with PRU-specific quirks */
+ rproc->ops->load = pru_rproc_load_elf_segments;
+
+ /* use a custom parse function to deal with PRU-specific resources */
+ rproc->ops->parse_fw = pru_rproc_parse_fw;
+
+ /* error recovery is not supported for PRUs */
+ rproc->recovery_disabled = true;
+
+ /*
+ * rproc_add will auto-boot the processor normally, but this is not
+ * desired with PRU client driven boot-flow methodology. A PRU
+ * application/client driver will boot the corresponding PRU
+ * remote-processor as part of its state machine either through the
+ * remoteproc sysfs interface or through the equivalent kernel API.
+ */
+ rproc->auto_boot = false;
+
+ pru = rproc->priv;
+ pru->dev = dev;
+ pru->data = data;
+ pru->pruss = platform_get_drvdata(ppdev);
+ pru->rproc = rproc;
+ pru->fw_name = fw_name;
+ spin_lock_init(&pru->rmw_lock);
+ mutex_init(&pru->lock);
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ pru->mem_regions[i].va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pru->mem_regions[i].va)) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ ret = PTR_ERR(pru->mem_regions[i].va);
+ return ret;
+ }
+ pru->mem_regions[i].pa = res->start;
+ pru->mem_regions[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pru->mem_regions[i].pa,
+ pru->mem_regions[i].size, pru->mem_regions[i].va);
+ }
+
+ ret = pru_rproc_set_id(pru);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = devm_rproc_add(dev, pru->rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ return ret;
+ }
+
+ pru_rproc_create_debug_entries(rproc);
+
+ dev_dbg(dev, "PRU rproc node %pOF probed successfully\n", np);
+
+ return 0;
+}
+
+static int pru_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
+
+ return 0;
+}
+
+static const struct pru_private_data pru_data = {
+ .type = PRU_TYPE_PRU,
+};
+
+static const struct pru_private_data k3_pru_data = {
+ .type = PRU_TYPE_PRU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_rtu_data = {
+ .type = PRU_TYPE_RTU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_tx_pru_data = {
+ .type = PRU_TYPE_TX_PRU,
+ .is_k3 = 1,
+};
+
+static const struct of_device_id pru_rproc_match[] = {
+ { .compatible = "ti,am3356-pru", .data = &pru_data },
+ { .compatible = "ti,am4376-pru", .data = &pru_data },
+ { .compatible = "ti,am5728-pru", .data = &pru_data },
+ { .compatible = "ti,k2g-pru", .data = &pru_data },
+ { .compatible = "ti,am654-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am654-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,j721e-pru", .data = &k3_pru_data },
+ { .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am642-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am642-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am642-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pru_rproc_match);
+
+static struct platform_driver pru_rproc_driver = {
+ .driver = {
+ .name = PRU_RPROC_DRVNAME,
+ .of_match_table = pru_rproc_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pru_rproc_probe,
+ .remove = pru_rproc_remove,
+};
+module_platform_driver(pru_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("PRU-ICSS Remote Processor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/pru_rproc.h b/drivers/remoteproc/pru_rproc.h
new file mode 100644
index 000000000000..8ee9c3171610
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * PRUSS Remote Processor specific types
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRU_RPROC_H_
+#define _PRU_RPROC_H_
+
+/**
+ * struct pruss_int_map - PRU system events _to_ channel and host mapping
+ * @event: number of the system event
+ * @chnl: channel number assigned to a given @event
+ * @host: host number assigned to a given @chnl
+ *
+ * PRU system events are mapped to channels, and these channels are mapped
+ * to host interrupts. Events can be mapped to channels in a one-to-one or
+ * many-to-one ratio (multiple events per channel), and channels can be
+ * mapped to host interrupts in a one-to-one or many-to-one ratio (multiple
+ * channels per interrupt).
+ */
+struct pruss_int_map {
+ u8 event;
+ u8 chnl;
+ u8 host;
+};
+
+/**
+ * struct pru_irq_rsc - PRU firmware section header for IRQ data
+ * @type: resource type
+ * @num_evts: number of described events
+ * @pru_intc_map: PRU interrupt routing description
+ *
+ * The PRU firmware blob can contain an optional .pru_irq_map ELF section,
+ * which provides the PRUSS interrupt mapping description. The pru_irq_rsc
+ * struct describes the resource entry format.
+ */
+struct pru_irq_rsc {
+ u8 type;
+ u8 num_evts;
+ struct pruss_int_map pru_intc_map[];
+} __packed;
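+
+/*
+ * Illustrative only (not part of the kernel ABI): a firmware routing PRU
+ * system event 16 to channel 2, and channel 2 to host interrupt 2, could
+ * describe that in its .pru_irq_map section roughly as:
+ *
+ *   static const struct pru_irq_rsc pru_irq_map = {
+ *       .type = 0,
+ *       .num_evts = 1,
+ *       .pru_intc_map = { { .event = 16, .chnl = 2, .host = 2 } },
+ *   };
+ *
+ * The exact .type value is a firmware convention and is shown here only as
+ * a placeholder.
+ */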
+
+#endif /* _PRU_RPROC_H_ */
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index ff92ed25d8b0..9405a758df26 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -32,15 +32,29 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
return -EFAULT;
if (!strncmp(cmd, "start", len)) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
ret = rproc_boot(rproc);
} else if (!strncmp(cmd, "stop", len)) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
+ if (rproc->state == RPROC_ATTACHED &&
+ rproc->detach_on_shutdown) {
+ dev_err(&rproc->dev,
+ "stop not supported for this rproc, use detach\n");
+ return -EINVAL;
+ }
+
rproc_shutdown(rproc);
+ } else if (!strncmp(cmd, "detach", len)) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognized option\n");
ret = -EINVAL;
@@ -79,11 +93,17 @@ static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned l
static int rproc_cdev_release(struct inode *inode, struct file *filp)
{
struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+
+ if (!rproc->cdev_put_on_release)
+ return 0;
- if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING)
rproc_shutdown(rproc);
+ else if (rproc->state == RPROC_ATTACHED)
+ ret = rproc_detach(rproc);
- return 0;
+ return ret;
}
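+
+/*
+ * Userspace drives the commands handled above by writing plain strings to
+ * the rproc character device, e.g. (illustrative shell session, device
+ * node name may differ):
+ *
+ *   echo start  > /dev/remoteproc0
+ *   echo detach > /dev/remoteproc0
+ */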
static const struct file_operations rproc_fops = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index cc55ff0128cf..c330ee13ebdb 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -37,6 +37,9 @@
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
+#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>
@@ -226,6 +229,61 @@ out:
EXPORT_SYMBOL(rproc_da_to_va);
/**
+ * rproc_pa_to_da() - lookup the rproc device address for a physical address
+ * @rproc: handle of a remote processor
+ * @pa: physical address of the buffer to translate
+ * @da: device address to return
+ *
+ * Communication clients of remote processors often need a means to convert
+ * a host buffer pointer to an equivalent device virtual address that the
+ * code running on the remote processor can operate on. These buffer pointers
+ * can point either into the physically contiguous memory regions (or
+ * "carveouts") or into memory-mapped Device IO memory. This function
+ * translates a given physical address to its associated device address.
+ *
+ * The function looks through both the carveouts and the device memory mappings
+ * since both of them are stored in separate lists.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise. The translated
+ * device address is returned through the @da argument.
+ */
+int rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
+{
+ int ret = -EINVAL;
+ struct rproc_mem_entry *maps = NULL;
+
+ if (!rproc || !da)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&rproc->lock))
+ return -EINTR;
+
+ if (rproc->state == RPROC_RUNNING || rproc->state == RPROC_SUSPENDED) {
+ /* Look in the mappings first */
+ list_for_each_entry(maps, &rproc->mappings, node) {
+ if (pa >= maps->dma && pa < (maps->dma + maps->len)) {
+ *da = maps->da + (pa - maps->dma);
+ ret = 0;
+ goto exit;
+ }
+ }
+ /* If not, check in the carveouts */
+ list_for_each_entry(maps, &rproc->carveouts, node) {
+ if (pa >= maps->dma && pa < (maps->dma + maps->len)) {
+ *da = maps->da + (pa - maps->dma);
+ ret = 0;
+ break;
+ }
+ }
+ }
+exit:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_pa_to_da);
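+
+/*
+ * Usage sketch (hypothetical client code, not part of this patch): translate
+ * a physical buffer address before publishing it to the remote side. The
+ * helper name below is made up for illustration:
+ *
+ *   u64 da;
+ *
+ *   if (!rproc_pa_to_da(rproc, pa, &da))
+ *       notify_remote_of_buffer(da);
+ */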
+
+/**
* rproc_find_carveout_by_name() - lookup the carveout region by a name
* @rproc: handle of a remote processor
* @name: carveout name to find (format string)
@@ -482,7 +540,7 @@ static int copy_dma_range_map(struct device *to, struct device *from)
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
- * @rsc: the vring resource descriptor
+ * @ptr: the vring resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -507,9 +565,10 @@ static int copy_dma_range_map(struct device *to, struct device *from)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
@@ -627,9 +686,95 @@ void rproc_vdev_release(struct kref *ref)
}
/**
+ * rproc_process_last_trace() - set up a buffer to capture the trace snapshot
+ * before recovery
+ * @rproc: the remote processor
+ * @trace: the trace resource descriptor
+ * @count: the index of the trace under process
+ *
+ * The last trace is allocated if a previous last trace entry does not exist,
+ * and the contents of the trace buffer are copied during a recovery cleanup.
+ *
+ * NOTE: The memory in the original trace buffer is currently not zeroed out,
+ * but this could be done if the remote processor does not zero-initialize the
+ * trace memory region itself.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_process_last_trace(struct rproc *rproc,
+ struct rproc_debug_trace *trace, int count)
+{
+ struct rproc_debug_trace *trace_last, *tmp_trace;
+ struct rproc_mem_entry *tmem;
+ struct device *dev = &rproc->dev;
+ char name[16];
+ int i = 0;
+
+ if (!rproc || !trace)
+ return -EINVAL;
+
+ /* lookup trace va if not stored already */
+ tmem = &trace->trace_mem;
+ if (!tmem->va) {
+ tmem->va = rproc_da_to_va(rproc, tmem->da, tmem->len);
+ if (!tmem->va)
+ return -EINVAL;
+ }
+
+ if (count > rproc->num_last_traces) {
+ /* create a new trace_last entry */
+ snprintf(name, sizeof(name), "trace%d_last", count - 1);
+ trace_last = kzalloc(sizeof(*trace_last), GFP_KERNEL);
+ if (!trace_last)
+ return -ENOMEM;
+ tmem = &trace_last->trace_mem;
+ } else {
+ /* reuse the already existing trace_last entry */
+ list_for_each_entry_safe(trace_last, tmp_trace,
+ &rproc->last_traces, node) {
+ if (++i == count)
+ break;
+ }
+
+ tmem = &trace_last->trace_mem;
+ if (tmem->len != trace->trace_mem.len) {
+ dev_warn(dev, "len does not match between trace and trace_last\n");
+ return -EINVAL;
+ }
+
+ goto copy_and_exit;
+ }
+
+ /* allocate memory and create debugfs file for the new last_trace */
+ tmem->len = trace->trace_mem.len;
+ tmem->va = vmalloc(tmem->len);
+ if (!tmem->va) {
+ kfree(trace_last);
+ return -ENOMEM;
+ }
+
+ trace_last->tfile = rproc_create_trace_file(name, rproc, trace_last);
+ if (!trace_last->tfile) {
+ dev_err(dev, "trace%d_last create debugfs failed\n", count - 1);
+ vfree(tmem->va);
+ kfree(trace_last);
+ return -EINVAL;
+ }
+
+ list_add_tail(&trace_last->node, &rproc->last_traces);
+ rproc->num_last_traces++;
+
+copy_and_exit:
+ /* copy the trace contents to last trace */
+ memcpy(tmem->va, trace->trace_mem.va, trace->trace_mem.len);
+
+ return 0;
+}
+
+/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
- * @rsc: the trace resource descriptor
+ * @ptr: the trace resource descriptor
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -643,9 +788,10 @@ void rproc_vdev_release(struct kref *ref)
*
* Returns 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
@@ -695,7 +841,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
- * @rsc: the devmem resource entry
+ * @ptr: the devmem resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
@@ -718,9 +864,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
*/
-static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
@@ -757,6 +904,7 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
* We can't trust the remote processor not to change the resource
* table, so we must maintain this info independently.
*/
+ mapping->dma = rsc->pa;
mapping->da = rsc->da;
mapping->len = rsc->len;
list_add_tail(&mapping->node, &rproc->mappings);
@@ -898,7 +1046,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
- * @rsc: the resource entry
+ * @ptr: the resource entry
* @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
@@ -915,9 +1063,9 @@ static int rproc_release_carveout(struct rproc *rproc,
* pressure is important; it may have a substantial impact on performance.
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
+ void *ptr, int offset, int avail)
{
+ struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
@@ -1099,10 +1247,10 @@ EXPORT_SYMBOL(rproc_of_parse_firmware);
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
- [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
- [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
- [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
+ [RSC_CARVEOUT] = rproc_handle_carveout,
+ [RSC_DEVMEM] = rproc_handle_devmem,
+ [RSC_TRACE] = rproc_handle_trace,
+ [RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
@@ -1299,6 +1447,19 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
return 0;
}
+/**
+ * rproc_free_trace() - helper function to cleanup a trace entry
+ * @trace: the last trace element to be cleaned up
+ * @ltrace: flag to indicate if this is last trace or regular trace
+ */
+static void rproc_free_trace(struct rproc_debug_trace *trace, bool ltrace)
+{
+ rproc_remove_trace_file(trace->tfile);
+ list_del(&trace->node);
+ if (ltrace)
+ vfree(trace->trace_mem.va);
+ kfree(trace);
+}
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
@@ -1316,10 +1477,14 @@ void rproc_resource_cleanup(struct rproc *rproc)
/* clean up debugfs trace entries */
list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
- rproc_remove_trace_file(trace->tfile);
+ rproc_free_trace(trace, false);
rproc->num_traces--;
- list_del(&trace->node);
- kfree(trace);
+ }
+
+ /* clean up debugfs last trace entries */
+ list_for_each_entry_safe(trace, ttmp, &rproc->last_traces, node) {
+ rproc_free_trace(trace, true);
+ rproc->num_last_traces--;
}
/* clean up iommu mapping entries */
@@ -1359,11 +1524,14 @@ static int rproc_start(struct rproc *rproc, const struct firmware *fw)
struct device *dev = &rproc->dev;
int ret;
- /* load the ELF segments to memory */
- ret = rproc_load_segments(rproc, fw);
- if (ret) {
- dev_err(dev, "Failed to load program segments: %d\n", ret);
- return ret;
+ if (!rproc->skip_firmware_load) {
+ /* load the ELF segments to memory */
+ ret = rproc_load_segments(rproc, fw);
+ if (ret) {
+ dev_err(dev, "Failed to load program segments: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1418,7 +1586,7 @@ reset_table_ptr:
return ret;
}
-static int rproc_attach(struct rproc *rproc)
+static int __rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1446,7 +1614,7 @@ static int rproc_attach(struct rproc *rproc)
goto stop_rproc;
}
- rproc->state = RPROC_RUNNING;
+ rproc->state = RPROC_ATTACHED;
dev_info(dev, "remote processor %s is now attached\n", rproc->name);
@@ -1473,7 +1641,11 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
if (ret)
return ret;
- dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
+ if (!rproc->skip_firmware_load)
+ dev_info(dev, "Booting fw image %s, size %zd\n",
+ name, fw->size);
+ else
+ dev_info(dev, "Booting unspecified pre-loaded fw image\n");
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1539,11 +1711,149 @@ disable_iommu:
return ret;
}
+static int rproc_set_rsc_table(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+ struct device *dev = &rproc->dev;
+ size_t table_sz;
+ int ret;
+
+ table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
+ if (!table_ptr) {
+ /* Not having a resource table is acceptable */
+ return 0;
+ }
+
+ if (IS_ERR(table_ptr)) {
+ ret = PTR_ERR(table_ptr);
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * If it is possible to detach the remote processor, keep an untouched
+ * copy of the resource table. That way we can start fresh again when
+ * the remote processor is re-attached, that is:
+ *
+ * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
+ *
+ * Free'd in rproc_reset_rsc_table_on_detach() and
+ * rproc_reset_rsc_table_on_stop().
+ */
+ if (rproc->ops->detach) {
+ rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
+ if (!rproc->clean_table)
+ return -ENOMEM;
+ } else {
+ rproc->clean_table = NULL;
+ }
+
+ rproc->cached_table = NULL;
+ rproc->table_ptr = table_ptr;
+ rproc->table_sz = table_sz;
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If we made it to this point a clean_table _must_ have been
+ * allocated in rproc_set_rsc_table(). If one isn't present
+ * something went really wrong and we must complain.
+ */
+ if (WARN_ON(!rproc->clean_table))
+ return -EINVAL;
+
+ /* Remember where the external entity installed the resource table */
+ table_ptr = rproc->table_ptr;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_detach().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+
+ /*
+ * Reset the memory area where the firmware loaded the resource table
+ * to its original value. That way when we re-attach the remote
+ * processor the resource table is clean and ready to be used again.
+ */
+ memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
+
+ /*
+ * The clean resource table is no longer needed. Allocated in
+ * rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
+{
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If a cache table exists the remote processor was started by
+ * the remoteproc core. That cache table should be used for
+ * the rest of the shutdown process.
+ */
+ if (rproc->cached_table)
+ goto out;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_shutdown().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Since the remote processor is being switched off the clean table
+ * won't be needed. Allocated in rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+out:
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+ return 0;
+}
+
/*
* Attach to remote processor - similar to rproc_fw_boot() but without
* the steps that deal with the firmware image.
*/
-static int rproc_actuate(struct rproc *rproc)
+static int rproc_attach(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
@@ -1558,6 +1868,19 @@ static int rproc_actuate(struct rproc *rproc)
return ret;
}
+ /* Do anything that is needed to boot the remote processor */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ ret = rproc_set_rsc_table(rproc);
+ if (ret) {
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ goto unprepare_device;
+ }
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -1572,7 +1895,7 @@ static int rproc_actuate(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto disable_iommu;
+ goto unprepare_device;
}
/* Allocate carveout resources associated to rproc */
@@ -1583,7 +1906,7 @@ static int rproc_actuate(struct rproc *rproc)
goto clean_up_resources;
}
- ret = rproc_attach(rproc);
+ ret = __rproc_attach(rproc);
if (ret)
goto clean_up_resources;
@@ -1591,6 +1914,9 @@ static int rproc_actuate(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
+unprepare_device:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -1644,11 +1970,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
struct device *dev = &rproc->dev;
int ret;
+ /* No need to continue if a stop() operation has not been provided */
+ if (!rproc->ops->stop)
+ return -EINVAL;
+
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
- rproc->table_ptr = rproc->cached_table;
+ ret = rproc_reset_rsc_table_on_stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
@@ -1661,19 +1996,74 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
rproc->state = RPROC_OFFLINE;
- /*
- * The remote processor has been stopped and is now offline, which means
- * that the next time it is brought back online the remoteproc core will
- * be responsible to load its firmware. As such it is no longer
- * autonomous.
- */
- rproc->autonomous = false;
-
dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
+/**
+ * rproc_store_last_traces() - preserve traces from last run
+ * @rproc: rproc handle
+ *
+ * This function will copy the trace contents from the previous crashed run
+ * into newly created or already existing last_trace entries during an error
+ * recovery. This allows the user to inspect the trace contents from the
+ * last crashed run, as the regular trace files will be overwritten with
+ * traces from the subsequent recovered run.
+ */
+static void rproc_store_last_traces(struct rproc *rproc)
+{
+ struct rproc_debug_trace *trace, *ttmp;
+ int count = 0, ret;
+
+ /* handle last trace here */
+ list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
+ ret = rproc_process_last_trace(rproc, trace, ++count);
+ if (ret) {
+ dev_err(&rproc->dev, "could not process last_trace%d\n",
+ count - 1);
+ break;
+ }
+ }
+}
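+
+/*
+ * The preserved snapshots appear alongside the regular trace entries in
+ * debugfs, e.g. (paths illustrative, depending on the debugfs mount point):
+ *
+ *   /sys/kernel/debug/remoteproc/remoteproc0/trace0
+ *   /sys/kernel/debug/remoteproc/remoteproc0/trace0_last
+ */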
+
+/*
+ * __rproc_detach(): Does the opposite of __rproc_attach()
+ */
+static int __rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* No need to continue if a detach() operation has not been provided */
+ if (!rproc->ops->detach)
+ return -EINVAL;
+
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, false);
+
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell the remote processor the core isn't available anymore */
+ ret = rproc_detach_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't detach from rproc: %d\n", ret);
+ return ret;
+ }
+
+ rproc_unprepare_subdevices(rproc);
+
+ rproc->state = RPROC_DETACHED;
+
+ dev_info(dev, "detached remote processor %s\n", rproc->name);
+
+ return 0;
+}
/**
* rproc_trigger_recovery() - recover a remoteproc
@@ -1708,6 +2098,9 @@ int rproc_trigger_recovery(struct rproc *rproc)
/* generate coredump */
rproc_coredump(rproc);
+ /* generate last traces */
+ rproc_store_last_traces(rproc);
+
/* load firmware */
ret = request_firmware(&firmware_p, rproc->firmware, dev);
if (ret < 0) {
@@ -1767,6 +2160,36 @@ out:
}
/**
+ * rproc_get_id() - return the id for the rproc device
+ * @rproc: handle of a remote processor
+ *
+ * Each rproc device is associated with a platform device, which is created
+ * either from device tree (most newer platforms) or through legacy-style
+ * platform device creation (a few older platforms). This function retrieves
+ * a unique id for each remote processor and is useful for clients needing
+ * to distinguish between the remoteprocs. This unique id is derived from
+ * the platform device id for non-DT devices, or from an alternate alias id
+ * for DT devices (since they do not have a valid platform device id). It is
+ * assumed that the platform devices were created with known ids or were
+ * given proper alias ids using the stem "rproc".
+ *
+ * Return: alias id for DT devices or platform device id for non-DT devices
+ * associated with the rproc
+ */
+int rproc_get_id(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (np)
+ return of_alias_get_id(np, "rproc");
+ else
+ return pdev->id;
+}
+EXPORT_SYMBOL(rproc_get_id);
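+
+/*
+ * For DT devices this relies on aliases with the "rproc" stem, e.g.
+ * (illustrative DT snippet, node labels hypothetical):
+ *
+ *   aliases {
+ *       rproc0 = &main_r5fss0_core0;
+ *       rproc1 = &main_r5fss0_core1;
+ *   };
+ */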
+
+/**
* rproc_boot() - boot a remote processor
* @rproc: handle of a remote processor
*
@@ -1811,20 +2234,23 @@ int rproc_boot(struct rproc *rproc)
if (rproc->state == RPROC_DETACHED) {
dev_info(dev, "attaching to %s\n", rproc->name);
- ret = rproc_actuate(rproc);
+ ret = rproc_attach(rproc);
} else {
dev_info(dev, "powering up %s\n", rproc->name);
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
+ if (!rproc->skip_firmware_load) {
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
goto downref_rproc;
+ }
}
ret = rproc_fw_boot(rproc, firmware_p);
- release_firmware(firmware_p);
+ if (!rproc->skip_firmware_load)
+ release_firmware(firmware_p);
}
downref_rproc:
@@ -1870,7 +2296,10 @@ void rproc_shutdown(struct rproc *rproc)
if (!atomic_dec_and_test(&rproc->power))
goto out;
- ret = rproc_stop(rproc, false);
+ if (rproc->detach_on_shutdown && rproc->state == RPROC_ATTACHED)
+ ret = __rproc_detach(rproc);
+ else
+ ret = rproc_stop(rproc, false);
if (ret) {
atomic_inc(&rproc->power);
goto out;
@@ -1894,6 +2323,65 @@ out:
EXPORT_SYMBOL(rproc_shutdown);
/**
+ * rproc_detach() - Detach the remote processor from the
+ * remoteproc core
+ *
+ * @rproc: the remote processor
+ *
+ * Detach a remote processor (previously attached to with rproc_attach()).
+ *
+ * If @rproc is still being used by additional users, this function will
+ * just decrement the power refcount and exit, without disconnecting the
+ * device.
+ *
+ * Function rproc_detach() calls __rproc_detach() in order to let a remote
+ * processor know that services provided by the application processor are
+ * no longer available. From there it should be possible to remove the
+ * platform driver and even power cycle the application processor (if the HW
+ * supports it) without needing to switch off the remote processor.
+ */
+int rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __rproc_detach(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_detach);
+
+/**
* rproc_get_by_phandle() - find a remote processor by phandle
* @phandle: phandle to the rproc
*
@@ -1943,6 +2431,69 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
+/**
+ * rproc_set_firmware() - assign a new firmware
+ * @rproc: rproc handle to which the new firmware is being assigned
+ * @fw_name: new firmware name to be assigned
+ *
+ * This function allows remoteproc drivers or clients to configure a custom
+ * firmware name that is different from the default name used during remoteproc
+ * registration. The function does not trigger a remote processor boot,
+ * only sets the firmware name used for a subsequent boot. This function
+ * should also be called only when the remote processor is offline.
+ *
+ * This allows userspace to configure a different firmware name through
+ * sysfs, or a kernel-level remoteproc or remoteproc client driver to set
+ * a specific firmware when it is controlling the boot and shutdown of the
+ * remote processor.
+ *
+ * Return: 0 on success or a negative value upon failure
+ */
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
+{
+ struct device *dev;
+ int ret, len;
+ char *p;
+
+ if (!rproc || !fw_name)
+ return -EINVAL;
+
+ dev = rproc->dev.parent;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return -EINVAL;
+ }
+
+ if (rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "can't change firmware while running\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ len = strcspn(fw_name, "\n");
+ if (!len) {
+ dev_err(dev, "can't provide empty string for firmware name\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ p = kstrndup(fw_name, len, GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kfree_const(rproc->firmware);
+ rproc->firmware = p;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_firmware);
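+
+/*
+ * Usage sketch (hypothetical caller, firmware name illustrative): change the
+ * image while the core is offline, then boot with it:
+ *
+ *   ret = rproc_set_firmware(rproc, "custom-fw.elf");
+ *   if (!ret)
+ *       ret = rproc_boot(rproc);
+ */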
+
static int rproc_validate(struct rproc *rproc)
{
switch (rproc->state) {
@@ -1953,6 +2504,13 @@ static int rproc_validate(struct rproc *rproc)
*/
if (!rproc->ops->start)
return -EINVAL;
+
+ /*
+ * Userspace driven loading cannot expect to have
+ * auto_boot set.
+ */
+ if (rproc->auto_boot && rproc->skip_firmware_load)
+ return -EINVAL;
break;
case RPROC_DETACHED:
/*
@@ -2023,16 +2581,6 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
- /*
- * Remind ourselves the remote processor has been attached to rather
- * than booted by the remoteproc core. This is important because the
- * RPROC_DETACHED state will be lost as soon as the remote processor
- * has been attached to. Used in firmware_show() and reset in
- * rproc_stop().
- */
- if (rproc->state == RPROC_DETACHED)
- rproc->autonomous = true;
-
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
@@ -2222,6 +2770,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
+ INIT_LIST_HEAD(&rproc->last_traces);
INIT_LIST_HEAD(&rproc->rvdevs);
INIT_LIST_HEAD(&rproc->subdevs);
INIT_LIST_HEAD(&rproc->dump_segments);
@@ -2420,8 +2969,9 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* create a new task to handle the error */
- schedule_work(&rproc->crash_handler);
+ /* create a new task to handle the error if not scheduled already */
+ if (!work_busy(&rproc->crash_handler))
+ schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
@@ -2434,7 +2984,11 @@ static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
rcu_read_lock();
list_for_each_entry_rcu(rproc, &rproc_list, node) {
- if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
+ if (!rproc->ops->panic)
+ continue;
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
continue;
d = rproc->ops->panic(rproc);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index e8bb0ee6b35a..0234766a7467 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -128,11 +128,14 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
{
struct rproc_debug_trace *data = filp->private_data;
struct rproc_mem_entry *trace = &data->trace_mem;
- void *va;
+ void *va = trace->va;
char buf[100];
int len;
- va = rproc_da_to_va(data->rproc, trace->da, trace->len);
+ if (!va) {
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len);
+ trace->va = va;
+ }
if (!va) {
len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index c34002888d2c..6c128c0e3ad1 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -84,7 +84,6 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
@@ -121,6 +120,14 @@ static inline int rproc_attach_device(struct rproc *rproc)
return 0;
}
+static inline int rproc_detach_device(struct rproc *rproc)
+{
+ if (rproc->ops->detach)
+ return rproc->ops->detach(rproc);
+
+ return 0;
+}
+
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
@@ -178,6 +185,16 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
}
static inline
+struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ if (rproc->ops->get_loaded_rsc_table)
+ return rproc->ops->get_loaded_rsc_table(rproc, size);
+
+ return NULL;
+}
+
+static inline
bool rproc_u64_fit_in_size_t(u64 val)
{
if (sizeof(size_t) == sizeof(u64))
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index d1cf7bf277c4..a7a72e83c817 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -3,6 +3,7 @@
* Remote Processor Framework
*/
+#include <linux/module.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>
@@ -15,7 +16,7 @@ static ssize_t recovery_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
+ return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
}
/*
@@ -48,6 +49,10 @@ static ssize_t recovery_store(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
+ /* restrict sysfs operations if not allowed by remoteproc drivers */
+ if (rproc->deny_sysfs_ops)
+ return -EPERM;
+
if (sysfs_streq(buf, "enabled")) {
/* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
@@ -82,7 +87,7 @@ static ssize_t coredump_show(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+ return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
}
/*
@@ -107,6 +112,10 @@ static ssize_t coredump_store(struct device *dev,
{
struct rproc *rproc = to_rproc(dev);
+ /* restrict sysfs operations if not allowed by remoteproc drivers */
+ if (rproc->deny_sysfs_ops)
+ return -EPERM;
+
if (rproc->state == RPROC_CRASHED) {
dev_err(&rproc->dev, "can't change coredump configuration\n");
return -EBUSY;
@@ -138,11 +147,8 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
* If the remote processor has been started by an external
* entity we have no idea of what image it is running. As such
* simply display a generic string rather than rproc->firmware.
- *
- * Here we rely on the autonomous flag because a remote processor
- * may have been attached to and currently in a running state.
*/
- if (rproc->autonomous)
+ if (rproc->state == RPROC_ATTACHED || rproc->skip_firmware_load)
firmware = "unknown";
return sprintf(buf, "%s\n", firmware);
@@ -154,38 +160,13 @@ static ssize_t firmware_store(struct device *dev,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
- char *p;
- int err, len = count;
+ int err;
- err = mutex_lock_interruptible(&rproc->lock);
- if (err) {
- dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err);
- return -EINVAL;
- }
+ /* restrict sysfs operations if not allowed by remoteproc drivers */
+ if (rproc->deny_sysfs_ops)
+ return -EPERM;
- if (rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "can't change firmware while running\n");
- err = -EBUSY;
- goto out;
- }
-
- len = strcspn(buf, "\n");
- if (!len) {
- dev_err(dev, "can't provide a NULL firmware\n");
- err = -EINVAL;
- goto out;
- }
-
- p = kstrndup(buf, len, GFP_KERNEL);
- if (!p) {
- err = -ENOMEM;
- goto out;
- }
-
- kfree(rproc->firmware);
- rproc->firmware = p;
-out:
- mutex_unlock(&rproc->lock);
+ err = rproc_set_firmware(rproc, buf);
return err ? err : count;
}
@@ -201,6 +182,7 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_ATTACHED] = "attached",
[RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
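+
+/*
+ * These strings are what userspace reads back from the sysfs "state" file,
+ * e.g. (illustrative shell session):
+ *
+ *   $ cat /sys/class/remoteproc/remoteproc0/state
+ *   attached
+ *   $ echo detach > /sys/class/remoteproc/remoteproc0/state
+ */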
@@ -224,18 +206,48 @@ static ssize_t state_store(struct device *dev,
struct rproc *rproc = to_rproc(dev);
int ret = 0;
+ /* restrict sysfs operations if not allowed by remoteproc drivers */
+ if (rproc->deny_sysfs_ops)
+ return -EPERM;
+
if (sysfs_streq(buf, "start")) {
- if (rproc->state == RPROC_RUNNING)
+ if (rproc->state == RPROC_RUNNING ||
+ rproc->state == RPROC_ATTACHED)
return -EBUSY;
+ /*
+ * prevent underlying implementation from being removed
+ * when remoteproc does not support auto-boot
+ */
+ if (!rproc->auto_boot &&
+ !try_module_get(dev->parent->driver->owner))
+ return -EINVAL;
+
ret = rproc_boot(rproc);
- if (ret)
+ if (ret) {
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
+ if (!rproc->auto_boot)
+ module_put(dev->parent->driver->owner);
+ }
} else if (sysfs_streq(buf, "stop")) {
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
return -EINVAL;
+ if (rproc->state == RPROC_ATTACHED &&
+ rproc->detach_on_shutdown) {
+ dev_err(&rproc->dev, "stop not supported for this rproc, use detach\n");
+ return -EINVAL;
+ }
+
rproc_shutdown(rproc);
+ if (!rproc->auto_boot)
+ module_put(dev->parent->driver->owner);
+ } else if (sysfs_streq(buf, "detach")) {
+ if (rproc->state != RPROC_ATTACHED)
+ return -EINVAL;
+
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
ret = -EINVAL;
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 863c0214e0a8..c4083f4cc7f4 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -76,6 +76,7 @@ struct k3_dsp_dev_data {
* @ti_sci_id: TI-SCI device identifier
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
+ * @ipc_only: flag to indicate IPC-only mode
*/
struct k3_dsp_rproc {
struct device *dev;
@@ -91,6 +92,7 @@ struct k3_dsp_rproc {
u32 ti_sci_id;
struct mbox_chan *mbox;
struct mbox_client client;
+ bool ipc_only;
};
/**
@@ -216,6 +218,43 @@ lreset:
return ret;
}
+static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
/*
* The C66x DSP cores have a local reset that affects only the CPU, and a
* generic module reset that powers on the device and allows the DSP internal
@@ -231,6 +270,10 @@ static int k3_dsp_rproc_prepare(struct rproc *rproc)
struct device *dev = kproc->dev;
int ret;
+ /* IPC-only mode does not require the core to be released from reset */
+ if (kproc->ipc_only)
+ return 0;
+
ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
@@ -255,6 +298,10 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
struct device *dev = kproc->dev;
int ret;
+ /* do not put back the cores into reset in IPC-only mode */
+ if (kproc->ipc_only)
+ return 0;
+
ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
@@ -273,37 +320,19 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
static int k3_dsp_rproc_start(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_dsp_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
}
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- goto put_mbox;
- }
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
@@ -338,6 +367,13 @@ put_mbox:
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
+ }
mbox_free_channel(kproc->mbox);
@@ -347,6 +383,85 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
}
/*
+ * Attach to a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the DSP core.
+ */
+static int k3_dsp_rproc_attach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_DETACHED) {
+ dev_err(dev, "DSP is expected to be in IPC-only mode and RPROC_DETACHED state\n");
+ return -EINVAL;
+ }
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_err(dev, "DSP initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the DSP core is not stopped and will
+ * be left to continue to run its booted firmware.
+ */
+static int k3_dsp_rproc_detach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_ATTACHED) {
+ dev_err(dev, "DSP is expected to be in IPC-only mode and RPROC_ATTACHED state\n");
+ return -EINVAL;
+ }
+
+ mbox_free_channel(kproc->mbox);
+ dev_err(dev, "DSP deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property.
+ */
+static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
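+
+/*
+ * Memory layout assumed by the contract above (sizes and placement
+ * illustrative):
+ *
+ *   firmware DDR region (rmem[0]) base --> +---------------------------+
+ *                                          | resource table (<= 256 B) |
+ *                                          +---------------------------+
+ *                                          | rest of firmware memory   |
+ *                                          +---------------------------+
+ */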
+
+/*
* Custom function to translate a DSP device address (internal RAMs only) to a
* kernel virtual address. The DSPs can access their RAMs at either an internal
* address visible only from a DSP, or at the SoC-level bus address. Both these
@@ -408,8 +523,11 @@ static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
static const struct rproc_ops k3_dsp_rproc_ops = {
.start = k3_dsp_rproc_start,
.stop = k3_dsp_rproc_stop,
+ .attach = k3_dsp_rproc_attach,
+ .detach = k3_dsp_rproc_detach,
.kick = k3_dsp_rproc_kick,
.da_to_va = k3_dsp_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table,
};
static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
@@ -592,6 +710,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
struct k3_dsp_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
+ bool r_state = false;
+ bool p_state = false;
int ret = 0;
int ret1;
@@ -670,19 +790,37 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
goto release_tsp;
}
- /*
- * ensure the DSP local reset is asserted to ensure the DSP doesn't
- * execute bogus code in .prepare() when the module reset is released.
- */
- if (data->uses_lreset) {
- ret = reset_control_status(kproc->reset);
- if (ret < 0) {
- dev_err(dev, "failed to get reset status, status = %d\n",
- ret);
- goto release_mem;
- } else if (ret == 0) {
- dev_warn(dev, "local reset is deasserted for device\n");
- k3_dsp_rproc_reset(kproc);
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &p_state);
+ if (ret) {
+ dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ /* configure the core for either remoteproc or IPC-only mode */
+ if (p_state) {
+ dev_info(dev, "configured DSP for IPC-only mode\n");
+ rproc->state = RPROC_DETACHED;
+ rproc->detach_on_shutdown = true;
+ kproc->ipc_only = true;
+ } else {
+ dev_info(dev, "configured DSP for remoteproc mode\n");
+ /*
+ * ensure the DSP local reset is asserted to ensure the DSP
+ * doesn't execute bogus code in .prepare() when the module
+ * reset is released.
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
}
}
@@ -750,6 +888,10 @@ static const struct k3_dsp_mem_data c71_mems[] = {
{ .name = "l1dram", .dev_addr = 0xe00000 },
};
+static const struct k3_dsp_mem_data c7xv_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+};
+
static const struct k3_dsp_dev_data c66_data = {
.mems = c66_mems,
.num_mems = ARRAY_SIZE(c66_mems),
@@ -764,9 +906,18 @@ static const struct k3_dsp_dev_data c71_data = {
.uses_lreset = false,
};
+static const struct k3_dsp_dev_data c7xv_data = {
+ .mems = c7xv_mems,
+ .num_mems = ARRAY_SIZE(c7xv_mems),
+ .boot_align_addr = SZ_2M,
+ .uses_lreset = false,
+};
+
static const struct of_device_id k3_dsp_of_match[] = {
{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,am62a-c7xv-dsp", .data = &c7xv_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
new file mode 100644
index 000000000000..faeb05d733a9
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Cortex-M4 Remote Processor(s) driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_m4_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from M4 view
+ * @size: Size of the memory region
+ */
+struct k3_m4_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_m4_mem_data - memory definitions for an M4
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_m4_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_m4_dev_data - device data structure for an M4
+ * @mems: pointer to memory definitions for an M4
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_m4_dev_data {
+ const struct k3_m4_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_m4_rproc - k3 M4 remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to M4-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @ipc_only: flag to indicate IPC-only mode
+ * @shut_comp: completion signaled when the remote core acks a shutdown request
+ */
+struct k3_m4_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_m4_mem *mem;
+ int num_mems;
+ struct k3_m4_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_m4_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ bool ipc_only;
+ struct completion shut_comp;
+};
+
+/**
+ * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_m4_rproc *kproc = container_of(client, struct k3_m4_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 M4 rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_dbg(dev, "received echo reply from %s\n", name);
+ break;
+ case RP_MBOX_SHUTDOWN_ACK:
+ dev_dbg(dev, "received shutdown_ack from %s\n", name);
+ complete(&kproc->shut_comp);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify it of pending unprocessed messages.
+ * The vqid is not used and is inconsequential, as the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register):
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+/* Put the M4 processor into reset */
+static int k3_m4_rproc_reset(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (kproc->data->uses_lreset)
+ return ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+/* Release the M4 processor from reset */
+static int k3_m4_rproc_release(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset)
+ goto lreset;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "released m4 reset\n");
+
+lreset:
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_m4_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_m4_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The M4F cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the M4 internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on M4F to allow loading into the M4F
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the M4F core run.
+ */
+static int k3_m4_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* IPC-only mode does not require the core to be released from reset */
+ if (kproc->ipc_only)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable M4F cores. This completes the second portion of
+ * powering down the M4F cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_m4_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* do not put back the cores into reset in IPC-only mode */
+ if (kproc->ipc_only)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+ return ret;
+}
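+
+/*
+ * For orientation, the remoteproc core invokes the ops above in this order
+ * (standard rproc_boot()/rproc_shutdown() flow):
+ *
+ *	rproc_boot():     .prepare() -> firmware load -> .start()
+ *	rproc_shutdown(): .stop() -> .unprepare()
+ *
+ * so the module reset taken in .prepare() is always balanced here.
+ */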
+
+/*
+ * Power up the M4F remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int k3_m4_rproc_start(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = k3_m4_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_rproc_release(kproc);
+ if (ret)
+ goto put_mbox;
+
+ return 0;
+
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * Stop the M4 remote processor.
+ *
+ * This function puts the M4 processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int k3_m4_rproc_stop(struct rproc *rproc)
+{
+ unsigned long to = msecs_to_jiffies(3000);
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = RP_MBOX_SHUTDOWN;
+ int ret;
+
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ reinit_completion(&kproc->shut_comp);
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&kproc->shut_comp, to);
+ if (ret == 0) {
+ dev_err(dev, "%s : timed out waiting for rproc completion event \n", __func__);
+ return -EBUSY;
+ };
+
+ mbox_free_channel(kproc->mbox);
+
+ /*
+ * Allow some time for the remote core to enter a quiescent state
+ * (e.g. WFI) after sending the SHUTDOWN ack. The remote core is
+ * typically expected to enter WFI right after the ack, so 1 ms is
+ * more than sufficient.
+ */
+ msleep(1);
+
+ k3_m4_rproc_reset(kproc);
+
+ return 0;
+}
+
+/*
+ * Attach to a running M4 remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the M4 core.
+ */
+static int k3_m4_rproc_attach(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_DETACHED) {
+ dev_err(dev, "M4 is expected to be in IPC-only mode and RPROC_DETACHED state\n");
+ return -EINVAL;
+ }
+
+ ret = k3_m4_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_err(dev, "M4 initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running M4 remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the M4 core is not stopped and will
+ * be left to continue to run its booted firmware.
+ */
+static int k3_m4_rproc_detach(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_ATTACHED) {
+ dev_err(dev, "M4 is expected to be in IPC-only mode and RPROC_ATTACHED state\n");
+ return -EINVAL;
+ }
+
+ mbox_free_channel(kproc->mbox);
+ dev_err(dev, "M4 deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted M4 in IPC-only mode. The K3 M4
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property.
+ */
+static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
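+
+/*
+ * For reference (illustrative, from linux/remoteproc.h): the table returned
+ * above begins with the standard header, and the vdev/trace entries follow
+ * within the hard-coded 256-byte window:
+ *
+ *	struct resource_table {
+ *		u32 ver;		// must be 1
+ *		u32 num;		// number of entries
+ *		u32 reserved[2];	// must be 0
+ *		u32 offset[];		// byte offset of each entry
+ *	};
+ */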
+
+/*
+ * Custom function to translate a M4 device address (internal RAMs only) to a
+ * kernel virtual address. The M4s can access their RAMs at either an internal
+ * address visible only from the M4, or at the SoC-level bus address. Both
+ * views need to be checked when translating. The translated addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle M4-view addresses */
+ if (da >= dev_addr &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if (da >= bus_addr &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
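+
+/*
+ * Worked example (hypothetical addresses): with an "iram" entry mapped at
+ * bus_addr 0x5000000, dev_addr 0x0 and size 0x30000, a request for
+ * da = 0x100, len = 0x40 matches the M4-view branch (da below the
+ * KEYSTONE_RPROC_LOCAL_ADDRESS_MASK boundary) and returns cpu_addr + 0x100;
+ * a request for da = 0x5000100 of the same length matches the SoC-view
+ * branch and yields the same kernel address.
+ */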
+
+static const struct rproc_ops k3_m4_rproc_ops = {
+ .start = k3_m4_rproc_start,
+ .stop = k3_m4_rproc_stop,
+ .attach = k3_m4_rproc_attach,
+ .detach = k3_m4_rproc_detach,
+ .kick = k3_m4_rproc_kick,
+ .da_to_va = k3_m4_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+};
+
+static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_m4_rproc *kproc)
+{
+ const struct k3_m4_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = kproc->data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs atleast two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(kproc->dev);
+ return ret;
+}
+
+static void k3_m4_reserved_mem_exit(struct k3_m4_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static struct ti_sci_proc *k3_m4_rproc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
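+
+/*
+ * Example DT fragment consumed above (values are illustrative only):
+ *
+ *	ti,sci-proc-ids = <0x18 0xff>;
+ *
+ * where the first cell is the TI-SCI processor id of the M4F core and the
+ * second is the host id to which processor control is transferred.
+ */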
+
+static int k3_m4_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct k3_m4_dev_data *data;
+ struct k3_m4_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool r_state = false;
+ bool p_state = false;
+ int ret = 0;
+ int ret1;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ if (data->uses_lreset) {
+ rproc->ops->prepare = k3_m4_rproc_prepare;
+ rproc->ops->unprepare = k3_m4_rproc_unprepare;
+ }
+ kproc = rproc->priv;
+ kproc->rproc = rproc;
+ kproc->dev = dev;
+ kproc->data = data;
+
+ kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = PTR_ERR(kproc->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ kproc->ti_sci = NULL;
+ goto free_rproc;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto put_sci;
+ }
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset)) {
+ ret = PTR_ERR(kproc->reset);
+ dev_err(dev, "failed to get reset, status = %d\n", ret);
+ goto put_sci;
+ }
+
+ kproc->tsp = k3_m4_rproc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ ret = PTR_ERR(kproc->tsp);
+ goto put_sci;
+ }
+
+ init_completion(&kproc->shut_comp);
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto free_tsp;
+ }
+
+ ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ goto release_tsp;
+
+ ret = k3_m4_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ goto release_tsp;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &p_state);
+ if (ret) {
+ dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ /* configure devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ dev_err(dev, "configured M4 for IPC-only mode\n");
+ rproc->state = RPROC_DETACHED;
+ rproc->detach_on_shutdown = true;
+ kproc->ipc_only = true;
+ } else {
+ dev_err(dev, "configured M4 for remoteproc mode\n");
+ /*
+ * Ensure the M4 local reset is asserted so that the core does not
+ * execute bogus code when the module reset is released in
+ * .prepare().
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_m4_rproc_reset(kproc);
+ }
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, kproc);
+
+ return 0;
+
+release_mem:
+ k3_m4_reserved_mem_exit(kproc);
+release_tsp:
+ ret1 = ti_sci_proc_release(kproc->tsp);
+ if (ret1)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+ kfree(kproc->tsp);
+put_sci:
+ ret1 = ti_sci_put_handle(kproc->ti_sci);
+ if (ret1)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int k3_m4_rproc_remove(struct platform_device *pdev)
+{
+ struct k3_m4_rproc *kproc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rproc_del(kproc->rproc);
+
+ ret = ti_sci_proc_release(kproc->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ kfree(kproc->tsp);
+
+ ret = ti_sci_put_handle(kproc->ti_sci);
+ if (ret)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+ k3_m4_reserved_mem_exit(kproc);
+ rproc_free(kproc->rproc);
+
+ return 0;
+}
+
+static int __maybe_unused k3_m4_rproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct k3_m4_rproc *kproc = platform_get_drvdata(pdev);
+
+ rproc_shutdown(kproc->rproc);
+
+ kproc->rproc->state = RPROC_SUSPENDED;
+
+ return 0;
+}
+
+static int __maybe_unused k3_m4_rproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct k3_m4_rproc *kproc = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (kproc->rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (kproc->rproc->state != RPROC_SUSPENDED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = rproc_boot(kproc->rproc);
+ if (!ret)
+ kproc->rproc->state = RPROC_RUNNING;
+out:
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(k3_m4_rproc_pm_ops, k3_m4_rproc_suspend,
+ k3_m4_rproc_resume);
+
+static const struct k3_m4_mem_data am64_m4_mems[] = {
+ { .name = "iram", .dev_addr = 0x0 },
+ { .name = "dram", .dev_addr = 0x30000 },
+};
+
+static const struct k3_m4_dev_data am64_m4_data = {
+ .mems = am64_m4_mems,
+ .num_mems = ARRAY_SIZE(am64_m4_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
+static const struct of_device_id k3_m4_of_match[] = {
+ { .compatible = "ti,am64-m4fss", .data = &am64_m4_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_m4_of_match);
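+
+/*
+ * Sketch of a matching DT node (illustrative only; exact addresses, ids and
+ * phandles are board-specific and governed by the dt-binding, not by this
+ * driver). The reg-names and the two memory-region entries mirror what
+ * k3_m4_rproc_of_get_memories() and k3_m4_reserved_mem_init() expect:
+ *
+ *	mcu_m4fss: m4fss@5000000 {
+ *		compatible = "ti,am64-m4fss";
+ *		reg = <0x00 0x5000000 0x00 0x30000>,
+ *		      <0x00 0x5040000 0x00 0x10000>;
+ *		reg-names = "iram", "dram";
+ *		ti,sci = <&dmsc>;
+ *		ti,sci-dev-id = <9>;
+ *		ti,sci-proc-ids = <0x18 0xff>;
+ *		resets = <&k3_reset 9 1>;
+ *		firmware-name = "am64-mcu-m4f0_0-fw";
+ *		mboxes = <&mailbox0 &mbox_m4_0>;
+ *		memory-region = <&m4f_dma_memory_region>,
+ *				<&m4f_memory_region>;
+ *	};
+ */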
+
+static struct platform_driver k3_m4_rproc_driver = {
+ .probe = k3_m4_rproc_probe,
+ .remove = k3_m4_rproc_remove,
+ .driver = {
+ .name = "k3-m4-rproc",
+ .pm = &k3_m4_rproc_pm_ops,
+ .of_match_table = k3_m4_of_match,
+ },
+};
+
+module_platform_driver(k3_m4_rproc_driver);
+
+MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index f92a18c06d80..eef24807270f 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -38,6 +38,10 @@
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
+/* Available from J7200 SoCs onwards */
+#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
@@ -47,6 +51,8 @@
#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
/**
* struct k3_r5_mem - internal memory structure
@@ -62,9 +68,33 @@ struct k3_r5_mem {
size_t size;
};
+/*
+ * Not all cluster mode values are applicable on all SoCs. The following
+ * are the modes supported on various SoCs:
+ * Split mode : AM65x, J721E, J7200 and AM64x SoCs
+ * LockStep mode : AM65x, J721E and J7200 SoCs
+ * Single-CPU mode : AM64x SoCs only
+ * None : AM62x SoCs only
+ */
enum cluster_mode {
CLUSTER_MODE_SPLIT = 0,
CLUSTER_MODE_LOCKSTEP,
+ CLUSTER_MODE_SINGLECPU,
+ CLUSTER_MODE_NONE,
+};
+
+/**
+ * struct k3_r5_soc_data - match data to handle SoC variations
+ * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
+ * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
+ * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
+ * @is_single_core: flag to denote if SoC/IP has only single core R5
+ */
+struct k3_r5_soc_data {
+ bool tcm_is_double;
+ bool tcm_ecc_autoinit;
+ bool single_cpu_mode;
+ bool is_single_core;
};
/**
@@ -72,11 +102,13 @@ enum cluster_mode {
* @dev: cached device pointer
* @mode: Mode to configure the Cluster - Split or LockStep
* @cores: list of R5 cores within the cluster
+ * @soc_data: SoC-specific feature data for a R5FSS
*/
struct k3_r5_cluster {
struct device *dev;
enum cluster_mode mode;
struct list_head cores;
+ const struct k3_r5_soc_data *soc_data;
};
/**
@@ -123,6 +155,7 @@ struct k3_r5_core {
* @core: cached pointer to r5 core structure being used
* @rmem: reserved memory regions data
* @num_rmems: number of reserved memory regions
+ * @ipc_only: flag to indicate IPC-only mode
*/
struct k3_r5_rproc {
struct device *dev;
@@ -133,6 +166,7 @@ struct k3_r5_rproc {
struct k3_r5_core *core;
struct k3_r5_mem *rmem;
int num_rmems;
+ bool ipc_only;
};
/**
@@ -348,6 +382,44 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
+static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_r5_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor; this is only for sanity's sake for now,
+ * as there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
@@ -355,6 +427,13 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
* applicable cores to allow loading into the TCMs. The .prepare() ops is
* invoked by remoteproc core before any firmware loading, and is followed
* by the .start() ops after loading to actually let the R5 cores run.
+ *
+ * The Single-CPU mode on applicable SoCs (e.g. AM64x) only uses Core0 to
+ * execute code, but combines the TCMs from both cores. The resets for both
+ * cores need to be released to make this possible, as the TCMs are in general
+ * private to each core. Only Core0 needs to be unhalted for running the
+ * cluster in this mode. The function uses the same reset logic as LockStep
+ * mode for this (though the behavior is agnostic of the reset release order).
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
@@ -362,9 +441,23 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
struct k3_r5_cluster *cluster = kproc->cluster;
struct k3_r5_core *core = kproc->core;
struct device *dev = kproc->dev;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u64 boot_vec = 0;
+ bool mem_init_dis;
int ret;
- ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ /* IPC-only mode does not require the cores to be released from reset */
+ if (kproc->ipc_only)
+ return 0;
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ if (ret < 0)
+ return ret;
+ mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
@@ -373,6 +466,17 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
}
/*
+ * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+ * of TCMs, so there is no need to perform the s/w memzero. This bit is
+ * configurable through System Firmware; the default value does perform
+ * auto-init, but account for it in case it is disabled.
+ */
+ if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
+ dev_dbg(dev, "leveraging h/w init for TCM memories\n");
+ return 0;
+ }
+
+ /*
* Zero out both TCMs unconditionally (access from v8 Arm core is not
* affected by ATCM & BTCM enable configuration values) so that ECC
* can be effective on all TCM addresses.
@@ -394,6 +498,12 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* cores. The cores themselves are only halted in the .stop() ops, and the
* .unprepare() ops is invoked by the remoteproc core after the remoteproc is
* stopped.
+ *
+ * The Single-CPU mode on applicable SoCs (e.g. AM64x) combines the TCMs from
+ * both cores. The access is made possible only with releasing the resets for
+ * both cores, but with only Core0 unhalted. This function re-uses the same
+ * reset assert logic as LockStep mode for this mode (though the behavior is
+ * agnostic of the reset assert order).
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
@@ -403,7 +513,13 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
struct device *dev = kproc->dev;
int ret;
- ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ /* do not put back the cores into reset in IPC-only mode */
+ if (kproc->ipc_only)
+ return 0;
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
@@ -422,43 +538,29 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
* first followed by Core0. The Split-mode requires that Core0 to be maintained
* always in a higher power state that Core1 (implying Core1 needs to be started
* always only after Core0 is started).
+ *
+ * The Single-CPU mode on applicable SoCs (e.g. AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be unhalted. The function uses the same logic
+ * flow as Split-mode for this.
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
- struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
struct k3_r5_core *core;
u32 boot_addr;
int ret;
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_r5_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
}
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- goto put_mbox;
- }
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
@@ -506,6 +608,10 @@ put_mbox:
* Core0 to be maintained always in a higher power state that Core1 (implying
* Core1 needs to be stopped first before Core0).
*
+ * The Single-CPU mode on applicable SoCs (e.g. AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be halted. The function uses the same logic
+ * flow as Split-mode for this.
+ *
* Note that the R5F halt operation in general is not effective when the R5F
* core is running, but is needed to make sure the core won't run after
* deasserting the reset the subsequent time. The asserting of reset can
@@ -518,9 +624,16 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
struct k3_r5_core *core = kproc->core;
int ret;
+ if (kproc->ipc_only) {
+ dev_err(dev, "%s cannot be invoked in IPC-only mode\n",
+ __func__);
+ return -EINVAL;
+ }
+
/* halt all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry(core, &cluster->cores, elem) {
@@ -550,6 +663,85 @@ out:
}
/*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the R5F cores in IPC-only mode.
+ */
+static int k3_r5_rproc_attach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_DETACHED) {
+ dev_err(dev, "R5F is expected to be in IPC-only mode and RPROC_DETACHED state\n");
+ return -EINVAL;
+ }
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_err(dev, "R5F core initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the R5F cores are not stopped and
+ * will be left in booted state in IPC-only mode.
+ */
+static int k3_r5_rproc_detach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->ipc_only || rproc->state != RPROC_ATTACHED) {
+ dev_err(dev, "R5F is expected to be in IPC-only mode and RPROC_ATTACHED state\n");
+ return -EINVAL;
+ }
+
+ mbox_free_channel(kproc->mbox);
+ dev_err(dev, "R5F core deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property.
+ */
+static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
@@ -623,8 +815,11 @@ static const struct rproc_ops k3_r5_rproc_ops = {
.unprepare = k3_r5_rproc_unprepare,
.start = k3_r5_rproc_start,
.stop = k3_r5_rproc_stop,
+ .attach = k3_r5_rproc_attach,
+ .detach = k3_r5_rproc_detach,
.kick = k3_r5_rproc_kick,
.da_to_va = k3_r5_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_r5_get_loaded_rsc_table,
};
/*
@@ -632,7 +827,9 @@ static const struct rproc_ops k3_r5_rproc_ops = {
*
* Each R5FSS has a cluster-level setting for configuring the processor
* subsystem either in a safety/fault-tolerant LockStep mode or a performance
- * oriented Split mode. Each R5F core has a number of settings to either
+ * oriented Split mode on most SoCs. A few SoCs support a non-safety mode
+ * as an alternate for LockStep mode that exercises only a single R5F core
+ * called Single-CPU mode. Each R5F core has a number of settings to either
* enable/disable each of the TCMs, control which TCM appears at the R5F core's
* address 0x0. These settings need to be configured before the resets for the
* corresponding core are released. These settings are all protected and managed
@@ -644,11 +841,13 @@ static const struct rproc_ops k3_r5_rproc_ops = {
* the cores are halted before the .prepare() step.
*
* The function is called from k3_r5_cluster_rproc_init() and is invoked either
- * once (in LockStep mode) or twice (in Split mode). Support for LockStep-mode
- * is dictated by an eFUSE register bit, and the config settings retrieved from
- * DT are adjusted accordingly as per the permitted cluster mode. All cluster
- * level settings like Cluster mode and TEINIT (exception handling state
- * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
+ * once (in LockStep mode or Single-CPU modes) or twice (in Split mode). Support
+ * for LockStep-mode is dictated by an eFUSE register bit, and the config
+ * settings retrieved from DT are adjusted accordingly as per the permitted
+ * cluster mode. Another eFUSE register bit dictates if the R5F cluster only
+ * supports a Single-CPU mode. All cluster level settings like Cluster mode and
+ * TEINIT (exception handling state dictating ARM or Thumb mode) can only be set
+ * and retrieved using Core0.
*
* The function behavior is different based on the cluster mode. The R5F cores
* are configured independently as per their individual settings in Split mode.
@@ -667,10 +866,17 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
bool lockstep_en;
+ bool single_cpu;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
- core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ cluster->mode == CLUSTER_MODE_NONE) {
+ core = core0;
+ } else {
+ core = kproc->core;
+ }
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
@@ -680,23 +886,48 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
boot_vec, cfg, ctrl, stat);
+ /* check if only Single-CPU mode is supported on applicable SoCs */
+ if (cluster->soc_data->single_cpu_mode || cluster->soc_data->is_single_core) {
+ single_cpu =
+ !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
+ if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
+ dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
+ cluster->mode = CLUSTER_MODE_SINGLECPU;
+ }
+ goto config;
+ }
+
+ /* check conventional LockStep vs Split mode configuration */
lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
cluster->mode = CLUSTER_MODE_SPLIT;
}
+config:
/* always enable ARM mode and set boot vector to 0 */
boot_vec = 0x0;
if (core == core0) {
clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
- /*
- * LockStep configuration bit is Read-only on Split-mode _only_
- * devices and system firmware will NACK any requests with the
- * bit configured, so program it only on permitted devices
- */
- if (lockstep_en)
- clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ if (cluster->soc_data->single_cpu_mode || cluster->soc_data->is_single_core) {
+ /*
+ * Single-CPU configuration bit can only be configured
+ * on Core0 and system firmware will NACK any requests
+ * with the bit configured, so program it only on
+ * permitted cores
+ */
+ if (cluster->mode == CLUSTER_MODE_SINGLECPU || cluster->mode == CLUSTER_MODE_NONE)
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
+ } else {
+ /*
+ * LockStep configuration bit is Read-only on Split-mode
+ * _only_ devices and system firmware will NACK any
+ * requests with the bit configured, so program it only
+ * on permitted devices
+ */
+ if (lockstep_en)
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ }
}
if (core->atcm_enable)
@@ -855,6 +1086,150 @@ static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
of_reserved_mem_device_release(kproc->dev);
}
+/*
+ * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
+ * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
+ * cores are usable in Split-mode, but only the Core0 TCMs can be used in
+ * LockStep-mode. Newer revisions of the R5FSS IP maximize these TCMs by
+ * leveraging the Core1 TCMs as well in certain modes where they would have
+ * otherwise been unusable (e.g. LockStep-mode on J7200 SoCs, Single-CPU mode on
+ * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
+ * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
+ * the Core0 TCMs, and the dts representation reflects this increased size on
+ * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
+ * half the original size in Split mode.
+ */
+static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ struct k3_r5_core *core0;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ cluster->mode == CLUSTER_MODE_NONE ||
+ !cluster->soc_data->tcm_is_double)
+ return;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (core == core0) {
+ WARN_ON(core->mem[0].size != SZ_64K);
+ WARN_ON(core->mem[1].size != SZ_64K);
+
+ core->mem[0].size /= 2;
+ core->mem[1].size /= 2;
+
+ dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
+ core->mem[0].size, core->mem[1].size);
+ }
+}
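+
+/*
+ * Example: on a J7200-class SoC (tcm_is_double = true) running in Split
+ * mode, Core0's ATCM and BTCM are described as 64 KB each in the SoC
+ * memory map, but half of each bank physically belongs to Core1; the
+ * division above trims them back to 32 KB so Core0 firmware cannot be
+ * loaded over Core1's TCM halves.
+ */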
+
+/*
+ * This function checks and configures a R5F core for IPC-only or remoteproc
+ * mode. The driver is configured to be in IPC-only mode for a R5F core when
+ * the core has been loaded and started by a bootloader. The IPC-only mode is
+ * detected by querying the System Firmware for reset, power on and halt status
+ * and ensuring that the core is running. Any incomplete steps at bootloader
+ * are validated and errored out.
+ *
+ * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
+ * and cluster mode parsed originally from kernel DT are updated to reflect the
+ * actual values configured by bootloader. The driver internal device memory
+ * addresses for TCMs are also updated.
+ */
+static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ bool r_state = false, c_state = false;
+ u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
+ u64 boot_vec = 0;
+ u32 atcm_enable, btcm_enable, loczrama;
+ struct k3_r5_core *core0;
+ enum cluster_mode mode;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
+ &r_state, &c_state);
+ if (ret) {
+ dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ return ret;
+ }
+ if (r_state != c_state) {
+ dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
+ r_state, c_state);
+ }
+
+ ret = reset_control_status(core->reset);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial processor status, ret = %d\n",
+ ret);
+ return ret;
+ }
+ atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
+ btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
+ loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
+ if (cluster->soc_data->is_single_core) {
+ mode = CLUSTER_MODE_NONE;
+ } else if (cluster->soc_data->single_cpu_mode) {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
+ CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
+ } else {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
+ CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
+ }
+ halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
+
+ /*
+ * IPC-only mode detection requires both local and module resets to
+ * be deasserted and R5F core to be unhalted. Local reset status is
+ * irrelevant if module reset is asserted (POR value has local reset
+ * deasserted), and is deemed as remoteproc mode.
+ */
+ if (c_state && !ret && !halted) {
+ dev_err(cdev, "configured R5F for IPC-only mode\n");
+ kproc->rproc->state = RPROC_DETACHED;
+ kproc->rproc->detach_on_shutdown = true;
+ kproc->ipc_only = true;
+ ret = 1;
+ } else if (!c_state) {
+ dev_err(cdev, "configured R5F for remoteproc mode\n");
+ ret = 0;
+ } else {
+ dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
+ !ret ? "deasserted" : "asserted",
+ c_state ? "deasserted" : "asserted",
+ halted ? "halted" : "unhalted");
+ ret = -EINVAL;
+ }
+
+ /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
+ if (ret > 0) {
+ if (core == core0)
+ cluster->mode = mode;
+ core->atcm_enable = atcm_enable;
+ core->btcm_enable = btcm_enable;
+ core->loczrama = loczrama;
+ core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ return ret;
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
@@ -895,6 +1270,12 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
kproc->rproc = rproc;
core->rproc = rproc;
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+ if (ret)
+ goto init_rmem;
+
ret = k3_r5_rproc_configure(kproc);
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
@@ -902,6 +1283,9 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto err_config;
}
+init_rmem:
+ k3_r5_adjust_tcm_sizes(kproc);
+
ret = k3_r5_reserved_mem_init(kproc);
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
@@ -915,8 +1299,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto err_add;
}
- /* create only one rproc in lockstep mode */
- if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
+ /* create only one rproc in lockstep mode or single-cpu mode */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_NONE ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU)
break;
}
@@ -940,19 +1326,20 @@ out:
return ret;
}
-static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
+static void k3_r5_cluster_rproc_exit(void *data)
{
- struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct k3_r5_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
/*
- * lockstep mode has only one rproc associated with first core, whereas
- * split-mode has two rprocs associated with each core, and requires
- * that core1 be powered down first
+ * lockstep mode and single-cpu modes have only one rproc associated
+ * with first core, whereas split-mode has two rprocs associated with
+ * each core, and requires that core1 be powered down first
*/
- core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
@@ -967,8 +1354,6 @@ static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
rproc_free(rproc);
core->rproc = NULL;
}
-
- return 0;
}
static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
@@ -1255,9 +1640,9 @@ static void k3_r5_core_of_exit(struct platform_device *pdev)
devres_release_group(dev, k3_r5_core_of_init);
}
-static void k3_r5_cluster_of_exit(struct platform_device *pdev)
+static void k3_r5_cluster_of_exit(void *data)
{
- struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
struct platform_device *cpdev;
struct k3_r5_core *core, *temp;
@@ -1313,27 +1698,58 @@ static int k3_r5_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct k3_r5_cluster *cluster;
+ const struct k3_r5_soc_data *data;
int ret;
int num_cores;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(dev, "SoC-specific data is not defined\n");
+ return -ENODEV;
+ }
+
cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
if (!cluster)
return -ENOMEM;
cluster->dev = dev;
- cluster->mode = CLUSTER_MODE_LOCKSTEP;
+ /*
+ * default to most common efuse configurations - Split-mode on AM64x
+ * and LockStep-mode on all others
+ */
+ if (!data->is_single_core)
+ cluster->mode = data->single_cpu_mode ?
+ CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
+ else
+ cluster->mode = CLUSTER_MODE_NONE;
+
+ cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
- ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
- if (ret < 0 && ret != -EINVAL) {
- dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
- ret);
- return ret;
+ if (!data->is_single_core) {
+ ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n", ret);
+ return ret;
+ }
}
+ /*
+ * Translate SoC-specific dts value of 1 or 2 into appropriate
+ * driver-specific mode. Valid values are dictated by YAML binding
+ */
+ if (cluster->mode && data->single_cpu_mode)
+ cluster->mode = CLUSTER_MODE_SINGLECPU;
+
num_cores = of_get_available_child_count(np);
- if (num_cores != 2) {
- dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
+ if (num_cores != 2 && !data->is_single_core) {
+ dev_err(dev, "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n",
+ num_cores);
+ return -ENODEV;
+ }
+
+ if (num_cores != 1 && data->is_single_core) {
+ dev_err(dev, "SoC supports only single core R5 but num_cores is set to %d\n",
num_cores);
return -ENODEV;
}
@@ -1353,9 +1769,7 @@ static int k3_r5_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))k3_r5_cluster_of_exit,
- pdev);
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
if (ret)
return ret;
@@ -1366,18 +1780,48 @@ static int k3_r5_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))k3_r5_cluster_rproc_exit,
- pdev);
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
if (ret)
return ret;
return 0;
}
+static const struct k3_r5_soc_data am65_j721e_soc_data = {
+ .tcm_is_double = false,
+ .tcm_ecc_autoinit = false,
+ .single_cpu_mode = false,
+ .is_single_core = false,
+};
+
+static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = false,
+ .is_single_core = false,
+};
+
+static const struct k3_r5_soc_data am64_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = true,
+ .is_single_core = false,
+};
+
+static const struct k3_r5_soc_data am62_soc_data = {
+ .tcm_is_double = false,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = false,
+ .is_single_core = true,
+};
+
static const struct of_device_id k3_r5_of_match[] = {
- { .compatible = "ti,am654-r5fss", },
- { .compatible = "ti,j721e-r5fss", },
+ { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
+ { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
+ { .compatible = "ti,am62-r5fss", .data = &am62_soc_data, },
+ { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index b9349d684258..d92d7f32ba8d 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -160,6 +160,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
}
rproc->auto_boot = false;
+ rproc->deny_sysfs_ops = true;
wkupm3 = rproc->priv;
wkupm3->rproc = rproc;
diff --git a/drivers/rpmsg-kdrv/Kconfig b/drivers/rpmsg-kdrv/Kconfig
new file mode 100644
index 000000000000..beb6843405eb
--- /dev/null
+++ b/drivers/rpmsg-kdrv/Kconfig
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Rpmsg virtual device drivers"
+
+# RPMSG always gets selected by whoever wants it
+config RPMSG_KDRV
+ tristate "RPMSG virtual device interface"
+ select RPMSG
+ help
+ Say Y here to enable support for RPMSG based remote devices, usually
+ exported by a firmware running the rpmsg stack and the remote_device
+ stack. This feature enables the framework for para-virtualizing an
+ entire hardware block or specific resources of a hardware block.
+
+
+config RPMSG_KDRV_DEMO
+ tristate "RPMSG virtual demo device support"
+ select RPMSG_KDRV
+ help
+ Say Y here to enable support for remote device based demo device.
+ This setup expects that the demo device server will be running on a
+ remoteproc and a client (sample) driver will be able to call demo device
+ APIs using the remote_device framework.
+
+ The demo device is not a real device. It serves the purpose of
+ providing a sample driver to be used as a reference for developing
+ more remote_device drivers (like display or ethernet).
+
+config RPMSG_KDRV_DISPLAY
+ tristate "RPMSG virtual display device support"
+ select RPMSG_KDRV
+ help
+ Say Y here to enable support for remote device based display
+ virtualization. This setup expects that the display will be driven
+ by a remoteproc and a DRM driver will be able to use display
+ features using the remote_device framework.
+
+config RPMSG_KDRV_ETH_SWITCH
+ tristate "RPMSG virtual eth switch device support"
+ select RPMSG_KDRV
+ default m
+ help
+ Say Y here to enable support for remote device based Eth switch
+ virtualization. This setup expects that the Eth switch will be driven
+ by a remoteproc and a virtual network device will be able to use
+ Eth switch features using the remote_device framework.
+
+endmenu
diff --git a/drivers/rpmsg-kdrv/Makefile b/drivers/rpmsg-kdrv/Makefile
new file mode 100644
index 000000000000..9177ae3310ce
--- /dev/null
+++ b/drivers/rpmsg-kdrv/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RPMSG_KDRV) += rpmsg_kdrv.o
+obj-$(CONFIG_RPMSG_KDRV_DEMO) += rpmsg_kdrv_demo.o
+obj-$(CONFIG_RPMSG_KDRV_DISPLAY) += rpmsg_kdrv_display.o
+obj-$(CONFIG_RPMSG_KDRV_ETH_SWITCH) += rpmsg_kdrv_switch.o
diff --git a/drivers/rpmsg-kdrv/rpmsg_kdrv.c b/drivers/rpmsg-kdrv/rpmsg_kdrv.c
new file mode 100644
index 000000000000..ba3d79ed5d00
--- /dev/null
+++ b/drivers/rpmsg-kdrv/rpmsg_kdrv.c
@@ -0,0 +1,744 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subahjit_paul@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+#include "shared/rpmsg-kdrv-transport.h"
+#include "rpmsg_kdrv_internal.h"
+
+struct rpmsg_kdrv_priv {
+ struct rpmsg_device *rpdev;
+
+ struct idr message_idr;
+ struct mutex message_lock;
+
+ int num_raw_devices;
+ struct rpmsg_kdrv_init_device_info raw_devices[RPMSG_KDRV_TP_MAX_DEVICES];
+ void *raw_device_data[RPMSG_KDRV_TP_MAX_DEVICES];
+ int raw_device_data_size[RPMSG_KDRV_TP_MAX_DEVICES];
+};
+
+struct rpmsg_kdrv_ctx {
+ struct rpmsg_device *rpdev;
+ bool wait_for_response;
+ request_cb_t callback;
+ void *cb_data;
+ bool response_recv;
+ struct wait_queue_head response_wq;
+
+ struct rpmsg_kdrv_device_header *dev_hdr;
+ void *req;
+ void *resp;
+ int req_size;
+ int resp_size;
+};
+
+static struct bus_type rpmsg_kdrv_bus;
+
+#define to_rpmsg_kdrv_device(d) container_of(d, struct rpmsg_kdrv_device, dev)
+#define to_rpmsg_kdrv_driver(d) container_of(d, struct rpmsg_kdrv_driver, drv)
+
+static int rpmsg_kdrv_match_id(struct device *dev, const void *data)
+{
+ const uint32_t *idptr = data;
+ struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+ if (kddev->device_id == *idptr)
+ return 1;
+ return 0;
+}
+
+static int rpmsg_kdrv_match_remotedev(struct device *dev, const void *data)
+{
+ const struct rpmsg_remotedev *rdev = data;
+ struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+ if (kddev->remotedev == rdev)
+ return 1;
+ return 0;
+}
+
+static int rpmsg_kdrv_match_name(struct device *dev, const void *data)
+{
+ const char *name = data;
+ struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+ if (strcmp(kddev->device_name, name) == 0)
+ return 1;
+ return 0;
+}
+
+int rpmsg_kdrv_register_driver(struct rpmsg_kdrv_driver *drv)
+{
+ int ret;
+
+ drv->drv.bus = &rpmsg_kdrv_bus;
+ drv->drv.owner = THIS_MODULE;
+
+ ret = driver_register(&drv->drv);
+ if (ret)
+ pr_err("%s: driver_register failed\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_register_driver);
+
+static void rpmsg_kdrv_driver_handle_data(struct rpmsg_device *rpdev, void *data, int len, void *private, u32 src)
+{
+ struct device *dev;
+ struct rpmsg_kdrv_device_header *hdr = data;
+ struct rpmsg_kdrv_device *kddev = NULL;
+ struct rpmsg_kdrv_driver *kddrv = NULL;
+ void *message;
+ int message_size;
+ uint32_t msg_device_id;
+ int ret;
+
+ msg_device_id = hdr->device_id;
+ dev = bus_find_device(&rpmsg_kdrv_bus, NULL, &msg_device_id, rpmsg_kdrv_match_id);
+ if (!dev) {
+ dev_err(&rpdev->dev, "%s: message received for unknown device\n", __func__);
+ return;
+ }
+ kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+ kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+ if (!kddrv) {
+ dev_err(&rpdev->dev, "%s: message received for device with no driver\n", __func__);
+ return;
+ }
+
+ message = (void *)(&hdr[1]);
+ message_size = len - sizeof(*hdr);
+ ret = kddrv->callback(kddev, message, message_size);
+ if (ret)
+ dev_err(&rpdev->dev, "%s: message callback returns %d\n", __func__, ret);
+}
+
+static int rpmsg_kdrv_connect(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device *kddev)
+{
+ int ret;
+ struct rpmsg_kdrv_init_connect_message *connect_req;
+
+ connect_req = devm_kzalloc(&rpdev->dev, sizeof(*connect_req), GFP_KERNEL);
+ if (!connect_req)
+ return -ENOMEM;
+
+ connect_req->header.message_type = RPMSG_KDRV_TP_INIT_CONNECT_MESSAGE;
+ connect_req->device_id = kddev->device_id;
+
+ ret = rpmsg_kdrv_send_message(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+ connect_req, sizeof(*connect_req));
+
+ devm_kfree(&rpdev->dev, connect_req);
+ return ret;
+}
+
+static int rpmsg_kdrv_disconnect(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device *kddev)
+{
+ int ret;
+ struct rpmsg_kdrv_init_disconnect_message *disconnect_req;
+
+ disconnect_req = devm_kzalloc(&rpdev->dev, sizeof(*disconnect_req), GFP_KERNEL);
+ if (!disconnect_req)
+ return -ENOMEM;
+
+ disconnect_req->header.message_type = RPMSG_KDRV_TP_INIT_DISCONNECT_MESSAGE;
+ disconnect_req->device_id = kddev->device_id;
+
+ ret = rpmsg_kdrv_send_message(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+ disconnect_req, sizeof(*disconnect_req));
+
+ devm_kfree(&rpdev->dev, disconnect_req);
+ return ret;
+}
+
+struct rpmsg_remotedev *rpmsg_remotedev_get_named_device(const char *device_name)
+{
+ struct device *dev;
+ struct rpmsg_kdrv_device *kddev = NULL;
+
+ dev = bus_find_device(&rpmsg_kdrv_bus, NULL, (void *)device_name, rpmsg_kdrv_match_name);
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+ if (!kddev->remotedev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ rpmsg_kdrv_connect(kddev->rpdev, kddev);
+
+ return kddev->remotedev;
+}
+EXPORT_SYMBOL(rpmsg_remotedev_get_named_device);
+
+void rpmsg_remotedev_put_device(struct rpmsg_remotedev *rdev)
+{
+ struct device *dev;
+ struct rpmsg_kdrv_device *kddev = NULL;
+
+ dev = bus_find_device(&rpmsg_kdrv_bus, NULL, (void *)rdev, rpmsg_kdrv_match_remotedev);
+ if (!dev) {
+ pr_err("%s: could not find device for remotedev\n", __func__);
+ return;
+ }
+
+ kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+ rpmsg_kdrv_disconnect(kddev->rpdev, kddev);
+ put_device(dev);
+}
+EXPORT_SYMBOL(rpmsg_remotedev_put_device);
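+
+/*
+ * Example client usage (illustrative sketch; the device name below is a
+ * placeholder, not one defined by this patch):
+ *
+ *   rdev = rpmsg_remotedev_get_named_device("rpmsg-kdrv-demo-device");
+ *   if (IS_ERR(rdev))
+ *           return PTR_ERR(rdev);   // -EPROBE_DEFER until the device shows up
+ *   ...
+ *   rpmsg_remotedev_put_device(rdev);
+ */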
+
+static void rpmsg_kdrv_release_device(struct device *dev)
+{
+ struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ devm_kfree(&kddev->rpdev->dev, kddev);
+}
+
+static struct rpmsg_kdrv_device *rpmsg_kdrv_device_create(struct rpmsg_device *rpdev, int index)
+{
+ struct rpmsg_kdrv_device *kddev = devm_kzalloc(&rpdev->dev, sizeof(*kddev), GFP_KERNEL);
+ struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+ struct rpmsg_kdrv_init_device_info *dev = &priv->raw_devices[index];
+ int ret;
+
+ if (!kddev) {
+ dev_err(&rpdev->dev, "%s: could not allocate kddev\n", __func__);
+ return NULL;
+ }
+
+ kddev->rpdev = rpdev;
+ kddev->device_id = dev->device_id;
+ kddev->device_type = dev->device_type;
+ kddev->device_data_len = priv->raw_device_data_size[index];
+ kddev->device_data = priv->raw_device_data[index];
+ kddev->device_name = devm_kstrdup(&rpdev->dev, dev->device_name, GFP_KERNEL);
+ if (!kddev->device_name) {
+ dev_err(&rpdev->dev, "%s: could not allocate device name\n", __func__);
+ devm_kfree(&rpdev->dev, kddev);
+ return NULL;
+ }
+
+ kddev->dev.parent = &rpdev->dev;
+ kddev->dev.release = rpmsg_kdrv_release_device;
+ kddev->dev.bus = &rpmsg_kdrv_bus;
+
+ dev_set_name(&kddev->dev, "rpmsg-kdrv-%u-%s", dev->device_id, dev->device_name);
+
+ ret = device_register(&kddev->dev);
+ if (ret) {
+ dev_err(&rpdev->dev, "%s: device_register failed: %d\n", __func__, ret);
+ put_device(&kddev->dev);
+ return NULL;
+ }
+ dev_dbg(&rpdev->dev, "%s: registered new device : %s\n", __func__, dev_name(&kddev->dev));
+
+ return kddev;
+}
+
+static int rpmsg_kdrv_get_devices_cb(void *cb_data, void *req, int req_sz, void *resp, int resp_sz)
+{
+ int i, cnt;
+ struct rpmsg_device *rpdev = cb_data;
+ struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+ struct rpmsg_kdrv_init_dev_info_response *info_resp = resp;
+ struct rpmsg_kdrv_init_device_info *dev;
+ int ret = 0;
+
+ if (info_resp->header.message_type != RPMSG_KDRV_TP_INIT_DEV_INFO_RESPONSE) {
+ dev_err(&rpdev->dev, "%s: wrong response type\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < info_resp->num_devices; i++) {
+ dev = &info_resp->devices[i];
+ cnt = priv->num_raw_devices;
+
+ priv->raw_device_data_size[cnt] = dev->device_data_len;
+ priv->raw_device_data[cnt] = devm_kzalloc(&rpdev->dev, dev->device_data_len, GFP_KERNEL);
+ if (!priv->raw_device_data[cnt]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(priv->raw_device_data[cnt],
+ &info_resp->device_data[dev->device_data_offset],
+ dev->device_data_len);
+ memcpy(&priv->raw_devices[cnt], dev, sizeof(*dev));
+ priv->num_raw_devices++;
+
+ dev_dbg(&rpdev->dev, "new device: %s\n", dev->device_name);
+ }
+
+ for (i = 0; i < priv->num_raw_devices; i++)
+ rpmsg_kdrv_device_create(rpdev, i);
+
+out:
+ devm_kfree(&rpdev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_get_devices(struct rpmsg_device *rpdev)
+{
+ int ret;
+ struct rpmsg_kdrv_init_dev_info_request *info_req;
+
+ info_req = devm_kzalloc(&rpdev->dev, sizeof(*info_req), GFP_KERNEL);
+ if (!info_req)
+ return -ENOMEM;
+
+ info_req->header.message_type = RPMSG_KDRV_TP_INIT_DEV_INFO_REQUEST;
+
+ ret = rpmsg_kdrv_send_request_with_callback(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+ info_req, sizeof(*info_req), rpdev, rpmsg_kdrv_get_devices_cb);
+ if (ret)
+ goto nosend;
+
+ return 0;
+
+nosend:
+ devm_kfree(&rpdev->dev, info_req);
+ return ret;
+}
+
+static void rpmsg_kdrv_del_packet_id(struct rpmsg_device *rpdev, int id)
+{
+ struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+
+ mutex_lock(&priv->message_lock);
+ idr_remove(&priv->message_idr, id);
+ mutex_unlock(&priv->message_lock);
+}
+
+static uint32_t rpmsg_kdrv_new_packet_id(struct rpmsg_device *rpdev, void *data)
+{
+ struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+ int id;
+
+ mutex_lock(&priv->message_lock);
+ id = idr_alloc(&priv->message_idr, data, RPMSG_KDRV_TP_PACKET_ID_FIRST, 0, GFP_KERNEL);
+ mutex_unlock(&priv->message_lock);
+
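+ /* ids start at RPMSG_KDRV_TP_PACKET_ID_FIRST, so 0 is never valid and doubles as the failure value */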
+ if (id < 0)
+ return 0;
+
+ return id;
+}
+
+static void rpmsg_kdrv_dev_hdr_delete(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device_header *hdr)
+{
+ rpmsg_kdrv_del_packet_id(rpdev, hdr->packet_id);
+ devm_kfree(&rpdev->dev, hdr);
+}
+
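+/*
+ * Every outgoing packet is framed as a device header immediately followed
+ * by the message payload:
+ *
+ *   +----------------------------------------+
+ *   | rpmsg_kdrv_device_header               |
+ *   | (device_id, packet_type/source, ...)   |
+ *   +----------------------------------------+
+ *   | message payload (len bytes)            |
+ *   +----------------------------------------+
+ *
+ * Requests additionally get a packet_id from the idr so the matching
+ * response can be routed back to the pending ctx; plain messages keep
+ * RPMSG_KDRV_TP_PACKET_ID_NONE.
+ */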
+static struct rpmsg_kdrv_device_header *rpmsg_kdrv_dev_hdr_alloc(struct rpmsg_device *rpdev,
+ int device_id, int size, int pkt_type, int pkt_src, void *msg, int len, struct rpmsg_kdrv_ctx *ctx)
+{
+ struct rpmsg_kdrv_device_header *dev_hdr;
+ void *dst;
+
+ dev_hdr = devm_kzalloc(&rpdev->dev, size, GFP_KERNEL);
+ if (!dev_hdr)
+ return NULL;
+
+ dev_hdr->device_id = device_id;
+ dev_hdr->packet_type = pkt_type;
+ dev_hdr->packet_source = pkt_src;
+ dev_hdr->packet_size = size;
+ dev_hdr->packet_id = RPMSG_KDRV_TP_PACKET_ID_NONE;
+
+ dst = (void *)(&dev_hdr[1]);
+ memcpy(dst, msg, len);
+
+ if (pkt_type == RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE)
+ return dev_hdr;
+
+ dev_hdr->packet_id = rpmsg_kdrv_new_packet_id(rpdev, ctx);
+ if (!dev_hdr->packet_id) {
+ devm_kfree(&rpdev->dev, dev_hdr);
+ return NULL;
+ }
+
+ ctx->dev_hdr = dev_hdr;
+
+ return dev_hdr;
+}
+
+static struct rpmsg_kdrv_ctx *rpmsg_kdrv_ctx_alloc(struct rpmsg_device *rpdev, bool blocking,
+ request_cb_t callback, void *cb_data, void *req, int req_size, void *resp, int resp_size)
+{
+ struct rpmsg_kdrv_ctx *ctx;
+
+ ctx = devm_kzalloc(&rpdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->rpdev = rpdev;
+ if (blocking) {
+ ctx->wait_for_response = true;
+ ctx->response_recv = false;
+ init_waitqueue_head(&ctx->response_wq);
+ } else {
+ ctx->wait_for_response = false;
+ ctx->callback = callback;
+ }
+
+ ctx->cb_data = cb_data;
+ ctx->req = req;
+ ctx->req_size = req_size;
+ ctx->resp = resp;
+ ctx->resp_size = resp_size;
+
+ return ctx;
+}
+
+static int rpmsg_kdrv_send_packet(struct rpmsg_device *rpdev, void *data, int len)
+{
+ return rpmsg_send(rpdev->ept, data, len);
+}
+
+/*
+ * rpmsg_kdrv_send_request_with_callback
+ *
+ * Send a message where
+ * a) the caller does not block
+ * b) the caller expects multiple responses
+ *
+ * The callback function must return
+ * a) RRMSG_KDRV_CALLBACK_DONE when no more responses are expected
+ * b) RRMSG_KDRV_CALLBACK_MORE when more responses are awaited
+ *
+ * The caller is expected to destroy the message when it does not
+ * expect any more responses
+ */
+int rpmsg_kdrv_send_request_with_callback(struct rpmsg_device *rpdev, uint32_t device_id,
+ void *message, uint32_t message_size,
+ void *cb_data, request_cb_t callback)
+{
+ struct rpmsg_kdrv_device_header *dev_hdr;
+ int total_size = message_size + sizeof(*dev_hdr);
+ struct rpmsg_kdrv_ctx *ctx = NULL;
+ int ret;
+
+ ctx = rpmsg_kdrv_ctx_alloc(rpdev, false, callback, cb_data, message, message_size, NULL, 0);
+ if (!ctx) {
+ dev_err(&rpdev->dev, "%s: ctx allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+ RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+ RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+ message, message_size,
+ ctx);
+ if (!dev_hdr) {
+ dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+ ret = -ENOMEM;
+ goto dev_hdr_fail;
+ }
+
+ ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+ if (ret) {
+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+ goto nosend;
+ }
+
+ return 0;
+
+nosend:
+ rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+dev_hdr_fail:
+ devm_kfree(&rpdev->dev, ctx);
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_request_with_callback);
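+
+/*
+ * Example usage (illustrative sketch; my_cb, my_req and done are
+ * placeholder client names, not part of this API):
+ *
+ *   static int my_cb(void *data, void *req, int req_sz, void *resp, int resp_sz)
+ *   {
+ *           // consume resp; free req on the final response
+ *           return done ? RRMSG_KDRV_CALLBACK_DONE : RRMSG_KDRV_CALLBACK_MORE;
+ *   }
+ *
+ *   ret = rpmsg_kdrv_send_request_with_callback(rpdev, device_id,
+ *                   my_req, sizeof(*my_req), cb_data, my_cb);
+ */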
+
+/*
+ * rpmsg_kdrv_send_request_with_response
+ *
+ * Send a message where the caller will block for a response
+ *
+ * The caller is expected to destroy the message and response
+ * buffers when this function returns
+ */
+int rpmsg_kdrv_send_request_with_response(struct rpmsg_device *rpdev, uint32_t device_id,
+ void *message, uint32_t message_size,
+ void *response, uint32_t response_size)
+{
+ struct rpmsg_kdrv_device_header *dev_hdr;
+ int total_size = message_size + sizeof(*dev_hdr);
+ struct rpmsg_kdrv_ctx *ctx = NULL;
+ int ret;
+
+ ctx = rpmsg_kdrv_ctx_alloc(rpdev, true, NULL, NULL, message, message_size, response, response_size);
+ if (!ctx) {
+ dev_err(&rpdev->dev, "%s: ctx allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+ RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+ RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+ message, message_size,
+ ctx);
+ if (!dev_hdr) {
+ dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+ ret = -ENOMEM;
+ goto dev_hdr_fail;
+ }
+
+ ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+ if (ret) {
+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+ goto nosend;
+ }
+
+ wait_event(ctx->response_wq, ctx->response_recv);
+
+nosend:
+ rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+dev_hdr_fail:
+ devm_kfree(&rpdev->dev, ctx);
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_request_with_response);
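+
+/*
+ * Example usage (illustrative sketch; process() is a placeholder): both
+ * buffers are caller-owned and must stay valid for the duration of the
+ * call:
+ *
+ *   ret = rpmsg_kdrv_send_request_with_response(rpdev, device_id,
+ *                   req, sizeof(*req), resp, sizeof(*resp));
+ *   if (!ret)
+ *           process(resp);  // up to sizeof(*resp) bytes were copied in
+ */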
+
+/*
+ * rpmsg_kdrv_send_message
+ *
+ * Send a message and don't expect a response
+ *
+ * The caller is expected to destroy the message when
+ * this function returns
+ */
+int rpmsg_kdrv_send_message(struct rpmsg_device *rpdev, uint32_t device_id,
+ void *message, uint32_t message_size)
+{
+ struct rpmsg_kdrv_device_header *dev_hdr;
+ int total_size = message_size + sizeof(*dev_hdr);
+ int ret;
+
+ /* We don't need a ctx for direct messages */
+
+ dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+ RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE,
+ RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+ message, message_size,
+ NULL);
+ if (!dev_hdr) {
+ dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+ if (ret)
+ dev_err(&rpdev->dev, "%s: rpmsg_send failed: %d\n", __func__, ret);
+
+ rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+ return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_message);
+
+static int rpmsg_kdrv_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *private, u32 src)
+{
+ struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+ struct rpmsg_kdrv_device_header *hdr = data;
+ struct rpmsg_kdrv_message_header *msg;
+ int msg_len;
+ struct rpmsg_kdrv_ctx *ctx;
+ int ret;
+
+ if (hdr->packet_type != RPMSG_KDRV_TP_PACKET_TYPE_RESPONSE) {
+ rpmsg_kdrv_driver_handle_data(rpdev, data, len, private, src);
+ return 0;
+ }
+
+ mutex_lock(&priv->message_lock);
+ ctx = idr_find(&priv->message_idr, hdr->packet_id);
+ mutex_unlock(&priv->message_lock);
+
+ if (!ctx) {
+ dev_err(&rpdev->dev, "%s: response received with no pending request\n", __func__);
+ return 0;
+ }
+
+ msg = (struct rpmsg_kdrv_message_header *)((void *)(&hdr[1]));
+ msg_len = len - sizeof(*hdr);
+
+ /* process callback if expected */
+ if (ctx->callback) {
+ ret = ctx->callback(ctx->cb_data, ctx->req, ctx->req_size, msg, msg_len);
+ if (ret == RRMSG_KDRV_CALLBACK_DONE) {
+ /* No need to keep the ctx alive */
+ rpmsg_kdrv_dev_hdr_delete(rpdev, ctx->dev_hdr);
+ devm_kfree(&rpdev->dev, ctx);
+ }
+ return 0;
+ }
+
+ /* copy the response and wake up caller, caller will destroy ctx & dev_hdr */
+ memcpy(ctx->resp, msg, min(msg_len, ctx->resp_size));
+
+ ctx->response_recv = true;
+ wake_up(&ctx->response_wq);
+
+ return 0;
+}
+
+static int rpmsg_kdrv_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+ struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(drv);
+
+ if (kddrv->device_type == kddev->device_type) {
+ dev_dbg(dev, "%s: matching with driver %s\n", __func__, drv->name);
+ return 1;
+ }
+
+ dev_dbg(dev, "%s: does not match driver %s\n", __func__, drv->name);
+ return 0;
+}
+
+static int rpmsg_kdrv_dev_probe(struct device *dev)
+{
+ struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+ struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+ int ret;
+
+ dev_dbg(dev, "%s: probe\n", __func__);
+
+ ret = kddrv->probe(kddev);
+ if (ret) {
+ dev_err(dev, "%s: child probe failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rpmsg_kdrv_dev_remove(struct device *dev)
+{
+ struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+ struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+
+ dev_dbg(dev, "%s: remove\n", __func__);
+
+ kddrv->remove(kddev);
+ return 0;
+}
+
+static int rpmsg_kdrv_probe(struct rpmsg_device *rpdev)
+{
+ int ret;
+ struct rpmsg_kdrv_priv *priv;
+
+ dev_dbg(&rpdev->dev, "%s: probing rpmsg kdrv driver\n", __func__);
+
+ priv = devm_kzalloc(&rpdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&rpdev->dev, priv);
+ priv->rpdev = rpdev;
+
+ idr_init(&priv->message_idr);
+ mutex_init(&priv->message_lock);
+
+ dev_dbg(&rpdev->dev, "%s: sending device info request\n", __func__);
+ ret = rpmsg_kdrv_get_devices(rpdev);
+ if (ret) {
+ dev_err(&rpdev->dev, "%s: error collecting device info\n", __func__);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ dev_set_drvdata(&rpdev->dev, NULL);
+ devm_kfree(&rpdev->dev, priv);
+ return ret;
+}
+
+static void rpmsg_kdrv_remove(struct rpmsg_device *rpdev)
+{
+ dev_dbg(&rpdev->dev, "removing rpmsg kdrv driver\n");
+
+ /* TODO check for pending responses for any of the child devices */
+ /* TODO disconnect them all */
+}
+
+static struct bus_type rpmsg_kdrv_bus = {
+ .name = "rpmsg_kdrv",
+ .match = rpmsg_kdrv_dev_match,
+ .probe = rpmsg_kdrv_dev_probe,
+ .remove = rpmsg_kdrv_dev_remove,
+};
+
+static struct rpmsg_device_id rpmsg_kdrv_id_table[] = {
+ { .name = "rpmsg-kdrv" },
+ { },
+};
+
+static struct rpmsg_driver rpmsg_kdrv = {
+ .drv.name = "rpmsg-kdrv",
+ .id_table = rpmsg_kdrv_id_table,
+ .probe = rpmsg_kdrv_probe,
+ .callback = rpmsg_kdrv_cb,
+ .remove = rpmsg_kdrv_remove,
+};
+
+static int __init rpmsg_kdrv_init(void)
+{
+ int ret;
+
+ ret = bus_register(&rpmsg_kdrv_bus);
+ if (ret) {
+ pr_err("failed to register rpmsg kdrv bus: %d\n", ret);
+ goto out;
+ }
+
+ ret = register_rpmsg_driver(&rpmsg_kdrv);
+ if (ret) {
+ pr_err("failed to register rpmsg kdrv driver: %d\n", ret);
+ goto rpdrv_fail;
+ }
+
+ pr_debug("registered rpmsg kdrv driver\n");
+
+ return 0;
+
+rpdrv_fail:
+ bus_unregister(&rpmsg_kdrv_bus);
+out:
+ return ret;
+}
+module_init(rpmsg_kdrv_init);
+
+static void __exit rpmsg_kdrv_fini(void)
+{
+ pr_debug("unregistering rpmsg kdrv driver\n");
+
+ unregister_rpmsg_driver(&rpmsg_kdrv);
+ bus_unregister(&rpmsg_kdrv_bus);
+}
+module_exit(rpmsg_kdrv_fini);
+
+MODULE_AUTHOR("Subhajit Paul <subhajit_paul@ti.com>");
+MODULE_DESCRIPTION("TI Remote-device framework Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg-kdrv/rpmsg_kdrv_demo.c b/drivers/rpmsg-kdrv/rpmsg_kdrv_demo.c
new file mode 100644
index 000000000000..fac88f9b3602
--- /dev/null
+++ b/drivers/rpmsg-kdrv/rpmsg_kdrv_demo.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+
+#include "shared/rpmsg-kdrv-transport-demo.h"
+#include "rpmsg_kdrv_internal.h"
+
+struct rpmsg_kdrv_demo_private {
+ struct rpmsg_kdrv_device *kddev;
+ struct rpmsg_remotedev rdev;
+
+ void *data;
+ ssize_t data_len;
+};
+
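+/*
+ * Returns the stored data length when called with data == NULL and len == 0,
+ * so callers can size a buffer first, e.g. (illustrative sketch):
+ *
+ *   n = ops->get_data(rdev, NULL, 0);
+ *   buf = kmalloc(n, GFP_KERNEL);
+ *   if (buf)
+ *           ops->get_data(rdev, buf, n);
+ */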
+static int rpmsg_kdrv_demo_get_data(struct rpmsg_remotedev *rdev, void *data, ssize_t len)
+{
+ struct rpmsg_kdrv_demo_private *priv = container_of(rdev, struct rpmsg_kdrv_demo_private, rdev);
+
+ if (!data)
+ return len ? -EINVAL : priv->data_len;
+
+ if (len < priv->data_len)
+ return -EINVAL;
+
+ memcpy(data, priv->data, priv->data_len);
+ return priv->data_len;
+}
+
+static int rpmsg_kdrv_demo_ping(struct rpmsg_remotedev *rdev, void *ping_data, ssize_t ping_len)
+{
+ struct rpmsg_kdrv_demo_private *priv = container_of(rdev, struct rpmsg_kdrv_demo_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_demodev_ping_request *req;
+ struct rpmsg_kdrv_demodev_ping_response *resp;
+ int ret;
+
+ if (!ping_len)
+ return 0;
+
+ if (ping_len > RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_DEMODEV_PING_REQUEST;
+ memcpy(req->data, ping_data, ping_len);
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response\n", __func__);
+ goto out;
+ }
+
+ if (resp->header.message_type != RPMSG_KDRV_TP_DEMODEV_PING_RESPONSE) {
+ dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(ping_data, resp->data, ping_len);
+
+out:
+ devm_kfree(&kddev->dev, req);
+ devm_kfree(&kddev->dev, resp);
+ return ret;
+}
+
+static int rpmsg_kdrv_demo_c2s_message(struct rpmsg_remotedev *rdev, void *c2s_msg_data, ssize_t len)
+{
+ struct rpmsg_kdrv_demo_private *priv = container_of(rdev, struct rpmsg_kdrv_demo_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_demodev_c2s_message *msg;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ if (len > RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN)
+ return -EINVAL;
+
+ msg = devm_kzalloc(&kddev->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->header.message_type = RPMSG_KDRV_TP_DEMODEV_C2S_MESSAGE;
+ memcpy(msg->data, c2s_msg_data, len);
+
+ ret = rpmsg_kdrv_send_message(rpdev, kddev->device_id, msg, sizeof(*msg));
+ if (ret)
+ dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_message\n", __func__);
+
+ devm_kfree(&kddev->dev, msg);
+ return ret;
+}
+
+static struct rpmsg_remotedev_demo_ops demo_ops = {
+ .get_data = rpmsg_kdrv_demo_get_data,
+ .ping = rpmsg_kdrv_demo_ping,
+ .c2s_message = rpmsg_kdrv_demo_c2s_message,
+};
+
+static void rpmsg_kdrv_demo_device_init(struct rpmsg_kdrv_device *dev, void *data, int len)
+{
+ struct rpmsg_kdrv_demo_private *priv = dev->driver_private;
+
+ priv->data = devm_kzalloc(&dev->dev, len, GFP_KERNEL);
+ if (!priv->data)
+ return;
+
+ memcpy(priv->data, data, len);
+ priv->data_len = len;
+}
+
+static int rpmsg_kdrv_demo_probe(struct rpmsg_kdrv_device *dev)
+{
+ struct rpmsg_kdrv_demo_private *priv;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rdev.type = RPMSG_REMOTEDEV_DEMO_DEVICE;
+ priv->rdev.device.demo.ops = &demo_ops;
+
+ priv->kddev = dev;
+ dev->driver_private = priv;
+ dev->remotedev = &priv->rdev;
+
+ rpmsg_kdrv_demo_device_init(dev, dev->device_data, dev->device_data_len);
+
+ return 0;
+}
+
+static void rpmsg_kdrv_demo_remove(struct rpmsg_kdrv_device *dev)
+{
+ dev_dbg(&dev->dev, "%s\n", __func__);
+}
+
+static void rpmsg_kdrv_demo_handle_s2c_message(struct rpmsg_kdrv_device *dev, void *msg, ssize_t len)
+{
+ struct rpmsg_kdrv_demo_private *priv = dev->driver_private;
+ struct rpmsg_remotedev *rdev = &priv->rdev;
+
+ if (rdev->device.demo.cb_ops && rdev->device.demo.cb_ops->s2c_message)
+ rdev->device.demo.cb_ops->s2c_message(msg, len, rdev->cb_data);
+}
+
+static int rpmsg_kdrv_demo_callback(struct rpmsg_kdrv_device *dev, void *msg, int len)
+{
+ struct rpmsg_kdrv_demodev_message_header *hdr = msg;
+
+ if (hdr->message_type == RPMSG_KDRV_TP_DEMODEV_S2C_MESSAGE) {
+ struct rpmsg_kdrv_demodev_s2c_message *s2c = msg;
+
+ rpmsg_kdrv_demo_handle_s2c_message(dev, s2c->data,
+ RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN);
+ } else {
+ dev_err(&dev->dev, "%s: unknown message type (%d) for demo device\n", __func__, hdr->message_type);
+ }
+
+ return 0;
+}
+
+static struct rpmsg_kdrv_driver rpmsg_kdrv_demo = {
+ .drv.name = "rpmsg-kdrv-demo",
+ .device_type = RPMSG_KDRV_TP_DEVICE_TYPE_DEMO,
+ .probe = rpmsg_kdrv_demo_probe,
+ .remove = rpmsg_kdrv_demo_remove,
+ .callback = rpmsg_kdrv_demo_callback,
+};
+
+static int __init rpmsg_kdrv_demo_driver_init(void)
+{
+ return rpmsg_kdrv_register_driver(&rpmsg_kdrv_demo);
+}
+module_init(rpmsg_kdrv_demo_driver_init);
+
+static void __exit rpmsg_kdrv_demo_driver_fini(void)
+{
+ driver_unregister(&rpmsg_kdrv_demo.drv);
+}
+module_exit(rpmsg_kdrv_demo_driver_fini);
+
+MODULE_AUTHOR("Subhajit Paul <subhajit_paul@ti.com>");
+MODULE_DESCRIPTION("TI Remote-device Demo Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg-kdrv/rpmsg_kdrv_display.c b/drivers/rpmsg-kdrv/rpmsg_kdrv_display.c
new file mode 100644
index 000000000000..73df24f128f0
--- /dev/null
+++ b/drivers/rpmsg-kdrv/rpmsg_kdrv_display.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_fourcc.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+
+#include "shared/rpmsg-kdrv-transport-display.h"
+#include "rpmsg_kdrv_internal.h"
+
+#define RPMSG_KDRV_DISPLAY_RES_ID_FIRST (0x10)
+
+struct rpmsg_kdrv_display_private {
+ struct rpmsg_kdrv_device *kddev;
+
+ struct rpmsg_remotedev rdev;
+
+ struct idr res_idr;
+ struct mutex res_lock;
+};
+
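+/*
+ * Clamp a count reported by the remote side to the size of the local
+ * array it is copied into, warning loudly if anything would be dropped.
+ */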
+static uint32_t check_min(uint32_t a, uint32_t b, int line)
+{
+ uint32_t res = min(a, b);
+
+ if (res != b) {
+ pr_err("Copy mismatch at Line %d\n", line);
+ WARN_ON(1);
+ }
+
+ return res;
+}
+
+static inline enum rpmsg_kdrv_display_format rpmsg_kdrv_display_fmt_to_rpmsg_fmt(uint32_t in_fmt)
+{
+ switch (in_fmt) {
+ case DRM_FORMAT_ARGB8888:
+ return RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888;
+ case DRM_FORMAT_XRGB8888:
+ return RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888;
+ default:
+ return RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX;
+ }
+}
+
+static inline uint32_t rpmsg_kdrv_display_fmt_to_drm_fmt(uint32_t in_fmt)
+{
+ switch (in_fmt) {
+ case RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888:
+ return DRM_FORMAT_ARGB8888;
+ case RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888:
+ return DRM_FORMAT_XRGB8888;
+ default:
+ return 0;
+ }
+}
+
+static bool rpmsg_kdrv_display_ready(struct rpmsg_remotedev *rdev)
+{
+ struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_display_ready_query_request *req;
+ struct rpmsg_kdrv_display_ready_query_response *resp;
+ int ret;
+ bool retval;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return false;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return false;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_DISPLAY_READY_QUERY_REQUEST;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req), resp, sizeof(*resp));
+ if (ret) {
+ dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response\n", __func__);
+ retval = false;
+ goto out;
+ }
+
+ if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_READY_QUERY_RESPONSE) {
+ dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+ retval = false;
+ goto out;
+ }
+
+ retval = resp->ready ? true : false;
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return retval;
+}
+
+static void rpmsg_kdrv_display_copy_vid_info(struct rpmsg_remotedev_display_pipe *dst, struct rpmsg_kdrv_display_vid_info *src)
+{
+ int cnt;
+ uint32_t out_fmt;
+
+ dst->pipe_id = src->id;
+ dst->can_scale = src->can_scale ? true : false;
+ dst->can_mod_win = src->mutable_window ? true : false;
+ if (dst->can_mod_win) {
+ dst->fixed_win_x = dst->fixed_win_y = dst->fixed_win_w = dst->fixed_win_h = 0;
+ } else {
+ dst->fixed_win_x = src->fixed_window_x;
+ dst->fixed_win_y = src->fixed_window_y;
+ dst->fixed_win_w = src->fixed_window_w;
+ dst->fixed_win_h = src->fixed_window_h;
+ }
+ dst->initial_zorder = src->init_zorder;
+ dst->num_formats = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_FORMATS, src->num_formats, __LINE__);
+
+ dst->num_allowed_zorders = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_ZORDERS, src->num_zorders, __LINE__);
+
+ for (cnt = 0; cnt < dst->num_formats; cnt++) {
+ out_fmt = rpmsg_kdrv_display_fmt_to_drm_fmt(src->format[cnt]);
+ WARN_ON(out_fmt == 0);
+ dst->formats[cnt] = out_fmt;
+ }
+
+ for (cnt = 0; cnt < dst->num_allowed_zorders; cnt++)
+ dst->allowed_zorders[cnt] = src->zorder[cnt];
+}
+
+static void rpmsg_kdrv_display_copy_vp_info(struct rpmsg_remotedev_display_disp *dst, struct rpmsg_kdrv_display_vp_info *src)
+{
+ int vidcnt;
+
+ dst->disp_id = src->id;
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->refresh = src->refresh;
+ dst->num_pipes = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES, src->num_vids, __LINE__);
+
+ for (vidcnt = 0; vidcnt < dst->num_pipes; vidcnt++)
+ rpmsg_kdrv_display_copy_vid_info(&dst->pipes[vidcnt], &src->vid[vidcnt]);
+}
+
+static int rpmsg_kdrv_display_get_res(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_resinfo *res)
+{
+ struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_display_res_info_request *req;
+ struct rpmsg_kdrv_display_res_info_response *resp;
+ int ret, vpcnt;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_DISPLAY_RES_INFO_REQUEST;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req), resp, sizeof(*resp));
+ if (ret) {
+ dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response\n", __func__);
+ goto out;
+ }
+
+ if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_RES_INFO_RESPONSE) {
+ dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ res->num_disps = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_DISPS, resp->num_vps, __LINE__);
+
+ for (vpcnt = 0; vpcnt < res->num_disps; vpcnt++)
+ rpmsg_kdrv_display_copy_vp_info(&res->disps[vpcnt], &resp->vp[vpcnt]);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static uint32_t rpmsg_kdrv_display_res_id_new(struct rpmsg_kdrv_device *kddev, void *data)
+{
+ struct rpmsg_kdrv_display_private *priv = kddev->driver_private;
+ int id;
+
+ mutex_lock(&priv->res_lock);
+ id = idr_alloc(&priv->res_idr, data, RPMSG_KDRV_DISPLAY_RES_ID_FIRST, 0, GFP_KERNEL);
+ mutex_unlock(&priv->res_lock);
+
+ if (id < 0)
+ return 0;
+
+ return id;
+}
+
+static void rpmsg_kdrv_display_free_res_id(struct rpmsg_kdrv_device *kddev, uint32_t id)
+{
+ struct rpmsg_kdrv_display_private *priv = kddev->driver_private;
+
+ mutex_lock(&priv->res_lock);
+ idr_remove(&priv->res_idr, id);
+ mutex_unlock(&priv->res_lock);
+}
+
+static void rpmsg_kdrv_free_request_res(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_commit_request *req)
+{
+ int i;
+
+ rpmsg_kdrv_display_free_res_id(kddev, req->commit_id);
+
+ for (i = 0; i < req->num_vid_updates; i++) {
+ if (req->vid[i].enabled)
+ rpmsg_kdrv_display_free_res_id(kddev, req->vid[i].buffer.buffer_id);
+ }
+}
+
+static bool rpmsg_kdrv_display_copy_buffer(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_buffer_info *dst,
+ struct rpmsg_remotedev_display_buffer *src)
+{
+ int i;
+
+ dst->width = src->width;
+ dst->height = src->height;
+
+ dst->format = rpmsg_kdrv_display_fmt_to_rpmsg_fmt(src->format);
+ if (WARN_ON(dst->format == RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX))
+ return false;
+
+ dst->num_planes = check_min(RPMSG_KDRV_TP_DISPLAY_MAX_PLANES, src->num_planes, __LINE__);
+ if (dst->num_planes != src->num_planes)
+ return false;
+
+ for (i = 0; i < dst->num_planes; i++) {
+ dst->plane[i] = (uint64_t)src->planes[i];
+ dst->pitch[i] = src->pitches[i];
+ }
+
+ dst->buffer_id = rpmsg_kdrv_display_res_id_new(kddev, src);
+ if (!dst->buffer_id)
+ return false;
+
+ return true;
+}
+
+static bool rpmsg_kdrv_display_copy_vid_commit(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_vid_update_info *dst,
+ struct rpmsg_remotedev_display_pipe_update *src)
+{
+ dst->id = src->pipe_id;
+ dst->enabled = src->enabled ? 1 : 0;
+ if (dst->enabled) {
+ dst->dst_w = src->dst_w;
+ dst->dst_h = src->dst_h;
+ dst->dst_x = src->dst_x;
+ dst->dst_y = src->dst_y;
+
+ if (!rpmsg_kdrv_display_copy_buffer(kddev, &dst->buffer, src->buffer))
+ return false;
+ }
+
+ return true;
+}
+
+static bool rpmsg_kdrv_display_copy_commit(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_commit_request *dst,
+ struct rpmsg_remotedev_display_commit *src)
+{
+ int i, copied_vids;
+
+ dst->id = src->disp_id;
+ dst->num_vid_updates = check_min(RPMSG_KDRV_TP_DISPLAY_MAX_VIDS, src->num_pipe_updates, __LINE__);
+
+ for (i = 0, copied_vids = 0; i < dst->num_vid_updates; i++, copied_vids++) {
+ if (!rpmsg_kdrv_display_copy_vid_commit(kddev, &dst->vid[i], &src->pipes[i]))
+ goto free_vid_res;
+ }
+
+ dst->commit_id = rpmsg_kdrv_display_res_id_new(kddev, src);
+ if (!dst->commit_id)
+ goto free_vid_res;
+
+ return true;
+
+free_vid_res:
+ for (i = 0; i < copied_vids; i++) {
+ if (dst->vid[i].enabled)
+ rpmsg_kdrv_display_free_res_id(kddev, dst->vid[i].buffer.buffer_id);
+ }
+ return false;
+}
+
+static int rpmsg_kdrv_display_commit(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_commit *commit)
+{
+ struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_display_commit_request *req;
+ struct rpmsg_kdrv_display_commit_response *resp;
+ int ret;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_DISPLAY_COMMIT_REQUEST;
+
+ if (!rpmsg_kdrv_display_copy_commit(kddev, req, commit)) {
+ dev_err(&kddev->dev, "%s: failed to copy commit request\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response\n", __func__);
+ goto nosend;
+ }
+
+ if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_COMMIT_RESPONSE) {
+ dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ((resp->status == 0) ? 0 : -EINVAL);
+ goto out;
+
+nosend:
+ rpmsg_kdrv_free_request_res(kddev, req);
+out:
+ devm_kfree(&kddev->dev, req);
+ devm_kfree(&kddev->dev, resp);
+ return ret;
+}
+
+static struct rpmsg_remotedev_display_ops disp_ops = {
+ .ready = rpmsg_kdrv_display_ready,
+ .get_res_info = rpmsg_kdrv_display_get_res,
+ .commit = rpmsg_kdrv_display_commit,
+};
+
+static void rpmsg_kdrv_display_device_init(struct rpmsg_kdrv_device *kddev, void *data, int len)
+{
+}
+
+static int rpmsg_kdrv_display_probe(struct rpmsg_kdrv_device *dev)
+{
+ struct rpmsg_kdrv_display_private *priv;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rdev.type = RPMSG_REMOTEDEV_DISPLAY_DEVICE;
+ priv->rdev.device.display.ops = &disp_ops;
+
+ mutex_init(&priv->res_lock);
+ idr_init(&priv->res_idr);
+
+ priv->kddev = dev;
+ dev->driver_private = priv;
+ dev->remotedev = &priv->rdev;
+
+ rpmsg_kdrv_display_device_init(dev, dev->device_data, dev->device_data_len);
+
+ return 0;
+}
+
+static void rpmsg_kdrv_display_remove(struct rpmsg_kdrv_device *dev)
+{
+ dev_dbg(&dev->dev, "%s\n", __func__);
+}
+
+static void rpmsg_kdrv_display_handle_commit(struct rpmsg_kdrv_device *dev, struct rpmsg_kdrv_display_commit_done_message *msg)
+{
+ struct rpmsg_kdrv_display_private *priv = dev->driver_private;
+ struct rpmsg_remotedev *rdev = &priv->rdev;
+ struct rpmsg_remotedev_display_commit *commit;
+
+ mutex_lock(&priv->res_lock);
+ commit = idr_find(&priv->res_idr, msg->commit_id);
+ idr_remove(&priv->res_idr, msg->commit_id);
+ mutex_unlock(&priv->res_lock);
+
+ if (!commit) {
+ dev_err(&dev->dev, "%s: no pending commit found\n", __func__);
+ return;
+ }
+
+ if (rdev->device.display.cb_ops && rdev->device.display.cb_ops->commit_done)
+ rdev->device.display.cb_ops->commit_done(commit, rdev->cb_data);
+}
+
+static void rpmsg_kdrv_display_handle_buffer(struct rpmsg_kdrv_device *dev, struct rpmsg_kdrv_display_buffer_done_message *msg)
+{
+ struct rpmsg_kdrv_display_private *priv = dev->driver_private;
+ struct rpmsg_remotedev *rdev = &priv->rdev;
+ struct rpmsg_remotedev_display_buffer *buffer;
+
+ mutex_lock(&priv->res_lock);
+ buffer = idr_find(&priv->res_idr, msg->buffer_id);
+ idr_remove(&priv->res_idr, msg->buffer_id);
+ mutex_unlock(&priv->res_lock);
+
+ if (!buffer) {
+ dev_err(&dev->dev, "%s: no pending buffer found\n", __func__);
+ return;
+ }
+
+ if (rdev->device.display.cb_ops && rdev->device.display.cb_ops->buffer_done)
+ rdev->device.display.cb_ops->buffer_done(buffer, rdev->cb_data);
+}
+
+static int rpmsg_kdrv_display_callback(struct rpmsg_kdrv_device *dev, void *msg, int len)
+{
+ struct rpmsg_kdrv_display_message_header *hdr = msg;
+
+ if (hdr->message_type == RPMSG_KDRV_TP_DISPLAY_COMMIT_DONE_MESSAGE)
+ rpmsg_kdrv_display_handle_commit(dev, msg);
+ else if (hdr->message_type == RPMSG_KDRV_TP_DISPLAY_BUFFER_DONE_MESSAGE)
+ rpmsg_kdrv_display_handle_buffer(dev, msg);
+
+ return 0;
+}
+
+static struct rpmsg_kdrv_driver rpmsg_kdrv_display = {
+ .drv.name = "rpmsg-kdrv-display",
+ .device_type = RPMSG_KDRV_TP_DEVICE_TYPE_DISPLAY,
+ .probe = rpmsg_kdrv_display_probe,
+ .remove = rpmsg_kdrv_display_remove,
+ .callback = rpmsg_kdrv_display_callback,
+};
+
+static int __init rpmsg_kdrv_display_driver_init(void)
+{
+ return rpmsg_kdrv_register_driver(&rpmsg_kdrv_display);
+}
+module_init(rpmsg_kdrv_display_driver_init);
+
+static void __exit rpmsg_kdrv_display_driver_fini(void)
+{
+ driver_unregister(&rpmsg_kdrv_display.drv);
+}
+module_exit(rpmsg_kdrv_display_driver_fini);
+
+MODULE_AUTHOR("Subhajit Paul <subhajit_paul@ti.com>");
+MODULE_DESCRIPTION("TI Remote-device Virtual Display Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg-kdrv/rpmsg_kdrv_internal.h b/drivers/rpmsg-kdrv/rpmsg_kdrv_internal.h
new file mode 100644
index 000000000000..efb3876564ed
--- /dev/null
+++ b/drivers/rpmsg-kdrv/rpmsg_kdrv_internal.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_INTERNAL_H__
+#define __RPMSG_KDRV_INTERNAL_H__
+
+#define RRMSG_KDRV_CALLBACK_DONE (0)
+#define RRMSG_KDRV_CALLBACK_MORE (1)
+
+struct rpmsg_kdrv_device {
+ struct device dev;
+ struct rpmsg_device *rpdev;
+ int device_type;
+ int device_id;
+ void *device_data;
+ int device_data_len;
+ char *device_name;
+ void *device_private;
+ void *driver_private;
+ struct rpmsg_remotedev *remotedev;
+};
+
+struct rpmsg_kdrv_driver {
+ struct device_driver drv;
+ int device_type;
+ int (*probe)(struct rpmsg_kdrv_device *dev);
+ void (*remove)(struct rpmsg_kdrv_device *dev);
+ int (*callback)(struct rpmsg_kdrv_device *dev, void *msg, int len);
+};
+
+typedef int (*request_cb_t)(void *data, void *req, int req_sz, void *resp, int resp_sz);
+
+extern int rpmsg_kdrv_register_driver(struct rpmsg_kdrv_driver *drv);
+
+extern int rpmsg_kdrv_send_request_with_callback(struct rpmsg_device *rpdev,
+ uint32_t device_id, void *message, uint32_t message_size, void *cb_data,
+ request_cb_t callback);
+extern int rpmsg_kdrv_send_request_with_response(struct rpmsg_device *rpdev,
+ uint32_t device_id, void *message, uint32_t message_size,
+ void *response, uint32_t response_size);
+extern int rpmsg_kdrv_send_message(struct rpmsg_device *rpdev,
+ uint32_t device_id, void *message, uint32_t message_size);
+
+
+#endif
diff --git a/drivers/rpmsg-kdrv/rpmsg_kdrv_switch.c b/drivers/rpmsg-kdrv/rpmsg_kdrv_switch.c
new file mode 100644
index 000000000000..aabfefd25a36
--- /dev/null
+++ b/drivers/rpmsg-kdrv/rpmsg_kdrv_switch.c
@@ -0,0 +1,977 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Grygorii Strashko <grygorii.strashko@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+
+#include "rpmsg_kdrv_internal.h"
+#include "shared/rpmsg-kdrv-transport-switch.h"
+
+struct rpmsg_kdrv_switch_private {
+ struct rpmsg_kdrv_device *kddev;
+ struct rpmsg_remotedev rdev;
+ u64 session_id;
+ u32 core_key;
+ u32 permissions;
+ u32 uart_id;
+ u32 attached:1;
+ u32 uart_connected:1;
+};
+
+static bool
+rpmsg_kdrv_switch_check_perm(struct rpmsg_kdrv_switch_private *priv,
+ enum rpmsg_kdrv_ethswitch_message_type msg_type)
+{
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+
+ if (priv->permissions & BIT(msg_type))
+ return true;
+
+ dev_err_ratelimited(&kddev->dev, "permission denied msg: 0x%02X\n",
+ msg_type);
+ return false;
+}
+
+static int
+rpmsg_kdrv_switch_check_resp_status(struct rpmsg_kdrv_ethswitch_common_resp_info *info)
+{
+ if (info->status == RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_EAGAIN)
+ return -EAGAIN;
+ if (info->status == RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_EFAIL)
+ return -EIO;
+ return 0;
+}
+
+static int rpmsg_kdrv_switch_ping(struct rpmsg_remotedev *rdev,
+ const u8 *data, int size)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_ping_req *req;
+ struct rpmsg_kdrv_ethswitch_ping_resp *resp;
+ int ret;
+
+ if (!rpmsg_kdrv_switch_check_perm(priv,
+ RPMSG_KDRV_TP_ETHSWITCH_PING_REQUEST))
+ return -EPERM;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ if (size > RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN)
+ size = RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN;
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_PING_REQUEST;
+ memcpy(req->data, data, size);
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ if (memcmp(req->data, resp->data, size)) {
+ dev_dbg(&kddev->dev, "%s: ping fail - data\n", __func__);
+ ret = -EINVAL;
+ }
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int
+rpmsg_kdrv_switch_attach(struct rpmsg_remotedev *rdev,
+ struct rpmsg_rdev_eth_switch_attach_info *attach_info)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_attach_req *req;
+ struct rpmsg_kdrv_ethswitch_attach_resp *resp;
+ int ret;
+
+ if (priv->attached)
+ return -EBUSY;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_ATTACH;
+ req->cpsw_type = RPMSG_KDRV_TP_ETHSWITCH_CPSWTYPE_MAIN_CPSW;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
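+ /* the id/core_key pair identifies this attach session and is echoed in every later request */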
+ priv->session_id = resp->id;
+ priv->core_key = resp->core_key;
+ priv->attached = true;
+
+ dev_dbg(&kddev->dev, "%s: done id:%llX core_key:%08X\n",
+ __func__, priv->session_id, priv->core_key);
+
+ attach_info->rx_mtu = resp->rx_mtu;
+ memcpy(attach_info->tx_mtu, resp->tx_mtu, sizeof(attach_info->tx_mtu));
+ attach_info->features = resp->features;
+ if (priv->uart_connected)
+ attach_info->features |=
+ RPMSG_KDRV_ETHSWITCH_FEATURE_DUMP_STATS;
+ attach_info->mac_only_port = resp->mac_only_port;
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int
+rpmsg_kdrv_switch_attach_ext(struct rpmsg_remotedev *rdev,
+ struct rpmsg_rdev_eth_switch_attach_ext_info *attach_ext_info)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_attach_extended_req *req;
+ struct rpmsg_kdrv_ethswitch_attach_extended_resp *resp;
+ int ret;
+
+ if (priv->attached)
+ return -EBUSY;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_ATTACH_EXT;
+ req->cpsw_type = RPMSG_KDRV_TP_ETHSWITCH_CPSWTYPE_MAIN_CPSW;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ priv->session_id = resp->id;
+ priv->core_key = resp->core_key;
+ priv->attached = true;
+
+ dev_dbg(&kddev->dev, "%s: done id:%llX core_key:%08X\n",
+ __func__, priv->session_id, priv->core_key);
+
+ attach_ext_info->rx_mtu = resp->rx_mtu;
+ memcpy(attach_ext_info->tx_mtu, resp->tx_mtu,
+ sizeof(attach_ext_info->tx_mtu));
+ attach_ext_info->features = resp->features;
+ if (priv->uart_connected)
+ attach_ext_info->features |=
+ RPMSG_KDRV_ETHSWITCH_FEATURE_DUMP_STATS;
+ attach_ext_info->flow_idx = resp->alloc_flow_idx;
+ attach_ext_info->tx_cpsw_psil_dst_id = resp->tx_cpsw_psil_dst_id;
+ ether_addr_copy(attach_ext_info->mac_addr, resp->mac_address);
+ if (attach_ext_info->features & RPMSG_KDRV_ETHSWITCH_FEATURE_MAC_ONLY)
+ attach_ext_info->mac_only_port = resp->mac_only_port;
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_detach(struct rpmsg_remotedev *rdev)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_detach_req *req;
+ struct rpmsg_kdrv_ethswitch_detach_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_DETACH;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ priv->attached = false;
+
+ dev_dbg(&kddev->dev, "%s: done ret:%d\n", __func__, ret);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int
+rpmsg_kdrv_switch_get_tx_info(struct rpmsg_remotedev *rdev,
+ struct rpmsg_rdev_eth_switch_tx_info *info)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_alloc_req *req;
+ struct rpmsg_kdrv_ethswitch_alloc_tx_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_ALLOC_TX;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ info->tx_cpsw_psil_dst_id = resp->tx_cpsw_psil_dst_id;
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int
+rpmsg_kdrv_switch_get_rx_info(struct rpmsg_remotedev *rdev,
+ struct rpmsg_rdev_eth_switch_rx_info *info)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_alloc_req *req;
+ struct rpmsg_kdrv_ethswitch_alloc_rx_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_ALLOC_RX;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ info->flow_idx = resp->alloc_flow_idx;
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_get_mac(struct rpmsg_remotedev *rdev,
+ void *mac_addr)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_alloc_req *req;
+ struct rpmsg_kdrv_ethswitch_alloc_mac_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_ALLOC_MAC;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ ether_addr_copy(mac_addr, resp->mac_address);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_register_mac(struct rpmsg_remotedev *rdev,
+ void *mac_addr, u32 flow_idx_offset)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_register_mac_req *req;
+ struct rpmsg_kdrv_ethswitch_register_mac_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_REGISTER_MAC;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ ether_addr_copy(req->mac_address, mac_addr);
+ req->flow_idx = flow_idx_offset;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int
+rpmsg_kdrv_switch_unregister_mac(struct rpmsg_remotedev *rdev,
+ void *mac_addr, u32 flow_idx_offset)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_unregister_mac_req *req;
+ struct rpmsg_kdrv_ethswitch_unregister_mac_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_MAC;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ ether_addr_copy(req->mac_address, mac_addr);
+ req->flow_idx = flow_idx_offset;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_reg_ipv4(struct rpmsg_remotedev *rdev,
+ void *mac_addr, __be32 ipv4)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_ipv4_register_mac_req *req;
+ struct rpmsg_kdrv_ethswitch_ipv4_register_mac_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_REGISTER;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ ether_addr_copy(req->mac_address, mac_addr);
+ memcpy(req->ipv4_addr, &ipv4, RPMSG_KDRV_TP_ETHSWITCH_IPV4ADDRLEN);
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_unreg_ipv4(struct rpmsg_remotedev *rdev,
+ __be32 ipv4)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_req *req;
+ struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_UNREGISTER;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ memcpy(req->ipv4_addr, &ipv4, RPMSG_KDRV_TP_ETHSWITCH_IPV4ADDRLEN);
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_reg_read(struct rpmsg_remotedev *rdev,
+ u32 reg_addr, u32 *val)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_regrd_req *req;
+ struct rpmsg_kdrv_ethswitch_regrd_resp *resp;
+ int ret;
+
+ if (!rpmsg_kdrv_switch_check_perm(priv, RPMSG_KDRV_TP_ETHSWITCH_REGRD))
+ return -EPERM;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_REGRD;
+ req->regaddr = reg_addr;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ *val = resp->regval;
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_c2s_dbg_dump_stats(struct rpmsg_remotedev *rdev)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_c2s_notify *req;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_C2S_NOTIFY;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ req->notifyid = RPMSG_KDRV_TP_ETHSWITCH_CLIENTNOTIFY_DUMPSTATS;
+
+ ret = rpmsg_kdrv_send_message(rpdev, kddev->device_id,
+ req, sizeof(*req));
+
+ dev_dbg(&kddev->dev, "%s: done ret:%d\n", __func__, ret);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_set_promisc(struct rpmsg_remotedev *rdev, u32 enable)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_set_promisc_mode_req *req;
+ struct rpmsg_kdrv_ethswitch_set_promisc_mode_resp *resp;
+ int ret;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_SET_PROMISC_MODE;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ req->enable = enable ? 1 : 0;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+ if (ret)
+ goto out;
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_filter_add_mc(struct rpmsg_remotedev *rdev,
+ const void *mac_addr, u16 vlan_id, u32 flow_idx)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_filter_add_mc_req *req;
+ struct rpmsg_kdrv_ethswitch_filter_add_mc_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_FILTER_ADD_MAC;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ ether_addr_copy(req->mac_address, mac_addr);
+ req->flow_idx = flow_idx;
+ req->vlan_id = vlan_id;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static int rpmsg_kdrv_switch_filter_del_mc(struct rpmsg_remotedev *rdev,
+ const void *mac_addr, u16 vlan_id, u32 flow_idx)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_device *kddev = priv->kddev;
+ struct rpmsg_device *rpdev = kddev->rpdev;
+ struct rpmsg_kdrv_ethswitch_filter_del_mc_req *req;
+ struct rpmsg_kdrv_ethswitch_filter_del_mc_resp *resp;
+ int ret;
+
+ if (!priv->attached)
+ return -EINVAL;
+
+ req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ devm_kfree(&kddev->dev, req);
+ return -ENOMEM;
+ }
+
+ req->header.message_type = RPMSG_KDRV_TP_ETHSWITCH_FILTER_DEL_MAC;
+ req->info.id = priv->session_id;
+ req->info.core_key = priv->core_key;
+ ether_addr_copy(req->mac_address, mac_addr);
+ req->flow_idx = flow_idx;
+ req->vlan_id = vlan_id;
+
+ ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id,
+ req, sizeof(*req),
+ resp, sizeof(*resp));
+ if (ret) {
+ dev_dbg(&kddev->dev, "%s: send: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = rpmsg_kdrv_switch_check_resp_status(&resp->info);
+
+ dev_dbg(&kddev->dev, "%s: done\n", __func__);
+
+out:
+ devm_kfree(&kddev->dev, resp);
+ devm_kfree(&kddev->dev, req);
+ return ret;
+}
+
+static void rpmsg_kdrv_switch_get_fw_ver(struct rpmsg_remotedev *rdev,
+ char *buf, size_t size)
+{
+ struct rpmsg_kdrv_switch_private *priv =
+ container_of(rdev, struct rpmsg_kdrv_switch_private, rdev);
+ struct rpmsg_kdrv_ethswitch_fw_version_info *fw_info;
+ struct rpmsg_kdrv_ethswitch_device_data *kddev_data;
+
+ kddev_data = priv->kddev->device_data;
+ fw_info = &kddev_data->fw_ver;
+
+ snprintf(buf, size, "%u.%u.%u %.*s/%.*s/%.*s SHA:%.*s",
+ fw_info->major, fw_info->minor, fw_info->rev,
+ RPMSG_KDRV_TP_ETHSWITCH_DATELEN, fw_info->date,
+ RPMSG_KDRV_TP_ETHSWITCH_MONTHLEN, fw_info->month,
+ RPMSG_KDRV_TP_ETHSWITCH_YEARLEN, fw_info->year,
+ RPMSG_KDRV_TP_ETHSWITCH_COMMITSHALEN, fw_info->commit_hash);
+}
+
+static struct rpmsg_remotedev_eth_switch_ops switch_ops = {
+ .get_fw_ver = rpmsg_kdrv_switch_get_fw_ver,
+ .attach = rpmsg_kdrv_switch_attach,
+ .attach_ext = rpmsg_kdrv_switch_attach_ext,
+ .detach = rpmsg_kdrv_switch_detach,
+ .get_tx_info = rpmsg_kdrv_switch_get_tx_info,
+ .get_rx_info = rpmsg_kdrv_switch_get_rx_info,
+ .get_mac = rpmsg_kdrv_switch_get_mac,
+ .register_mac = rpmsg_kdrv_switch_register_mac,
+ .unregister_mac = rpmsg_kdrv_switch_unregister_mac,
+ .register_ipv4 = rpmsg_kdrv_switch_reg_ipv4,
+ .unregister_ipv4 = rpmsg_kdrv_switch_unreg_ipv4,
+ .ping = rpmsg_kdrv_switch_ping,
+ .read_reg = rpmsg_kdrv_switch_reg_read,
+ .dbg_dump_stats = rpmsg_kdrv_switch_c2s_dbg_dump_stats,
+ .set_promisc_mode = rpmsg_kdrv_switch_set_promisc,
+ .filter_add_mc = rpmsg_kdrv_switch_filter_add_mc,
+ .filter_del_mc = rpmsg_kdrv_switch_filter_del_mc,
+};
+
+static int rpmsg_kdrv_switch_callback(struct rpmsg_kdrv_device *dev,
+ void *msg, int len)
+{
+ return 0;
+}
+
+static int
+rpmsg_kdrv_switch_dev_data_parse(struct rpmsg_kdrv_device *kddev,
+ void *data, int len,
+ struct rpmsg_kdrv_switch_private *priv)
+{
+ struct rpmsg_kdrv_ethswitch_device_data *kddev_data = data;
+ struct rpmsg_kdrv_ethswitch_fw_version_info *fw_info;
+
+ if (sizeof(*kddev_data) != len)
+ return -EINVAL;
+
+ dev_info(&kddev->dev, "Device info: permissions: %08X uart_id: %d\n",
+ kddev_data->permission_flags,
+ kddev_data->uart_connected ? kddev_data->uart_id : -1);
+
+ fw_info = &kddev_data->fw_ver;
+
+ dev_info(&kddev->dev, "FW ver %u.%u (rev %u) %.*s/%.*s/%.*s SHA:%.*s\n",
+ fw_info->major, fw_info->minor, fw_info->rev,
+ RPMSG_KDRV_TP_ETHSWITCH_DATELEN, fw_info->date,
+ RPMSG_KDRV_TP_ETHSWITCH_MONTHLEN, fw_info->month,
+ RPMSG_KDRV_TP_ETHSWITCH_YEARLEN, fw_info->year,
+ RPMSG_KDRV_TP_ETHSWITCH_COMMITSHALEN, fw_info->commit_hash);
+
+ if (fw_info->major != RPMSG_KDRV_TP_ETHSWITCH_VERSION_MAJOR ||
+ fw_info->minor != RPMSG_KDRV_TP_ETHSWITCH_VERSION_MINOR) {
+ dev_err(&kddev->dev, "Unsupported EthSwitch FW version\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->uart_connected = kddev_data->uart_connected;
+ priv->uart_id = kddev_data->uart_connected ? kddev_data->uart_id : -1;
+ priv->permissions = kddev_data->permission_flags;
+
+ return 0;
+}
+
+static int rpmsg_kdrv_switch_probe(struct rpmsg_kdrv_device *dev)
+{
+ struct rpmsg_kdrv_switch_private *priv;
+ int ret;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rdev.type = RPMSG_REMOTEDEV_ETH_SWITCH_DEVICE;
+ priv->rdev.device.eth_switch.ops = &switch_ops;
+
+ priv->kddev = dev;
+
+ ret = rpmsg_kdrv_switch_dev_data_parse(dev, dev->device_data,
+ dev->device_data_len, priv);
+ if (ret)
+ return ret;
+
+ dev->driver_private = priv;
+ dev->remotedev = &priv->rdev;
+
+ return 0;
+}
+
+static void rpmsg_kdrv_switch_remove(struct rpmsg_kdrv_device *dev)
+{
+ dev->driver_private = NULL;
+ dev->remotedev = NULL;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+}
+
+static struct rpmsg_kdrv_driver rpmsg_kdrv_switch = {
+ .drv = {
+ .name = "rpmsg-kdrv-eth-switch",
+ },
+ .device_type = RPMSG_KDRV_TP_DEVICE_TYPE_ETHSWITCH,
+ .probe = rpmsg_kdrv_switch_probe,
+ .remove = rpmsg_kdrv_switch_remove,
+ .callback = rpmsg_kdrv_switch_callback,
+};
+
+static int __init rpmsg_kdrv_switch_driver_init(void)
+{
+ return rpmsg_kdrv_register_driver(&rpmsg_kdrv_switch);
+}
+module_init(rpmsg_kdrv_switch_driver_init);
+
+static void __exit rpmsg_kdrv_switch_driver_fini(void)
+{
+}
+module_exit(rpmsg_kdrv_switch_driver_fini);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI J721E RPMSG KDRV Ethernet switch driver");
diff --git a/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-common.h b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-common.h
new file mode 100644
index 000000000000..e6077fcfbbaa
--- /dev/null
+++ b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-common.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_COMMON_H__
+#define __RPMSG_KDRV_TRANSPORT_COMMON_H__
+
+/*
+ * Device types supported by RPMSG-KDRV framework
+ * Currently supported device types: display, demo and ethswitch
+ * Planned future support for capture and i2c devices
+ */
+#define RPMSG_KDRV_TP_DEVICE_TYPE_INIT (0x0)
+#define RPMSG_KDRV_TP_DEVICE_TYPE_DISPLAY (0x1)
+#define RPMSG_KDRV_TP_DEVICE_TYPE_DEMO (0x2)
+#define RPMSG_KDRV_TP_DEVICE_TYPE_ETHSWITCH (0x3)
+
+/* More device types here */
+#define RPMSG_KDRV_TP_DEVICE_TYPE_MAX (0x4)
+
+/*
+ * Maximum number of proxy devices per remotecore
+ */
+#define RPMSG_KDRV_TP_MAX_DEVICES (4)
+
+/*
+ * Maximum length of proxy device name
+ */
+#define RPMSG_KDRV_TP_DEVICE_NAME_LEN (32)
+
+/*
+ * Statically assigned device ID for init device
+ * Remote device framework dynamically assigns device
+ * IDs for other devices. All dynamically assigned IDs
+ * are greater than RPMSG_KDRV_TP_DEVICE_ID_INIT
+ */
+#define RPMSG_KDRV_TP_DEVICE_ID_INIT (0)
+
+/*
+ * Packet IDs are assigned dynamically (for REQUEST packets)
+ * starting from RPMSG_KDRV_TP_PACKET_ID_FIRST
+ * For MESSAGE packets, framework can use RPMSG_KDRV_TP_PACKET_ID_NONE
+ */
+#define RPMSG_KDRV_TP_PACKET_ID_NONE (0x10)
+#define RPMSG_KDRV_TP_PACKET_ID_FIRST (RPMSG_KDRV_TP_PACKET_ID_NONE + 1)
+
+enum rpmsg_kdrv_packet_source {
+ RPMSG_KDRV_TP_PACKET_SOURCE_SERVER,
+ RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+ RPMSG_KDRV_TP_PACKET_SOURCE_MAX,
+};
+
+enum rpmsg_kdrv_packet_type {
+ RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+ RPMSG_KDRV_TP_PACKET_TYPE_RESPONSE,
+ RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE,
+ RPMSG_KDRV_TP_PACKET_TYPE_MAX,
+};
+
+/* RPMSG_KDRV message:
+ * => device_header
+ * => message_header : defined by each device type
+ * => request / response / message payload
+ */
+struct rpmsg_kdrv_device_header {
+ /* ID of device sending the packet */
+ u8 device_id;
+ /* enum: rpmsg_kdrv_packet_type */
+ u8 packet_type;
+ /* enum: rpmsg_kdrv_packet_source */
+ u8 packet_source;
+ /* dynamically assigned packet ID for response matching */
+ u32 packet_id;
+ /* size of packet */
+ u32 packet_size;
+} __packed;
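+
+/*
+ * Illustrative only (not part of the protocol code): a client-side REQUEST
+ * header as the framework's send path might populate it; the dev_id and
+ * payload_len variables are hypothetical.
+ *
+ *	struct rpmsg_kdrv_device_header hdr = {
+ *		.device_id	= dev_id,
+ *		.packet_type	= RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+ *		.packet_source	= RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+ *		.packet_id	= RPMSG_KDRV_TP_PACKET_ID_FIRST,
+ *		.packet_size	= sizeof(hdr) + payload_len,
+ *	};
+ */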
+
+#endif
diff --git a/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-demo.h b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-demo.h
new file mode 100644
index 000000000000..6d709b248d0b
--- /dev/null
+++ b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-demo.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_DEMODEV_H__
+#define __RPMSG_KDRV_TRANSPORT_DEMODEV_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+enum rpmsg_kdrv_display_message_type {
+ RPMSG_KDRV_TP_DEMODEV_PING_REQUEST,
+ RPMSG_KDRV_TP_DEMODEV_PING_RESPONSE,
+ RPMSG_KDRV_TP_DEMODEV_S2C_MESSAGE,
+ RPMSG_KDRV_TP_DEMODEV_C2S_MESSAGE,
+ RPMSG_KDRV_TP_DEMODEV_MAX,
+};
+
+/*
+ * Maximum length of demo device data
+ */
+#define RPMSG_KDRV_TP_DEMODEV_DEVICE_DATA_LEN (32)
+
+/*
+ * Maximum length of demo device message data
+ */
+#define RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN (128)
+
+/*
+ * per-device data for demo device
+ */
+struct rpmsg_kdrv_demodev_device_data {
+ /* demo device character string */
+ u8 charString[RPMSG_KDRV_TP_DEMODEV_DEVICE_DATA_LEN];
+} __packed;
+
+/*
+ * message header for demo device
+ */
+struct rpmsg_kdrv_demodev_message_header {
+ /* enum: rpmsg_kdrv_demodev_message_type */
+ u8 message_type;
+} __packed;
+
+/* demo device ping request - always client to server */
+struct rpmsg_kdrv_demodev_ping_request {
+ /* message header */
+ struct rpmsg_kdrv_demodev_message_header header;
+ /* ping data */
+ u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device ping response - always server to client */
+struct rpmsg_kdrv_demodev_ping_response {
+ /* message header */
+ struct rpmsg_kdrv_demodev_message_header header;
+ /* ping data */
+ u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device server to client one-way message */
+struct rpmsg_kdrv_demodev_s2c_message {
+ /* message header */
+ struct rpmsg_kdrv_demodev_message_header header;
+ /* message data */
+ u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device client to server one-way message */
+struct rpmsg_kdrv_demodev_c2s_message {
+ /* message header */
+ struct rpmsg_kdrv_demodev_message_header header;
+ /* message data */
+ u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+#endif
diff --git a/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-display.h b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-display.h
new file mode 100644
index 000000000000..705d869ccfac
--- /dev/null
+++ b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-display.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_DISPLAY_H__
+#define __RPMSG_KDRV_TRANSPORT_DISPLAY_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+/*
+ * Maximum number of planes per buffer
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_PLANES (2)
+
+/*
+ * Maximum number of shared displays
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_VPS (2)
+
+/*
+ * Maximum number of pipes per shared display
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_VIDS (4)
+
+/*
+ * Maximum number of formats supported per pipe
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_FORMATS (2)
+
+/*
+ * Maximum number of zorders supported per pipe
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_ZORDERS (4)
+
+enum rpmsg_kdrv_display_format {
+ RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888,
+ RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888,
+ RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX,
+};
+
+enum rpmsg_kdrv_display_message_type {
+ RPMSG_KDRV_TP_DISPLAY_READY_QUERY_REQUEST,
+ RPMSG_KDRV_TP_DISPLAY_READY_QUERY_RESPONSE,
+ RPMSG_KDRV_TP_DISPLAY_RES_INFO_REQUEST,
+ RPMSG_KDRV_TP_DISPLAY_RES_INFO_RESPONSE,
+ RPMSG_KDRV_TP_DISPLAY_COMMIT_REQUEST,
+ RPMSG_KDRV_TP_DISPLAY_COMMIT_RESPONSE,
+ RPMSG_KDRV_TP_DISPLAY_COMMIT_DONE_MESSAGE,
+ RPMSG_KDRV_TP_DISPLAY_BUFFER_DONE_MESSAGE,
+ RPMSG_KDRV_TP_DISPLAY_MAX,
+};
+
+/*
+ * per-device data for display device
+ */
+struct rpmsg_kdrv_display_device_data {
+ /* Does the device send all vsyncs? */
+ u8 periodic_vsync;
+ /* Does the device defer the use of buffers? */
+ u8 deferred_buffer_usage;
+} __packed;
+
+/*
+ * message header for display device
+ */
+struct rpmsg_kdrv_display_message_header {
+ /* enum: rpmsg_kdrv_display_message_type */
+ u8 message_type;
+} __packed;
+
+/* display device request to provide ready / not-ready info */
+struct rpmsg_kdrv_display_ready_query_request {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+} __packed;
+
+/* display device response indicating ready / not-ready status */
+struct rpmsg_kdrv_display_ready_query_response {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* 0: not ready, 1: ready */
+ u8 ready;
+} __packed;
+
+/* display device buffer update info */
+struct rpmsg_kdrv_display_buffer_info {
+ /* buffer width */
+ u16 width;
+ /* buffer height */
+ u16 height;
+ /* enum: rpmsg_kdrv_display_format */
+ u8 format;
+ /* number of planes */
+ u8 num_planes;
+ /* per plane start addresses */
+ u64 plane[RPMSG_KDRV_TP_DISPLAY_MAX_PLANES];
+ /* per plane pitch */
+ u16 pitch[RPMSG_KDRV_TP_DISPLAY_MAX_PLANES];
+ /* buffer id : to be used in buffer-done message */
+ u32 buffer_id;
+} __packed;
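+
+/*
+ * Illustrative single-plane ARGB8888 buffer description; the values and
+ * the paddr variable are made up for the example:
+ *
+ *	struct rpmsg_kdrv_display_buffer_info buf = {
+ *		.width = 1920, .height = 1080,
+ *		.format = RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888,
+ *		.num_planes = 1,
+ *		.plane = { paddr },
+ *		.pitch = { 1920 * 4 },
+ *		.buffer_id = 1,
+ *	};
+ */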
+
+/* display device pipe update info */
+struct rpmsg_kdrv_display_vid_update_info {
+ /* pipe ID */
+ u8 id;
+ /* enable / disable request */
+ u8 enabled;
+ /* window width */
+ u16 dst_w;
+ /* window height */
+ u16 dst_h;
+ /* window position X */
+ u16 dst_x;
+ /* window position Y */
+ u16 dst_y;
+ /* buffer */
+ struct rpmsg_kdrv_display_buffer_info buffer;
+} __packed;
+
+/* display device commit request */
+struct rpmsg_kdrv_display_commit_request {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* ID of shared display */
+ u8 id;
+ /* number of pipe updates in the commit */
+ u8 num_vid_updates;
+ /* list of pipe updates */
+ struct rpmsg_kdrv_display_vid_update_info vid[RPMSG_KDRV_TP_DISPLAY_MAX_VIDS];
+ /* commit id: to be used in commit-done message */
+ u32 commit_id;
+} __packed;
+
+/* display device commit response */
+struct rpmsg_kdrv_display_commit_response {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* commit id: from commit request */
+ u32 commit_id;
+ /* status: 0 = accepted, 1 = rejected */
+ u8 status;
+} __packed;
+
+/* display device commit done message */
+struct rpmsg_kdrv_display_commit_done_message {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* commit id : from commit request */
+ u32 commit_id;
+} __packed;
+
+/* display device buffer deferred release message */
+struct rpmsg_kdrv_display_buffer_done_message {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* buffer id: from buffer_info */
+ u32 buffer_id;
+} __packed;
+
+/* display device request to provide list of shared resources */
+struct rpmsg_kdrv_display_res_info_request {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+} __packed;
+
+/* display device shared pipe */
+struct rpmsg_kdrv_display_vid_info {
+ /* pipe ID */
+ u8 id;
+ /* can the pipe window be moved/resized? */
+ u8 mutable_window;
+ /* fixed window position X, if applicable */
+ u16 fixed_window_x;
+ /* fixed window position Y, if applicable */
+ u16 fixed_window_y;
+ /* fixed window width, if applicable */
+ u16 fixed_window_w;
+ /* fixed window height, if applicable */
+ u16 fixed_window_h;
+ /* can pipe scale buffers? */
+ u8 can_scale;
+ /* number of formats supported */
+ u8 num_formats;
+ /* enum: rpmsg_kdrv_display_format */
+ u8 format[RPMSG_KDRV_TP_DISPLAY_MAX_FORMATS];
+ /* initial zorder of pipe */
+ u8 init_zorder;
+ /* number of allowed zorders */
+ u8 num_zorders;
+ /* list of allowed zorders */
+ u8 zorder[RPMSG_KDRV_TP_DISPLAY_MAX_ZORDERS];
+} __packed;
+
+/* display device shared display */
+struct rpmsg_kdrv_display_vp_info {
+ /* ID of shared display */
+ u8 id;
+ /* raster width */
+ u16 width;
+ /* raster height */
+ u16 height;
+ /* refresh rate */
+ u8 refresh;
+ /* number of pipes for this display */
+ u8 num_vids;
+ /* list of pipes */
+ struct rpmsg_kdrv_display_vid_info vid[RPMSG_KDRV_TP_DISPLAY_MAX_VIDS];
+} __packed;
+
+/* display device response providing list of shared resources */
+struct rpmsg_kdrv_display_res_info_response {
+ /* message header */
+ struct rpmsg_kdrv_display_message_header header;
+ /* number of shared displays */
+ u8 num_vps;
+ /* list of shared displays */
+ struct rpmsg_kdrv_display_vp_info vp[RPMSG_KDRV_TP_DISPLAY_MAX_VPS];
+} __packed;
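+
+/*
+ * Illustrative walk of the exported resource list (the resp pointer name
+ * is hypothetical):
+ *
+ *	for (i = 0; i < resp->num_vps; i++)
+ *		for (j = 0; j < resp->vp[i].num_vids; j++)
+ *			... use resp->vp[i].vid[j].id ...
+ */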
+
+#endif
diff --git a/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-switch.h b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-switch.h
new file mode 100644
index 000000000000..d01553064b25
--- /dev/null
+++ b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-switch.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Grygorii Strashko <grygorii.strashko@ti.com>
+ */
+
+#ifndef DRIVERS_RPMSG_KDRV_SHARED_RPMSG_KDRV_TRANSPORT_SWITCH_H_
+#define DRIVERS_RPMSG_KDRV_SHARED_RPMSG_KDRV_TRANSPORT_SWITCH_H_
+
+#include <linux/etherdevice.h>
+#include "rpmsg-kdrv-transport-common.h"
+
+#define RPMSG_KDRV_TP_ETHSWITCH_VERSION_MAJOR (0)
+#define RPMSG_KDRV_TP_ETHSWITCH_VERSION_MINOR (1)
+#define RPMSG_KDRV_TP_ETHSWITCH_VERSION_REVISION (1)
+
+/**
+ * enum rpmsg_kdrv_ethswitch_message_type - Eth switch rpmsg protocol messages
+ */
+enum rpmsg_kdrv_ethswitch_message_type {
+ RPMSG_KDRV_TP_ETHSWITCH_ATTACH = 0x00,
+ RPMSG_KDRV_TP_ETHSWITCH_ATTACH_EXT = 0x01,
+ RPMSG_KDRV_TP_ETHSWITCH_ALLOC_TX = 0x02,
+ RPMSG_KDRV_TP_ETHSWITCH_ALLOC_RX = 0x03,
+ RPMSG_KDRV_TP_ETHSWITCH_REGISTER_DEFAULTFLOW = 0x04,
+ RPMSG_KDRV_TP_ETHSWITCH_ALLOC_MAC = 0x05,
+ RPMSG_KDRV_TP_ETHSWITCH_REGISTER_MAC = 0x06,
+ RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_MAC = 0x07,
+ RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_DEFAULTFLOW = 0x08,
+ RPMSG_KDRV_TP_ETHSWITCH_FREE_MAC = 0x09,
+ RPMSG_KDRV_TP_ETHSWITCH_FREE_TX = 0x0A,
+ RPMSG_KDRV_TP_ETHSWITCH_FREE_RX = 0x0B,
+ RPMSG_KDRV_TP_ETHSWITCH_DETACH = 0x0C,
+ RPMSG_KDRV_TP_ETHSWITCH_IOCTL = 0x0D,
+ RPMSG_KDRV_TP_ETHSWITCH_REGWR = 0x0E,
+ RPMSG_KDRV_TP_ETHSWITCH_REGRD = 0x0F,
+ RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_REGISTER = 0x10,
+ RPMSG_KDRV_TP_ETHSWITCH_IPV6_MAC_REGISTER = 0x11,
+ RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_UNREGISTER = 0x12,
+ RPMSG_KDRV_TP_ETHSWITCH_IPV6_MAC_UNREGISTER = 0x13,
+ RPMSG_KDRV_TP_ETHSWITCH_PING_REQUEST = 0x14,
+ RPMSG_KDRV_TP_ETHSWITCH_S2C_NOTIFY = 0x15,
+ RPMSG_KDRV_TP_ETHSWITCH_C2S_NOTIFY = 0x16,
+ RPMSG_KDRV_TP_ETHSWITCH_REGISTER_ETHTYPE = 0x17,
+ RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_ETHTYPE = 0x18,
+ RPMSG_KDRV_TP_ETHSWITCH_REGISTER_REMOTETIMER = 0x19,
+ RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_REMOTETIMER = 0x1A,
+ RPMSG_KDRV_TP_ETHSWITCH_SET_PROMISC_MODE = 0x1B,
+ RPMSG_KDRV_TP_ETHSWITCH_FILTER_ADD_MAC = 0x1C,
+ RPMSG_KDRV_TP_ETHSWITCH_FILTER_DEL_MAC = 0x1D,
+ RPMSG_KDRV_TP_ETHSWITCH_MAX = 0x1E,
+};
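+
+/*
+ * Typical client message sequence, as implied by the per-message kernel-doc
+ * below (illustrative ordering, not enforced by this header):
+ *
+ *	ATTACH (or ATTACH_EXT) -> ALLOC_TX / ALLOC_RX / ALLOC_MAC ->
+ *	REGISTER_MAC -> ... traffic ... -> UNREGISTER_MAC -> FREE_* -> DETACH
+ */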
+
+/**
+ * enum rpmsg_kdrv_ethswitch_c2s_notify_type - Client to Eth switch
+ * notification events sent via @RPMSG_KDRV_TP_ETHSWITCH_C2S_NOTIFY
+ */
+enum rpmsg_kdrv_ethswitch_c2s_notify_type {
+ RPMSG_KDRV_TP_ETHSWITCH_CLIENTNOTIFY_DUMPSTATS = 0x00,
+ RPMSG_KDRV_TP_ETHSWITCH_CLIENTNOTIFY_MAX,
+};
+
+/**
+ * enum rpmsg_kdrv_ethswitch_cpsw_type - Eth switch HW ID
+ */
+enum rpmsg_kdrv_ethswitch_cpsw_type {
+ RPMSG_KDRV_TP_ETHSWITCH_CPSWTYPE_MCU_CPSW,
+ RPMSG_KDRV_TP_ETHSWITCH_CPSWTYPE_MAIN_CPSW,
+ RPMSG_KDRV_TP_ETHSWITCH_CPSWTYPE_MAX,
+};
+
+/**
+ * Response status codes returned by Eth switch FW in
+ * struct @rpmsg_kdrv_ethswitch_common_resp_info
+ */
+#define RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK (0)
+#define RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_EAGAIN (-1)
+#define RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_EFAIL (-2)
+#define RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_EACCESS (-3)
+
+/* Maximum length of message data */
+#define RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN (128)
+
+/* Number of priorities supported by CPSW */
+#define RPMSG_KDRV_TP_ETHSWITCH_PRIORITY_NUM (8)
+
+/* IPv4 Address length in octets */
+#define RPMSG_KDRV_TP_ETHSWITCH_IPV4ADDRLEN (4)
+
+/**
+ * struct rpmsg_kdrv_ethswitch_msg_header - Message Header for outgoing messages
+ *
+ * @message_type: Type of messages: One of
+ * enum @rpmsg_kdrv_ethswitch_message_type values
+ */
+struct rpmsg_kdrv_ethswitch_msg_header {
+ u8 message_type;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_common_req_info - common request msgs data
+ *
+ * @id: unique handle
+ * @core_key: core specific key to indicate attached core
+ *
+ * Common structure used for all Eth switch FW request msgs except
+ * @RPMSG_KDRV_TP_ETHSWITCH_ATTACH. It has to be filled with values returned
+ * by @RPMSG_KDRV_TP_ETHSWITCH_ATTACH.
+ */
+struct rpmsg_kdrv_ethswitch_common_req_info {
+ u64 id;
+ u32 core_key;
+} __packed;
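+
+/*
+ * Illustrative only: after a successful attach, every later request echoes
+ * the handle back (the req and attach_resp pointer names are hypothetical):
+ *
+ *	req->info.id = attach_resp->id;
+ *	req->info.core_key = attach_resp->core_key;
+ */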
+
+/**
+ * struct rpmsg_kdrv_ethswitch_common_resp_info - common response data
+ *
+ * @status: status of request
+ *
+ * Common data returned by Eth switch FW in all response messages to identify
+ * status of request message processing.
+ */
+struct rpmsg_kdrv_ethswitch_common_resp_info {
+ s32 status;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_attach_req - attach cmd client request msg
+ *
+ * @header: msg header
+ * @cpsw_type: CPSW HW type enum @rpmsg_kdrv_ethswitch_cpsw_type
+ *
+ * Client attach message @RPMSG_KDRV_TP_ETHSWITCH_ATTACH. It should always be
+ * sent before any other request to the Eth switch FW.
+ */
+struct rpmsg_kdrv_ethswitch_attach_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ u8 cpsw_type;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_attach_resp - attach client response msg
+ *
+ * @info: common response data. Status of the request processing
+ * @id: unique handle used by all further CMDs
+ * @core_key: core specific key to indicate attached core
+ * @rx_mtu: MTU of rx packets
+ * @tx_mtu: MTU of tx packet per priority
+ * @features: supported features mask
+ * @mac_only_port: 1-relative MAC port number for ports in MAC-only mode, 0
+ * for switch ports.
+ *
+ * Attach client response msg received as response to client attach request
+ * @RPMSG_KDRV_TP_ETHSWITCH_ATTACH. The @id and @core_key should be used to
+ * fill struct @rpmsg_kdrv_ethswitch_common_req_info in all further request
+ * messages.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_attach_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u64 id;
+ u32 core_key;
+ u32 rx_mtu;
+ u32 tx_mtu[RPMSG_KDRV_TP_ETHSWITCH_PRIORITY_NUM];
+ u32 features;
+#define RPMSG_KDRV_TP_ETHSWITCH_FEATURE_TXCSUM BIT(0)
+#define RPMSG_KDRV_ETHSWITCH_FEATURE_MAC_ONLY BIT(2)
+#define RPMSG_KDRV_ETHSWITCH_FEATURE_MC_FILTER BIT(3)
+ u32 mac_only_port;
+} __packed;
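+
+/*
+ * Illustrative feature-mask check on the attach response (the resp pointer
+ * name is hypothetical):
+ *
+ *	if (resp->features & RPMSG_KDRV_TP_ETHSWITCH_FEATURE_TXCSUM)
+ *		... enable TX checksum offload on the client side ...
+ */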
+
+/**
+ * struct rpmsg_kdrv_ethswitch_attach_extended_req - extended attach request msg
+ *
+ * @header: msg header
+ * @cpsw_type: CPSW HW type enum @rpmsg_kdrv_ethswitch_cpsw_type
+ *
+ * Client extended attach request @RPMSG_KDRV_TP_ETHSWITCH_ATTACH_EXT. It can
+ * be used instead of @RPMSG_KDRV_TP_ETHSWITCH_ATTACH and has to be sent
+ * first, before other requests to the Eth switch FW.
+ */
+struct rpmsg_kdrv_ethswitch_attach_extended_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ u8 cpsw_type;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_attach_extended_resp - extended attach resp msg
+ *
+ * @info: common response data. Status of the request processing
+ * @id: unique handle used by all further CMDs
+ * @core_key: core specific key to indicate attached core
+ * @rx_mtu: MTU of rx packets
+ * @tx_mtu: MTU of tx packet per priority
+ * @features: supported features mask
+ * @alloc_flow_idx: RX UDMA flow ID
+ * @tx_cpsw_psil_dst_id: PSI-L dest thread id
+ * @mac_address: default eth MAC address assigned to this client
+ * @mac_only_port: 1-relative MAC port number for ports in MAC-only mode, 0
+ * for switch ports.
+ *
+ * Extended attach response msg received as response to client extended attach
+ * request @RPMSG_KDRV_TP_ETHSWITCH_ATTACH_EXT. The @id and @core_key should be
+ * used to fill struct @rpmsg_kdrv_ethswitch_common_req_info in all further
+ * request messages. In addition, it provides allocated DMA resources and
+ * MAC address.
+ *
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_attach_extended_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u64 id;
+ u32 core_key;
+ u32 rx_mtu;
+ u32 tx_mtu[RPMSG_KDRV_TP_ETHSWITCH_PRIORITY_NUM];
+ u32 features;
+ u32 alloc_flow_idx;
+ u32 tx_cpsw_psil_dst_id;
+ u8 mac_address[ETH_ALEN];
+ u32 mac_only_port;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_detach_req - detach client request msg
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ *
+ * Client detach request message @RPMSG_KDRV_TP_ETHSWITCH_DETACH.
+ * It should always be sent as the last message to the Eth switch FW.
+ */
+struct rpmsg_kdrv_ethswitch_detach_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_detach_resp - detach client response msg
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Client detach response msg received as response to client detach request
+ * @RPMSG_KDRV_TP_ETHSWITCH_DETACH.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_detach_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_alloc_req - alloc resources request msg
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ *
+ * Client resources allocation request messages
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_RX: get RX DMA resources
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_TX: get TX DMA resources
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_MAC: get MAC address
+ */
+struct rpmsg_kdrv_ethswitch_alloc_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_alloc_rx_resp - alloc rx resources response msg
+ *
+ * @info: common response data. Status of the request processing
+ * @alloc_flow_idx: RX UDMA flow ID
+ *
+ * Client alloc rx resources response msg received as response to request
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_RX. The @alloc_flow_idx is RX UDMA flow ID
+ * to be used for ingress packets reception.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_alloc_rx_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u32 alloc_flow_idx;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_alloc_tx_resp - alloc tx resources response msg
+ *
+ * @info: common response data. Status of the request processing
+ * @tx_cpsw_psil_dst_id: PSI-L dest thread id
+ *
+ * Client alloc tx resources response msg received as response to request
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_TX. The @tx_cpsw_psil_dst_id is TX PSI-L dest
+ * thread ID to be used for TX UDMA channel setup.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_alloc_tx_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u32 tx_cpsw_psil_dst_id;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_alloc_mac_resp - alloc MAC resources response msg
+ *
+ * @info: common response data. Status of the request processing
+ * @mac_address: default eth MAC address assigned to this client
+ *
+ * Client alloc MAC resources response msg received as response to request
+ * @RPMSG_KDRV_TP_ETHSWITCH_ALLOC_MAC.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_alloc_mac_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u8 mac_address[ETH_ALEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_register_mac_req - register MAC addr
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ * @mac_address: eth MAC address used by client
+ * @flow_idx: RX UDMA flow ID
+ *
+ * Client register MAC addr message @RPMSG_KDRV_TP_ETHSWITCH_REGISTER_MAC.
+ * It should be sent to the Eth switch FW to configure HW network traffic
+ * classifiers so all network traffic directed to @mac_address will be
+ * redirected to this client and can be received through allocated RX UDMA flow
+ * @flow_idx.
+ *
+ * This message has to be sent by client when it's ready to receive network
+ * traffic.
+ */
+struct rpmsg_kdrv_ethswitch_register_mac_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 mac_address[ETH_ALEN];
+ u32 flow_idx;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_register_mac_resp - register MAC addr response
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Client register MAC addr response msg received as response to
+ * request @RPMSG_KDRV_TP_ETHSWITCH_REGISTER_MAC.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_register_mac_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_unregister_mac_req - unregister MAC addr
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ * @mac_address: eth MAC address used by client
+ * @flow_idx: RX UDMA flow ID
+ *
+ * Client unregister MAC addr message @RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_MAC.
+ * It should be sent to the Eth switch FW to disable HW network traffic
+ * classifiers so all network traffic directed to @mac_address will be dropped.
+ *
+ * This message has to be sent by client when it does not want to receive any
+ * more network traffic.
+ */
+struct rpmsg_kdrv_ethswitch_unregister_mac_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 mac_address[ETH_ALEN];
+ u32 flow_idx;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_unregister_mac_resp - unregister MAC addr resp
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Client unregister MAC addr response msg received as response to
+ * request @RPMSG_KDRV_TP_ETHSWITCH_UNREGISTER_MAC.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_unregister_mac_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ipv4_register_mac_req - register IPv4:MAC pair
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ * @mac_address: eth MAC address used by client
+ * @ipv4_addr: IPv4 addr
+ *
+ * Client register IPv4:MAC addr pair message
+ * @RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_REGISTER registers pair of IPv4 @ipv4_addr
+ * and Eth MAC @mac_address addresses in Eth switch FW ARP database.
+ *
+ * This message has to be sent by the client when a new IPv4 address is
+ * assigned.
+ */
+struct rpmsg_kdrv_ethswitch_ipv4_register_mac_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 mac_address[ETH_ALEN];
+ u8 ipv4_addr[RPMSG_KDRV_TP_ETHSWITCH_IPV4ADDRLEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ipv4_register_mac_resp - register IPv4:MAC pair
+ * response
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Client register IPv4:MAC addr pair response msg received as response to
+ * request @RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_REGISTER.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_ipv4_register_mac_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_req - unregister IPv4 addr
+ *
+ * @header: msg header
+ * @info: common request msgs data
+ * @ipv4_addr: IPv4 addr
+ *
+ * Client unregister IPv4 addr message
+ * @RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_UNREGISTER. It removes IPv4 @ipv4_addr
+ * address from Eth switch FW ARP database.
+ *
+ * This message has to be sent by the client when an IPv4 address is
+ * unassigned.
+ */
+struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 ipv4_addr[RPMSG_KDRV_TP_ETHSWITCH_IPV4ADDRLEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_resp - unregister IPv4 addr
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Client unregister IPv4 addr response msg received as response to
+ * request @RPMSG_KDRV_TP_ETHSWITCH_IPV4_MAC_UNREGISTER.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_ipv4_unregister_mac_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ping_req - ping request
+ *
+ * @header: msg header
+ * @data: custom data
+ *
+ * Client ping request @RPMSG_KDRV_TP_ETHSWITCH_PING_REQUEST. The Eth switch FW
+ * should return the same @data in struct @rpmsg_kdrv_ethswitch_ping_resp.
+ * Can be used any time - no attach required.
+ */
+struct rpmsg_kdrv_ethswitch_ping_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ u8 data[RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_ping_resp - ping response
+ *
+ * @data: custom data
+ *
+ * The ping response msg received as response to request
+ * @RPMSG_KDRV_TP_ETHSWITCH_PING_REQUEST. The Eth switch FW should return
+ * the same @data as was provided in struct @rpmsg_kdrv_ethswitch_ping_req.
+ */
+struct rpmsg_kdrv_ethswitch_ping_resp {
+ u8 data[RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_regrd_req - read hw register request
+ *
+ * @header: msg header
+ * @regaddr: phys register address
+ *
+ * The read hw register request @RPMSG_KDRV_TP_ETHSWITCH_REGRD.
+ * The Eth switch FW should return the @regaddr register value.
+ * Can be used any time - no attach required.
+ */
+struct rpmsg_kdrv_ethswitch_regrd_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ u32 regaddr;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_regrd_resp - read hw register response
+ *
+ * @info: common response data. Status of the request processing
+ * @regval: register value
+ *
+ * The read hw register response received as response to request
+ * @RPMSG_KDRV_TP_ETHSWITCH_REGRD. The @regval is hw register value from
+ * @regaddr phys register address provided in
+ * struct @rpmsg_kdrv_ethswitch_regrd_req
+ *
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_regrd_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+ u32 regval;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_c2s_notify - notification request
+ *
+ * @header: msg header
+ * @info: common request msg data
+ * @notifyid: enum @rpmsg_kdrv_ethswitch_c2s_notify_type
+ * @notify_info_len: length of @notify_info
+ * @notify_info: notification message data
+ *
+ * The notification request message @RPMSG_KDRV_TP_ETHSWITCH_C2S_NOTIFY is one
+ * way message to Eth switch FW without response.
+ */
+struct rpmsg_kdrv_ethswitch_c2s_notify {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 notifyid;
+ u32 notify_info_len;
+ u8 notify_info[RPMSG_KDRV_TP_ETHSWITCH_MESSAGE_DATA_LEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_set_promisc_mode_req - set promiscuous mode
+ *
+ * @header: msg header
+ * @info: common request msg data
+ * @enable: promiscuous mode (enable or disable)
+ *
+ * Client message @RPMSG_KDRV_TP_ETHSWITCH_SET_PROMISC_MODE is sent to change
+ * the promiscuous mode.
+ */
+struct rpmsg_kdrv_ethswitch_set_promisc_mode_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u32 enable;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_set_promisc_mode_resp - set promiscuous mode resp
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Set promiscuous mode response msg received as response to client's mode
+ * change request @RPMSG_KDRV_TP_ETHSWITCH_SET_PROMISC_MODE.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_set_promisc_mode_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_filter_add_mc_req - add multicast MAC address to filter
+ *
+ * @header: msg header
+ * @info: common request msg data
+ * @mac_address: Multicast address to be added
+ * @vlan_id: VLAN id
+ * @flow_idx: RX UDMA flow ID (used for multicast addresses marked as 'exclusive' in
+ * switch firmware)
+ *
+ * Client message @RPMSG_KDRV_TP_ETHSWITCH_FILTER_ADD_MAC is sent to add a multicast
+ * address to the receive filter.
+ */
+struct rpmsg_kdrv_ethswitch_filter_add_mc_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 mac_address[ETH_ALEN];
+ u16 vlan_id;
+ u32 flow_idx;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_filter_add_mc_resp - add multicast filter resp
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Response msg received as response to client's request to add a multicast
+ * address to the receive filter via @RPMSG_KDRV_TP_ETHSWITCH_FILTER_ADD_MAC.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_filter_add_mc_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_filter_del_mc_req - delete multicast MAC address
+ * from filter
+ *
+ * @header: msg header
+ * @info: common request msg data
+ * @mac_address: Multicast address to be removed
+ * @vlan_id: VLAN id
+ * @flow_idx: RX UDMA flow ID (used for multicast addresses marked as 'exclusive' in
+ * switch firmware)
+ *
+ * Client message @RPMSG_KDRV_TP_ETHSWITCH_FILTER_DEL_MAC is sent to delete a multicast
+ * address from the receive filter.
+ */
+struct rpmsg_kdrv_ethswitch_filter_del_mc_req {
+ struct rpmsg_kdrv_ethswitch_msg_header header;
+ struct rpmsg_kdrv_ethswitch_common_req_info info;
+ u8 mac_address[ETH_ALEN];
+ u16 vlan_id;
+ u32 flow_idx;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_filter_del_mc_resp - delete multicast filter resp
+ *
+ * @info: common response data. Status of the request processing
+ *
+ * Response msg received as response to client's request to delete a multicast
+ * address from the receive filter via @RPMSG_KDRV_TP_ETHSWITCH_FILTER_DEL_MAC.
+ * The @info.status field is @RPMSG_KDRV_TP_ETHSWITCH_CMDSTATUS_OK on success.
+ */
+struct rpmsg_kdrv_ethswitch_filter_del_mc_resp {
+ struct rpmsg_kdrv_ethswitch_common_resp_info info;
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_fw_version_info - fw version info
+ *
+ * @major: major
+ * @minor: minor
+ * @rev: revision
+ * @year: build year
+ * @month: build month
+ * @date: build date
+ * @commit_hash: commit hash
+ */
+struct rpmsg_kdrv_ethswitch_fw_version_info {
+#define RPMSG_KDRV_TP_ETHSWITCH_YEARLEN (4)
+#define RPMSG_KDRV_TP_ETHSWITCH_MONTHLEN (3)
+#define RPMSG_KDRV_TP_ETHSWITCH_DATELEN (2)
+#define RPMSG_KDRV_TP_ETHSWITCH_COMMITSHALEN (8)
+ u32 major;
+ u32 minor;
+ u32 rev;
+ char year[RPMSG_KDRV_TP_ETHSWITCH_YEARLEN];
+ char month[RPMSG_KDRV_TP_ETHSWITCH_MONTHLEN];
+ char date[RPMSG_KDRV_TP_ETHSWITCH_DATELEN];
+ char commit_hash[RPMSG_KDRV_TP_ETHSWITCH_COMMITSHALEN];
+} __packed;
+
+/**
+ * struct rpmsg_kdrv_ethswitch_device_data - rpmsg_kdrv_device data
+ *
+ * @fw_ver: fw version info
+ * @permission_flags: permission enabled for each
+ * enum @rpmsg_kdrv_ethswitch_message_type command
+ * @uart_connected: flag indicating if UART is connected
+ * @uart_id: UART ID used by firmware for log prints
+ *
+ * Provided as part of RPMSG KDRV device discovery protocol
+ */
+struct rpmsg_kdrv_ethswitch_device_data {
+ struct rpmsg_kdrv_ethswitch_fw_version_info fw_ver;
+ u32 permission_flags;
+ u32 uart_connected;
+ u32 uart_id;
+} __packed;
+
+#endif /* DRIVERS_RPMSG_KDRV_SHARED_RPMSG_KDRV_TRANSPORT_SWITCH_H_ */
diff --git a/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport.h b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport.h
new file mode 100644
index 000000000000..eba4d45173f7
--- /dev/null
+++ b/drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_H__
+#define __RPMSG_KDRV_TRANSPORT_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+enum rpmsg_kdrv_init_message_type {
+ RPMSG_KDRV_TP_INIT_DEV_INFO_REQUEST,
+ RPMSG_KDRV_TP_INIT_DEV_INFO_RESPONSE,
+ RPMSG_KDRV_TP_INIT_CONNECT_MESSAGE,
+ RPMSG_KDRV_TP_INIT_DISCONNECT_MESSAGE,
+ RPMSG_KDRV_TP_INIT_MAX,
+};
+
+/*
+ * message header for init device
+ */
+struct rpmsg_kdrv_init_message_header {
+ /* enum: rpmsg_kdrv_init_message_type */
+ u8 message_type;
+} __packed;
+
+/*
+ * init device request to provide list of devices
+ */
+struct rpmsg_kdrv_init_dev_info_request {
+ /* message header */
+ struct rpmsg_kdrv_init_message_header header;
+} __packed;
+
+struct rpmsg_kdrv_init_device_info {
+ /* device id */
+ u8 device_id;
+ /* device type (display, capture etc) */
+ u8 device_type;
+ /* name of device */
+ u8 device_name[RPMSG_KDRV_TP_DEVICE_NAME_LEN];
+ /* device specific info length */
+ u16 device_data_len;
+ /* per device-type info offset */
+ u16 device_data_offset;
+} __packed;
+
+/*
+ * init device response with list of devices
+ */
+struct rpmsg_kdrv_init_dev_info_response {
+ /* message header */
+ struct rpmsg_kdrv_init_message_header header;
+ /* number of exported devices */
+ u8 num_devices;
+ /* list of exported devices */
+ struct rpmsg_kdrv_init_device_info devices[RPMSG_KDRV_TP_MAX_DEVICES];
+ /* device specific data */
+ u8 device_data[0];
+} __packed;
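+
+/*
+ * Illustrative access to one device's private data blob using the offset
+ * and length fields (the resp pointer name is hypothetical):
+ *
+ *	void *data = &resp->device_data[resp->devices[i].device_data_offset];
+ *	u16 len = resp->devices[i].device_data_len;
+ */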
+
+/*
+ * init device per-device connect message
+ */
+struct rpmsg_kdrv_init_connect_message {
+ /* message header */
+ struct rpmsg_kdrv_init_message_header header;
+ /* device ID to connect */
+ u8 device_id;
+} __packed;
+
+/*
+ * init device per-device disconnect message
+ */
+struct rpmsg_kdrv_init_disconnect_message {
+ /* message header */
+ struct rpmsg_kdrv_init_message_header header;
+ /* device ID to disconnect */
+ u8 device_id;
+} __packed;
+
+#endif
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index f96716893c2a..53a372b439a3 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -64,4 +64,17 @@ config RPMSG_VIRTIO
select RPMSG
select VIRTIO
+config RPMSG_PRU
+ tristate "PRU RPMsg Communication driver"
+ depends on RPMSG_VIRTIO
+ depends on REMOTEPROC
+ depends on PRU_REMOTEPROC
+ help
+ An rpmsg driver that exposes interfaces to user space, to allow
+ applications to communicate with the PRU processors on available
+ TI SoCs. This is restricted to SoCs that have the PRUSS remoteproc
+ support.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index ffe932ef6050..30c3a006cdae 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG_PRU) += rpmsg_pru.o
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 7cbed0310c09..bd1500f4e99d 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1475,6 +1475,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
+ chinfo.desc[0] = '\0';
rpmsg_unregister_device(glink->dev, &chinfo);
}
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b5167ef93abf..4cf31e91f510 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1307,6 +1307,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
+ chinfo.desc[0] = '\0';
rpmsg_unregister_device(&edge->dev, &chinfo);
channel->registered = false;
spin_lock_irqsave(&edge->channels_lock, flags);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index be90d77c5168..5e7b9a89679c 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -137,6 +137,8 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
}
eptdev->ept = ept;
+ if (eptdev->chinfo.src == RPMSG_ADDR_ANY)
+ eptdev->chinfo.src = ept->addr;
filp->private_data = eptdev;
return 0;
@@ -433,6 +435,7 @@ static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd,
chinfo.name[RPMSG_NAME_SIZE-1] = '\0';
chinfo.src = eptinfo.src;
chinfo.dst = eptinfo.dst;
+ chinfo.desc[0] = '\0';
return rpmsg_eptdev_create(ctrldev, chinfo);
};
@@ -521,12 +524,19 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
put_device(&ctrldev->dev);
}
+static const struct rpmsg_device_id rpmsg_char_id_table[] = {
+ { .name = "rpmsg_chrdev" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_char_id_table);
+
static struct rpmsg_driver rpmsg_chrdev_driver = {
.probe = rpmsg_chrdev_probe,
.remove = rpmsg_chrdev_remove,
.drv = {
.name = "rpmsg_chrdev",
},
+ .id_table = rpmsg_char_id_table,
};
static int rpmsg_char_init(void)
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 028ca5961bc2..f607b851dce7 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -283,6 +283,27 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
}
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
+/**
+ * rpmsg_get_mtu() - get maximum transmission buffer size for sending message.
+ * @ept: the rpmsg endpoint
+ *
+ * This function returns maximum buffer size available for a single message.
+ *
+ * Return: the maximum transmission size on success and an appropriate error
+ * value on failure.
+ */
+
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->get_mtu)
+ return -EOPNOTSUPP;
+
+ return ept->ops->get_mtu(ept);
+}
+EXPORT_SYMBOL(rpmsg_get_mtu);
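+
+/*
+ * Illustrative caller-side use (sketch): query the MTU once and reject
+ * oversized payloads before attempting to send.
+ *
+ *	ssize_t mtu = rpmsg_get_mtu(ept);
+ *
+ *	if (mtu < 0 || len > mtu)
+ *		return -EMSGSIZE;
+ */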
+
/*
* match a rpmsg channel with a channel info struct.
* this is used to make sure we're not creating rpmsg devices for channels
@@ -365,6 +386,7 @@ static DEVICE_ATTR_RW(field)
/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
+rpmsg_show_attr(desc, desc, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
@@ -386,6 +408,7 @@ static DEVICE_ATTR_RO(modalias);
static struct attribute *rpmsg_dev_attrs[] = {
&dev_attr_name.attr,
+ &dev_attr_desc.attr,
&dev_attr_modalias.attr,
&dev_attr_dst.attr,
&dev_attr_src.attr,
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 3fc83cd50e98..e6f88ee90ff6 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -47,6 +47,7 @@ struct rpmsg_device_ops {
* @trysendto: see @rpmsg_trysendto(), optional
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
+ * @get_mtu: see @rpmsg_get_mtu(), optional
*
* Indirection table for the operations that a rpmsg backend should implement.
* In addition to @destroy_ept, the backend must at least implement @send and
@@ -66,6 +67,7 @@ struct rpmsg_endpoint_ops {
void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ ssize_t (*get_mtu)(struct rpmsg_endpoint *ept);
};
int rpmsg_register_device(struct rpmsg_device *rpdev);
diff --git a/drivers/rpmsg/rpmsg_pru.c b/drivers/rpmsg/rpmsg_pru.c
new file mode 100644
index 000000000000..9549dfff14ac
--- /dev/null
+++ b/drivers/rpmsg/rpmsg_pru.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU Remote Processor Messaging Driver
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com/
+ * Jason Reeder <jreeder@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#define PRU_MAX_DEVICES (16)
+/* Matches the RPMSG_BUF_SIZE definition in virtio_rpmsg_bus.c */
+#define FIFO_MSG_SIZE (512)
+#define MAX_FIFO_MSG (32)
+
+/**
+ * struct rpmsg_pru_dev - Structure that contains the per-device data
+ * @rpdev: rpmsg channel device that is associated with this rpmsg_pru device
+ * @dev: device
+ * @cdev: character device
+ * @locked: boolean used to determine whether or not the device file is in use
+ * @devt: dev_t structure for the rpmsg_pru device
+ * @msg_fifo: kernel fifo used to buffer the messages between userspace and PRU
+ * @msg_len: array storing the lengths of each message in the kernel fifo
+ * @msg_idx_rd: kernel fifo read index
+ * @msg_idx_wr: kernel fifo write index
+ * @wait_list: wait queue used to implement the poll operation of the character
+ * device
+ *
+ * Each rpmsg_pru device provides an interface, using an rpmsg channel (rpdev),
+ * between a user space character device (cdev) and a PRU core. A kernel fifo
+ * (msg_fifo) is used to buffer the messages in the kernel that are
+ * being passed between the character device and the PRU.
+ */
+struct rpmsg_pru_dev {
+ struct rpmsg_device *rpdev;
+ struct device *dev;
+ struct cdev cdev;
+ bool locked;
+ dev_t devt;
+ struct kfifo msg_fifo;
+ u32 msg_len[MAX_FIFO_MSG];
+ int msg_idx_rd;
+ int msg_idx_wr;
+ wait_queue_head_t wait_list;
+};
+
+static struct class *rpmsg_pru_class;
+static dev_t rpmsg_pru_devt;
+static DEFINE_MUTEX(rpmsg_pru_lock);
+static DEFINE_IDR(rpmsg_pru_minors);
+
+static int rpmsg_pru_open(struct inode *inode, struct file *filp)
+{
+ struct rpmsg_pru_dev *prudev;
+ int ret = -EACCES;
+
+ prudev = container_of(inode->i_cdev, struct rpmsg_pru_dev, cdev);
+
+ mutex_lock(&rpmsg_pru_lock);
+ if (!prudev->locked) {
+ prudev->locked = true;
+ filp->private_data = prudev;
+ ret = 0;
+ }
+ mutex_unlock(&rpmsg_pru_lock);
+
+ if (ret)
+ dev_err(prudev->dev, "Device already open\n");
+
+ return ret;
+}
+
+static int rpmsg_pru_release(struct inode *inode, struct file *filp)
+{
+ struct rpmsg_pru_dev *prudev;
+
+ prudev = container_of(inode->i_cdev, struct rpmsg_pru_dev, cdev);
+ mutex_lock(&rpmsg_pru_lock);
+ prudev->locked = false;
+ mutex_unlock(&rpmsg_pru_lock);
+ return 0;
+}
+
+static ssize_t rpmsg_pru_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int ret;
+ u32 length;
+ struct rpmsg_pru_dev *prudev;
+
+ prudev = filp->private_data;
+
+ if (kfifo_is_empty(&prudev->msg_fifo) &&
+ (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(prudev->wait_list,
+ !kfifo_is_empty(&prudev->msg_fifo));
+ if (ret)
+ return -EINTR;
+
+ ret = kfifo_to_user(&prudev->msg_fifo, buf,
+ prudev->msg_len[prudev->msg_idx_rd], &length);
+ prudev->msg_idx_rd = (prudev->msg_idx_rd + 1) % MAX_FIFO_MSG;
+
+ return ret ? ret : length;
+}
+
+static ssize_t rpmsg_pru_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int ret;
+ struct rpmsg_pru_dev *prudev;
+ static char rpmsg_pru_buf[FIFO_MSG_SIZE];
+ ssize_t max_payload;
+
+ prudev = filp->private_data;
+ max_payload = rpmsg_get_mtu(prudev->rpdev->ept);
+ if (max_payload < 0)
+ return max_payload;
+
+ if (count > max_payload) {
+ dev_err(prudev->dev, "Data too large for RPMsg Buffer\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(rpmsg_pru_buf, buf, count)) {
+ dev_err(prudev->dev, "Error copying buffer from user space");
+ return -EFAULT;
+ }
+
+ ret = rpmsg_send(prudev->rpdev->ept, (void *)rpmsg_pru_buf, count);
+ if (ret)
+ dev_err(prudev->dev, "rpmsg_send failed: %d\n", ret);
+
+ return ret ? ret : count;
+}
+
+static unsigned int rpmsg_pru_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ int mask;
+ struct rpmsg_pru_dev *prudev;
+
+ prudev = filp->private_data;
+
+ poll_wait(filp, &prudev->wait_list, wait);
+
+ mask = POLLOUT | POLLWRNORM;
+
+ if (!kfifo_is_empty(&prudev->msg_fifo))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static const struct file_operations rpmsg_pru_fops = {
+ .owner = THIS_MODULE,
+ .open = rpmsg_pru_open,
+ .release = rpmsg_pru_release,
+ .read = rpmsg_pru_read,
+ .write = rpmsg_pru_write,
+ .poll = rpmsg_pru_poll,
+ .llseek = noop_llseek,
+};
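+
+/*
+ * Illustrative userspace interaction with the character device; the minor
+ * suffix comes from the rpmsg channel's destination address, so "30" below
+ * is only an example:
+ *
+ *	int fd = open("/dev/rpmsg_pru30", O_RDWR);
+ *
+ *	write(fd, "hello", 5);		// forwarded to the PRU over rpmsg
+ *	read(fd, buf, sizeof(buf));	// blocks until the PRU replies
+ */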
+
+static int rpmsg_pru_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ u32 length;
+ struct rpmsg_pru_dev *prudev;
+
+ prudev = dev_get_drvdata(&rpdev->dev);
+
+ if (kfifo_avail(&prudev->msg_fifo) < len) {
+ dev_err(&rpdev->dev, "Not enough space on the FIFO\n");
+ return -ENOSPC;
+ }
+
+ if ((prudev->msg_idx_wr + 1) % MAX_FIFO_MSG ==
+ prudev->msg_idx_rd) {
+ dev_err(&rpdev->dev, "Message length table is full\n");
+ return -ENOSPC;
+ }
+
+ length = kfifo_in(&prudev->msg_fifo, data, len);
+ prudev->msg_len[prudev->msg_idx_wr] = length;
+ prudev->msg_idx_wr = (prudev->msg_idx_wr + 1) % MAX_FIFO_MSG;
+
+ wake_up_interruptible(&prudev->wait_list);
+
+ return 0;
+}
+
+static int rpmsg_pru_probe(struct rpmsg_device *rpdev)
+{
+ int ret;
+ struct rpmsg_pru_dev *prudev;
+ int minor_got;
+
+ prudev = devm_kzalloc(&rpdev->dev, sizeof(*prudev), GFP_KERNEL);
+ if (!prudev)
+ return -ENOMEM;
+
+ mutex_lock(&rpmsg_pru_lock);
+ minor_got = idr_alloc(&rpmsg_pru_minors, prudev, 0, PRU_MAX_DEVICES,
+ GFP_KERNEL);
+ mutex_unlock(&rpmsg_pru_lock);
+ if (minor_got < 0) {
+ ret = minor_got;
+ dev_err(&rpdev->dev, "Failed to get a minor number for the rpmsg_pru device: %d\n",
+ ret);
+ goto fail_alloc_minor;
+ }
+
+ prudev->devt = MKDEV(MAJOR(rpmsg_pru_devt), minor_got);
+
+ cdev_init(&prudev->cdev, &rpmsg_pru_fops);
+ prudev->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&prudev->cdev, prudev->devt, 1);
+ if (ret) {
+ dev_err(&rpdev->dev, "Unable to add cdev for the rpmsg_pru device\n");
+ goto fail_add_cdev;
+ }
+
+ prudev->dev = device_create(rpmsg_pru_class, &rpdev->dev, prudev->devt,
+ NULL, "rpmsg_pru%d", rpdev->dst);
+ if (IS_ERR(prudev->dev)) {
+ dev_err(&rpdev->dev, "Unable to create the rpmsg_pru device\n");
+ ret = PTR_ERR(prudev->dev);
+ goto fail_create_device;
+ }
+
+ prudev->rpdev = rpdev;
+
+ ret = kfifo_alloc(&prudev->msg_fifo, MAX_FIFO_MSG * FIFO_MSG_SIZE,
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(&rpdev->dev, "Unable to allocate fifo for the rpmsg_pru device\n");
+ goto fail_alloc_fifo;
+ }
+
+ init_waitqueue_head(&prudev->wait_list);
+
+ dev_set_drvdata(&rpdev->dev, prudev);
+
+ dev_info(&rpdev->dev, "new rpmsg_pru device: /dev/rpmsg_pru%d",
+ rpdev->dst);
+
+ return 0;
+
+fail_alloc_fifo:
+ device_destroy(rpmsg_pru_class, prudev->devt);
+fail_create_device:
+ cdev_del(&prudev->cdev);
+fail_add_cdev:
+ mutex_lock(&rpmsg_pru_lock);
+ idr_remove(&rpmsg_pru_minors, minor_got);
+ mutex_unlock(&rpmsg_pru_lock);
+fail_alloc_minor:
+ return ret;
+}
+
+static void rpmsg_pru_remove(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_pru_dev *prudev;
+
+ prudev = dev_get_drvdata(&rpdev->dev);
+
+ kfifo_free(&prudev->msg_fifo);
+ device_destroy(rpmsg_pru_class, prudev->devt);
+ cdev_del(&prudev->cdev);
+ mutex_lock(&rpmsg_pru_lock);
+ idr_remove(&rpmsg_pru_minors, MINOR(prudev->devt));
+ mutex_unlock(&rpmsg_pru_lock);
+}
+
+/* .name matches on RPMsg Channels and causes a probe */
+static const struct rpmsg_device_id rpmsg_driver_pru_id_table[] = {
+ { .name = "rpmsg-pru" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_pru_id_table);
+
+static struct rpmsg_driver rpmsg_pru_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .id_table = rpmsg_driver_pru_id_table,
+ .probe = rpmsg_pru_probe,
+ .callback = rpmsg_pru_cb,
+ .remove = rpmsg_pru_remove,
+};
+
+static int __init rpmsg_pru_init(void)
+{
+ int ret;
+
+ rpmsg_pru_class = class_create(THIS_MODULE, "rpmsg_pru");
+ if (IS_ERR(rpmsg_pru_class)) {
+ pr_err("Unable to create class\n");
+ ret = PTR_ERR(rpmsg_pru_class);
+ goto fail_create_class;
+ }
+
+ ret = alloc_chrdev_region(&rpmsg_pru_devt, 0, PRU_MAX_DEVICES,
+ "rpmsg_pru");
+ if (ret) {
+ pr_err("Unable to allocate chrdev region\n");
+ goto fail_alloc_region;
+ }
+
+ ret = register_rpmsg_driver(&rpmsg_pru_driver);
+ if (ret) {
+ pr_err("Unable to register rpmsg driver");
+ goto fail_register_rpmsg_driver;
+ }
+
+ return 0;
+
+fail_register_rpmsg_driver:
+ unregister_chrdev_region(rpmsg_pru_devt, PRU_MAX_DEVICES);
+fail_alloc_region:
+ class_destroy(rpmsg_pru_class);
+fail_create_class:
+ return ret;
+}
+
+static void __exit rpmsg_pru_exit(void)
+{
+ unregister_rpmsg_driver(&rpmsg_pru_driver);
+ idr_destroy(&rpmsg_pru_minors);
+ mutex_destroy(&rpmsg_pru_lock);
+ class_destroy(rpmsg_pru_class);
+ unregister_chrdev_region(rpmsg_pru_devt, PRU_MAX_DEVICES);
+}
+
+module_init(rpmsg_pru_init);
+module_exit(rpmsg_pru_exit);
+
+MODULE_AUTHOR("Jason Reeder <jreeder@ti.com>");
+MODULE_DESCRIPTION("PRU Remote Processor Messaging Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 7d7ed4e5cce7..97256d3a79be 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -112,6 +112,23 @@ struct rpmsg_ns_msg {
} __packed;
/**
+ * struct rpmsg_ns_msg_ext - dynamic name service announcement message v2
+ * @name: name of remote service that is published
+ * @desc: description of remote service
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * Interchangeable with struct rpmsg_ns_msg as a name service message;
+ * this layout adds the desc field for extra flexibility.
+ */
+struct rpmsg_ns_msg_ext {
+ char name[RPMSG_NAME_SIZE];
+ char desc[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+} __packed;
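+
+/*
+ * Sender-side sketch (illustrative, not part of this patch; "pru-id-0"
+ * is a made-up description string). A remote processor announcing a
+ * described service fills the extended layout, and the receiver tells
+ * the two layouts apart purely by message length in rpmsg_ns_cb():
+ *
+ *	struct rpmsg_ns_msg_ext nsm = {};
+ *
+ *	strscpy(nsm.name, "rpmsg-pru", RPMSG_NAME_SIZE);
+ *	strscpy(nsm.desc, "pru-id-0", RPMSG_NAME_SIZE);
+ *	nsm.addr = cpu_to_virtio32(vdev, ept_addr);
+ *	nsm.flags = cpu_to_virtio32(vdev, RPMSG_NS_CREATE);
+ */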
+
+/**
* enum rpmsg_ns_flags - dynamic name service announcement flags
*
* @RPMSG_NS_CREATE: a new remote service was just created
@@ -181,6 +198,7 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst);
static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
u32 dst, void *data, int len);
+static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.destroy_ept = virtio_rpmsg_destroy_ept,
@@ -190,6 +208,7 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.trysend = virtio_rpmsg_trysend,
.trysendto = virtio_rpmsg_trysendto,
.trysend_offchannel = virtio_rpmsg_trysend_offchannel,
+ .get_mtu = virtio_rpmsg_get_mtu,
};
/**
@@ -274,6 +293,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
goto free_ept;
}
ept->addr = id;
+ ept->cb_lockdep_class = ((ept->addr == RPMSG_NS_ADDR) ?
+ RPMSG_LOCKDEP_SUBCLASS_NS :
+ RPMSG_LOCKDEP_SUBCLASS_NORMAL);
mutex_unlock(&vrp->endpoints_lock);
@@ -314,7 +336,7 @@ __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
mutex_unlock(&vrp->endpoints_lock);
/* make sure in-flight inbound messages won't invoke cb anymore */
- mutex_lock(&ept->cb_lock);
+ mutex_lock_nested(&ept->cb_lock, ept->cb_lockdep_class);
ept->cb = NULL;
mutex_unlock(&ept->cb_lock);
@@ -390,6 +412,24 @@ static void virtio_rpmsg_release_device(struct device *dev)
kfree(vch);
}
+static int virtio_rpmsg_desc_match(struct device *dev, void *data)
+{
+ struct rpmsg_channel_info *chinfo = data;
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+
+ if (!*chinfo->desc)
+ return 0;
+
+ if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
+ return 0;
+
+ if (strncmp(chinfo->desc, rpdev->desc, RPMSG_NAME_SIZE))
+ return 0;
+
+ /* found a match! */
+ return 1;
+}
+
/*
* create an rpmsg channel using its name and address info.
* this function will be used to create both static and dynamic
@@ -413,6 +453,15 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
return NULL;
}
+ tmp = device_find_child(dev, chinfo, virtio_rpmsg_desc_match);
+ if (tmp) {
+ /* decrement the matched device's refcount back */
+ put_device(tmp);
+ dev_err(dev, "channel %s:%x:%x failed, desc '%s' already exists\n",
+ chinfo->name, chinfo->src, chinfo->dst, chinfo->desc);
+ return NULL;
+ }
+
vch = kzalloc(sizeof(*vch), GFP_KERNEL);
if (!vch)
return NULL;
@@ -425,6 +474,7 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
rpdev->src = chinfo->src;
rpdev->dst = chinfo->dst;
rpdev->ops = &virtio_rpmsg_ops;
+ strncpy(rpdev->desc, chinfo->desc, RPMSG_NAME_SIZE);
/*
* rpmsg server channels has predefined local address (for now),
@@ -705,6 +755,14 @@ static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}
+static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ struct rpmsg_device *rpdev = ept->rpdev;
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
+ return vch->vrp->buf_size - sizeof(struct rpmsg_hdr);
+}
+
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
struct rpmsg_hdr *msg, unsigned int len)
{
@@ -746,7 +804,7 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
if (ept) {
/* make sure ept->cb doesn't go away while we use it */
- mutex_lock(&ept->cb_lock);
+ mutex_lock_nested(&ept->cb_lock, ept->cb_lockdep_class);
if (ept->cb)
ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
@@ -826,18 +884,30 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
void *priv, u32 src)
{
struct rpmsg_ns_msg *msg = data;
+ struct rpmsg_ns_msg_ext *msg_ext = data;
struct rpmsg_device *newch;
struct rpmsg_channel_info chinfo;
struct virtproc_info *vrp = priv;
struct device *dev = &vrp->vdev->dev;
int ret;
+ u32 addr;
+ u32 flags;
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
data, len, true);
#endif
- if (len != sizeof(*msg)) {
+ if (len == sizeof(*msg)) {
+ addr = virtio32_to_cpu(vrp->vdev, msg->addr);
+ flags = virtio32_to_cpu(vrp->vdev, msg->flags);
+ chinfo.desc[0] = '\0';
+ } else if (len == sizeof(*msg_ext)) {
+ addr = virtio32_to_cpu(vrp->vdev, msg_ext->addr);
+ flags = virtio32_to_cpu(vrp->vdev, msg_ext->flags);
+ msg_ext->desc[RPMSG_NAME_SIZE - 1] = '\0';
+ strncpy(chinfo.desc, msg_ext->desc, sizeof(chinfo.desc));
+ } else {
dev_err(dev, "malformed ns msg (%d)\n", len);
return -EINVAL;
}
@@ -858,13 +928,13 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
+ chinfo.dst = addr;
dev_info(dev, "%sing channel %s addr 0x%x\n",
- virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
+ flags & RPMSG_NS_DESTROY ?
"destroy" : "creat", msg->name, chinfo.dst);
- if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
+ if (flags & RPMSG_NS_DESTROY) {
ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
if (ret)
dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 54cf5ec8f401..cab616badd26 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -573,6 +573,16 @@ config RTC_DRV_TPS65910
This driver can also be built as a module. If so, the module
will be called rtc-tps65910.
+config RTC_DRV_TPS6594X
+ tristate "TI TPS6594X RTC driver"
+ depends on MFD_TPS6594X
+ help
+ If you say yes here you get support for the RTC of TI TPS6594X series PMIC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-tps6594x.
+
config RTC_DRV_TPS80031
tristate "TI TPS80031/TPS80032 RTC driver"
depends on MFD_TPS80031
@@ -1920,6 +1930,17 @@ config RTC_DRV_ASPEED
This driver can also be built as a module, if so, the module
will be called "rtc-aspeed".
+config RTC_DRV_TI_K3
+ tristate "TI K3 RTC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments
+ Real Time Clock for the K3 architecture.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-ti-k3".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index bfb57464118d..5d131be4758b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -168,8 +168,10 @@ obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
+obj-$(CONFIG_RTC_DRV_TPS6594X) += rtc-tps6594x.o
obj-$(CONFIG_RTC_DRV_TPS80031) += rtc-tps80031.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
new file mode 100644
index 000000000000..c2c4ec71abcc
--- /dev/null
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments K3 RTC driver
+ *
+ * Copyright (C) 2021-2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/sys_soc.h>
+
+/* Registers */
+#define REG_K3RTC_S_CNT_LSW 0x08
+#define REG_K3RTC_S_CNT_MSW 0x0c
+#define REG_K3RTC_COMP 0x10
+#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20
+#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24
+#define REG_K3RTC_SCRATCH0 0x30
+#define REG_K3RTC_SCRATCH7 0x4c
+#define REG_K3RTC_GENERAL_CTL 0x50
+#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54
+#define REG_K3RTC_IRQSTATUS_SYS 0x58
+#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c
+#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60
+#define REG_K3RTC_SYNCPEND 0x68
+#define REG_K3RTC_KICK0 0x70
+#define REG_K3RTC_KICK1 0x74
+
+/* Freeze when lsw is read and unfreeze when msw is read */
+#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24)
+
+/* Magic values for lock/unlock */
+#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13
+#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0
+
+/* Multiplier for ppb conversions */
+#define K3RTC_PPB_MULT (1000000000LL)
+/* Min and max values supported with 'offset' interface (swapped sign) */
+#define K3RTC_MIN_OFFSET (-277761)
+#define K3RTC_MAX_OFFSET (277778)
+
+static const struct regmap_config ti_k3_rtc_regmap_config = {
+ .name = "peripheral-registers",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_K3RTC_KICK1,
+};
+
+enum ti_k3_rtc_fields {
+ K3RTC_KICK0,
+ K3RTC_KICK1,
+ K3RTC_S_CNT_LSW,
+ K3RTC_S_CNT_MSW,
+ K3RTC_O32K_OSC_DEP_EN,
+ K3RTC_UNLOCK,
+ K3RTC_CNT_FMODE,
+ K3RTC_PEND,
+ K3RTC_RELOAD_FROM_BBD,
+ K3RTC_COMP,
+
+ K3RTC_ALM_S_CNT_LSW,
+ K3RTC_ALM_S_CNT_MSW,
+ K3RTC_IRQ_STATUS_RAW,
+ K3RTC_IRQ_STATUS,
+ K3RTC_IRQ_ENABLE_SET,
+ K3RTC_IRQ_ENABLE_CLR,
+
+ K3RTC_IRQ_STATUS_ALT,
+ K3RTC_IRQ_ENABLE_CLR_ALT,
+
+ K3_RTC_MAX_FIELDS
+};
+
+static struct reg_field ti_rtc_reg_fields[] = {
+ [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31),
+ [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31),
+ [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31),
+ [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15),
+ [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21),
+ [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23),
+ [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25),
+ [K3RTC_PEND] = REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+ [K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+ [K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+ /* We use the ON-to-OFF transition as the alarm trigger */
+ [K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+ [K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+ [K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+ [K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+ /* The OFF-to-ON transition is the alternate */
+ [K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+ [K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in uSec
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ */
+struct ti_k3_rtc {
+ unsigned int irq;
+ u32 sync_timeout_us;
+ unsigned long rate_32k;
+ struct rtc_device *rtc_dev;
+ struct regmap *regmap;
+ struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+ int ret;
+ int val;
+
+ ret = regmap_field_read(priv->r_fields[f], &val);
+ /*
+ * We shouldn't be seeing regmap fail on us for MMIO reads.
+ * This is possible if the clk context fails, but that isn't the case
+ * for us.
+ */
+ if (WARN_ON_ONCE(ret))
+ return ret;
+ return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+ regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+ if (ret < 0)
+ return ret;
+
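+ /* Return 1 when still locked, 0 when unlocked */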
+ return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_check_unlocked(priv);
+ if (!ret)
+ return ret;
+
+ k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+ k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+ /* Skip fence since we are going to check the unlock bit as fence */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+ ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+/*
+ * This is the list of SoCs affected by TI's i2327 errata causing the RTC
+ * state-machine to break if not unlocked fast enough during boot. These
+ * SoCs must have the bootloader unlock this device very early in the
+ * boot-flow before we (Linux) can use this device.
+ */
+static const struct soc_device_attribute has_erratum_i2327[] = {
+ { .family = "AM62X", .revision = "SR1.0" },
+ { /* sentinel */ }
+};
+
+static int k3rtc_configure(struct device *dev)
+{
+ int ret;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ /*
+ * HWBUG: The compare state machine is broken if the RTC module
+ * is NOT unlocked within one second of boot - which is a pretty long
+ * time from the perspective of a Linux driver (module load, a u-boot
+ * shell, etc. can all take much longer than this).
+ *
+ * In such an occurrence, the RTC module is assumed to be unusable.
+ */
+ if (soc_device_match(has_erratum_i2327)) {
+ ret = k3rtc_check_unlocked(priv);
+ /* If there is an error OR if we are locked, return error */
+ if (ret) {
+ dev_err(dev,
+ HW_ERR "Erratum i2327 unlock QUIRK! Cannot operate!!\n");
+ return -EFAULT;
+ }
+ } else {
+ /* May need to explicitly unlock first time */
+ ret = k3rtc_unlock_rtc(priv);
+ if (ret) {
+ dev_err(dev, "Failed to unlock(%d)!\n", ret);
+ return ret;
+ }
+ }
+
+ /* Enable Shadow register sync on 32k clk boundary */
+ k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1);
+
+ /*
+ * Wait at least one clk sync period before proceeding with further
+ * programming. This ensures that the 32k-based sync is active.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5);
+
+ /* Fence here to make sure the sync above went through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret);
+ return ret;
+ }
+
+ /*
+ * FMODE setting: Reading the lower seconds will freeze the value of
+ * the higher seconds. This also implies that we must *ALWAYS* read the
+ * lower seconds prior to reading the higher seconds.
+ */
+ k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE);
+
+ /* Clear any spurious IRQ sources */
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1);
+ /* Disable all IRQs */
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1);
+
+ /* Finally, sync the writes in */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm);
+
+ return 0;
+}
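+
+/*
+ * Numerical sketch (illustrative): with seconds_hi = 0x1 and
+ * seconds_lo = 0x2, the value handed to rtc_time64_to_tm() is
+ * (1ULL << 32) | 2 = 4294967298 seconds. The full 48-bit counter spans
+ * 2^48 - 1 seconds (~8.9 million years), matching range_max in probe.
+ */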
+
+static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+
+ seconds = rtc_tm_to_time64(tm);
+
+ /*
+ * A read operation on the LSW will freeze the RTC, so to update
+ * the time, we cannot use field operations. Just write, since the
+ * reserved bits are ignored.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds);
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32);
+
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR;
+
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+ if ((enabled && reg) || (!enabled && !reg))
+ return 0;
+
+ k3rtc_field_write(priv, offset, 0x1);
+
+ /*
+ * Ensure the write sync went through - NOTE: it should be OK for the
+ * ISR to fire while we are checking the sync (which should complete
+ * within a 32k cycle or so).
+ */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time);
+
+ alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+ int ret;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds);
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32));
+
+ /* Make sure the alarm time is synced in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence(%d)!\n", ret);
+ return ret;
+ }
+
+ /* Alarm irq enable will do a sync */
+ return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static int ti_k3_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ comp = k3rtc_field_read(priv, K3RTC_COMP);
+
+ /* Convert from RTC calibration register format to ppb format */
+ tmp = comp * (s64)K3RTC_PPB_MULT;
+ if (tmp < 0)
+ tmp -= ticks_per_hr / 2LL;
+ else
+ tmp += ticks_per_hr / 2LL;
+ tmp = div_s64(tmp, ticks_per_hr);
+
+ /* Offset value operates in negative way, so swap sign */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_offset(struct device *dev, long offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ /* Make sure offset value is within supported range */
+ if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Convert from ppb format to RTC calibration register format */
+ tmp = offset * (s64)ticks_per_hr;
+ if (tmp < 0)
+ tmp -= K3RTC_PPB_MULT / 2LL;
+ else
+ tmp += K3RTC_PPB_MULT / 2LL;
+ tmp = div_s64(tmp, K3RTC_PPB_MULT);
+
+ /* Offset value operates in negative way, so swap sign */
+ comp = (int)-tmp;
+
+ k3rtc_field_write(priv, K3RTC_COMP, comp);
+
+ return k3rtc_fence(priv);
+}
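+
+/*
+ * Worked conversion (illustrative, assuming an exact 32768 Hz clock):
+ * requesting offset = +10000 ppb gives
+ * ticks_per_hr = 32768 * 3600 = 117964800, so
+ *
+ *	tmp  = 10000 * 117964800 + K3RTC_PPB_MULT / 2 = 1180148000000
+ *	tmp  = div_s64(tmp, K3RTC_PPB_MULT)           = 1180
+ *	comp = -1180
+ *
+ * Reading back via ti_k3_rtc_read_offset() yields ~10003 ppb; the
+ * round-trip error stays within one compensation tick per hour.
+ */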
+
+static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ /*
+ * IRQ assertion can be very fast; however, clearing the IRQ status
+ * and its de-assertion depend on a 32k clock edge in the 32k domain.
+ * If we clear the status prior to the first 32k clock edge,
+ * the status bit is cleared, but the IRQ stays asserted.
+ *
+ * To prevent this condition, we need to wait for the clk sync time.
+ * We can either do that by polling the 32k observability signal for
+ * a toggle OR we could just sleep and let the processor do other
+ * stuff.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2);
+
+ /* Let's make sure that this is a valid interrupt */
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS);
+
+ if (!reg) {
+ u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW);
+
+ dev_err(dev,
+ HW_ERR
+ "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Write 1 to clear status reg
+ * We cannot use a field operation here due to a potential race between
+ * 32k domain and vbus domain.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1);
+
+ /* Sync the write in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Force the 32k status to be reloaded back in to ensure the status is
+ * reflected correctly.
+ */
+ k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1);
+
+ /* Ensure the write sync is through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Now we ensure that the status bit is cleared */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS],
+ ret, !ret, 2, priv->sync_timeout_us);
+ if (ret) {
+ dev_err(dev, "Time out waiting for status clear\n");
+ return IRQ_NONE;
+ }
+
+ /* Notify RTC core on event */
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ti_k3_rtc_ops = {
+ .read_time = ti_k3_rtc_read_time,
+ .set_time = ti_k3_rtc_set_time,
+ .read_alarm = ti_k3_rtc_read_alarm,
+ .set_alarm = ti_k3_rtc_set_alarm,
+ .read_offset = ti_k3_rtc_read_offset,
+ .set_offset = ti_k3_rtc_set_offset,
+ .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable,
+};
+
+static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+
+ return ret;
+}
+
+static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+ if (ret)
+ return ret;
+
+ return k3rtc_fence(priv);
+}
+
+static struct nvmem_config ti_k3_rtc_nvmem_config = {
+ .name = "ti_k3_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4,
+ .reg_read = ti_k3_rtc_scratch_read,
+ .reg_write = ti_k3_rtc_scratch_write,
+};
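+
+/*
+ * The eight scratch registers (SCRATCH0..SCRATCH7, 32 bytes) are
+ * exposed through the standard nvmem sysfs ABI. A sketch of userspace
+ * access (the device index may differ on a real system):
+ *
+ *	# hexdump -C /sys/bus/nvmem/devices/ti_k3_rtc_scratch0/nvmem
+ */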
+
+static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, "osc32k");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No input reference 32k clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the reference 32k clock(%d)\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ priv->rate_32k = clk_get_rate(clk);
+
+ /*
+ * Make sure we have an exact 32k clock; else warn, and compensate the
+ * sync delay below using the actual rate.
+ */
+ if (priv->rate_32k != 32768)
+ dev_warn(dev, "Clock rate %lu is not 32768! Could misbehave!\n",
+ priv->rate_32k);
+
+ /*
+ * The sync timeout should be two 32k clk sync cycles = ~61uS. We
+ * double it to accommodate intermediate bus segment and CPU frequency
+ * deltas.
+ */
+ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
+
+ return ret;
+}
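+
+/*
+ * Back-of-envelope check (illustrative): at exactly 32768 Hz,
+ * DIV_ROUND_UP_ULL(1000000, 32768) = 31 us per 32k cycle, so
+ * sync_timeout_us = 31 * 4 = 124 us - roughly double the ~61 us that
+ * two sync cycles strictly require.
+ */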
+
+static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ /* Note: VBUS isn't a context clock; it is needed for hardware operation */
+ clk = devm_clk_get(dev, "vbus");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No input vbus clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable the vbus clock(%d)\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ return ret;
+}
+
+static int ti_k3_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_k3_rtc *priv;
+ void __iomem *rtc_base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc_base))
+ return PTR_ERR(rtc_base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields,
+ ti_rtc_reg_fields, K3_RTC_MAX_FIELDS);
+ if (ret)
+ return ret;
+
+ ret = k3rtc_get_32kclk(dev, priv);
+ if (ret)
+ return ret;
+ ret = k3rtc_get_vbusclk(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ priv->irq = (unsigned int)ret;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->rtc_dev->ops = &ti_k3_rtc_ops;
+ priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48-bit seconds */
+ ti_k3_rtc_nvmem_config.priv = priv;
+
+ ret = devm_request_threaded_irq(dev, priv->irq, NULL,
+ ti_k3_rtc_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = k3rtc_configure(dev);
+ if (ret)
+ return ret;
+
+ if (device_property_present(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+ else
+ device_set_wakeup_capable(dev, true);
+
+ ret = rtc_register_device(priv->rtc_dev);
+ if (ret)
+ return ret;
+
+ ret = rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
+ return ret;
+}
+
+static const struct of_device_id ti_k3_rtc_of_match_table[] = {
+ {.compatible = "ti,am62-rtc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
+
+static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+static int __maybe_unused ti_k3_rtc_resume(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume);
+
+static struct platform_driver ti_k3_rtc_driver = {
+ .probe = ti_k3_rtc_probe,
+ .driver = {
+ .name = "rtc-ti-k3",
+ .of_match_table = ti_k3_rtc_of_match_table,
+ .pm = &ti_k3_rtc_pm_ops,
+ },
+};
+module_platform_driver(ti_k3_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 RTC driver");
+MODULE_AUTHOR("Nishanth Menon");
diff --git a/drivers/rtc/rtc-tps6594x.c b/drivers/rtc/rtc-tps6594x.c
new file mode 100644
index 000000000000..e9f904d0a769
--- /dev/null
+++ b/drivers/rtc/rtc-tps6594x.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtc-tps6594x.c -- TPS6594x Real Time Clock driver.
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * TODO: alarm support
+ */
+
+#include <linux/bcd.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6594x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct tps6594x_rtc {
+ struct rtc_device *rtc;
+ struct device *dev;
+};
+
+#define TPS6594X_NUM_TIME_REGS (TPS6594X_RTC_YEARS - TPS6594X_RTC_SECONDS + 1)
+
+static int tps6594x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[TPS6594X_NUM_TIME_REGS];
+ struct tps6594x *tps6594x = dev_get_drvdata(dev->parent);
+ int ret;
+
+ /* Reset TPS6594X_RTC_CTRL_REG_GET_TIME bit to zero, required for latch */
+ ret = regmap_update_bits(tps6594x->regmap, TPS6594X_RTC_CTRL_1,
+ TPS6594X_RTC_CTRL_REG_GET_TIME, 0);
+ if (ret < 0) {
+ dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret);
+ return ret;
+ }
+
+ /* Copy RTC counting registers to static registers or latches */
+ ret = regmap_update_bits(tps6594x->regmap, TPS6594X_RTC_CTRL_1,
+ TPS6594X_RTC_CTRL_REG_GET_TIME, TPS6594X_RTC_CTRL_REG_GET_TIME);
+ if (ret < 0) {
+ dev_err(dev, "RTC CTRL reg update failed, err: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_read(tps6594x->regmap, TPS6594X_RTC_SECONDS,
+ rtc_data, TPS6594X_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_mday = bcd2bin(rtc_data[3]);
+ tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+
+ return ret;
+}
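+
+/*
+ * Decoding example (illustrative): the PMIC keeps time in BCD, so a
+ * raw TPS6594X_RTC_YEARS read of 0x22 decodes as bcd2bin(0x22) = 22
+ * and tm_year = 22 + 100 = 122, i.e. calendar year 2022.
+ */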
+
+static int tps6594x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[TPS6594X_NUM_TIME_REGS];
+ struct tps6594x *tps6594x = dev_get_drvdata(dev->parent);
+ int ret, retries = 5;
+ unsigned int val;
+
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
+
+ /* Stop RTC while updating the RTC time registers */
+ ret = regmap_update_bits(tps6594x->regmap, TPS6594X_RTC_CTRL_1,
+ TPS6594X_RTC_CTRL_REG_STOP_RTC, 0);
+ if (ret < 0) {
+ dev_err(dev, "RTC stop failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Wait until the RTC stops running */
+ do {
+ ret = regmap_read(tps6594x->regmap, TPS6594X_RTC_STATUS, &val);
+ if (ret < 0) {
+ dev_err(dev, "RTC_STATUS reg read failed, err = %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+ } while (--retries && (val & TPS6594X_RTC_STATUS_RUN));
+
+ if (!retries) {
+ dev_err(dev, "RTC_STATUS is still RUNNING\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = regmap_bulk_write(tps6594x->regmap, TPS6594X_RTC_SECONDS,
+ rtc_data, TPS6594X_NUM_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "RTC_SECONDS reg write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Start the RTC back up */
+ ret = regmap_update_bits(tps6594x->regmap, TPS6594X_RTC_CTRL_1,
+ TPS6594X_RTC_CTRL_REG_STOP_RTC,
+ TPS6594X_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0)
+ dev_err(dev, "RTC start failed, err = %d\n", ret);
+
+ return ret;
+}
+
+static const struct rtc_class_ops tps6594x_rtc_ops = {
+ .read_time = tps6594x_rtc_read_time,
+ .set_time = tps6594x_rtc_set_time,
+};
+
+static int tps6594x_rtc_probe(struct platform_device *pdev)
+{
+ struct tps6594x *tps6594x = dev_get_drvdata(pdev->dev.parent);
+ struct tps6594x_rtc *tps6594x_rtc = NULL;
+ int ret;
+
+ tps6594x_rtc = devm_kzalloc(&pdev->dev, sizeof(struct tps6594x_rtc), GFP_KERNEL);
+ if (!tps6594x_rtc)
+ return -ENOMEM;
+
+ tps6594x_rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, tps6594x_rtc);
+
+ /* Start RTC */
+ ret = regmap_update_bits(tps6594x->regmap, TPS6594X_RTC_CTRL_1,
+ TPS6594X_RTC_CTRL_REG_STOP_RTC,
+ TPS6594X_RTC_CTRL_REG_STOP_RTC);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RTC_CTRL write failed, err = %d\n", ret);
+ return ret;
+ }
+
+ tps6594x_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &tps6594x_rtc_ops, THIS_MODULE);
+ if (IS_ERR(tps6594x_rtc->rtc)) {
+ ret = PTR_ERR(tps6594x_rtc->rtc);
+ dev_err(&pdev->dev, "RTC register failed, err = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tps6594x_rtc_match[] = {
+ { .compatible = "ti,tps6594x-rtc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_tps6594x_rtc_match);
+#endif
+
+static struct platform_driver tps6594x_rtc_driver = {
+ .probe = tps6594x_rtc_probe,
+ .driver = {
+ .name = "tps6594x-rtc",
+ .of_match_table = of_match_ptr(of_tps6594x_rtc_match),
+ },
+};
+
+module_platform_driver(tps6594x_rtc_driver);
+
+MODULE_ALIAS("platform:tps6594x_rtc");
+MODULE_DESCRIPTION("TI TPS6594x series RTC driver");
+MODULE_AUTHOR("Keerthy J <j-keerthy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index e5c68051fb17..ad9f28dc13f1 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -276,6 +276,7 @@ struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rp
strscpy(chinfo.name, name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
+ chinfo.desc[0] = '\0';
return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
}
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f5b82ffa637b..fe4a1bb292ea 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-# 64-bit ARM SoCs from TI
-if ARM64
-
-if ARCH_K3
-
-config ARCH_K3_AM6_SOC
- bool "K3 AM6 SoC"
- help
- Enable support for TI's AM6 SoC Family support
-
-config ARCH_K3_J721E_SOC
- bool "K3 J721E SoC"
- help
- Enable support for TI's J721E SoC Family support
-
-endif
-
-endif
#
# TI SOC drivers
@@ -26,6 +8,17 @@ menuconfig SOC_TI
if SOC_TI
+config KEYSTONE_DSP_MEM
+ tristate "TI Keystone DSP Memory Mapping Driver"
+ depends on ARCH_KEYSTONE
+ help
+ Userspace memory mapping interface driver for TI Keystone SoCs.
+ Provides access to MSM SRAM memory regions and dedicated DDR
+ carveout memory regions to user space to aid userspace loading
+ of the DSPs within the SoC.
+
+ If unsure, say N.
+
config KEYSTONE_NAVIGATOR_QMSS
tristate "Keystone Queue Manager Sub System"
depends on ARCH_KEYSTONE
@@ -81,7 +74,7 @@ config TI_SCI_PM_DOMAINS
rootfs may be available.
config TI_K3_RINGACC
- bool "K3 Ring accelerator Sub System"
+ tristate "K3 Ring accelerator Sub System"
depends on ARCH_K3 || COMPILE_TEST
depends on TI_SCI_INTA_IRQCHIP
help
@@ -112,6 +105,17 @@ config TI_PRUSS
processors on various TI SoCs. It's safe to say N here if you're
not interested in the PRU or if you are unsure.
+config TI_PAT
+ tristate "TI PAT DMA-BUF exporter"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_DMA_ADDR_T_64BIT
+ select REGMAP
+ help
+ Driver for the TI Page-based Address Translator (PAT). This driver
+ provides an API allowing the remapping of a non-contiguous
+ DMA-BUF into a contiguous one that is suitable for devices needing
+ contiguous memory.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index cc3c972fad2e..3cd6331328a4 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -2,6 +2,7 @@
#
# TI Keystone SOC drivers
#
+obj-$(CONFIG_KEYSTONE_DSP_MEM) += keystone_dsp_mem.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
@@ -14,3 +15,4 @@ obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
obj-$(CONFIG_TI_PRUSS) += pruss.o
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_TI_PAT) += ti-pat.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 1147dc4c1d59..a5bf90646337 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -7,10 +7,12 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -21,6 +23,7 @@ static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
/**
* struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
@@ -43,7 +46,13 @@ struct k3_ring_rt_regs {
u32 hwindx;
};
-#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
/**
* struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
@@ -122,6 +131,7 @@ struct k3_ring_state {
u32 occ;
u32 windex;
u32 rindex;
+ u32 tdown_complete:1;
};
/**
@@ -137,10 +147,13 @@ struct k3_ring_state {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
+ * @state: Ring state
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
* @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring {
struct k3_ring_rt_regs __iomem *rt;
@@ -155,11 +168,15 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
};
struct k3_ringacc_ops {
@@ -185,6 +202,7 @@ struct k3_ringacc_ops {
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
* @ops: SoC specific ringacc operation
+ * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
*/
struct k3_ringacc {
struct device *dev;
@@ -207,6 +225,7 @@ struct k3_ringacc {
u32 tisci_dev_id;
const struct k3_ringacc_ops *ops;
+ bool dma_rings;
};
/**
@@ -218,6 +237,21 @@ struct k3_ringacc_soc_data {
unsigned dma_ring_reset_quirk:1;
};
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -231,12 +265,24 @@ static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
static struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
@@ -290,6 +336,9 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
mutex_lock(&ringacc->req_lock);
+ if (!try_module_get(ringacc->dev->driver->owner))
+ goto err_module_get;
+
if (id == K3_RINGACC_RING_ID_ANY) {
/* Request for any general purpose ring */
struct ti_sci_resource_desc *gp_rings =
@@ -334,11 +383,48 @@ out:
return &ringacc->rings[id];
error:
+ module_put(ringacc->dev->driver->owner);
+
+err_module_get:
mutex_unlock(&ringacc->req_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+ * DMA rings must be requested by ID; the completion ring is the
+ * reverse side of the forward ring.
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
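+
+/*
+ * Indexing example (illustrative): with num_rings = 16 and fwd_id = 5,
+ * the completion (reverse) ring is rings[5 + 16] = rings[21], i.e. the
+ * second half of the rings[] array set up in k3_ringacc_dmarings_init().
+ */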
+
int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
int fwd_id, int compl_id,
struct k3_ring **fwd_ring,
@@ -349,6 +435,10 @@ int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
if (!fwd_ring || !compl_ring)
return -EINVAL;
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
if (!(*fwd_ring))
return -ENODEV;
@@ -365,20 +455,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- ring->size,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -398,20 +484,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
enum k3_ring_mode mode)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- mode,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -426,7 +508,7 @@ void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
goto reset;
if (!occ)
- occ = readl(&ring->rt->occ);
+ occ = k3_ringacc_ring_read_occ(ring);
if (occ) {
u32 db_ring_cnt, db_ring_cnt_cur;
@@ -478,20 +560,15 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -506,6 +583,13 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
ringacc = ring->parent;
+ /*
+ * DMA rings: the rings share memory and configuration; only the
+ * forward ring is configured, and the reverse ring is considered a
+ * slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
if (!test_bit(ring->ring_id, ringacc->rings_inuse))
@@ -521,11 +605,14 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
k3_ringacc_ring_free_sci(ring);
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt, ring->ring_mem_dma);
ring->flags = 0;
ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
clear_bit(ring->proxy_id, ringacc->proxy_inuse);
ring->proxy = NULL;
@@ -535,6 +622,8 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
no_init:
clear_bit(ring->ring_id, ringacc->rings_inuse);
+ module_put(ringacc->dev->driver->owner);
+
out:
mutex_unlock(&ringacc->req_lock);
return 0;
@@ -575,29 +664,112 @@ EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
- u32 ring_idx;
int ret;
if (!ringacc->tisci)
return -EINVAL;
- ring_idx = ring->ring_id;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring_idx,
- lower_32_bits(ring->ring_mem_dma),
- upper_32_bits(ring->ring_mem_dma),
- ring->size,
- ring->mode,
- ring->elm_size,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
- ret, ring_idx);
+ ret, ring->ring_id);
+
+ return ret;
+}
+
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the rings share memory and configuration; only the
+ * forward ring is configured, and the reverse ring is considered a
+ * slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
return ret;
}
@@ -608,8 +780,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
if (!ring || !cfg)
return -EINVAL;
+
ringacc = ring->parent;
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -648,8 +824,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
switch (ring->mode) {
case K3_RINGACC_RING_MODE_RING:
ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
break;
case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
if (ring->proxy)
ring->ops = &k3_ring_mode_proxy_ops;
else
@@ -661,9 +841,9 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
goto err_free_proxy;
}
- ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
- ring->size * (4 << ring->elm_size),
- &ring->ring_mem_dma, GFP_KERNEL);
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
if (!ring->ring_mem_virt) {
dev_err(ringacc->dev, "Failed to alloc ring mem\n");
ret = -ENOMEM;
@@ -684,12 +864,13 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
return 0;
err_free_mem:
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt,
ring->ring_mem_dma);
err_free_ops:
ring->ops = NULL;
+ ring->dma_dev = NULL;
err_free_proxy:
ring->proxy = NULL;
return ret;
@@ -711,7 +892,7 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
return -EINVAL;
if (!ring->state.free)
- ring->state.free = ring->size - readl(&ring->rt->occ);
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
return ring->state.free;
}
@@ -722,7 +903,7 @@ u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- return readl(&ring->rt->occ);
+ return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
@@ -898,6 +1079,72 @@ static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
+/*
+ * The element is 48 bits of address + ASEL bits in the ring.
+ * ASEL is used by the DMAs and should be removed for the kernel as it is not
+ * part of the physical memory address.
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
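+
+/*
+ * Masking example (illustrative): with asel = 0xF and a buffer at
+ * physical address 0x82000000, the element stored in the ring reads
+ * 0x000f000082000000; masking with GENMASK_ULL(47, 0) restores the
+ * plain 0x82000000 CPU-visible address on pop.
+ */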
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+ * DMA rings: the forward ring is always tied to a DMA channel, and HW
+ * does not maintain any state data required for a POP operation; it is
+ * unknown how many elements were consumed by HW. So, to actually
+ * do a POP, the read pointer has to be recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
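+
+/*
+ * Index recalculation example (illustrative): with size = 16,
+ * windex = 3 and an occupancy of 5, windex < occ, so the oldest
+ * un-popped element sits at elem_idx = 16 - (5 - 3) = 14, wrapping
+ * around the end of the ring.
+ */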
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
@@ -905,6 +1152,11 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
ring->state.windex = (ring->state.windex + 1) % ring->size;
ring->state.free--;
@@ -981,12 +1233,12 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
ring->state.rindex);
- if (!ring->state.occ)
+ if (!ring->state.occ && !ring->state.tdown_complete)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -1004,7 +1256,7 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
ring->state.occ, ring->state.rindex);
@@ -1115,7 +1367,7 @@ static int k3_ringacc_init(struct platform_device *pdev,
dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi_domain) {
- dev_err(dev, "Failed to get MSI domain\n");
+ dev_dbg(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
@@ -1208,19 +1460,80 @@ static const struct of_device_id k3_ringacc_of_match[] = {
{ .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
{},
};
+MODULE_DEVICE_TABLE(of, k3_ringacc_of_match);
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ struct resource *res;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
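
The array built by k3_ringacc_dmarings_init() places forward ring i at index i and its reverse companion at num_rings + i. A tiny model of that layout, with illustrative names only:

    #include <stdio.h>

    struct ring { unsigned int id; int reverse; };

    int main(void)
    {
        enum { NUM_RINGS = 4 };
        struct ring rings[NUM_RINGS * 2];

        for (unsigned int i = 0; i < NUM_RINGS; i++) {
            rings[i] = (struct ring){ .id = i, .reverse = 0 };
            rings[NUM_RINGS + i] = (struct ring){ .id = i, .reverse = 1 };
        }
        /* the reverse companion of forward ring 2 lives at slot 6 */
        printf("%u\n", NUM_RINGS + 2);
        return 0;
    }
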
static int k3_ringacc_probe(struct platform_device *pdev)
{
const struct ringacc_match_data *match_data;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct k3_ringacc *ringacc;
int ret;
- match = of_match_node(k3_ringacc_of_match, dev->of_node);
- if (!match)
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
return -ENODEV;
- match_data = match->data;
ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
if (!ringacc)
@@ -1243,12 +1556,27 @@ static int k3_ringacc_probe(struct platform_device *pdev)
return 0;
}
+static int k3_ringacc_remove(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_del(&ringacc->list);
+ mutex_unlock(&k3_ringacc_list_lock);
+ return 0;
+}
+
static struct platform_driver k3_ringacc_driver = {
.probe = k3_ringacc_probe,
+ .remove = k3_ringacc_remove,
.driver = {
.name = "k3-ringacc",
.of_match_table = k3_ringacc_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(k3_ringacc_driver);
+module_platform_driver(k3_ringacc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI Ringacc driver for K3 SOCs");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index bbbc2d2b7091..ad97e08a25f6 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,6 +40,11 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2"},
+ { 0xBB7E, "AM62X" },
+ { 0xBB80, "J784S4" },
+ { 0xBB8D, "AM62AX" },
};
static int
diff --git a/drivers/soc/ti/keystone_dsp_mem.c b/drivers/soc/ti/keystone_dsp_mem.c
new file mode 100644
index 000000000000..b102411bf660
--- /dev/null
+++ b/drivers/soc/ti/keystone_dsp_mem.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Keystone DSP Memory Mapping Driver
+ *
+ * Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <uapi/linux/keystone_dsp_mem.h>
+
+#define KEYSTONE_ALIAS_PHYS_START 0x80000000ULL
+#define KEYSTONE_ALIAS_PHYS_SIZE 0x80000000ULL /* 2G */
+
+#define KEYSTONE_HIGH_PHYS_START 0x800000000ULL
+#define KEYSTONE_HIGH_PHYS_LIMIT (KEYSTONE_HIGH_PHYS_START + \
+ KEYSTONE_ALIAS_PHYS_SIZE)
+
+#define to_alias_addr(addr) (((addr) - KEYSTONE_HIGH_PHYS_START) + \
+ KEYSTONE_ALIAS_PHYS_START)
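
These macros implement a fixed-offset translation: the 2 GB DDR window at the 36-bit "high" address 0x8_0000_0000 aliases down to 0x8000_0000 in the 32-bit map. A quick arithmetic check of to_alias_addr():

    #include <stdint.h>
    #include <stdio.h>

    #define HIGH_START  0x800000000ULL
    #define ALIAS_START 0x80000000ULL

    static uint64_t to_alias(uint64_t addr)
    {
        return (addr - HIGH_START) + ALIAS_START;
    }

    int main(void)
    {
        /* 0x8_2000_0000 in the high view -> 0xa000_0000 in the alias view */
        printf("%#llx\n", (unsigned long long)to_alias(0x820000000ULL));
        return 0;
    }
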
+
+/**
+ * struct keystone_dsp_mem - internal memory structure
+ * @addr: physical address on the bus to access the memory region
+ * @size: size of the memory region
+ * @kobj: kobject for the sysfs directory file
+ */
+struct keystone_dsp_mem {
+ phys_addr_t addr;
+ resource_size_t size;
+ struct kobject kobj;
+};
+
+#define to_dsp_mem(obj) container_of(obj, struct keystone_dsp_mem, kobj)
+
+/**
+ * struct keystone_dsp_mem_info - Keystone DSP Memory device structure
+ * @misc: child miscdevice structure
+ * @mem: memory region array pointer
+ * @num_maps: number of memory regions
+ */
+struct keystone_dsp_mem_info {
+ struct miscdevice misc;
+ struct keystone_dsp_mem *mem;
+ int num_maps;
+};
+
+static struct keystone_dsp_mem_info *dsp_mem;
+
+#define to_dsp_mem_info(m) container_of(m, struct keystone_dsp_mem_info, misc)
+
+static ssize_t mem_addr_show(struct keystone_dsp_mem *mem, char *buf)
+{
+ return sprintf(buf, "%pa\n", &mem->addr);
+}
+
+static ssize_t mem_size_show(struct keystone_dsp_mem *mem, char *buf)
+{
+ return sprintf(buf, "%pa\n", &mem->size);
+}
+
+struct mem_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct keystone_dsp_mem *mem, char *buf);
+ ssize_t (*store)(struct keystone_dsp_mem *mem, const char *buf,
+ size_t len);
+};
+
+static struct mem_sysfs_entry addr_attribute =
+ __ATTR(addr, 0444, mem_addr_show, NULL);
+static struct mem_sysfs_entry size_attribute =
+ __ATTR(size, 0444, mem_size_show, NULL);
+
+static struct attribute *attrs[] = {
+ &addr_attribute.attr,
+ &size_attribute.attr,
+ NULL, /* sentinel */
+};
+
+static ssize_t mem_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct keystone_dsp_mem *mem = to_dsp_mem(kobj);
+ struct mem_sysfs_entry *entry;
+
+ entry = container_of(attr, struct mem_sysfs_entry, attr);
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(mem, buf);
+}
+
+static const struct sysfs_ops mem_sysfs_ops = {
+ .show = mem_type_show,
+};
+
+static struct kobj_type mem_attr_type = {
+ .sysfs_ops = &mem_sysfs_ops,
+ .default_attrs = attrs,
+};
+
+static int keystone_dsp_mem_add_attrs(struct keystone_dsp_mem_info *dsp_mem)
+{
+ int i, ret;
+ struct keystone_dsp_mem *mem;
+ struct kobject *kobj_parent = &dsp_mem->misc.this_device->kobj;
+
+ for (i = 0; i < dsp_mem->num_maps; i++) {
+ mem = &dsp_mem->mem[i];
+ kobject_init(&mem->kobj, &mem_attr_type);
+ ret = kobject_add(&mem->kobj, kobj_parent, "memory%d", i);
+ if (ret)
+ goto err_kobj;
+ ret = kobject_uevent(&mem->kobj, KOBJ_ADD);
+ if (ret)
+ goto err_kobj;
+ }
+
+ return 0;
+
+err_kobj:
+ for (; i >= 0; i--) {
+ mem = &dsp_mem->mem[i];
+ kobject_put(&mem->kobj);
+ }
+ return ret;
+}
+
+static void keystone_dsp_mem_del_attrs(struct keystone_dsp_mem_info *dsp_mem)
+{
+ int i;
+ struct keystone_dsp_mem *mem;
+
+ for (i = 0; i < dsp_mem->num_maps; i++) {
+ mem = &dsp_mem->mem[i];
+ kobject_put(&mem->kobj);
+ }
+}
+
+static int keystone_dsp_mem_check_addr(struct keystone_dsp_mem_info *dsp_mem,
+ int mask, size_t size)
+{
+ size_t req_offset;
+ u32 index;
+
+ index = mask & KEYSTONE_DSP_MEM_MAP_INDEX_MASK;
+ if (index >= dsp_mem->num_maps) {
+ pr_err("%s: invalid mmap region index %d\n", __func__, index);
+ return -EINVAL;
+ }
+
+ req_offset = (mask - index) << PAGE_SHIFT;
+ if (req_offset + size < req_offset) {
+ pr_err("%s: invalid request - overflow, mmap offset = 0x%zx size 0x%zx region %d\n",
+ __func__, req_offset, size, index);
+ return -EINVAL;
+ }
+
+ if ((req_offset + size) > dsp_mem->mem[index].size) {
+ pr_err("%s: invalid request - out of range, mmap offset 0x%zx size 0x%zx region %d\n",
+ __func__, req_offset, size, index);
+ return -EINVAL;
+ }
+
+ return index;
+}
+
+/*
+ * This is a custom mmap function following semantics based on the UIO
+ * mmap implementation. The vm_pgoff passed in the vma structure is a
+ * combination of the memory region index and the actual page offset in
+ * that region. This checks that the user request is in a valid range
+ * before providing mmap access.
+ *
+ * XXX: Evaluate this approach, as the MSMC memory could be mapped into
+ * userspace in its entirety since it is not overly large, and the
+ * kernel-unmapped DDR memory could be mmapped using traditional mmap
+ * semantics.
+ */
+static int keystone_dsp_mem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+ struct miscdevice *misc = file->private_data;
+ struct keystone_dsp_mem_info *dsp_mem = to_dsp_mem_info(misc);
+ int index;
+
+ index = keystone_dsp_mem_check_addr(dsp_mem, vma->vm_pgoff, size);
+ if (index < 0)
+ return index;
+
+ vma->vm_page_prot =
+ phys_mem_access_prot(file,
+ (dsp_mem->mem[index].addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - index), size,
+ vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (dsp_mem->mem[index].addr >> PAGE_SHIFT) +
+ (vma->vm_pgoff - index), size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
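
From userspace, the region index is carried in the low bits of the mmap page offset (per KEYSTONE_DSP_MEM_MAP_INDEX_MASK), with the in-region offset above it. A hedged client sketch that maps the start of region 1; the device node name follows the misc device registered later in this file:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/dspmem", O_RDWR | O_SYNC);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Map the first 1 MB of memory region 1: with a zero in-region
         * offset, the mmap offset is just index * PAGE_SIZE. */
        unsigned int index = 1;
        size_t len = 1 << 20;
        off_t off = (off_t)index * sysconf(_SC_PAGESIZE);

        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off);
        if (p == MAP_FAILED) {
            perror("mmap");
            close(fd);
            return 1;
        }
        munmap(p, len);
        close(fd);
        return 0;
    }
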
+
+static const struct file_operations keystone_dsp_mem_fops = {
+ .owner = THIS_MODULE,
+ .mmap = keystone_dsp_mem_mmap,
+};
+
+static int keystone_dsp_mem_parse(struct device_node *np, int index)
+{
+ phys_addr_t start, end, addr, size;
+ struct resource res;
+ resource_size_t rsize;
+ int ret, j;
+
+ if (!of_find_property(np, "no-map", NULL)) {
+ pr_err("dsp reserved memory regions without no-map are not supported\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ /* make sure only aliased addresses are covered */
+ rsize = resource_size(&res);
+ start = res.start;
+ end = res.start + rsize;
+ if (start < KEYSTONE_HIGH_PHYS_START ||
+ start >= KEYSTONE_HIGH_PHYS_LIMIT ||
+ end > KEYSTONE_HIGH_PHYS_LIMIT) {
+ pr_err("invalid address/size for keystone dsp memory carveout: %pa of size %pa\n",
+ &start, &rsize);
+ return -EINVAL;
+ }
+
+ /* check for overlaps */
+ start = to_alias_addr(start);
+ end = to_alias_addr(end);
+ for (j = 0; j < index; j++) {
+ addr = dsp_mem->mem[j].addr;
+ size = dsp_mem->mem[j].size;
+ if ((end > addr && end <= addr + size) ||
+ (start >= addr && start < addr + size) ||
+ (start < addr && end > addr + size)) {
+ pr_err("dsp memory carveout (%pa of size %pa) overlaps with (%pa of size %pa)\n",
+ &start, &rsize, &addr, &size);
+ return -EINVAL;
+ }
+ }
+
+ dsp_mem->mem[index].addr = to_alias_addr(res.start);
+ dsp_mem->mem[index].size = resource_size(&res);
+
+ return 0;
+}
+
+static int keystone_dsp_mem_init(void)
+{
+ struct miscdevice *misc;
+ struct resource res;
+ struct device_node *rmem_np, *sram_np, *np;
+ int ret, i = 0;
+ int num_maps = 0, num_sram = 0;
+
+ if (!of_have_populated_dt())
+ return -EOPNOTSUPP;
+
+ /* module is supported only on TI Keystone SoCs */
+ if (!of_machine_is_compatible("ti,keystone"))
+ return -EOPNOTSUPP;
+
+ /* count the number of DDR regions */
+ rmem_np = of_find_node_by_path("/reserved-memory");
+ if (rmem_np) {
+ for_each_available_child_of_node(rmem_np, np) {
+ if (of_device_is_compatible(np,
+ "ti,keystone-dsp-mem-pool"))
+ num_maps++;
+ }
+ }
+
+ for_each_compatible_node(sram_np, NULL, "ti,keystone-dsp-msm-ram") {
+ if (!of_device_is_available(sram_np))
+ continue;
+ num_sram++;
+ }
+
+ if ((!num_maps && !num_sram) ||
+ (num_maps + num_sram > KEYSTONE_DSP_MEM_MAP_INDEX_MASK)) {
+ ret = -EINVAL;
+ goto put_rmem;
+ }
+
+ dsp_mem = kzalloc(sizeof(*dsp_mem), GFP_KERNEL);
+ if (!dsp_mem) {
+ ret = -ENOMEM;
+ goto put_rmem;
+ }
+
+ dsp_mem->mem = kcalloc(num_maps + num_sram, sizeof(*dsp_mem->mem),
+ GFP_KERNEL);
+ if (!dsp_mem->mem) {
+ ret = -ENOMEM;
+ goto free_dsp;
+ }
+
+ /* handle reserved-memory carveouts */
+ if (num_maps) {
+ for_each_available_child_of_node(rmem_np, np) {
+ if (!of_device_is_compatible(np, "ti,keystone-dsp-mem-pool"))
+ continue;
+
+ ret = keystone_dsp_mem_parse(np, i);
+ if (ret) {
+ of_node_put(np);
+ goto free_mem;
+ }
+ i++;
+ dsp_mem->num_maps++;
+ }
+ }
+
+ /* handle on-chip SRAM reserved regions */
+ if (num_sram) {
+ for_each_compatible_node(sram_np, NULL,
+ "ti,keystone-dsp-msm-ram") {
+ if (!of_device_is_available(sram_np))
+ continue;
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ if (ret) {
+ ret = -EINVAL;
+ of_node_put(sram_np);
+ goto free_mem;
+ }
+ dsp_mem->mem[i].addr = res.start;
+ dsp_mem->mem[i].size = resource_size(&res);
+ i++;
+ dsp_mem->num_maps++;
+ }
+ }
+
+ misc = &dsp_mem->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "dspmem";
+ misc->fops = &keystone_dsp_mem_fops;
+ misc->parent = NULL;
+ ret = misc_register(misc);
+ if (ret) {
+ pr_err("%s: could not register dspmem misc device\n", __func__);
+ goto free_mem;
+ }
+
+ ret = keystone_dsp_mem_add_attrs(dsp_mem);
+ if (ret) {
+ pr_err("%s: error creating sysfs files (%d)\n", __func__, ret);
+ goto unregister_misc;
+ }
+ of_node_put(rmem_np);
+
+ pr_info("registered dspmem misc device\n");
+
+ return 0;
+
+unregister_misc:
+ misc_deregister(&dsp_mem->misc);
+free_mem:
+ kfree(dsp_mem->mem);
+free_dsp:
+ kfree(dsp_mem);
+ dsp_mem = NULL;
+put_rmem:
+ of_node_put(rmem_np);
+ return ret;
+}
+
+static void keystone_dsp_mem_exit(void)
+{
+ keystone_dsp_mem_del_attrs(dsp_mem);
+
+ misc_deregister(&dsp_mem->misc);
+
+ kfree(dsp_mem->mem);
+ kfree(dsp_mem);
+ dsp_mem = NULL;
+}
+
+module_init(keystone_dsp_mem_init);
+module_exit(keystone_dsp_mem_exit);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone DSP Memory Mapping Driver");
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 56597f6ea666..591d14ebcb11 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -500,7 +500,7 @@ EXPORT_SYMBOL_GPL(knav_dma_open_channel);
/**
* knav_dma_close_channel() - Destroy a dma channel
*
- * channel: dma channel handle
+ * @channel: dma channel handle
*
*/
void knav_dma_close_channel(void *channel)
@@ -758,6 +758,7 @@ static int knav_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
ret = dma_init(node, child);
if (ret) {
+ of_node_put(child);
dev_err(&pdev->dev, "init failed with %d\n", ret);
break;
}
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 20c84741639e..52389859395c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
/**
* knav_queue_notify: qmss queue notfier call
*
- * @inst: qmss queue instance like accumulator
+ * @inst: - qmss queue instance like accumulator
*/
void knav_queue_notify(struct knav_queue_inst *inst)
{
@@ -511,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
/**
* knav_queue_open() - open a hardware queue
- * @name - name to give the queue handle
- * @id - desired queue number if any or specifes the type
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifies the type
* of queue
- * @flags - the following flags are applicable to queues:
+ * @flags: - the following flags are applicable to queues:
* KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
* exclusive by default.
* Subsequent attempts to open a shared queue should
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
/**
* knav_queue_close() - close a hardware queue handle
- * @qh - handle to close
+ * @qhandle: - handle to close
*/
void knav_queue_close(void *qhandle)
{
@@ -572,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
/**
* knav_queue_device_control() - Perform control operations on a queue
- * @qh - queue handle
- * @cmd - control commands
- * @arg - command argument
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
*
* Returns 0 on success, errno otherwise.
*/
@@ -623,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
/**
* knav_queue_push() - push data (or descriptor) to the tail of a queue
- * @qh - hardware queue handle
- * @data - data to push
- * @size - size of data to push
- * @flags - can be used to pass additional information
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
*
* Returns 0 on success, errno otherwise.
*/
@@ -646,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
- * @qh - hardware queue handle
- * @size - (optional) size of the data pop'ed.
+ * @qhandle: - hardware queue handle
+ * @size: - (optional) size of the data popped.
*
* Returns a DMA address on success, 0 on failure.
*/
@@ -746,9 +746,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
- * @name - name to give the pool handle
- * @num_desc - numbers of descriptors in the pool
- * @region_id - QMSS region id from which the descriptors are to be
+ * @name: - name to give the pool handle
+ * @num_desc: - numbers of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
* allocated.
*
* Returns a pool handle on success.
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
/**
* knav_pool_destroy() - Free a pool of descriptors
- * @pool - pool handle
+ * @ph: - pool handle
*/
void knav_pool_destroy(void *ph)
{
@@ -884,7 +884,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
/**
* knav_pool_desc_get() - Get a descriptor from the pool
- * @pool - pool handle
+ * @ph: - pool handle
*
* Returns descriptor from the pool.
*/
@@ -905,7 +905,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
- * @pool - pool handle
+ * @ph: - pool handle
+ * @desc: - virtual address
*/
void knav_pool_desc_put(void *ph, void *desc)
{
@@ -918,11 +919,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
- * @pool - pool handle
- * @desc - address of descriptor to map
- * @size - size of descriptor to map
- * @dma - DMA address return pointer
- * @dma_sz - adjusted return pointer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz: - adjusted return pointer
*
* Returns 0 on success, errno otherwise.
*/
@@ -945,9 +946,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
- * @pool - pool handle
- * @dma - DMA address of descriptor to unmap
- * @dma_sz - size of descriptor to unmap
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
*
* Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
* error values on return.
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
- * @pool - pool handle
+ * @ph: - pool handle
* Returns number of elements in the pool.
*/
int knav_pool_count(void *ph)
@@ -1086,6 +1087,7 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
for_each_child_of_node(regions, child) {
region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating region\n");
return -ENOMEM;
}
@@ -1307,12 +1309,11 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
struct device_node *queue_pools)
{
struct device_node *type, *range;
- int ret;
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
- ret = knav_setup_queue_range(kdev, range);
/* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
}
}
@@ -1399,6 +1400,7 @@ static int knav_queue_init_qmgrs(struct knav_device *kdev,
for_each_child_of_node(qmgrs, child) {
qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
if (!qmgr) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating qmgr\n");
return -ENOMEM;
}
@@ -1498,6 +1500,7 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
for_each_child_of_node(pdsps, child) {
pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
if (!pdsp) {
+ of_node_put(child);
dev_err(dev, "out of memory allocating pdsp\n");
return -ENOMEM;
}
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index dc21aa855a45..73be958f6aa5 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -135,13 +135,11 @@ static int am33xx_push_sram_idle(void)
static int am33xx_do_sram_idle(u32 wfi_flags)
{
- int ret = 0;
-
if (!m3_ipc || !pm_ops)
return 0;
if (wfi_flags & WFI_FLAG_WAKE_M3)
- ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 30695172a508..434ebd5c4d9f 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -2,10 +2,11 @@
/*
* PRU-ICSS platform driver for various TI SoCs
*
- * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014-2021 Texas Instruments Incorporated - https://www.ti.com/
* Author(s):
* Suman Anna <s-anna@ti.com>
* Andrew F. Davis <afd@ti.com>
+ * Tero Kristo <t-kristo@ti.com>
*/
#include <linux/clk-provider.h>
@@ -18,18 +19,264 @@
#include <linux/pm_runtime.h>
#include <linux/pruss_driver.h>
#include <linux/regmap.h>
+#include <linux/remoteproc.h>
#include <linux/slab.h>
+#define SYSCFG_STANDBY_INIT BIT(4)
+#define SYSCFG_SUB_MWAIT_READY BIT(5)
+
/**
* struct pruss_private_data - PRUSS driver private data
* @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
* @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ * @has_ocp_syscfg: flag to indicate if OCP SYSCFG is present
*/
struct pruss_private_data {
bool has_no_sharedram;
bool has_core_mux_clock;
+ bool has_ocp_syscfg;
};
+/**
+ * pruss_get() - get the pruss for a given PRU remoteproc
+ * @rproc: remoteproc handle of a PRU instance
+ *
+ * Finds the parent pruss device for a PRU given the @rproc handle of the
+ * PRU remote processor. This function increments the pruss device's refcount,
+ * so always use pruss_put() to decrement it back once pruss isn't needed
+ * anymore.
+ *
+ * Return: pruss handle on success, and an ERR_PTR on failure using one
+ * of the following error values
+ * -EINVAL if invalid parameter
+ * -ENODEV if PRU device or PRUSS device is not found
+ */
+struct pruss *pruss_get(struct rproc *rproc)
+{
+ struct pruss *pruss;
+ struct device *dev;
+ struct platform_device *ppdev;
+
+ if (IS_ERR_OR_NULL(rproc))
+ return ERR_PTR(-EINVAL);
+
+ dev = &rproc->dev;
+
+ /* make sure it is PRU rproc */
+ if (!dev->parent || !is_pru_rproc(dev->parent))
+ return ERR_PTR(-ENODEV);
+
+ ppdev = to_platform_device(dev->parent->parent);
+ pruss = platform_get_drvdata(ppdev);
+ if (!pruss)
+ return ERR_PTR(-ENODEV);
+
+ get_device(pruss->dev);
+
+ return pruss;
+}
+EXPORT_SYMBOL_GPL(pruss_get);
+
+/**
+ * pruss_put() - decrement pruss device's usecount
+ * @pruss: pruss handle
+ *
+ * Complementary function for pruss_get(). Needs to be called
+ * after the PRUSS is used, and only if the pruss_get() succeeds.
+ */
+void pruss_put(struct pruss *pruss)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return;
+
+ put_device(pruss->dev);
+}
+EXPORT_SYMBOL_GPL(pruss_put);
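
A typical client pairs the two calls around its use of the PRUSS. A kernel-context sketch; the surrounding client driver and its PRU rproc handle are assumed:

    static int example_client_probe(struct rproc *pru_rproc)
    {
        struct pruss *pruss = pruss_get(pru_rproc);

        if (IS_ERR(pruss))
            return PTR_ERR(pruss);

        /* ... use the PRUSS here ... */

        pruss_put(pruss);
        return 0;
    }
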
+
+/**
+ * pruss_request_mem_region() - request a memory resource
+ * @pruss: the pruss instance
+ * @mem_id: the memory resource id
+ * @region: pointer to memory region structure to be filled in
+ *
+ * This function allows a client driver to request a memory resource,
+ * and if successful, will let the client driver own the particular
+ * memory region until released using the pruss_release_mem_region()
+ * API.
+ *
+ * Return: 0 if requested memory region is available with the memory region
+ * values returned in memory pointed by @region, an error otherwise
+ */
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ if (pruss->mem_in_use[mem_id]) {
+ mutex_unlock(&pruss->lock);
+ return -EBUSY;
+ }
+
+ *region = pruss->mem_regions[mem_id];
+ pruss->mem_in_use[mem_id] = region;
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_request_mem_region);
+
+/**
+ * pruss_release_mem_region() - release a memory resource
+ * @pruss: the pruss instance
+ * @region: the memory region to release
+ *
+ * This function is the complementary function to
+ * pruss_request_mem_region(), and allows the client drivers to
+ * release back a memory resource.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ int id;
+
+ if (!pruss || !region)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ /* find out the memory region being released */
+ for (id = 0; id < PRUSS_MEM_MAX; id++) {
+ if (pruss->mem_in_use[id] == region)
+ break;
+ }
+
+ if (id == PRUSS_MEM_MAX) {
+ mutex_unlock(&pruss->lock);
+ return -EINVAL;
+ }
+
+ pruss->mem_in_use[id] = NULL;
+ memset(region, 0, sizeof(*region));
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_release_mem_region);
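
Together with the request API above, ownership of a memory region follows a simple request/use/release pattern. A kernel-context sketch, assuming pruss_mem_region exposes va/pa/size fields as declared in linux/pruss_driver.h:

    static int example_use_dram0(struct pruss *pruss)
    {
        struct pruss_mem_region region;
        int ret;

        ret = pruss_request_mem_region(pruss, PRUSS_MEM_DRAM0, &region);
        if (ret)
            return ret; /* -EBUSY if another client owns it */

        /* region.va / region.pa / region.size describe the memory */

        return pruss_release_mem_region(pruss, &region);
    }
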
+
+/**
+ * pruss_cfg_read() - read a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @val: pointer to return the value in
+ *
+ * Reads a given register within the PRUSS CFG sub-module and
+ * returns it through the passed-in @val pointer
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_read(struct pruss *pruss, unsigned int reg, unsigned int *val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_read(pruss->cfg_regmap, reg, val);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_read);
+
+/**
+ * pruss_cfg_update() - configure a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @mask: bit mask to use for programming the @val
+ * @val: value to write
+ *
+ * Programs a given register within the PRUSS CFG sub-module
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_update(struct pruss *pruss, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_update_bits(pruss->cfg_regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_update);
+
+/**
+ * pruss_cfg_ocp_master_ports() - configure PRUSS OCP master ports
+ * @pruss: the pruss instance handle
+ * @enable: set to true for enabling or false for disabling the OCP master ports
+ *
+ * This function programs the PRUSS_SYSCFG.STANDBY_INIT bit either to enable
+ * or disable the OCP master ports (applicable only on SoCs using an OCP
+ * interconnect, like the OMAP family). Clearing the bit has two effects: it
+ * deasserts the MStandby signal to the device PRCM, and it enables the OCP
+ * master ports, allowing accesses outside of the PRU-ICSS. When enabling the
+ * master ports, the function has to wait for the PRCM to acknowledge by
+ * monitoring the PRUSS_SYSCFG.SUB_MWAIT bit. Setting the bit disables master
+ * access and also signals the PRCM that the PRUSS is ready for Standby.
+ *
+ * Return: 0 on success, or an error code otherwise. -ETIMEDOUT is returned
+ * if the acknowledgment is not seen in time.
+ */
+int pruss_cfg_ocp_master_ports(struct pruss *pruss, bool enable)
+{
+ int ret;
+ u32 syscfg_val, i;
+ const struct pruss_private_data *data;
+
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ data = of_device_get_match_data(pruss->dev);
+
+ /* nothing to do on non-OMAP SoCs */
+ if (!data || !data->has_ocp_syscfg)
+ return 0;
+
+ /* assert the MStandby signal during disable path */
+ if (!enable)
+ return pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG,
+ SYSCFG_STANDBY_INIT,
+ SYSCFG_STANDBY_INIT);
+
+ /* enable the OCP master ports and disable MStandby */
+ ret = pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG, SYSCFG_STANDBY_INIT, 0);
+ if (ret)
+ return ret;
+
+ /* wait until we are ready for transactions; the delay is arbitrary */
+ for (i = 0; i < 10; i++) {
+ ret = pruss_cfg_read(pruss, PRUSS_CFG_SYSCFG, &syscfg_val);
+ if (ret)
+ goto disable;
+
+ if (!(syscfg_val & SYSCFG_SUB_MWAIT_READY))
+ return 0;
+
+ udelay(5);
+ }
+
+ dev_err(pruss->dev, "timeout waiting for SUB_MWAIT_READY\n");
+ ret = -ETIMEDOUT;
+
+disable:
+ pruss_cfg_update(pruss, PRUSS_CFG_SYSCFG, SYSCFG_STANDBY_INIT,
+ SYSCFG_STANDBY_INIT);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_ocp_master_ports);
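
Callers are expected to bracket any PRU-initiated interconnect traffic with this helper. A kernel-context sketch:

    static int example_ocp_window(struct pruss *pruss)
    {
        int ret = pruss_cfg_ocp_master_ports(pruss, true);

        if (ret)
            return ret;

        /* ... PRU firmware may now master the OCP interconnect ... */

        return pruss_cfg_ocp_master_ports(pruss, false);
    }
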
+
static void pruss_of_free_clk_provider(void *data)
{
struct device_node *clk_mux_np = data;
@@ -126,8 +373,6 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
int ret = 0;
data = of_device_get_match_data(dev);
- if (IS_ERR(data))
- return -ENODEV;
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
@@ -163,6 +408,53 @@ static struct regmap_config regmap_conf = {
.reg_stride = 4,
};
+static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct resource res;
+ int ret;
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret)
+ dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+
+node_put:
+ of_node_put(child);
+ return ret;
+}
+
static int pruss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -175,10 +467,6 @@ static int pruss_probe(struct platform_device *pdev)
const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
data = of_device_get_match_data(&pdev->dev);
- if (IS_ERR(data)) {
- dev_err(dev, "missing private data\n");
- return -ENODEV;
- }
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -191,6 +479,7 @@ static int pruss_probe(struct platform_device *pdev)
return -ENOMEM;
pruss->dev = dev;
+ mutex_init(&pruss->lock);
child = of_get_child_by_name(np, "memories");
if (!child) {
@@ -245,56 +534,18 @@ static int pruss_probe(struct platform_device *pdev)
goto rpm_disable;
}
- child = of_get_child_by_name(np, "cfg");
- if (!child) {
- dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
- ret = -ENODEV;
+ ret = pruss_cfg_of_init(dev, pruss);
+ if (ret < 0)
goto rpm_put;
- }
-
- if (of_address_to_resource(child, 0, &res)) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!pruss->cfg_base) {
- ret = -ENOMEM;
- goto node_put;
- }
-
- regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
- (u64)res.start);
- regmap_conf.max_register = resource_size(&res) - 4;
-
- pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
- &regmap_conf);
- kfree(regmap_conf.name);
- if (IS_ERR(pruss->cfg_regmap)) {
- dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
- PTR_ERR(pruss->cfg_regmap));
- ret = PTR_ERR(pruss->cfg_regmap);
- goto node_put;
- }
-
- ret = pruss_clk_init(pruss, child);
- if (ret) {
- dev_err(dev, "failed to setup coreclk-mux\n");
- goto node_put;
- }
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "failed to register child devices\n");
- goto node_put;
+ goto rpm_put;
}
- of_node_put(child);
-
return 0;
-node_put:
- of_node_put(child);
rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
@@ -317,10 +568,16 @@ static int pruss_remove(struct platform_device *pdev)
/* instance-specific driver private data */
static const struct pruss_private_data am437x_pruss1_data = {
.has_no_sharedram = false,
+ .has_ocp_syscfg = true,
};
static const struct pruss_private_data am437x_pruss0_data = {
.has_no_sharedram = true,
+ .has_ocp_syscfg = false,
+};
+
+static const struct pruss_private_data am33xx_am57xx_data = {
+ .has_ocp_syscfg = true,
};
static const struct pruss_private_data am65x_j721e_pruss_data = {
@@ -328,13 +585,15 @@ static const struct pruss_private_data am65x_j721e_pruss_data = {
};
static const struct of_device_id pruss_of_match[] = {
- { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am3356-pruss", .data = &am33xx_am57xx_data },
{ .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
{ .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
- { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,am5728-pruss", .data = &am33xx_am57xx_data },
{ .compatible = "ti,k2g-pruss" },
{ .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
{ .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
{},
};
MODULE_DEVICE_TABLE(of, pruss_of_match);
diff --git a/drivers/soc/ti/ti-pat.c b/drivers/soc/ti/ti-pat.c
new file mode 100644
index 000000000000..1671b32faf38
--- /dev/null
+++ b/drivers/soc/ti/ti-pat.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI PAT mapped DMA-BUF memory re-exporter
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis <afd@ti.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <linux/ti-pat.h>
+
+/* TI PAT MMRS registers */
+#define TI_PAT_MMRS_PID 0x0 /* Revision Register */
+#define TI_PAT_MMRS_CONFIG 0x4 /* Config Register */
+#define TI_PAT_MMRS_CONTROL 0x10 /* Control Register */
+
+/* TI PAT CONTROL register field values */
+#define TI_PAT_CONTROL_ARB_MODE_UF 0x0 /* Updates first */
+#define TI_PAT_CONTROL_ARB_MODE_RR 0x2 /* Round-robin */
+
+#define TI_PAT_CONTROL_PAGE_SIZE_4KB 0x0
+#define TI_PAT_CONTROL_PAGE_SIZE_16KB 0x1
+#define TI_PAT_CONTROL_PAGE_SIZE_64KB 0x2
+#define TI_PAT_CONTROL_PAGE_SIZE_1MB 0x3
+
+/* TI PAT TABLE registers */
+#define TI_PAT_TABLE_ADDRL 0x0 /* Low address Register */
+#define TI_PAT_TABLE_ADDRH 0x4 /* High address and enable Register */
+
+static unsigned int ti_pat_page_sizes[] = {
+ [TI_PAT_CONTROL_PAGE_SIZE_4KB] = 4 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_16KB] = 16 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_64KB] = 64 * 1024,
+ [TI_PAT_CONTROL_PAGE_SIZE_1MB] = 1024 * 1024,
+};
+
+enum ti_pat_fields {
+ /* Revision */
+ F_PID_MAJOR,
+ F_PID_MINOR,
+
+ /* Controls */
+ F_CONTROL_ARB_MODE,
+ F_CONTROL_PAGE_SIZE,
+ F_CONTROL_REPLACE_OID_EN,
+ F_CONTROL_EN,
+
+ /* sentinel */
+ F_MMRS_FIELDS,
+
+ /* Table */
+ F_TABLE_ADDRL,
+ F_TABLE_ADDRH,
+ F_TABLE_ENABLE,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static struct reg_field ti_pat_reg_fields[] = {
+ /* Revision */
+ [F_PID_MAJOR] = REG_FIELD(TI_PAT_MMRS_PID, 8, 10),
+ [F_PID_MINOR] = REG_FIELD(TI_PAT_MMRS_PID, 0, 5),
+ /* Controls */
+ [F_CONTROL_ARB_MODE] = REG_FIELD(TI_PAT_MMRS_CONTROL, 6, 7),
+ [F_CONTROL_PAGE_SIZE] = REG_FIELD(TI_PAT_MMRS_CONTROL, 4, 5),
+ [F_CONTROL_REPLACE_OID_EN] = REG_FIELD(TI_PAT_MMRS_CONTROL, 1, 1),
+ [F_CONTROL_EN] = REG_FIELD(TI_PAT_MMRS_CONTROL, 0, 0),
+ /* Table */
+ [F_TABLE_ADDRL] = REG_FIELD(TI_PAT_TABLE_ADDRL, 0, 31),
+ [F_TABLE_ADDRH] = REG_FIELD(TI_PAT_TABLE_ADDRH, 0, 3),
+ [F_TABLE_ENABLE] = REG_FIELD(TI_PAT_TABLE_ADDRH, 31, 31),
+};
+
+/**
+ * struct ti_pat_data - PAT device instance data
+ * @dev: PAT device structure
+ * @mdev: misc device
+ * @fields: Register fields for both MMRS and TABLE
+ * @page_count: Total number of pages in this PAT
+ * @page_size: Size of region mapped by each page in bytes
+ * @window_base: Base address of WINDOW region
+ * @pool: Pool for managing translation space
+ */
+struct ti_pat_data {
+ struct device *dev;
+ struct miscdevice mdev;
+ struct regmap_field *fields[F_MAX_FIELDS];
+ unsigned int page_count;
+ unsigned int page_size;
+ phys_addr_t window_base;
+ struct gen_pool *pool;
+};
+
+struct ti_pat_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct ti_pat_buffer *buffer;
+ struct list_head list;
+};
+
+/**
+ * struct ti_pat_buffer - Single buffer instance data
+ * @pat: PAT instance to which this buffer belongs
+ * @i_dma_buf: Imported DMA-BUF buffer
+ * @size: Total allocated size of this buffer
+ * @offset: Allocated offset into the PAT window
+ * @e_dma_buf: Exported DMA-BUF buffer
+ * @attachment: Our attachment to the imported buffer
+ * @sgt: DMA map of our imported buffer
+ * @attachments: Attachments to this buffer
+ * @map_count: Reference count of mappings to this buffer
+ * @lock: Protect the attach list and map count
+ */
+struct ti_pat_buffer {
+ struct ti_pat_data *pat;
+ struct dma_buf *i_dma_buf;
+ size_t size;
+ unsigned long offset;
+ struct dma_buf *e_dma_buf;
+
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+
+ struct list_head attachments;
+ int map_count;
+
+ struct mutex lock;
+};
+
+static const struct regmap_config ti_pat_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int ti_pat_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ti_pat_dma_buf_attachment *a;
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ a->dev = attachment->dev;
+ a->buffer = buffer;
+ INIT_LIST_HEAD(&a->list);
+
+ a->table = kzalloc(sizeof(*a->table), GFP_KERNEL);
+ if (!a->table) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ if (sg_alloc_table(a->table, 1, GFP_KERNEL)) {
+ kfree(a->table);
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ sg_set_page(a->table->sgl, pfn_to_page(PFN_DOWN(buffer->offset)), buffer->size, 0);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ /* First time attachment we attach to parent */
+ if (list_empty(&buffer->attachments)) {
+ buffer->attachment = dma_buf_attach(buffer->i_dma_buf, buffer->pat->dev);
+ if (IS_ERR(buffer->attachment)) {
+ dev_err(buffer->pat->dev, "Unable to attach to parent DMA-BUF\n");
+ mutex_unlock(&buffer->lock);
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+ return PTR_ERR(buffer->attachment);
+ }
+ }
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void ti_pat_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ /* Last attachment we detach from parent */
+ if (list_empty(&buffer->attachments)) {
+ dma_buf_detach(buffer->i_dma_buf, buffer->attachment);
+ buffer->attachment = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static unsigned int ti_pat_table_index_from_page(size_t page)
+{
+ /* Every 256 pages start on a 4k boundary */
+ unsigned int boundary = page / 256;
+ unsigned int offset = page % 256;
+ /* Each page occupies 8 bytes in the table */
+ return (boundary * (4096 / 8)) + offset;
+}
+
+static void ti_pat_set_page(struct ti_pat_data *pat, size_t page_id, dma_addr_t dma_address)
+{
+ unsigned int index = ti_pat_table_index_from_page(page_id);
+
+ /*
+ * Addresses will always be at least 4K aligned, so both high and low
+ * addresses are shifted by an additional 12 bits before being written
+ * to the PAT.
+ */
+ u32 base_l = dma_address >> 12;
+ u32 base_h = dma_address >> 44;
+
+ dev_dbg(pat->dev, "Enabling PAT index: %zu pointing to %pad\n", page_id, &dma_address);
+
+ regmap_fields_write(pat->fields[F_TABLE_ADDRL], index, base_l);
+ regmap_fields_write(pat->fields[F_TABLE_ADDRH], index, base_h);
+ regmap_fields_write(pat->fields[F_TABLE_ENABLE], index, 1);
+}
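
Both the sparse table indexing and the 12/44-bit address split are easy to sanity-check in isolation. A standalone model with arbitrary values:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors ti_pat_table_index_from_page(): every 256 pages start on a
     * 4 KB boundary, and each entry occupies 8 bytes. */
    static unsigned int index_from_page(size_t page)
    {
        return (page / 256) * (4096 / 8) + page % 256;
    }

    int main(void)
    {
        uint64_t dma = 0x100080001000ULL; /* 45-bit DMA address, 4 KB aligned */

        printf("page 300 -> index %u\n", index_from_page(300)); /* -> 556 */
        printf("ADDRL=%#x ADDRH=%#x\n",
               (unsigned int)(dma >> 12), (unsigned int)(dma >> 44));
        return 0;
    }
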
+
+static void ti_pat_unset_page(struct ti_pat_data *pat, size_t page_id)
+{
+ unsigned int index = ti_pat_table_index_from_page(page_id);
+
+ dev_dbg(pat->dev, "Disabling PAT index: %zu\n", page_id);
+
+ regmap_fields_write(pat->fields[F_TABLE_ENABLE], index, 0);
+}
+
+static struct sg_table *ti_pat_dma_buf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = a->buffer;
+ struct ti_pat_data *pat = buffer->pat;
+ struct sg_table *table = a->table;
+ struct scatterlist *s;
+ unsigned int i, s_len;
+ size_t page_id;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+ /* First time mapping we map to parent */
+ if (!buffer->map_count) {
+ buffer->sgt = dma_buf_map_attachment(buffer->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(buffer->sgt)) {
+ dev_err(pat->dev, "Unable to map parent DMA-BUF\n");
+ return buffer->sgt;
+ }
+
+ /* And program PAT area for this set of pages */
+ page_id = (size_t)(buffer->offset - pat->window_base) / pat->page_size;
+ for_each_sg(buffer->sgt->sgl, s, buffer->sgt->nents, i) {
+ if (s->offset) {
+ dev_err(pat->dev, "Cannot use offset buffers\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ if (s->length % pat->page_size) {
+ dev_err(pat->dev, "Cannot use buffers not a multiple of page size\n");
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ for (s_len = 0; s_len < s->length; s_len += pat->page_size)
+ ti_pat_set_page(pat, page_id++, s->dma_address + s_len);
+ }
+ }
+ buffer->map_count++;
+ mutex_unlock(&buffer->lock);
+
+ /* Map the attached device's table to get DMA addresses */
+ if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents, direction, DMA_ATTR_SKIP_CPU_SYNC))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+
+unmap:
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ mutex_unlock(&buffer->lock);
+ return ERR_PTR(ret);
+}
+
+static void ti_pat_dma_buf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct ti_pat_dma_buf_attachment *a = attachment->priv;
+ struct ti_pat_buffer *buffer = a->buffer;
+ struct ti_pat_data *pat = buffer->pat;
+
+ /* Unmap the attached device's table */
+ dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents, direction, DMA_ATTR_SKIP_CPU_SYNC);
+
+ mutex_lock(&buffer->lock);
+ buffer->map_count--;
+ /* Last mapping we unmap from parent */
+ if (!buffer->map_count) {
+ /* Disable PAT pages for this area */
+ size_t page_start = (size_t)(buffer->offset - pat->window_base) / pat->page_size;
+ size_t page_end = page_start + (buffer->size / pat->page_size);
+
+ for (; page_start < page_end; page_start++)
+ ti_pat_unset_page(pat, page_start);
+
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ buffer->sgt = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static void ti_pat_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ if (buffer->attachment && buffer->sgt)
+ dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
+ if (buffer->i_dma_buf && !IS_ERR_OR_NULL(buffer->attachment))
+ dma_buf_detach(buffer->i_dma_buf, buffer->attachment);
+ if (buffer->i_dma_buf)
+ dma_buf_put(buffer->i_dma_buf);
+
+ if (buffer->offset)
+ gen_pool_free(buffer->pat->pool, buffer->offset, buffer->size);
+
+ kfree(buffer);
+}
+
+static int ti_pat_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_begin_cpu_access(buffer->i_dma_buf, direction);
+}
+
+static int ti_pat_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_end_cpu_access(buffer->i_dma_buf, direction);
+}
+
+static int ti_pat_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_mmap(buffer->i_dma_buf, vma, vma->vm_pgoff);
+}
+
+static void *ti_pat_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_vmap(buffer->i_dma_buf);
+}
+
+static void ti_pat_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct ti_pat_buffer *buffer = dmabuf->priv;
+
+ return dma_buf_vunmap(buffer->i_dma_buf, vaddr);
+}
+
+static const struct dma_buf_ops dma_buf_ops = {
+ .attach = ti_pat_dma_buf_attach,
+ .detach = ti_pat_dma_buf_detach,
+
+ .map_dma_buf = ti_pat_dma_buf_map,
+ .unmap_dma_buf = ti_pat_dma_buf_unmap,
+
+ .release = ti_pat_dma_buf_release,
+
+ .begin_cpu_access = ti_pat_dma_buf_begin_cpu_access,
+ .end_cpu_access = ti_pat_dma_buf_end_cpu_access,
+ .mmap = ti_pat_dma_buf_mmap,
+ .vmap = ti_pat_dma_buf_vmap,
+ .vunmap = ti_pat_dma_buf_vunmap,
+};
+
+int ti_pat_export(struct ti_pat_data *pat,
+ struct dma_buf *i_dma_buf,
+ struct dma_buf **e_dma_buf)
+{
+ struct ti_pat_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer->pat = pat;
+ buffer->i_dma_buf = i_dma_buf;
+ buffer->size = buffer->i_dma_buf->size;
+ mutex_init(&buffer->lock);
+ INIT_LIST_HEAD(&buffer->attachments);
+ buffer->map_count = 0;
+
+ /* Reserve PAT space */
+ buffer->offset = gen_pool_alloc(buffer->pat->pool, buffer->size);
+ if (!buffer->offset) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ *e_dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(*e_dma_buf)) {
+ ret = PTR_ERR(*e_dma_buf);
+ goto free_pool;
+ }
+
+ return 0;
+
+free_pool:
+ gen_pool_free(buffer->pat->pool, buffer->offset, buffer->size);
+free_buffer:
+ kfree(buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ti_pat_export);
+
+static long ti_pat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ti_pat_data *pat = container_of(file->private_data, struct ti_pat_data, mdev);
+
+ switch (cmd) {
+ case TI_PAT_IOC_EXPORT:
+ {
+ struct ti_pat_export_data export;
+ struct dma_buf *i_dma_buf;
+ struct dma_buf *e_dma_buf;
+ int ret;
+
+ if (_IOC_SIZE(cmd) > sizeof(export))
+ return -EINVAL;
+
+ if (copy_from_user(&export, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ i_dma_buf = dma_buf_get(export.fd);
+ if (IS_ERR(i_dma_buf))
+ return PTR_ERR(i_dma_buf);
+
+ ret = ti_pat_export(pat, i_dma_buf, &e_dma_buf);
+ if (ret) {
+ dma_buf_put(i_dma_buf);
+ return ret;
+ }
+
+ export.fd = dma_buf_fd(e_dma_buf, O_CLOEXEC);
+ if (export.fd < 0) {
+ dma_buf_put(e_dma_buf);
+ dma_buf_put(i_dma_buf);
+ return export.fd;
+ }
+
+ if (copy_to_user((void __user *)arg, &export, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
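
Userspace drives this with a single ioctl: pass in a DMA-BUF fd and get the re-exported fd back in the same structure. A hedged sketch, assuming the uapi definitions from linux/ti-pat.h:

    #include <stdio.h>
    #include <sys/ioctl.h>

    #include <linux/ti-pat.h> /* TI_PAT_IOC_EXPORT, struct ti_pat_export_data */

    /* Returns the re-exported fd, or -1 on error */
    int pat_reexport(int pat_fd, int dmabuf_fd)
    {
        struct ti_pat_export_data export = { .fd = dmabuf_fd };

        if (ioctl(pat_fd, TI_PAT_IOC_EXPORT, &export) < 0) {
            perror("TI_PAT_IOC_EXPORT");
            return -1;
        }
        return export.fd; /* now backed by the PAT window */
    }
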
+
+static const struct file_operations ti_pat_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ti_pat_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ti_pat_ioctl,
+#endif
+};
+
+static int ti_pat_probe(struct platform_device *pdev)
+{
+ struct ti_pat_data *pat;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *mmrs_map;
+ struct regmap *table_map;
+ unsigned int revision_major;
+ unsigned int revision_minor;
+ resource_size_t size;
+ size_t page_size;
+ int i, ret;
+
+ pat = devm_kzalloc(&pdev->dev, sizeof(*pat), GFP_KERNEL);
+ if (!pat)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pat);
+ pat->dev = &pdev->dev;
+
+ /* Set DMA mask to 64 bits */
+ ret = dma_set_mask_and_coherent(pat->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(pat->dev, "Unable to set coherent mask to 64");
+ return ret;
+ }
+
+ /* MMRS */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmrs");
+ if (!res) {
+ dev_err(pat->dev, "Unable to find MMRS IO resource\n");
+ return -ENOENT;
+ }
+ base = devm_ioremap_resource(pat->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mmrs_map = devm_regmap_init_mmio(pat->dev, base, &ti_pat_regmap_config);
+ if (IS_ERR(mmrs_map)) {
+ dev_err(pat->dev, "Unable to allocate MMRS register map\n");
+ return PTR_ERR(mmrs_map);
+ }
+
+ for (i = 0; i < F_MMRS_FIELDS; i++) {
+ pat->fields[i] = devm_regmap_field_alloc(pat->dev, mmrs_map, ti_pat_reg_fields[i]);
+ if (IS_ERR(pat->fields[i])) {
+ dev_err(pat->dev, "Unable to allocate Regmap fields\n");
+ return PTR_ERR(pat->fields[i]);
+ }
+ }
+
+ ret = regmap_read(mmrs_map, TI_PAT_MMRS_CONFIG, &pat->page_count);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device page count\n");
+ return ret;
+ }
+
+ ret = regmap_field_read(pat->fields[F_PID_MAJOR], &revision_major);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device major revision\n");
+ return ret;
+ }
+
+ ret = regmap_field_read(pat->fields[F_PID_MINOR], &revision_minor);
+ if (ret) {
+ dev_err(pat->dev, "Unable to read device minor revision\n");
+ return ret;
+ }
+
+ dev_info(pat->dev, "Found PAT Rev %d.%d with %d pages\n", revision_major, revision_minor, pat->page_count);
+
+ /* TABLE */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "table");
+ if (!res) {
+ dev_err(pat->dev, "Unable to find TABLE IO resource\n");
+ return -ENOENT;
+ }
+ base = devm_ioremap_resource(pat->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* 256 pages per 4KB of table space */
+ size = resource_size(res);
+ if (size != (pat->page_count << 4))
+ dev_warn(pat->dev, "TABLE region size (%pa) does not match reported page count\n", &size);
+
+ table_map = devm_regmap_init_mmio(pat->dev, base, &ti_pat_regmap_config);
+ if (IS_ERR(table_map)) {
+ dev_err(pat->dev, "Unable to allocate TABLE register map\n");
+ return PTR_ERR(table_map);
+ }
+
+ for (i = F_MMRS_FIELDS + 1; i < F_MAX_FIELDS; i++) {
+ ti_pat_reg_fields[i].id_size = ti_pat_table_index_from_page(pat->page_count);
+ ti_pat_reg_fields[i].id_offset = 8; /* 8 bytes per entry */
+ pat->fields[i] = devm_regmap_field_alloc(pat->dev, table_map, ti_pat_reg_fields[i]);
+ if (IS_ERR(pat->fields[i])) {
+ dev_err(pat->dev, "Unable to allocate Regmap fields\n");
+ return PTR_ERR(pat->fields[i]);
+ }
+ }
+
+ /* WINDOW */
+ ret = device_property_read_u64(pat->dev, "ti,pat-window-base", &pat->window_base);
+ if (ret) {
+ dev_err(pat->dev, "Unable to find ti,pat-window-base\n");
+ return -ENOENT;
+ }
+
+ ret = device_property_read_u64(pat->dev, "ti,pat-window-size", &size);
+ if (ret) {
+ dev_err(pat->dev, "Unable to find ti,pat-window-size\n");
+ return -ENOENT;
+ }
+
+ pat->page_size = PAGE_SIZE;
+ for (page_size = 0; page_size < ARRAY_SIZE(ti_pat_page_sizes); page_size++)
+ if (ti_pat_page_sizes[page_size] == pat->page_size)
+ break;
+ if (page_size == ARRAY_SIZE(ti_pat_page_sizes)) {
+ dev_err(pat->dev, "Unsupported PAGE_SIZE (%d)\n", pat->page_size);
+ return -EINVAL;
+ }
+ regmap_field_write(pat->fields[F_CONTROL_PAGE_SIZE], page_size);
+
+ /* Enable this PAT module */
+ regmap_field_write(pat->fields[F_CONTROL_EN], 1);
+
+ pat->pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!pat->pool)
+ return -ENOMEM;
+ gen_pool_add(pat->pool, pat->window_base, size, -1);
+
+ pat->mdev.minor = MISC_DYNAMIC_MINOR;
+ pat->mdev.name = pdev->name;
+ pat->mdev.fops = &ti_pat_fops;
+ pat->mdev.parent = NULL;
+ ret = misc_register(&pat->mdev);
+ if (ret) {
+ dev_err(pat->dev, "Unable to register misc device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ti_pat_of_match[] = {
+ { .compatible = "ti,j721e-pat", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_pat_of_match);
+
+static struct platform_driver ti_pat_driver = {
+ .probe = ti_pat_probe,
+ .driver = {
+ .name = "ti-pat",
+ .of_match_table = ti_pat_of_match,
+ },
+};
+module_platform_driver(ti_pat_driver);
+
+MODULE_AUTHOR("Andrew Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI PAT mapped DMA-BUF memory exporter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462f609e..a1d9c027022a 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -89,6 +89,18 @@ static int ti_sci_inta_msi_alloc_descs(struct device *dev,
list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
count++;
}
+ for (i = 0; i < res->desc[set].num_sec; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start_sec + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
}
return count;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index ef3f95fefab5..3b0abf38cdf4 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
* Dave Gerlach <d-gerlach@ti.com>
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
@@ -40,12 +42,30 @@
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
static struct wkup_m3_ipc *m3_ipc_state;
static const struct wkup_m3_wakeup_src wakeups[] = {
@@ -66,6 +86,147 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
{.irq_nr = 0, .src = "Unknown"},
};
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to a special region of M3 DMEM
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @data: pointer to the data
+ * @sz: size of the data to copy (limited to 256 bytes)
+ *
+ * Copies an additional blob of data to the wkup_m3 DMEM to be used by the
+ * firmware.
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+ /*
+ * If no name is provided, the user has already been warned and PM
+ * will still work, so return 0.
+ */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+	if (IS_ERR(m3_ipc->dbg_path))
+		return PTR_ERR(m3_ipc->dbg_path);
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +291,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
}
m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
complete(&m3_ipc->sync_complete);
break;
case M3_STATE_MSG_FOR_RESET:
@@ -215,9 +377,21 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
(m3_ipc->state != M3_STATE_UNKNOWN));
}
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @mem_type: memory type value read directly from emif
*
* wkup_m3 must know what memory type is in use to properly suspend
@@ -230,6 +404,7 @@ static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
/**
* wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @addr: Physical address from which resume code should execute
*/
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
@@ -239,6 +414,7 @@ static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
/**
* wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns code representing the status of a low power mode transition.
* 0 - Successful transition
@@ -260,6 +436,7 @@ static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_prepare_low_power - Request preparation for transition to
* low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @state: A kernel suspend state to enter, either MEM or STANDBY
*
* Returns 0 if preparation was successful, otherwise returns error code
@@ -276,12 +453,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
switch (state) {
case WKUP_M3_DEEPSLEEP:
m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
break;
case WKUP_M3_STANDBY:
m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
case WKUP_M3_IDLE:
m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
default:
return 1;
@@ -290,11 +470,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/* Program each required IPC register then write defaults to others */
wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
- wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
- wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
@@ -315,6 +497,7 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/**
* wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns 0 if reset was successful, otherwise returns error code
*/
@@ -362,8 +545,7 @@ static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_set_rtc_only - Set the rtc_only flag
- * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
- * wakeup src value
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*/
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
@@ -428,12 +610,13 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq, ret;
+ int irq, ret, temp;
phandle rproc_phandle;
struct rproc *m3_rproc;
struct resource *res;
struct task_struct *task;
struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
if (!m3_ipc)
@@ -493,6 +676,23 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->ops = &ipc_ops;
+ if (of_find_property(np, "ti,needs-vtt-toggle", NULL) &&
+ !(of_property_read_u32(np, "ti,vtt-gpio-pin", &temp))) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "ti,scale-data-fw",
+ &m3_ipc->sd_fw_name);
+	if (ret)
+		dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+
/*
* Wait for firmware loading completion in a thread so we
* can boot the wkup_m3 as soon as it's ready without holding
@@ -507,6 +707,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
return 0;
err_put_rproc:
@@ -518,6 +720,8 @@ err_free_mbox:
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
mbox_free_channel(m3_ipc_state->mbox);
rproc_shutdown(m3_ipc_state->rproc);
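For reference, a minimal sketch (illustrative, not part of this patch) of how
the fields defined above pack into the single word that
wkup_m3_prepare_low_power() writes to IPC register 4; the enable_late_halt
debugfs file toggles the IPC_DBG_HALT bit at runtime:

	/* Illustrative only: compose the IPC register-4 word. */
	static u32 ipc_reg4_word(u32 mem_type, int vtt_gpio, bool io_iso,
				 bool dbg_halt)
	{
		u32 val = (mem_type << IPC_MEM_TYPE_SHIFT) & IPC_MEM_TYPE_MASK;

		if (vtt_gpio >= 0)	/* "ti,needs-vtt-toggle" present */
			val |= (1 << IPC_VTT_STAT_SHIFT) |
			       ((vtt_gpio << IPC_VTT_GPIO_PIN_SHIFT) &
				IPC_VTT_GPIO_PIN_MASK);
		if (io_iso)		/* "ti,set-io-isolation" present */
			val |= 1 << IPC_IO_ISOLATION_STAT_SHIFT;
		if (dbg_halt)		/* debugfs enable_late_halt */
			val |= 1 << IPC_DBG_HALT_SHIFT;
		return val;
	}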
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 2e1255bf1b42..3f64275a3140 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+#include <linux/sys_soc.h>
#include <linux/timer.h>
#define CQSPI_NAME "cadence-qspi"
@@ -41,23 +42,35 @@
struct cqspi_st;
+struct phy_setting {
+ u8 rx;
+ u8 tx;
+ u8 read_delay;
+};
+
struct cqspi_flash_pdata {
- struct cqspi_st *cqspi;
- u32 clk_rate;
- u32 read_delay;
- u32 tshsl_ns;
- u32 tsd2d_ns;
- u32 tchsh_ns;
- u32 tslch_ns;
- u8 inst_width;
- u8 addr_width;
- u8 data_width;
- u8 cs;
+ struct cqspi_st *cqspi;
+ u32 clk_rate;
+ u32 read_delay;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+ u8 inst_width;
+ u8 addr_width;
+ u8 data_width;
+ bool dtr;
+ u8 cs;
+ bool use_phy;
+ struct phy_setting phy_setting;
+ struct spi_mem_op phy_read_op;
+ u32 phy_tx_start;
+ u32 phy_tx_end;
};
struct cqspi_st {
struct platform_device *pdev;
-
+ struct spi_master *master;
struct clk *clk;
unsigned int sclk;
@@ -79,6 +92,7 @@ struct cqspi_st {
u32 trigger_address;
u32 wr_delay;
bool use_direct_mode;
+ bool use_direct_mode_wr;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -106,11 +120,15 @@ struct cqspi_driver_platdata {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_PHY_EN BIT(3)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
+#define CQSPI_REG_CONFIG_PHY_PIPELINE BIT(25)
+#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
@@ -146,6 +164,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
+#define CQSPI_REG_READCAPTURE_DQS_LSB 8
#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
@@ -173,6 +192,9 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
+#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
+
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
@@ -188,6 +210,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
@@ -198,6 +221,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
@@ -214,6 +238,21 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+#define CQSPI_REG_POLLING_STATUS 0xB0
+#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16
+
+#define CQSPI_REG_PHY_CONFIG 0xB4
+#define CQSPI_REG_PHY_CONFIG_RX_DEL_LSB 0
+#define CQSPI_REG_PHY_CONFIG_RX_DEL_MASK 0x7F
+#define CQSPI_REG_PHY_CONFIG_TX_DEL_LSB 16
+#define CQSPI_REG_PHY_CONFIG_TX_DEL_MASK 0x7F
+#define CQSPI_REG_PHY_CONFIG_RESYNC BIT(31)
+
+#define CQSPI_REG_OP_EXT_LOWER 0xE0
+#define CQSPI_REG_OP_EXT_READ_LSB 24
+#define CQSPI_REG_OP_EXT_WRITE_LSB 16
+#define CQSPI_REG_OP_EXT_STIG_LSB 0
+
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
@@ -234,6 +273,578 @@ struct cqspi_driver_platdata {
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+#define CQSPI_PHY_INIT_RD 1
+#define CQSPI_PHY_MAX_RD 4
+#define CQSPI_PHY_MAX_RX 63
+#define CQSPI_PHY_MAX_TX 63
+#define CQSPI_PHY_LOW_RX_BOUND 15
+#define CQSPI_PHY_HIGH_RX_BOUND 25
+#define CQSPI_PHY_LOW_TX_BOUND 32
+#define CQSPI_PHY_HIGH_TX_BOUND 48
+#define CQSPI_PHY_TX_LOOKUP_LOW_BOUND 24
+#define CQSPI_PHY_TX_LOOKUP_HIGH_BOUND 38
+
+#define CQSPI_PHY_DEFAULT_TEMP 45
+#define CQSPI_PHY_MIN_TEMP -45
+#define CQSPI_PHY_MAX_TEMP 130
+#define CQSPI_PHY_MID_TEMP (CQSPI_PHY_MIN_TEMP + \
+ ((CQSPI_PHY_MAX_TEMP - CQSPI_PHY_MIN_TEMP) / 2))
+
+static const u8 phy_tuning_pattern[] = {
+0xFE, 0xFF, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0xFE, 0xFE, 0x01, 0x01,
+0x01, 0x01, 0x00, 0x00, 0xFE, 0xFE, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+0x00, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xFE, 0xFE, 0xFF, 0x01,
+0x01, 0x01, 0x01, 0x01, 0xFE, 0x00, 0xFE, 0xFE, 0x01, 0x01, 0x01, 0x01, 0xFE,
+0x00, 0xFE, 0xFE, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0xFE, 0xFE,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0xFE, 0xFE, 0xFF, 0x01, 0x01, 0x01, 0x01,
+0x01, 0x00, 0xFE, 0xFE, 0xFE, 0x01, 0x01, 0x01, 0x01, 0x00, 0xFE, 0xFE, 0xFE,
+0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFE, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
+0xFF, 0x00, 0xFE, 0xFE, 0xFE, 0xFF, 0x01, 0x01, 0x01, 0x01, 0x01, 0xFE, 0xFE,
+0xFE, 0xFE, 0x01, 0x01, 0x01, 0x01, 0xFE, 0xFE, 0xFE, 0xFE, 0x01,
+};
+
+static void cqspi_set_tx_dll(void __iomem *reg_base, u8 dll)
+{
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_PHY_CONFIG);
+ reg &= ~(CQSPI_REG_PHY_CONFIG_TX_DEL_MASK <<
+ CQSPI_REG_PHY_CONFIG_TX_DEL_LSB);
+ reg |= (dll & CQSPI_REG_PHY_CONFIG_TX_DEL_MASK) <<
+ CQSPI_REG_PHY_CONFIG_TX_DEL_LSB;
+ reg |= CQSPI_REG_PHY_CONFIG_RESYNC;
+ writel(reg, reg_base + CQSPI_REG_PHY_CONFIG);
+}
+
+static void cqspi_set_rx_dll(void __iomem *reg_base, u8 dll)
+{
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_PHY_CONFIG);
+ reg &= ~(CQSPI_REG_PHY_CONFIG_RX_DEL_MASK <<
+ CQSPI_REG_PHY_CONFIG_RX_DEL_LSB);
+ reg |= (dll & CQSPI_REG_PHY_CONFIG_RX_DEL_MASK) <<
+ CQSPI_REG_PHY_CONFIG_RX_DEL_LSB;
+ reg |= CQSPI_REG_PHY_CONFIG_RESYNC;
+ writel(reg, reg_base + CQSPI_REG_PHY_CONFIG);
+}
+
+/* TODO: Figure out how to get the temperature here. */
+static int cqspi_get_temp(int *temp)
+{
+ return -EOPNOTSUPP;
+}
+
+static void cqspi_phy_apply_setting(struct cqspi_flash_pdata *f_pdata,
+ struct phy_setting *phy)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ cqspi_set_rx_dll(cqspi->iobase, phy->rx);
+ cqspi_set_tx_dll(cqspi->iobase, phy->tx);
+ f_pdata->phy_setting.read_delay = phy->read_delay;
+}
+
+static int cqspi_phy_check_pattern(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem)
+{
+ struct spi_mem_op op = f_pdata->phy_read_op;
+ u8 *read_data;
+ unsigned int size = sizeof(phy_tuning_pattern);
+ int ret;
+
+ read_data = kmalloc(size, GFP_KERNEL);
+ if (!read_data)
+ return -ENOMEM;
+
+ op.data.buf.in = read_data;
+ op.data.nbytes = size;
+
+ ret = spi_mem_exec_op(mem, &op);
+ if (ret)
+ goto out;
+
+	if (memcmp(read_data, phy_tuning_pattern, size)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ kfree(read_data);
+ return ret;
+}
+
+static int cqspi_find_rx_low(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem, struct phy_setting *phy)
+{
+ struct device *dev = &f_pdata->cqspi->pdev->dev;
+ int ret;
+
+ do {
+ phy->rx = 0;
+ do {
+ cqspi_phy_apply_setting(f_pdata, phy);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (!ret)
+ return 0;
+
+ phy->rx++;
+ } while (phy->rx <= CQSPI_PHY_LOW_RX_BOUND);
+
+ phy->read_delay++;
+ } while (phy->read_delay <= CQSPI_PHY_MAX_RD);
+
+ dev_dbg(dev, "Unable to find RX low\n");
+ return -ENOENT;
+}
+
+static int cqspi_find_rx_high(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem, struct phy_setting *phy)
+{
+ struct device *dev = &f_pdata->cqspi->pdev->dev;
+ int ret;
+
+ do {
+ phy->rx = CQSPI_PHY_MAX_RX;
+ do {
+ cqspi_phy_apply_setting(f_pdata, phy);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (!ret)
+ return 0;
+
+ phy->rx--;
+ } while (phy->rx >= CQSPI_PHY_HIGH_RX_BOUND);
+
+ phy->read_delay++;
+ } while (phy->read_delay <= CQSPI_PHY_MAX_RD);
+
+ dev_dbg(dev, "Unable to find RX high\n");
+ return -ENOENT;
+}
+
+static int cqspi_find_tx_low(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem, struct phy_setting *phy)
+{
+ struct device *dev = &f_pdata->cqspi->pdev->dev;
+ int ret;
+
+ do {
+ phy->tx = 0;
+ do {
+ cqspi_phy_apply_setting(f_pdata, phy);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (!ret)
+ return 0;
+
+ phy->tx++;
+ } while (phy->tx <= CQSPI_PHY_LOW_TX_BOUND);
+
+ phy->read_delay++;
+ } while (phy->read_delay <= CQSPI_PHY_MAX_RD);
+
+ dev_dbg(dev, "Unable to find TX low\n");
+ return -ENOENT;
+}
+
+static int cqspi_find_tx_high(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem, struct phy_setting *phy)
+{
+ struct device *dev = &f_pdata->cqspi->pdev->dev;
+ int ret;
+
+ do {
+ phy->tx = CQSPI_PHY_MAX_TX;
+ do {
+ cqspi_phy_apply_setting(f_pdata, phy);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (!ret)
+ return 0;
+
+ phy->tx--;
+ } while (phy->tx >= CQSPI_PHY_HIGH_TX_BOUND);
+
+ phy->read_delay++;
+ } while (phy->read_delay <= CQSPI_PHY_MAX_RD);
+
+ dev_dbg(dev, "Unable to find TX high\n");
+ return -ENOENT;
+}
+
+static int cqspi_phy_find_gaplow(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem,
+ struct phy_setting *bottomleft,
+ struct phy_setting *topright,
+ struct phy_setting *gaplow)
+{
+ struct phy_setting left, right, mid;
+ int ret;
+
+ left = *bottomleft;
+ right = *topright;
+
+ mid.tx = left.tx + ((right.tx - left.tx) / 2);
+ mid.rx = left.rx + ((right.rx - left.rx) / 2);
+ mid.read_delay = left.read_delay;
+
+ do {
+ cqspi_phy_apply_setting(f_pdata, &mid);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ /* The pattern was not found. Go to the lower half. */
+ right.tx = mid.tx;
+ right.rx = mid.rx;
+
+ mid.tx = left.tx + ((mid.tx - left.tx) / 2);
+ mid.rx = left.rx + ((mid.rx - left.rx) / 2);
+ } else {
+ /* The pattern was found. Go to the upper half. */
+ left.tx = mid.tx;
+ left.rx = mid.rx;
+
+ mid.tx = mid.tx + ((right.tx - mid.tx) / 2);
+ mid.rx = mid.rx + ((right.rx - mid.rx) / 2);
+ }
+
+ /* Break the loop if the window has closed. */
+ } while ((right.tx - left.tx >= 2) && (right.rx - left.rx >= 2));
+
+ *gaplow = mid;
+ return 0;
+}
+
+static int cqspi_phy_find_gaphigh(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem,
+ struct phy_setting *bottomleft,
+ struct phy_setting *topright,
+ struct phy_setting *gaphigh)
+{
+ struct phy_setting left, right, mid;
+ int ret;
+
+ left = *bottomleft;
+ right = *topright;
+
+ mid.tx = left.tx + ((right.tx - left.tx) / 2);
+ mid.rx = left.rx + ((right.rx - left.rx) / 2);
+ mid.read_delay = right.read_delay;
+
+ do {
+ cqspi_phy_apply_setting(f_pdata, &mid);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ /* The pattern was not found. Go to the upper half. */
+ left.tx = mid.tx;
+ left.rx = mid.rx;
+
+ mid.tx = mid.tx + ((right.tx - mid.tx) / 2);
+ mid.rx = mid.rx + ((right.rx - mid.rx) / 2);
+ } else {
+ /* The pattern was found. Go to the lower half. */
+ right.tx = mid.tx;
+ right.rx = mid.rx;
+
+ mid.tx = left.tx + ((mid.tx - left.tx) / 2);
+ mid.rx = left.rx + ((mid.rx - left.rx) / 2);
+ }
+
+ /* Break the loop if the window has closed. */
+ } while ((right.tx - left.tx >= 2) && (right.rx - left.rx >= 2));
+
+ *gaphigh = mid;
+ return 0;
+}
+
+static int cqspi_phy_calibrate(struct cqspi_flash_pdata *f_pdata,
+ struct spi_mem *mem)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ struct phy_setting rxlow, rxhigh, txlow, txhigh, temp;
+ struct phy_setting bottomleft, topright, searchpoint, gaplow, gaphigh;
+ int ret, tmp;
+
+ f_pdata->use_phy = true;
+
+ /* Look for RX boundaries at lower TX range. */
+ rxlow.tx = f_pdata->phy_tx_start;
+
+ do {
+ dev_dbg(dev, "Searching for rxlow on TX = %d\n", rxlow.tx);
+ rxlow.read_delay = CQSPI_PHY_INIT_RD;
+ ret = cqspi_find_rx_low(f_pdata, mem, &rxlow);
+ } while (ret && ++rxlow.tx <= CQSPI_PHY_TX_LOOKUP_LOW_BOUND);
+
+ if (ret)
+ goto out;
+ dev_dbg(dev, "rxlow: RX: %d TX: %d RD: %d\n", rxlow.rx, rxlow.tx,
+ rxlow.read_delay);
+
+ rxhigh.tx = rxlow.tx;
+ rxhigh.read_delay = rxlow.read_delay;
+ ret = cqspi_find_rx_high(f_pdata, mem, &rxhigh);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "rxhigh: RX: %d TX: %d RD: %d\n", rxhigh.rx, rxhigh.tx,
+ rxhigh.read_delay);
+
+ /*
+ * Check a different point if rxlow and rxhigh are on the same read
+ * delay. This avoids mistaking the failing region for an RX boundary.
+ */
+ if (rxlow.read_delay == rxhigh.read_delay) {
+ dev_dbg(dev,
+ "rxlow and rxhigh at the same read delay.\n");
+
+ /* Look for RX boundaries at upper TX range. */
+ temp.tx = f_pdata->phy_tx_end;
+
+ do {
+ dev_dbg(dev, "Searching for rxlow on TX = %d\n",
+ temp.tx);
+ temp.read_delay = CQSPI_PHY_INIT_RD;
+ ret = cqspi_find_rx_low(f_pdata, mem, &temp);
+ } while (ret && --temp.tx >= CQSPI_PHY_TX_LOOKUP_HIGH_BOUND);
+
+ if (ret)
+ goto out;
+ dev_dbg(dev, "rxlow: RX: %d TX: %d RD: %d\n", temp.rx, temp.tx,
+ temp.read_delay);
+
+ if (temp.rx < rxlow.rx) {
+ rxlow = temp;
+ dev_dbg(dev, "Updating rxlow to the one at TX = 48\n");
+ }
+
+ /* Find RX max. */
+ ret = cqspi_find_rx_high(f_pdata, mem, &temp);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "rxhigh: RX: %d TX: %d RD: %d\n", temp.rx, temp.tx,
+ temp.read_delay);
+
+ if (temp.rx < rxhigh.rx) {
+ rxhigh = temp;
+ dev_dbg(dev, "Updating rxhigh to the one at TX = 48\n");
+ }
+ }
+
+ /* Look for TX boundaries at 1/4 of RX window. */
+ txlow.rx = rxlow.rx + ((rxhigh.rx - rxlow.rx) / 4);
+ txhigh.rx = txlow.rx;
+
+ txlow.read_delay = CQSPI_PHY_INIT_RD;
+ ret = cqspi_find_tx_low(f_pdata, mem, &txlow);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "txlow: RX: %d TX: %d RD: %d\n", txlow.rx, txlow.tx,
+ txlow.read_delay);
+
+ txhigh.read_delay = txlow.read_delay;
+ ret = cqspi_find_tx_high(f_pdata, mem, &txhigh);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "txhigh: RX: %d TX: %d RD: %d\n", txhigh.rx, txhigh.tx,
+ txhigh.read_delay);
+
+ /*
+ * Check a different point if txlow and txhigh are on the same read
+	 * delay. This avoids mistaking the failing region for a TX boundary.
+ */
+ if (txlow.read_delay == txhigh.read_delay) {
+ /* Look for TX boundaries at 3/4 of RX window. */
+ temp.rx = rxlow.rx + (3 * (rxhigh.rx - rxlow.rx) / 4);
+ temp.read_delay = CQSPI_PHY_INIT_RD;
+ dev_dbg(dev,
+ "txlow and txhigh at the same read delay. Searching at RX = %d\n",
+ temp.rx);
+
+ ret = cqspi_find_tx_low(f_pdata, mem, &temp);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "txlow: RX: %d TX: %d RD: %d\n", temp.rx, temp.tx,
+ temp.read_delay);
+
+ if (temp.tx < txlow.tx) {
+ txlow = temp;
+ dev_dbg(dev, "Updating txlow with the one at RX = %d\n",
+ txlow.rx);
+ }
+
+ ret = cqspi_find_tx_high(f_pdata, mem, &temp);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "txhigh: RX: %d TX: %d RD: %d\n", temp.rx, temp.tx,
+ temp.read_delay);
+
+ if (temp.tx < txhigh.tx) {
+ txhigh = temp;
+ dev_dbg(dev, "Updating txhigh with the one at RX = %d\n",
+ txhigh.rx);
+ }
+ }
+
+ /*
+ * Set bottom left and top right corners. These are theoretical
+ * corners. They may not actually be "good" points. But the longest
+ * diagonal will be between these corners.
+ */
+ bottomleft.tx = txlow.tx;
+ bottomleft.rx = rxlow.rx;
+ if (txlow.read_delay <= rxlow.read_delay)
+ bottomleft.read_delay = txlow.read_delay;
+ else
+ bottomleft.read_delay = rxlow.read_delay;
+
+ temp = bottomleft;
+ temp.tx += 4;
+ temp.rx += 4;
+ cqspi_phy_apply_setting(f_pdata, &temp);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ temp.read_delay--;
+ cqspi_phy_apply_setting(f_pdata, &temp);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ }
+
+ if (!ret)
+ bottomleft.read_delay = temp.read_delay;
+
+ topright.tx = txhigh.tx;
+ topright.rx = rxhigh.rx;
+ if (txhigh.read_delay >= rxhigh.read_delay)
+ topright.read_delay = txhigh.read_delay;
+ else
+ topright.read_delay = rxhigh.read_delay;
+
+ temp = topright;
+ temp.tx -= 4;
+ temp.rx -= 4;
+ cqspi_phy_apply_setting(f_pdata, &temp);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ temp.read_delay++;
+ cqspi_phy_apply_setting(f_pdata, &temp);
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ }
+
+ if (!ret)
+ topright.read_delay = temp.read_delay;
+
+ dev_dbg(dev, "topright: RX: %d TX: %d RD: %d\n", topright.rx,
+ topright.tx, topright.read_delay);
+ dev_dbg(dev, "bottomleft: RX: %d TX: %d RD: %d\n", bottomleft.rx,
+ bottomleft.tx, bottomleft.read_delay);
+
+ ret = cqspi_phy_find_gaplow(f_pdata, mem, &bottomleft, &topright,
+ &gaplow);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "gaplow: RX: %d TX: %d RD: %d\n", gaplow.rx, gaplow.tx,
+ gaplow.read_delay);
+
+ if (bottomleft.read_delay == topright.read_delay) {
+ /*
+ * If there is only one passing region, it means that the "true"
+ * topright is too small to find, so the start of the failing
+ * region is a good approximation. Put the tuning point in the
+ * middle and adjust for temperature.
+ */
+ topright = gaplow;
+ searchpoint.read_delay = bottomleft.read_delay;
+ searchpoint.tx = bottomleft.tx +
+ ((topright.tx - bottomleft.tx) / 2);
+ searchpoint.rx = bottomleft.rx +
+ ((topright.rx - bottomleft.rx) / 2);
+
+ ret = cqspi_get_temp(&tmp);
+ if (ret) {
+ /*
+ * Assume room temperature if it couldn't be obtained
+ * from the thermal sensor.
+ *
+ * TODO: Change it to dev_warn once support for finding
+ * out the temperature is added.
+ */
+ dev_dbg(dev,
+ "Unable to get temperature. Assuming room temperature\n");
+ tmp = CQSPI_PHY_DEFAULT_TEMP;
+ }
+
+ if (tmp < CQSPI_PHY_MIN_TEMP || tmp > CQSPI_PHY_MAX_TEMP) {
+ dev_err(dev,
+ "Temperature outside operating range: %dC\n",
+ tmp);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Avoid a divide-by-zero. */
+ if (tmp == CQSPI_PHY_MID_TEMP)
+ tmp++;
+ dev_dbg(dev, "Temperature: %dC\n", tmp);
+
+ searchpoint.tx += (topright.tx - bottomleft.tx) /
+ (330 / (tmp - CQSPI_PHY_MID_TEMP));
+ searchpoint.rx += (topright.rx - bottomleft.rx) /
+ (330 / (tmp - CQSPI_PHY_MID_TEMP));
+ } else {
+ /*
+ * If there are two passing regions, find the start and end of
+ * the second one.
+ */
+ ret = cqspi_phy_find_gaphigh(f_pdata, mem, &bottomleft,
+ &topright, &gaphigh);
+ if (ret)
+ goto out;
+ dev_dbg(dev, "gaphigh: RX: %d TX: %d RD: %d\n", gaphigh.rx,
+ gaphigh.tx, gaphigh.read_delay);
+
+ /*
+ * Place the final tuning point in the corner furthest from the
+ * failing region but leave some margin for temperature changes.
+ */
+ if ((abs(gaplow.tx - bottomleft.tx) +
+ abs(gaplow.rx - bottomleft.rx)) <
+ (abs(gaphigh.tx - topright.tx) +
+ abs(gaphigh.rx - topright.rx))) {
+ searchpoint = topright;
+ searchpoint.tx -= 16;
+ searchpoint.rx -= (16 * (topright.rx - bottomleft.rx)) /
+ (topright.tx - bottomleft.tx);
+ } else {
+ searchpoint = bottomleft;
+ searchpoint.tx += 16;
+ searchpoint.rx += (16 * (topright.rx - bottomleft.rx)) /
+ (topright.tx - bottomleft.tx);
+ }
+ }
+
+ /* Set the final PHY settings and check if they are working. */
+ cqspi_phy_apply_setting(f_pdata, &searchpoint);
+ dev_dbg(dev, "Final tuning point: RX: %d TX: %d RD: %d\n",
+ searchpoint.rx, searchpoint.tx, searchpoint.read_delay);
+
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ dev_err(dev,
+ "Failed to find pattern at final calibration point\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+ f_pdata->phy_setting.read_delay = searchpoint.read_delay;
+ f_pdata->phy_setting.rx = searchpoint.rx;
+ f_pdata->phy_setting.tx = searchpoint.tx;
+out:
+ if (ret)
+ f_pdata->use_phy = false;
+ return ret;
+}
+
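To make the temperature adjustment above concrete, a short worked example
using this patch's constants (CQSPI_PHY_MID_TEMP = -45 + (130 - (-45)) / 2
= 42):

	/* Skew divisor is 330 / (tmp - CQSPI_PHY_MID_TEMP):             */
	/* at tmp = 45C (assumed room temp): 330 / 3  = 110,             */
	/*   so the tuning point moves by only window / 110;             */
	/* at tmp = 130C:                    330 / 88 = 3 (integer div), */
	/*   shifting the point about a third of the window.             */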
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
u32 val;
@@ -288,6 +899,88 @@ static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata)
return rdreg;
}
+static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
+{
+	unsigned int dummy_clk;
+
+	if (!op->dummy.nbytes)
+		return 0;
+
+	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
+ if (dtr)
+ dummy_clk /= 2;
+
+ return dummy_clk;
+}
+
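A quick sanity check of the helper above (hypothetical op values):

	/* op->dummy.nbytes = 4, op->dummy.buswidth = 8, DTR enabled:      */
	/* dummy_clk = 4 * (8 / 8) = 4, halved to 2 because DTR transfers  */
	/* a byte on each clock edge.                                      */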
+static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+
+ /*
+ * For an op to be DTR, cmd phase along with every other non-empty
+ * phase should have dtr field set to 1. If an op phase has zero
+ * nbytes, ignore its dtr field; otherwise, check its dtr field.
+ */
+ f_pdata->dtr = op->cmd.dtr &&
+ (!op->addr.nbytes || op->addr.dtr) &&
+ (!op->data.nbytes || op->data.dtr);
+
+ switch (op->data.buswidth) {
+ case 0:
+ break;
+ case 1:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case 2:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case 4:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ case 8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (f_pdata->dtr) {
+ switch (op->cmd.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (op->addr.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (op->data.buswidth) {
+ case 0:
+ break;
+ case 8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
const unsigned int poll_idle_retry = 3;
@@ -345,19 +1038,85 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
+static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op,
+ unsigned int shift)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ u8 ext;
+
+ if (op->cmd.nbytes != 2)
+ return -EINVAL;
+
+ /* Opcode extension is the LSB. */
+ ext = op->cmd.opcode & 0xff;
+
+ reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
+ reg &= ~(0xff << shift);
+ reg |= ext << shift;
+ writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);
+
+ return 0;
+}
+
+static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op, unsigned int shift,
+ bool enable)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ int ret;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ /*
+ * We enable dual byte opcode here. The callers have to set up the
+ * extension opcode based on which type of operation it is.
+ */
+ if (enable) {
+ reg |= CQSPI_REG_CONFIG_DTR_PROTO;
+ reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
+
+ /* Set up command opcode extension. */
+ ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
+ if (ret)
+ return ret;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
+ reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ return cqspi_wait_idle(cqspi);
+}
+
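Concretely, for a hypothetical 2-byte DTR command 0x05FA:

	u8 ext    = 0x05FA & 0xff;	/* 0xFA, programmed into OP_EXT_LOWER */
	u8 opcode = 0x05FA >> 8;	/* 0x05, sent as the primary opcode   */
					/* by the callers below               */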
static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
u8 *rxbuf = op->data.buf.in;
- u8 opcode = op->cmd.opcode;
+ u8 opcode;
size_t n_rx = op->data.nbytes;
unsigned int rdreg;
unsigned int reg;
+ unsigned int dummy_clk;
size_t read_len;
int status;
+ status = cqspi_set_protocol(f_pdata, op);
+ if (status)
+ return status;
+
+ status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ f_pdata->dtr);
+ if (status)
+ return status;
+
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
dev_err(&cqspi->pdev->dev,
"Invalid input argument, len %zu rxbuf 0x%p\n",
@@ -365,16 +1124,40 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
rdreg = cqspi_calc_rdreg(f_pdata);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+ dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ return -EOPNOTSUPP;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
+ << CQSPI_REG_CMDCTRL_DUMMY_LSB;
+
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+
+ /* setup ADDR BIT field */
+ if (op->addr.nbytes) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((op->addr.nbytes - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
+ }
+
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
@@ -393,6 +1176,9 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
memcpy(rxbuf, &reg, read_len);
}
+ /* Reset CMD_CTRL Reg once command read completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
return 0;
}
@@ -401,12 +1187,22 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
{
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
- const u8 opcode = op->cmd.opcode;
+ u8 opcode;
const u8 *txbuf = op->data.buf.out;
size_t n_tx = op->data.nbytes;
unsigned int reg;
unsigned int data;
size_t write_len;
+ int ret;
+
+ ret = cqspi_set_protocol(f_pdata, op);
+ if (ret)
+ return ret;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(&cqspi->pdev->dev,
@@ -415,6 +1211,14 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
return -EINVAL;
}
+ reg = cqspi_calc_rdreg(f_pdata);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (op->addr.nbytes) {
@@ -444,7 +1248,12 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
}
}
- return cqspi_exec_flash_cmd(cqspi, reg);
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+
+ /* Reset CMD_CTRL Reg once command write completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
+ return ret;
}
static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
@@ -454,12 +1263,25 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
void __iomem *reg_base = cqspi->iobase;
unsigned int dummy_clk = 0;
unsigned int reg;
+ int ret;
+ u8 opcode;
- reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
+
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
+
+ reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(f_pdata);
/* Setup dummy clock cycles */
- dummy_clk = op->dummy.nbytes * 8;
+ dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
+
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
@@ -474,6 +1296,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
+ readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
return 0;
}
@@ -502,6 +1325,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
+ readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
while (remaining > 0) {
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
@@ -573,19 +1397,56 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
unsigned int reg;
+ int ret;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
+ u8 opcode;
+
+ ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB,
+ f_pdata->dtr);
+ if (ret)
+ return ret;
+
+ if (f_pdata->dtr)
+ opcode = op->cmd.opcode >> 8;
+ else
+ opcode = op->cmd.opcode;
/* Set opcode. */
- reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
reg = cqspi_calc_rdreg(f_pdata);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+ /*
+ * SPI NAND flashes require the address of the status register to be
+ * passed in the Read SR command. Also, some SPI NOR flashes like the
+	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
+ * command in DTR mode.
+ *
+ * But this controller does not support address phase in the Read SR
+ * command when doing auto-HW polling. So, disable write completion
+ * polling on the controller's side. spinand and spi-nor will take
+ * care of polling the status register.
+ */
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+
+ /*
+	 * DAC mode requires auto polling, as the flash needs to be polled
+	 * for write completion in case of a bubble in the SPI transaction
+	 * due to a slow CPU/DMA master.
+ */
+ cqspi->use_direct_mode_wr = false;
+
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
+ readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
return 0;
}
@@ -611,6 +1472,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */
+
/*
* As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
* Controller programming sequence, couple of cycles of
@@ -770,6 +1633,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
const bool bypass,
+ const bool dqs,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@@ -788,6 +1652,11 @@ static void cqspi_readdata_capture(struct cqspi_st *cqspi,
reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
<< CQSPI_REG_READCAPTURE_DELAY_LSB;
+ if (dqs)
+ reg |= (1 << CQSPI_REG_READCAPTURE_DQS_LSB);
+ else
+ reg &= ~(1 << CQSPI_REG_READCAPTURE_DQS_LSB);
+
writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}
@@ -806,6 +1675,64 @@ static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
+static void cqspi_phy_enable(struct cqspi_flash_pdata *f_pdata, bool enable)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg;
+ u8 dummy;
+
+ if (enable) {
+ cqspi_readdata_capture(cqspi, 1, true,
+ f_pdata->phy_setting.read_delay);
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_PHY_EN |
+ CQSPI_REG_CONFIG_PHY_PIPELINE;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ /*
+ * Reduce dummy cycle by 1. This is a requirement of PHY mode
+ * operation for correctly reading the data.
+ */
+ reg = readl(reg_base + CQSPI_REG_RD_INSTR);
+ dummy = (reg >> CQSPI_REG_RD_INSTR_DUMMY_LSB) &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK;
+ dummy--;
+ reg &= ~(CQSPI_REG_RD_INSTR_DUMMY_MASK <<
+ CQSPI_REG_RD_INSTR_DUMMY_LSB);
+
+ reg |= (dummy & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+ } else {
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en, false,
+ f_pdata->read_delay);
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_PHY_EN |
+ CQSPI_REG_CONFIG_PHY_PIPELINE);
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ /*
+ * Dummy cycles were decremented when enabling PHY. Increment
+ * dummy cycle by 1 to restore the original value.
+ */
+ reg = readl(reg_base + CQSPI_REG_RD_INSTR);
+ dummy = (reg >> CQSPI_REG_RD_INSTR_DUMMY_LSB) &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK;
+ dummy++;
+ reg &= ~(CQSPI_REG_RD_INSTR_DUMMY_MASK <<
+ CQSPI_REG_RD_INSTR_DUMMY_LSB);
+
+ reg |= (dummy & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+ }
+
+ cqspi_wait_idle(cqspi);
+}
+
static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
unsigned long sclk)
{
@@ -827,7 +1754,7 @@ static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(f_pdata);
- cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en, false,
f_pdata->read_delay);
}
@@ -835,35 +1762,6 @@ static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
cqspi_controller_enable(cqspi, 1);
}
-static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
- const struct spi_mem_op *op)
-{
- f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
- if (op->data.dir == SPI_MEM_DATA_IN) {
- switch (op->data.buswidth) {
- case 1:
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- break;
- case 2:
- f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
- break;
- case 4:
- f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
- break;
- case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
@@ -881,7 +1779,16 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
if (ret)
return ret;
- if (cqspi->use_direct_mode && ((to + len) <= cqspi->ahb_size)) {
+ /*
+ * Some flashes like the Cypress Semper flash expect a dummy 4-byte
+ * address (all 0s) with the read status register command in DTR mode.
+ * But this controller does not support sending dummy address bytes to
+ * the flash when it is polling the write completion register in DTR
+	 * mode. So we cannot use direct mode for writes when in DTR mode.
+ */
+ if (!f_pdata->dtr && cqspi->use_direct_mode &&
+ ((to + len) <= cqspi->ahb_size) && cqspi->use_direct_mode_wr) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
@@ -889,6 +1796,39 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}
+/*
+ * Check if PHY mode can be used on the given op. This is assuming it will be a
+ * DAC mode read, since PHY won't work on any other type of operation anyway.
+ */
+static bool cqspi_phy_op_eligible(const struct spi_mem_op *op)
+{
+ /* PHY is only tuned for 8D-8D-8D. */
+ if (!(op->cmd.dtr && op->addr.dtr && op->dummy.dtr && op->data.dtr))
+ return false;
+ if (op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->dummy.nbytes && op->dummy.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+
+ return true;
+}
+
+static bool cqspi_use_phy(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ if (!f_pdata->use_phy)
+ return false;
+
+ if (op->data.nbytes < 16)
+ return false;
+
+ return cqspi_phy_op_eligible(op);
+}
+
static void cqspi_rx_dma_callback(void *param)
{
struct cqspi_st *cqspi = param;
@@ -896,8 +1836,8 @@ static void cqspi_rx_dma_callback(void *param)
complete(&cqspi->rx_dma_complete);
}
-static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
- u_char *buf, loff_t from, size_t len)
+static int cqspi_direct_read_dma(struct cqspi_flash_pdata *f_pdata,
+ u_char *buf, loff_t from, size_t len)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
struct device *dev = &cqspi->pdev->dev;
@@ -909,11 +1849,6 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
dma_addr_t dma_dst;
struct device *ddev;
- if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
- memcpy_fromio(buf, cqspi->ahb_base + from, len);
- return 0;
- }
-
ddev = cqspi->rx_chan->device->dev;
dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma_dst)) {
@@ -942,7 +1877,7 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
dma_async_issue_pending(cqspi->rx_chan);
if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
- msecs_to_jiffies(len))) {
+ msecs_to_jiffies(max_t(size_t, len, 500)))) {
dmaengine_terminate_sync(cqspi->rx_chan);
dev_err(dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
@@ -955,6 +1890,104 @@ err_unmap:
return ret;
}
+static void cqspi_memcpy_fromio(const struct spi_mem_op *op, void *to,
+ const void __iomem *from, size_t count)
+{
+ if (op->data.buswidth == 8 && op->data.dtr) {
+ /*
+ * 8D-8D-8D ops with odd length should be rejected by
+ * supports_op() so no need to worry about that.
+ */
+ while (count && !IS_ALIGNED((unsigned long)from, 4)) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+
+ /*
+ * The controller can work with both 32-bit and 64-bit
+ * platforms. 32-bit platforms won't have a readq. So use a
+ * readl instead.
+ */
+ while (count >= 4) {
+ *(u32 *)to = __raw_readl(from);
+ from += 4;
+ to += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+
+ return;
+ }
+
+ memcpy_fromio(to, from, count);
+}
+
+static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ loff_t from = op->addr.val;
+ loff_t from_aligned, to_aligned;
+ size_t len = op->data.nbytes;
+ size_t len_aligned;
+ u_char *buf = op->data.buf.in;
+ int ret;
+
+ if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
+ cqspi_memcpy_fromio(op, buf, cqspi->ahb_base + from, len);
+ return 0;
+ }
+
+ if (!cqspi_use_phy(f_pdata, op))
+ return cqspi_direct_read_dma(f_pdata, buf, from, len);
+
+ /*
+ * PHY reads must be 16-byte aligned, and they must be a multiple of 16
+ * bytes.
+ */
+ from_aligned = (from + 0xF) & ~0xF;
+ to_aligned = (from + len) & ~0xF;
+ len_aligned = to_aligned - from_aligned;
+
+ /* Read the unaligned part at the start. */
+ if (from != from_aligned) {
+ ret = cqspi_direct_read_dma(f_pdata, buf, from,
+ from_aligned - from);
+ if (ret)
+ return ret;
+ buf += from_aligned - from;
+ }
+
+ if (len_aligned) {
+ cqspi_phy_enable(f_pdata, true);
+ ret = cqspi_direct_read_dma(f_pdata, buf, from_aligned,
+ len_aligned);
+ cqspi_phy_enable(f_pdata, false);
+ if (ret)
+ return ret;
+ buf += len_aligned;
+ }
+
+ /* Now read the remaining part, if any. */
+ if (to_aligned != (from + len)) {
+ ret = cqspi_direct_read_dma(f_pdata, buf, to_aligned,
+ (from + len) - to_aligned);
+ if (ret)
+ return ret;
+ buf += (from + len) - to_aligned;
+ }
+
+ return 0;
+}
+
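A worked example of the alignment split above (hypothetical offsets), with
from = 0x1005 and len = 0x100:

	from_aligned = (0x1005 + 0xF) & ~0xF;	/* 0x1010 */
	to_aligned   = (0x1005 + 0x100) & ~0xF;	/* 0x1100 */
	len_aligned  = 0x1100 - 0x1010;		/* 0xF0   */

The 0xB leading bytes and 0x5 trailing bytes take the plain DMA path; only
the 16-byte-aligned middle 0xF0 bytes are read with the PHY enabled.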
static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
@@ -973,7 +2006,7 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
return ret;
if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
- return cqspi_direct_read_execute(f_pdata, buf, from, len);
+ return cqspi_direct_read_execute(f_pdata, op);
return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}
@@ -987,7 +2020,13 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
cqspi_configure(f_pdata, mem->spi->max_speed_hz);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
- if (!op->addr.nbytes)
+ /*
+		 * Reads in DAC mode are forced to be at least 4 bytes,
+		 * which some flash devices do not support during register
+		 * reads, so prefer STIG mode for such small reads.
+ */
+ if (!op->addr.nbytes ||
+ op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
return cqspi_command_read(f_pdata, op);
return cqspi_read(f_pdata, op);
@@ -1010,6 +2049,60 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
return ret;
}
+static bool cqspi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ bool all_true, all_false;
+
+ /*
+ * op->dummy.dtr is required for converting nbytes into ncycles.
+ * Also, don't check the dtr field of the op phase having zero nbytes.
+ */
+ all_true = op->cmd.dtr &&
+ (!op->addr.nbytes || op->addr.dtr) &&
+ (!op->dummy.nbytes || op->dummy.dtr) &&
+ (!op->data.nbytes || op->data.dtr);
+
+ all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+ !op->data.dtr;
+
+ /* Mixed DTR modes not supported. */
+ if (!(all_true || all_false))
+ return false;
+
+ if (all_true)
+ return spi_mem_dtr_supports_op(mem, op);
+ else
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static void cqspi_mem_do_calibration(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_flash_pdata *f_pdata;
+ struct device *dev = &cqspi->pdev->dev;
+ int ret;
+
+ f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
+
+ /* Check if the op is eligible for PHY mode operation. */
+ if (!cqspi_phy_op_eligible(op))
+ return;
+
+ f_pdata->phy_read_op = *op;
+
+ ret = cqspi_phy_check_pattern(f_pdata, mem);
+ if (ret) {
+ dev_dbg(dev, "Pattern not found. Skipping calibration.\n");
+ return;
+ }
+
+ ret = cqspi_phy_calibrate(f_pdata, mem);
+ if (ret)
+		dev_info(dev, "PHY calibration failed: %d\n", ret);
+}
+
static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
struct cqspi_flash_pdata *f_pdata,
struct device_node *np)
@@ -1044,6 +2137,12 @@ static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
return -ENXIO;
}
+ if (of_property_read_u32(np, "cdns,phy-tx-start", &f_pdata->phy_tx_start))
+ f_pdata->phy_tx_start = 16;
+
+ if (of_property_read_u32(np, "cdns,phy-tx-end", &f_pdata->phy_tx_end))
+ f_pdata->phy_tx_end = 48;
+
return 0;
}
@@ -1138,6 +2237,8 @@ static const char *cqspi_get_name(struct spi_mem *mem)
static const struct spi_controller_mem_ops cqspi_mem_ops = {
.exec_op = cqspi_exec_mem_op,
.get_name = cqspi_get_name,
+ .supports_op = cqspi_supports_mem_op,
+ .do_calibration = cqspi_mem_do_calibration,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
@@ -1174,6 +2275,11 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
return 0;
}
+static const struct soc_device_attribute k3_soc_devices[] = {
+ { .family = "AM64X", .revision = "SR1.0" },
+ { /* sentinel */ }
+};
+
static int cqspi_probe(struct platform_device *pdev)
{
const struct cqspi_driver_platdata *ddata;
@@ -1186,7 +2292,7 @@ static int cqspi_probe(struct platform_device *pdev)
int ret;
int irq;
- master = spi_alloc_master(&pdev->dev, sizeof(*cqspi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master failed\n");
return -ENOMEM;
@@ -1198,14 +2304,14 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi = spi_master_get_devdata(master);
cqspi->pdev = pdev;
+ cqspi->master = master;
platform_set_drvdata(pdev, cqspi);
/* Obtain configuration from OF. */
ret = cqspi_of_get_pdata(cqspi);
if (ret) {
dev_err(dev, "Cannot get mandatory OF data.\n");
- ret = -ENODEV;
- goto probe_master_put;
+ return -ENODEV;
}
/* Obtain QSPI clock. */
@@ -1213,7 +2319,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
ret = PTR_ERR(cqspi->clk);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap controller address. */
@@ -1222,7 +2328,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
ret = PTR_ERR(cqspi->iobase);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap AHB address. */
@@ -1231,7 +2337,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
ret = PTR_ERR(cqspi->ahb_base);
- goto probe_master_put;
+ return ret;
}
cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
@@ -1240,16 +2346,14 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = -ENXIO;
- goto probe_master_put;
- }
+ if (irq < 0)
+ return -ENXIO;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- goto probe_master_put;
+ return ret;
}
ret = clk_prepare_enable(cqspi->clk);
@@ -1280,15 +2384,18 @@ static int cqspi_probe(struct platform_device *pdev)
reset_control_deassert(rstc_ocp);
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ master->max_speed_hz = cqspi->master_ref_clk_hz;
ddata = of_device_get_match_data(dev);
if (ddata) {
if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
- cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
- master->mode_bits |= SPI_RX_OCTAL;
- if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
+ master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
+ cqspi->use_direct_mode_wr = true;
+ }
}
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
@@ -1309,13 +2416,13 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- if (cqspi->use_direct_mode) {
+ if (cqspi->use_direct_mode && !soc_device_match(k3_soc_devices)) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
goto probe_setup_failed;
}
- ret = devm_spi_register_master(dev, master);
+ ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
goto probe_setup_failed;
@@ -1329,8 +2436,6 @@ probe_reset_failed:
probe_clk_failed:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-probe_master_put:
- spi_master_put(master);
return ret;
}
@@ -1338,6 +2443,7 @@ static int cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ spi_unregister_master(cqspi->master);
cqspi_controller_enable(cqspi, 0);
if (cqspi->rx_chan)
@@ -1351,32 +2457,46 @@ static int cqspi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cqspi_suspend(struct device *dev)
+static void __maybe_unused cqspi_restore_context(struct cqspi_st *cqspi)
+{
+ cqspi_phy_apply_setting(cqspi->f_pdata,
+ &cqspi->f_pdata->phy_setting);
+}
+
+static int __maybe_unused cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+	struct spi_master *master = cqspi->master;
+ int ret;
+ ret = spi_master_suspend(master);
cqspi_controller_enable(cqspi, 0);
- return 0;
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return ret;
}
-static int cqspi_resume(struct device *dev)
+static int __maybe_unused cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
+	struct spi_master *master = cqspi->master;
- cqspi_controller_enable(cqspi, 1);
- return 0;
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ cqspi_restore_context(cqspi);
+
+ return spi_master_resume(master);
}
-static const struct dev_pm_ops cqspi__dev_pm_ops = {
- .suspend = cqspi_suspend,
- .resume = cqspi_resume,
-};
+static SIMPLE_DEV_PM_OPS(cqspi__dev_pm_ops, cqspi_suspend, cqspi_resume);
#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
-#else
-#define CQSPI_DEV_PM_OPS NULL
-#endif
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
@@ -1428,3 +2548,4 @@ MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 4682f49dc733..2e01cb223bfd 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -6,6 +6,7 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#include <linux/dmaengine.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -137,8 +138,8 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
return -ENOTSUPP;
}
-bool spi_mem_default_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+static bool spi_mem_check_buswidth(struct spi_mem *mem,
+ const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
return false;
@@ -156,13 +157,39 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
+ return true;
+}
+
+bool spi_mem_dtr_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->cmd.buswidth == 8 && op->cmd.nbytes % 2)
+ return false;
+
+ if (op->addr.nbytes && op->addr.buswidth == 8 && op->addr.nbytes % 2)
+ return false;
+
+ if (op->dummy.nbytes && op->dummy.buswidth == 8 && op->dummy.nbytes % 2)
+ return false;
+
+ if (op->data.dir != SPI_MEM_NO_DATA &&
+	    op->data.buswidth == 8 && op->data.nbytes % 2)
+ return false;
+
+ return spi_mem_check_buswidth(mem, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
return false;
if (op->cmd.nbytes != 1)
return false;
- return true;
+ return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
@@ -447,6 +474,18 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+int spi_mem_do_calibration(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ if (!ctlr->mem_ops || !ctlr->mem_ops->do_calibration)
+ return -EOPNOTSUPP;
+
+ ctlr->mem_ops->do_calibration(mem, op);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_do_calibration);
+
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
@@ -726,6 +765,91 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
+static int spi_mem_read_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 *status)
+{
+ const u8 *bytes = (u8 *)op->data.buf.in;
+ int ret;
+
+ ret = spi_mem_exec_op(mem, op);
+ if (ret)
+ return ret;
+
+ if (op->data.nbytes > 1)
+ *status = ((u16)bytes[0] << 8) | bytes[1];
+ else
+ *status = bytes[0];
+
+ return 0;
+}
+
+/**
+ * spi_mem_poll_status() - Poll memory device status
+ * @mem: SPI memory device
+ * @op: the memory operation to execute
+ * @mask: status bitmask to check
+ * @match: (status & mask) expected value
+ * @initial_delay_us: delay in us before starting to poll
+ * @polling_delay_us: time to sleep between reads in us
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This function polls a status register and returns when
+ * (status & mask) == match or when the timeout has expired.
+ *
+ * Return: 0 in case of success, -ETIMEDOUT in case of error,
+ * -EOPNOTSUPP if not supported.
+ */
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ int ret = -EOPNOTSUPP;
+ int read_status_ret;
+ u16 status;
+
+ if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
+ op->data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
+ initial_delay_us, polling_delay_us,
+ timeout_ms);
+
+ spi_mem_access_end(mem);
+ }
+
+ if (ret == -EOPNOTSUPP) {
+ if (!spi_mem_supports_op(mem, op))
+ return ret;
+
+ if (initial_delay_us < 10)
+ udelay(initial_delay_us);
+ else
+ usleep_range((initial_delay_us >> 2) + 1,
+ initial_delay_us);
+
+ ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
+ (read_status_ret || ((status) & mask) == match),
+ polling_delay_us, timeout_ms * 1000, false, mem,
+ op, &status);
+ if (read_status_ret)
+ return read_status_ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_poll_status);
+
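[Editor's note] A minimal sketch of the intended use, in the style of a NOR flash wait-ready loop: poll the RDSR (0x05) status byte until the Write-In-Progress bit clears. foo_wait_ready() is hypothetical; the opcode, mask, and delays belong to the flash driver, and status_buf should be a DMA-safe buffer since controllers may DMA into it.

#include <linux/bits.h>

static int foo_wait_ready(struct spi_mem *mem, u8 *status_buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, status_buf, 1));

	/* mask = WIP bit, match = 0: done when the device reports idle;
	 * no initial delay, poll every 100 us, give up after 1 s
	 */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 100, 1000);
}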
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3596bbe4b776..fe4723e96cac 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -91,10 +91,6 @@
struct omap2_mcspi_dma {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
-
- struct completion dma_tx_completion;
- struct completion dma_rx_completion;
-
char dma_rx_ch_name[14];
char dma_tx_ch_name[14];
};
@@ -116,7 +112,7 @@ struct omap2_mcspi_regs {
};
struct omap2_mcspi {
- struct completion txdone;
+ struct completion txrxdone;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
@@ -376,30 +372,6 @@ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
return 0;
}
-static void omap2_mcspi_rx_callback(void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
- struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
- /* We must disable the DMA RX request */
- omap2_mcspi_set_dma_req(spi, 1, 0);
-
- complete(&mcspi_dma->dma_rx_completion);
-}
-
-static void omap2_mcspi_tx_callback(void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
- struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
- /* We must disable the DMA TX request */
- omap2_mcspi_set_dma_req(spi, 0, 0);
-
- complete(&mcspi_dma->dma_tx_completion);
-}
-
static void omap2_mcspi_tx_dma(struct spi_device *spi,
struct spi_transfer *xfer,
struct dma_slave_config cfg)
@@ -414,12 +386,9 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
- xfer->tx_sg.nents,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ xfer->tx_sg.nents, DMA_MEM_TO_DEV, DMA_CTRL_ACK);
+
if (tx) {
- tx->callback = omap2_mcspi_tx_callback;
- tx->callback_param = spi;
dmaengine_submit(tx);
} else {
/* FIXME: fall back to PIO? */
@@ -445,6 +414,9 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
struct omap2_mcspi_cs *cs = spi->controller_state;
void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
struct dma_async_tx_descriptor *tx;
+ dma_cookie_t dma_rx_cookie = 0;
+ struct dma_tx_state mcspi_dma_rxstate;
+ enum dma_status dma_status;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -501,12 +473,10 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
}
tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
- out_mapped_nents[0], DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ out_mapped_nents[0], DMA_DEV_TO_MEM, DMA_CTRL_ACK);
+
if (tx) {
- tx->callback = omap2_mcspi_rx_callback;
- tx->callback_param = spi;
- dmaengine_submit(tx);
+ dma_rx_cookie = dmaengine_submit(tx);
} else {
/* FIXME: fall back to PIO? */
}
@@ -514,10 +484,20 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
- ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txrxdone);
+
+ /*
+ * Before disabling the RX DMA request, confirm that the RX DMA transfer
+ * has actually completed. In most cases this poll succeeds on the first
+ * iteration.
+ */
+ do {
+ dma_status = dmaengine_tx_status(mcspi_dma->dma_rx, dma_rx_cookie,
+ &mcspi_dma_rxstate);
+ } while (dma_status != DMA_COMPLETE);
+
+ omap2_mcspi_set_dma_req(spi, 1, 0);
if (ret || mcspi->slave_aborted) {
dmaengine_terminate_sync(mcspi_dma->dma_rx);
- omap2_mcspi_set_dma_req(spi, 1, 0);
return 0;
}
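[Editor's note] The completion-ordering idiom introduced above, reduced to a sketch: keep the cookie from dmaengine_submit() and treat the transfer as finished only once dmaengine_tx_status() reports DMA_COMPLETE for it. foo_wait_dma_done() is hypothetical.

#include <linux/dmaengine.h>

static void foo_wait_dma_done(struct dma_chan *chan,
			      struct dma_async_tx_descriptor *desc)
{
	struct dma_tx_state state;
	enum dma_status status;
	dma_cookie_t cookie;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* ... block here on the controller-side EOW completion ... */

	/* usually completes on the first pass, as the hunk above notes */
	do {
		status = dmaengine_tx_status(chan, cookie, &state);
	} while (status != DMA_COMPLETE);
}

Note the loop is unbounded; a production driver may want a retry limit or timeout so a wedged DMA channel cannot hang the caller.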
@@ -588,8 +568,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
enum dma_slave_buswidth width;
unsigned es;
void __iomem *chstat_reg;
- void __iomem *irqstat_reg;
int wait_res;
+ int ret;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -619,68 +599,36 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
tx = xfer->tx_buf;
mcspi->slave_aborted = false;
- reinit_completion(&mcspi_dma->dma_tx_completion);
- reinit_completion(&mcspi_dma->dma_rx_completion);
- reinit_completion(&mcspi->txdone);
- if (tx) {
- /* Enable EOW IRQ to know end of tx in slave mode */
- if (spi_controller_is_slave(spi->master))
- mcspi_write_reg(spi->master,
- OMAP2_MCSPI_IRQENABLE,
- OMAP2_MCSPI_IRQSTATUS_EOW);
+ reinit_completion(&mcspi->txrxdone);
+ mcspi_write_reg(spi->master, OMAP2_MCSPI_IRQENABLE, OMAP2_MCSPI_IRQSTATUS_EOW);
+ if (tx)
omap2_mcspi_tx_dma(spi, xfer, cfg);
- }
- if (rx != NULL)
+ if (rx)
count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
- if (tx != NULL) {
- int ret;
-
- ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
- if (ret || mcspi->slave_aborted) {
- dmaengine_terminate_sync(mcspi_dma->dma_tx);
- omap2_mcspi_set_dma_req(spi, 0, 0);
- return 0;
- }
-
- if (spi_controller_is_slave(mcspi->master)) {
- ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
- if (ret || mcspi->slave_aborted)
- return 0;
- }
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txrxdone);
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+ if (ret || mcspi->slave_aborted)
+ return 0;
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (tx && !rx) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
if (mcspi->fifo_depth > 0) {
- irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
-
- if (mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
- dev_err(&spi->dev, "EOW timed out\n");
-
- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
- OMAP2_MCSPI_IRQSTATUS_EOW);
- }
-
- /* for TX_ONLY mode, be sure all words have shifted out */
- if (rx == NULL) {
- chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
- if (mcspi->fifo_depth > 0) {
- wait_res = mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXFFE);
- if (wait_res < 0)
- dev_err(&spi->dev, "TXFFE timed out\n");
- } else {
- wait_res = mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXS);
- if (wait_res < 0)
- dev_err(&spi->dev, "TXS timed out\n");
- }
- if (wait_res >= 0 &&
- (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0))
- dev_err(&spi->dev, "EOT timed out\n");
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_TXFFE);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXFFE timed out\n");
+ } else {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_TXS);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
}
+ if (wait_res >= 0 && (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0))
+ dev_err(&spi->dev, "EOT timed out\n");
}
+
return count;
}
@@ -1005,9 +953,6 @@ static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
mcspi_dma->dma_rx = NULL;
}
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
no_dma:
return ret;
}
@@ -1098,8 +1043,10 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
/* Disable IRQ and wakeup slave xfer task */
mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
- if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
- complete(&mcspi->txdone);
+ if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW) {
+ complete_all(&mcspi->txrxdone);
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS, OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
return IRQ_HANDLED;
}
@@ -1107,12 +1054,9 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
static int omap2_mcspi_slave_abort(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
mcspi->slave_aborted = true;
- complete(&mcspi_dma->dma_rx_completion);
- complete(&mcspi_dma->dma_tx_completion);
- complete(&mcspi->txdone);
+ complete_all(&mcspi->txrxdone);
return 0;
}
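[Editor's note] The consolidated-completion pattern these hunks converge on, as a sketch with hypothetical foo_* names: one completion covers both DMA directions, the EOW interrupt (or an abort) wakes every waiter with complete_all(), and each new transfer re-arms it via reinit_completion().

#include <linux/completion.h>
#include <linux/interrupt.h>

struct foo {
	struct completion done;
	bool aborted;
};

static void foo_start_xfer(struct foo *priv)
{
	/* must happen before the EOW interrupt is enabled */
	reinit_completion(&priv->done);
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo *priv = data;

	complete_all(&priv->done);	/* wake TX and RX waiters at once */
	return IRQ_HANDLED;
}

static void foo_abort(struct foo *priv)
{
	priv->aborted = true;
	complete_all(&priv->done);	/* unblock anyone mid-wait */
}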
@@ -1498,7 +1442,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no irq resource found\n");
goto free_master;
}
- init_completion(&mcspi->txdone);
+ init_completion(&mcspi->txrxdone);
status = devm_request_irq(&pdev->dev, status,
omap2_mcspi_irq_handler, 0, pdev->name,
mcspi);
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 2b71de722ec3..8b562f486a68 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -972,7 +972,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int gc0310_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1008,7 +1008,7 @@ static int gc0310_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -1042,7 +1042,7 @@ err:
}
static int gc0310_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1221,7 +1221,7 @@ static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
}
static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1232,7 +1232,7 @@ static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
}
static int gc0310_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
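[Editor's note] The mechanical conversion applied to every sensor pad op in the hunks that follow, as a sketch: the v4l2_subdev_pad_config argument becomes a v4l2_subdev_state, and TRY formats are stored through sd_state->pads (a per-pad config array). foo_set_fmt() is hypothetical.

#include <media/v4l2-subdev.h>

static int foo_set_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt = &format->format;

	/* ... clamp/negotiate *fmt against the sensor's limits ... */

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		sd_state->pads->try_fmt = *fmt;	/* was cfg->try_fmt */
		return 0;
	}

	/* ... program the ACTIVE format into the hardware ... */
	return 0;
}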
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 78147ffb6099..d8f8269c12ee 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -767,7 +767,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int gc2235_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -796,7 +796,7 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -825,7 +825,7 @@ err:
}
static int gc2235_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -963,7 +963,7 @@ static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
}
static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -974,7 +974,7 @@ static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
}
static int gc2235_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 77293579a134..7ebb42770add 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -801,7 +801,7 @@ static int mt9m114_get_intg_factor(struct i2c_client *client,
}
static int mt9m114_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -822,7 +822,7 @@ static int mt9m114_get_fmt(struct v4l2_subdev *sd,
}
static int mt9m114_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -846,7 +846,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
mt9m114_try_res(&width, &height);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
res_index = mt9m114_to_res(width, height);
@@ -1158,7 +1158,7 @@ static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val)
* This function is for touch exposure feature.
*/
static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1722,7 +1722,7 @@ static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable)
}
static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index)
@@ -1733,7 +1733,7 @@ static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mt9m114_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
unsigned int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index c90730513438..a62775bb3f12 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -911,7 +911,7 @@ static int get_resolution_index(int w, int h)
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -948,7 +948,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -997,7 +997,7 @@ err:
}
static int ov2680_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1155,7 +1155,7 @@ static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1166,7 +1166,7 @@ static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index eecefcd734d0..bef7ccda6a10 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -876,7 +876,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int ov2722_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -906,7 +906,7 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
}
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -961,7 +961,7 @@ err:
}
static int ov2722_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1104,7 +1104,7 @@ static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1115,7 +1115,7 @@ static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov2722_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index e698b63d6cb7..0828ca9ab6f2 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1577,7 +1577,7 @@ static int startup(struct v4l2_subdev *sd)
}
static int ov5693_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1608,7 +1608,7 @@ static int ov5693_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -1676,7 +1676,7 @@ err:
}
static int ov5693_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -1825,7 +1825,7 @@ static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
}
static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= MAX_FMTS)
@@ -1836,7 +1836,7 @@ static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
int index = fse->index;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 20c19e08968e..3b1ef2023b37 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -4883,6 +4883,9 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4924,7 +4927,7 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
snr_mbus_fmt->width, snr_mbus_fmt->height);
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_cfg, &format);
+ pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -5310,11 +5313,11 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
atomisp_output_fmts[] in atomisp_v4l2.c */
vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420;
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SOURCE_VF,
V4L2_SEL_TGT_COMPOSE, 0, &vf_size);
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt);
asd->video_out_vf.sh_fmt = IA_CSS_FRAME_FORMAT_NV12;
@@ -5567,6 +5570,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
const struct atomisp_format_bridge *format;
struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg
+ };
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -5611,7 +5617,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
vformat.which = V4L2_SUBDEV_FORMAT_TRY;
ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, set_fmt, &pad_cfg, &vformat);
+ pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
if (ffmt->width < req_ffmt->width ||
@@ -5649,7 +5655,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev,
asd->params.video_dis_en = false;
}
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, ffmt);
@@ -5734,7 +5740,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
}
atomisp_subdev_set_selection(
- &asd->subdev, fh.pad,
+ &asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE, source_pad,
V4L2_SEL_TGT_COMPOSE, 0, &r);
@@ -5865,7 +5871,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
ATOMISP_SUBDEV_PAD_SINK);
isp_source_fmt.code = format_bridge->mbus_code;
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad, &isp_source_fmt);
@@ -5984,13 +5990,13 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
isp_sink_crop.height = f->fmt.pix.height;
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK,
V4L2_SEL_TGT_CROP,
V4L2_SEL_FLAG_KEEP_CONFIG,
&isp_sink_crop);
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad, V4L2_SEL_TGT_COMPOSE,
0, &isp_sink_crop);
@@ -6009,7 +6015,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
f->fmt.pix.height);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad,
V4L2_SEL_TGT_COMPOSE, 0,
@@ -6043,14 +6049,14 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
f->fmt.pix.width,
ATOM_ISP_STEP_HEIGHT);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK,
V4L2_SEL_TGT_CROP,
V4L2_SEL_FLAG_KEEP_CONFIG,
&sink_crop);
}
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
source_pad,
V4L2_SEL_TGT_COMPOSE, 0,
@@ -6147,7 +6153,8 @@ int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
ffmt.height = f->fmt.pix.height;
ffmt.code = format_bridge->mbus_code;
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, V4L2_SUBDEV_FORMAT_ACTIVE,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &ffmt);
return 0;
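[Editor's note] The wrapper idiom these atomisp_cmd.c hunks use, as a self-contained sketch: callers that used to hand a bare v4l2_subdev_pad_config to a pad op now wrap it in a one-pad v4l2_subdev_state on the stack (assuming the transitional v4l2_subdev_state that carries only a pads pointer, as in this series). foo_try_sensor_fmt() is hypothetical.

#include <media/v4l2-subdev.h>

static int foo_try_sensor_fmt(struct v4l2_subdev *sensor,
			      struct v4l2_mbus_framefmt *ffmt)
{
	struct v4l2_subdev_pad_config pad_cfg = {};
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg,	/* single-pad state, lives on the stack */
	};
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
		.format = *ffmt,
	};
	int ret;

	ret = v4l2_subdev_call(sensor, pad, set_fmt, &pad_state, &fmt);
	if (ret)
		return ret;

	*ffmt = fmt.format;	/* what the sensor is actually willing to do */
	return 0;
}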
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index 060b8765ae96..56456e59bf89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -25,13 +25,13 @@
static struct v4l2_mbus_framefmt *__csi2_get_format(struct
atomisp_mipi_csi2_device
* csi2,
- struct
- v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum
v4l2_subdev_format_whence
which, unsigned int pad) {
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
else
return &csi2->formats[pad];
}
@@ -44,7 +44,7 @@ static struct v4l2_mbus_framefmt *__csi2_get_format(struct
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv;
@@ -70,13 +70,13 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->which, fmt->pad);
+ format = __csi2_get_format(csi2, sd_state, fmt->which, fmt->pad);
fmt->format = *format;
@@ -84,12 +84,14 @@ static int csi2_get_format(struct v4l2_subdev *sd,
}
int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int which, uint16_t pad,
struct v4l2_mbus_framefmt *ffmt)
{
struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2, cfg, which, pad);
+ struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2,
+ sd_state,
+ which, pad);
if (pad == CSI2_PAD_SINK) {
const struct atomisp_in_fmt_conv *ic;
@@ -110,12 +112,14 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
tmp_ffmt = *ffmt = *actual_ffmt;
- return atomisp_csi2_set_ffmt(sd, cfg, which, CSI2_PAD_SOURCE,
+ return atomisp_csi2_set_ffmt(sd, sd_state, which,
+ CSI2_PAD_SOURCE,
&tmp_ffmt);
}
/* FIXME: DPCM decompression */
- *actual_ffmt = *ffmt = *__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK);
+ *actual_ffmt = *ffmt = *__csi2_get_format(csi2, sd_state, which,
+ CSI2_PAD_SINK);
return 0;
}
@@ -129,10 +133,10 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- return atomisp_csi2_set_ffmt(sd, cfg, fmt->which, fmt->pad,
+ return atomisp_csi2_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
&fmt->format);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
index 59261e8f1a1a..e35711be8a37 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -44,7 +44,7 @@ struct atomisp_mipi_csi2_device {
};
int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int which, uint16_t pad,
struct v4l2_mbus_framefmt *ffmt);
int atomisp_mipi_csi2_init(struct atomisp_device *isp);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c
index e568ca99c45a..4570a9ab100b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_file.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_file.c
@@ -80,7 +80,7 @@ static int file_input_s_stream(struct v4l2_subdev *sd, int enable)
}
static int file_input_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -104,16 +104,16 @@ static int file_input_get_fmt(struct v4l2_subdev *sd,
}
static int file_input_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
if (format->pad)
return -EINVAL;
- file_input_get_fmt(sd, cfg, format);
+ file_input_get_fmt(sd, sd_state, format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
@@ -130,7 +130,7 @@ static int file_input_s_power(struct v4l2_subdev *sd, int on)
}
static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*to fake*/
@@ -138,7 +138,7 @@ static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
}
static int file_input_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
/*to fake*/
@@ -146,7 +146,7 @@ static int file_input_enum_frame_size(struct v4l2_subdev *sd,
}
static int file_input_enum_frame_ival(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum
*fie)
{
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index b751df31cc24..4890c81dd66c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -968,7 +968,7 @@ static int atomisp_release(struct file *file)
if (!isp->sw_contex.file_input && asd->fmt_auto->val) {
struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
}
@@ -980,7 +980,7 @@ subdev_uninit:
if (isp->sw_contex.file_input && asd->fmt_auto->val) {
struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
- atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
}
@@ -1021,7 +1021,7 @@ subdev_uninit:
done:
if (!acc_node) {
- atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ atomisp_subdev_set_selection(&asd->subdev, fh.state,
V4L2_SUBDEV_FORMAT_ACTIVE,
atomisp_subdev_source_pad(vdev),
V4L2_SEL_TGT_COMPOSE, 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 628e85799274..c4a3c8858465 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -213,7 +213,7 @@ static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
@@ -246,7 +246,7 @@ static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
}
struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad,
uint32_t target)
{
@@ -255,9 +255,9 @@ struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
if (which == V4L2_SUBDEV_FORMAT_TRY) {
switch (target) {
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(sd, sd_state, pad);
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(sd, cfg, pad);
+ return v4l2_subdev_get_try_compose(sd, sd_state, pad);
}
}
@@ -273,19 +273,20 @@ struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt
*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
uint32_t pad)
{
struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
return &isp_sd->fmt[pad].fmt;
}
static void isp_get_fmt_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
struct v4l2_mbus_framefmt **ffmt,
struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
@@ -293,16 +294,16 @@ static void isp_get_fmt_rect(struct v4l2_subdev *sd,
unsigned int i;
for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
- ffmt[i] = atomisp_subdev_get_ffmt(sd, cfg, which, i);
- crop[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ ffmt[i] = atomisp_subdev_get_ffmt(sd, sd_state, which, i);
+ crop[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
V4L2_SEL_TGT_CROP);
- comp[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ comp[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
V4L2_SEL_TGT_COMPOSE);
}
}
static void isp_subdev_propagate(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
uint32_t flags)
{
@@ -313,7 +314,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
if (flags & V4L2_SEL_FLAG_KEEP_CONFIG)
return;
- isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+ isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
switch (pad) {
case ATOMISP_SUBDEV_PAD_SINK: {
@@ -323,7 +324,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
r.width = ffmt[pad]->width;
r.height = ffmt[pad]->height;
- atomisp_subdev_set_selection(sd, cfg, which, pad,
+ atomisp_subdev_set_selection(sd, sd_state, which, pad,
target, flags, &r);
break;
}
@@ -331,7 +332,7 @@ static void isp_subdev_propagate(struct v4l2_subdev *sd,
}
static int isp_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *rec;
@@ -340,7 +341,7 @@ static int isp_subdev_get_selection(struct v4l2_subdev *sd,
if (rval)
return rval;
- rec = atomisp_subdev_get_rect(sd, cfg, sel->which, sel->pad,
+ rec = atomisp_subdev_get_rect(sd, sd_state, sel->which, sel->pad,
sel->target);
if (!rec)
return -EINVAL;
@@ -365,7 +366,7 @@ static const char *atomisp_pad_str(unsigned int pad)
}
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
u32 flags, struct v4l2_rect *r)
{
@@ -382,7 +383,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
- isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+ isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);
dev_dbg(isp->dev,
"sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
@@ -450,7 +451,8 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_rect tmp = *crop[pad];
atomisp_subdev_set_selection(
- sd, cfg, which, i, V4L2_SEL_TGT_COMPOSE,
+ sd, sd_state, which, i,
+ V4L2_SEL_TGT_COMPOSE,
flags, &tmp);
}
}
@@ -551,9 +553,9 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
ffmt[pad]->height = comp[pad]->height;
}
- if (!atomisp_subdev_get_rect(sd, cfg, which, pad, target))
+ if (!atomisp_subdev_get_rect(sd, sd_state, which, pad, target))
return -EINVAL;
- *r = *atomisp_subdev_get_rect(sd, cfg, which, pad, target);
+ *r = *atomisp_subdev_get_rect(sd, sd_state, which, pad, target);
dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
r->left, r->top, r->width, r->height);
@@ -562,7 +564,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
}
static int isp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
@@ -570,7 +572,8 @@ static int isp_subdev_set_selection(struct v4l2_subdev *sd,
if (rval)
return rval;
- return atomisp_subdev_set_selection(sd, cfg, sel->which, sel->pad,
+ return atomisp_subdev_set_selection(sd, sd_state, sel->which,
+ sel->pad,
sel->target, sel->flags, &sel->r);
}
@@ -609,13 +612,14 @@ static int atomisp_get_sensor_bin_factor(struct atomisp_sub_device *asd)
}
void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
u32 pad, struct v4l2_mbus_framefmt *ffmt)
{
struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
struct atomisp_device *isp = isp_sd->isp;
struct v4l2_mbus_framefmt *__ffmt =
- atomisp_subdev_get_ffmt(sd, cfg, which, pad);
+ atomisp_subdev_get_ffmt(sd, sd_state, which, pad);
u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode);
enum atomisp_input_stream_id stream_id;
@@ -640,7 +644,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
*__ffmt = *ffmt;
- isp_subdev_propagate(sd, cfg, which, pad,
+ isp_subdev_propagate(sd, sd_state, which, pad,
V4L2_SEL_TGT_CROP, 0);
if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
@@ -679,10 +683,11 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
* to the format type.
*/
static int isp_subdev_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- fmt->format = *atomisp_subdev_get_ffmt(sd, cfg, fmt->which, fmt->pad);
+ fmt->format = *atomisp_subdev_get_ffmt(sd, sd_state, fmt->which,
+ fmt->pad);
return 0;
}
@@ -698,10 +703,11 @@ static int isp_subdev_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int isp_subdev_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- atomisp_subdev_set_ffmt(sd, cfg, fmt->which, fmt->pad, &fmt->format);
+ atomisp_subdev_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
+ &fmt->format);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
index 12215d740616..a8d210ea5f8b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h
@@ -440,19 +440,20 @@ uint16_t atomisp_subdev_source_pad(struct video_device *vdev);
/* Get pointer to appropriate format */
struct v4l2_mbus_framefmt
*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state, uint32_t which,
uint32_t pad);
struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad,
uint32_t target);
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
u32 which, uint32_t pad, uint32_t target,
u32 flags, struct v4l2_rect *r);
/* Actually set the format */
void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_subdev_state *sd_state,
+ uint32_t which,
u32 pad, struct v4l2_mbus_framefmt *ffmt);
int atomisp_update_run_mode(struct atomisp_sub_device *asd);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
index 1def80bab180..e29a96da5f98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
@@ -29,7 +29,7 @@ static int tpg_s_stream(struct v4l2_subdev *sd, int enable)
}
static int tpg_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
/*to fake*/
@@ -37,7 +37,7 @@ static int tpg_get_fmt(struct v4l2_subdev *sd,
}
static int tpg_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
@@ -47,7 +47,7 @@ static int tpg_set_fmt(struct v4l2_subdev *sd,
/* only raw8 grbg is supported by TPG */
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
+ sd_state->pads->try_fmt = *fmt;
return 0;
}
return 0;
@@ -65,7 +65,7 @@ static int tpg_s_power(struct v4l2_subdev *sd, int on)
}
static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*to fake*/
@@ -73,7 +73,7 @@ static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
}
static int tpg_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
/*to fake*/
@@ -81,7 +81,7 @@ static int tpg_enum_frame_size(struct v4l2_subdev *sd,
}
static int tpg_enum_frame_ival(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
/*to fake*/
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 5b09e11b1a0e..d74281d03e1f 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -79,13 +79,13 @@ static void prp_stop(struct prp_priv *priv)
}
static struct v4l2_mbus_framefmt *
-__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
struct imx_ic_priv *ic_priv = priv->ic_priv;
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
else
return &priv->format_mbus;
}
@@ -95,7 +95,7 @@ __prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
*/
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -115,7 +115,8 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
ret = -EINVAL;
goto out;
}
- infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, code->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD,
+ code->which);
code->code = infmt->code;
break;
default:
@@ -127,7 +128,7 @@ out:
}
static int prp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -139,7 +140,7 @@ static int prp_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -152,7 +153,7 @@ out:
}
static int prp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -171,7 +172,7 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, sdformat->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD, sdformat->which);
switch (sdformat->pad) {
case PRP_SINK_PAD:
@@ -201,7 +202,7 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
imx_media_try_colorimetry(&sdformat->format, true);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
out:
mutex_unlock(&priv->lock);
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 74f5de466d5d..78800119bf98 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -790,13 +790,13 @@ static void prp_stop(struct prp_priv *priv)
}
static struct v4l2_mbus_framefmt *
-__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
struct imx_ic_priv *ic_priv = priv->ic_priv;
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
@@ -844,7 +844,7 @@ static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
*/
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad >= PRPENCVF_NUM_PADS)
@@ -855,7 +855,7 @@ static int prp_enum_mbus_code(struct v4l2_subdev *sd,
}
static int prp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -867,7 +867,7 @@ static int prp_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -880,7 +880,7 @@ out:
}
static void prp_try_fmt(struct prp_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -897,7 +897,8 @@ static void prp_try_fmt(struct prp_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- infmt = __prp_get_fmt(priv, cfg, PRPENCVF_SINK_PAD, sdformat->which);
+ infmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SINK_PAD,
+ sdformat->which);
if (sdformat->pad == PRPENCVF_SRC_PAD) {
sdformat->format.field = infmt->field;
@@ -923,7 +924,7 @@ static void prp_try_fmt(struct prp_priv *priv,
}
static int prp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -941,9 +942,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- prp_try_fmt(priv, cfg, sdformat, &cc);
+ prp_try_fmt(priv, sd_state, sdformat, &cc);
- fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
/* propagate a default format to source pad */
@@ -955,9 +956,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
format.pad = PRPENCVF_SRC_PAD;
format.which = sdformat->which;
format.format = sdformat->format;
- prp_try_fmt(priv, cfg, &format, &outcc);
+ prp_try_fmt(priv, sd_state, &format, &outcc);
- outfmt = __prp_get_fmt(priv, cfg, PRPENCVF_SRC_PAD,
+ outfmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SRC_PAD,
sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -973,7 +974,7 @@ out:
}
static int prp_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct prp_priv *priv = sd_to_priv(sd);
@@ -991,7 +992,7 @@ static int prp_enum_frame_size(struct v4l2_subdev *sd,
format.format.code = fse->code;
format.format.width = 1;
format.format.height = 1;
- prp_try_fmt(priv, cfg, &format, &cc);
+ prp_try_fmt(priv, sd_state, &format, &cc);
fse->min_width = format.format.width;
fse->min_height = format.format.height;
@@ -1003,7 +1004,7 @@ static int prp_enum_frame_size(struct v4l2_subdev *sd,
format.format.code = fse->code;
format.format.width = -1;
format.format.height = -1;
- prp_try_fmt(priv, cfg, &format, &cc);
+ prp_try_fmt(priv, sd_state, &format, &cc);
fse->max_width = format.format.width;
fse->max_height = format.format.height;
out:
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index d9a8667b4bed..f55314f2223f 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1162,31 +1162,32 @@ static int csi_link_validate(struct v4l2_subdev *sd,
}
static struct v4l2_mbus_framefmt *
-__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
static struct v4l2_rect *
-__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&priv->sd, cfg, CSI_SINK_PAD);
+ return v4l2_subdev_get_try_crop(&priv->sd, sd_state,
+ CSI_SINK_PAD);
else
return &priv->crop;
}
static struct v4l2_rect *
-__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&priv->sd, cfg,
+ return v4l2_subdev_get_try_compose(&priv->sd, sd_state,
CSI_SINK_PAD);
else
return &priv->compose;
@@ -1194,7 +1195,7 @@ __csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
static void csi_try_crop(struct csi_priv *priv,
struct v4l2_rect *crop,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *infmt,
struct v4l2_fwnode_endpoint *upstream_ep)
{
@@ -1233,7 +1234,7 @@ static void csi_try_crop(struct csi_priv *priv,
}
static int csi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1244,7 +1245,7 @@ static int csi_enum_mbus_code(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, code->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, code->which);
incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
switch (code->pad) {
@@ -1286,7 +1287,7 @@ out:
}
static int csi_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1305,7 +1306,7 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
fse->min_height = MIN_H;
fse->max_height = MAX_H;
} else {
- crop = __csi_get_crop(priv, cfg, fse->which);
+ crop = __csi_get_crop(priv, sd_state, fse->which);
fse->min_width = fse->index & 1 ?
crop->width / 2 : crop->width;
@@ -1320,7 +1321,7 @@ static int csi_enum_frame_size(struct v4l2_subdev *sd,
}
static int csi_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1336,7 +1337,7 @@ static int csi_enum_frame_interval(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
input_fi = &priv->frame_interval[CSI_SINK_PAD];
- crop = __csi_get_crop(priv, cfg, fie->which);
+ crop = __csi_get_crop(priv, sd_state, fie->which);
if ((fie->width != crop->width && fie->width != crop->width / 2) ||
(fie->height != crop->height && fie->height != crop->height / 2)) {
@@ -1356,7 +1357,7 @@ out:
}
static int csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1368,7 +1369,7 @@ static int csi_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -1381,11 +1382,11 @@ out:
}
static void csi_try_field(struct csi_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct v4l2_mbus_framefmt *infmt =
- __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+ __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
/*
* no restrictions on sink pad field type except must
@@ -1431,7 +1432,7 @@ static void csi_try_field(struct csi_priv *priv,
static void csi_try_fmt(struct csi_priv *priv,
struct v4l2_fwnode_endpoint *upstream_ep,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
struct v4l2_rect *crop,
struct v4l2_rect *compose,
@@ -1441,7 +1442,7 @@ static void csi_try_fmt(struct csi_priv *priv,
struct v4l2_mbus_framefmt *infmt;
u32 code;
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
switch (sdformat->pad) {
case CSI_SRC_PAD_DIRECT:
@@ -1468,7 +1469,7 @@ static void csi_try_fmt(struct csi_priv *priv,
}
}
- csi_try_field(priv, cfg, sdformat);
+ csi_try_field(priv, sd_state, sdformat);
/* propagate colorimetry from sink */
sdformat->format.colorspace = infmt->colorspace;
@@ -1492,7 +1493,7 @@ static void csi_try_fmt(struct csi_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- csi_try_field(priv, cfg, sdformat);
+ csi_try_field(priv, sd_state, sdformat);
/* Reset crop and compose rectangles */
crop->left = 0;
@@ -1501,7 +1502,8 @@ static void csi_try_fmt(struct csi_priv *priv,
crop->height = sdformat->format.height;
if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
crop->height *= 2;
- csi_try_crop(priv, crop, cfg, &sdformat->format, upstream_ep);
+ csi_try_crop(priv, crop, sd_state, &sdformat->format,
+ upstream_ep);
compose->left = 0;
compose->top = 0;
compose->width = crop->width;
@@ -1515,7 +1517,7 @@ static void csi_try_fmt(struct csi_priv *priv,
}
static int csi_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1541,12 +1543,13 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- crop = __csi_get_crop(priv, cfg, sdformat->which);
- compose = __csi_get_compose(priv, cfg, sdformat->which);
+ crop = __csi_get_crop(priv, sd_state, sdformat->which);
+ compose = __csi_get_compose(priv, sd_state, sdformat->which);
- csi_try_fmt(priv, &upstream_ep, cfg, sdformat, crop, compose, &cc);
+ csi_try_fmt(priv, &upstream_ep, sd_state, sdformat, crop, compose,
+ &cc);
- fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
if (sdformat->pad == CSI_SINK_PAD) {
@@ -1561,10 +1564,11 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
format.pad = pad;
format.which = sdformat->which;
format.format = sdformat->format;
- csi_try_fmt(priv, &upstream_ep, cfg, &format,
+ csi_try_fmt(priv, &upstream_ep, sd_state, &format,
NULL, compose, &outcc);
- outfmt = __csi_get_fmt(priv, cfg, pad, sdformat->which);
+ outfmt = __csi_get_fmt(priv, sd_state, pad,
+ sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -1581,7 +1585,7 @@ out:
}
static int csi_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1594,9 +1598,9 @@ static int csi_get_selection(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
- crop = __csi_get_crop(priv, cfg, sel->which);
- compose = __csi_get_compose(priv, cfg, sel->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
@@ -1645,7 +1649,7 @@ static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
}
static int csi_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1670,9 +1674,9 @@ static int csi_set_selection(struct v4l2_subdev *sd,
goto out;
}
- infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
- crop = __csi_get_crop(priv, cfg, sel->which);
- compose = __csi_get_compose(priv, cfg, sel->which);
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
@@ -1688,7 +1692,7 @@ static int csi_set_selection(struct v4l2_subdev *sd,
goto out;
}
- csi_try_crop(priv, &sel->r, cfg, infmt, &upstream_ep);
+ csi_try_crop(priv, &sel->r, sd_state, infmt, &upstream_ep);
*crop = sel->r;
@@ -1729,7 +1733,7 @@ static int csi_set_selection(struct v4l2_subdev *sd,
for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
struct v4l2_mbus_framefmt *outfmt;
- outfmt = __csi_get_fmt(priv, cfg, pad, sel->which);
+ outfmt = __csi_get_fmt(priv, sd_state, pad, sel->which);
outfmt->width = compose->width;
outfmt->height = compose->height;
}
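The imx-media-csi.c hunks above all apply one substitution: pad operations now receive a struct v4l2_subdev_state, and TRY data is reached through it, while ACTIVE data stays in the driver. A minimal sketch of that accessor shape, using hypothetical names (my_priv, format_mbus) rather than any real driver:

#include <media/v4l2-subdev.h>

struct my_priv {
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt format_mbus[2];	/* ACTIVE formats */
};

static struct v4l2_mbus_framefmt *
__my_get_fmt(struct my_priv *priv, struct v4l2_subdev_state *sd_state,
	     unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		/* TRY formats now live behind the subdev state */
		return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);

	/* ACTIVE formats remain driver-owned */
	return &priv->format_mbus[pad];
}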
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index c2088f7ceef5..8c76dbefd983 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
* of a subdev. Can be used as the .init_cfg pad operation.
*/
int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf_try;
struct v4l2_subdev_format format;
@@ -424,7 +424,7 @@ int imx_media_init_cfg(struct v4l2_subdev *sd,
if (ret)
continue;
- mf_try = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf_try = v4l2_subdev_get_try_format(sd, sd_state, pad);
*mf_try = format.format;
}
@@ -884,16 +884,16 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
mutex_lock(&imxmd->md.graph_mutex);
if (on) {
- ret = __media_pipeline_start(entity, &imxmd->pipe);
+ ret = __media_pipeline_start(entity->pads, &imxmd->pipe);
if (ret)
goto out;
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret)
- __media_pipeline_stop(entity);
+ __media_pipeline_stop(entity->pads);
} else {
v4l2_subdev_call(sd, video, s_stream, 0);
- if (entity->pipe)
- __media_pipeline_stop(entity);
+ if (entity->pads->pipe)
+ __media_pipeline_stop(entity->pads);
}
out:
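The pipeline calls above change in the same direction: media_pipeline_start(), media_pipeline_stop() and their __-prefixed variants now take the entity's first pad (entity->pads) instead of the entity, and the running-pipeline pointer is checked via pad->pipe. A sketch of the resulting stream on/off shape, assuming a hypothetical video device with at least one pad:

#include <media/media-entity.h>
#include <media/v4l2-dev.h>

static int my_pipeline_set_stream(struct video_device *vdev,
				  struct media_pipeline *pipe, bool on)
{
	struct media_pad *pad = vdev->entity.pads;	/* first pad */

	if (on)
		return media_pipeline_start(pad, pipe);

	if (pad->pipe)		/* pipeline pointer moved into the pad */
		media_pipeline_stop(pad);
	return 0;
}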
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 879329f81f79..1b79224f2194 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -532,17 +532,17 @@ out:
}
static struct v4l2_mbus_framefmt *
-__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_pad_config *cfg,
+__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad >= VDIC_NUM_PADS)
@@ -553,7 +553,7 @@ static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
}
static int vdic_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -565,7 +565,7 @@ static int vdic_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&priv->lock);
- fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out;
@@ -578,7 +578,7 @@ out:
}
static void vdic_try_fmt(struct vdic_priv *priv,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -594,7 +594,7 @@ static void vdic_try_fmt(struct vdic_priv *priv,
sdformat->format.code = (*cc)->codes[0];
}
- infmt = __vdic_get_fmt(priv, cfg, priv->active_input_pad,
+ infmt = __vdic_get_fmt(priv, sd_state, priv->active_input_pad,
sdformat->which);
switch (sdformat->pad) {
@@ -620,7 +620,7 @@ static void vdic_try_fmt(struct vdic_priv *priv,
}
static int vdic_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -638,9 +638,9 @@ static int vdic_set_fmt(struct v4l2_subdev *sd,
goto out;
}
- vdic_try_fmt(priv, cfg, sdformat, &cc);
+ vdic_try_fmt(priv, sd_state, sdformat, &cc);
- fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
/* propagate format to source pad */
@@ -653,9 +653,9 @@ static int vdic_set_fmt(struct v4l2_subdev *sd,
format.pad = VDIC_SRC_PAD_DIRECT;
format.which = sdformat->which;
format.format = sdformat->format;
- vdic_try_fmt(priv, cfg, &format, &outcc);
+ vdic_try_fmt(priv, sd_state, &format, &outcc);
- outfmt = __vdic_get_fmt(priv, cfg, VDIC_SRC_PAD_DIRECT,
+ outfmt = __vdic_get_fmt(priv, sd_state, VDIC_SRC_PAD_DIRECT,
sdformat->which);
*outfmt = format.format;
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index f17135158029..231c2da6ecd9 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -190,7 +190,7 @@ int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
u32 width, u32 height, u32 code, u32 field,
const struct imx_media_pixfmt **cc);
int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg);
+ struct v4l2_subdev_state *sd_state);
void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 9457761b7c8b..e97af5577ae2 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -447,17 +447,17 @@ out:
}
static struct v4l2_mbus_framefmt *
-__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_pad_config *cfg,
+__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->sd, sd_state, pad);
else
return &csi2->format_mbus;
}
static int csi2_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
@@ -465,7 +465,7 @@ static int csi2_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&csi2->lock);
- fmt = __csi2_get_fmt(csi2, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
sdformat->format = *fmt;
@@ -475,7 +475,7 @@ static int csi2_get_fmt(struct v4l2_subdev *sd,
}
static int csi2_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
@@ -496,7 +496,7 @@ static int csi2_set_fmt(struct v4l2_subdev *sd,
if (sdformat->pad != CSI2_SINK_PAD)
sdformat->format = csi2->format_mbus;
- fmt = __csi2_get_fmt(csi2, cfg, sdformat->pad, sdformat->which);
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
*fmt = sdformat->format;
out:
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 6c59485291ca..8ffd05aed461 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -899,18 +899,18 @@ out_unlock:
static struct v4l2_mbus_framefmt *
imx7_csi_get_format(struct imx7_csi *csi,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
return &csi->format_mbus[pad];
}
static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -919,7 +919,8 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
mutex_lock(&csi->lock);
- in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK, code->which);
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
+ code->which);
switch (code->pad) {
case IMX7_CSI_PAD_SINK:
@@ -945,7 +946,7 @@ out_unlock:
}
static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -954,7 +955,8 @@ static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&csi->lock);
- fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out_unlock;
@@ -969,7 +971,7 @@ out_unlock:
}
static int imx7_csi_try_fmt(struct imx7_csi *csi,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat,
const struct imx_media_pixfmt **cc)
{
@@ -977,7 +979,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
struct v4l2_mbus_framefmt *in_fmt;
u32 code;
- in_fmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SINK,
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
sdformat->which);
if (!in_fmt)
return -EINVAL;
@@ -1022,7 +1024,7 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
}
static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
@@ -1043,11 +1045,12 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
goto out_unlock;
}
- ret = imx7_csi_try_fmt(csi, cfg, sdformat, &cc);
+ ret = imx7_csi_try_fmt(csi, sd_state, sdformat, &cc);
if (ret < 0)
goto out_unlock;
- fmt = imx7_csi_get_format(csi, cfg, sdformat->pad, sdformat->which);
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
if (!fmt) {
ret = -EINVAL;
goto out_unlock;
@@ -1060,11 +1063,11 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
format.pad = IMX7_CSI_PAD_SRC;
format.which = sdformat->which;
format.format = sdformat->format;
- if (imx7_csi_try_fmt(csi, cfg, &format, &outcc)) {
+ if (imx7_csi_try_fmt(csi, sd_state, &format, &outcc)) {
ret = -EINVAL;
goto out_unlock;
}
- outfmt = imx7_csi_get_format(csi, cfg, IMX7_CSI_PAD_SRC,
+ outfmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SRC,
sdformat->which);
*outfmt = format.format;
@@ -1121,7 +1124,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
@@ -1129,7 +1132,7 @@ static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, i);
ret = imx_media_init_mbus_fmt(mf, 800, 600, 0, V4L2_FIELD_NONE,
&csi->cc[i]);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index a392f9012626..be75d3ee5e36 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -700,26 +700,27 @@ out:
static struct v4l2_mbus_framefmt *
mipi_csis_get_format(struct csi_state *state,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which,
unsigned int pad)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&state->mipi_sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&state->mipi_sd, sd_state,
+ pad);
return &state->format_mbus;
}
static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
struct v4l2_mbus_framefmt *fmt_sink;
struct v4l2_mbus_framefmt *fmt_source;
enum v4l2_subdev_format_whence which;
- which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_sink = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SINK);
+ which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink = mipi_csis_get_format(state, sd_state, which, CSIS_PAD_SINK);
fmt_sink->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt_sink->width = MIPI_CSIS_DEF_PIX_WIDTH;
@@ -738,24 +739,26 @@ static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
* configuration, cfg is NULL, which indicates there's no source pad
* configuration to set.
*/
- if (!cfg)
+ if (!sd_state)
return 0;
- fmt_source = mipi_csis_get_format(state, cfg, which, CSIS_PAD_SOURCE);
+ fmt_source = mipi_csis_get_format(state, sd_state, which,
+ CSIS_PAD_SOURCE);
*fmt_source = *fmt_sink;
return 0;
}
static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
struct v4l2_mbus_framefmt *fmt;
mutex_lock(&state->lock);
- fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
sdformat->format = *fmt;
mutex_unlock(&state->lock);
@@ -763,7 +766,7 @@ static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
}
static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
@@ -778,7 +781,8 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
if (code->index > 0)
return -EINVAL;
- fmt = mipi_csis_get_format(state, cfg, code->which, code->pad);
+ fmt = mipi_csis_get_format(state, sd_state, code->which,
+ code->pad);
code->code = fmt->code;
return 0;
}
@@ -795,7 +799,7 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *mipi_sd,
}
static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *sdformat)
{
struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
@@ -808,12 +812,13 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
* modified.
*/
if (sdformat->pad == CSIS_PAD_SOURCE)
- return mipi_csis_get_fmt(mipi_sd, cfg, sdformat);
+ return mipi_csis_get_fmt(mipi_sd, sd_state, sdformat);
if (sdformat->pad != CSIS_PAD_SINK)
return -EINVAL;
- fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
mutex_lock(&state->lock);
@@ -856,7 +861,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = mipi_csis_get_format(state, cfg, sdformat->which,
+ fmt = mipi_csis_get_format(state, sd_state, sdformat->which,
CSIS_PAD_SOURCE);
*fmt = sdformat->format;
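mipi_csis_init_cfg() above also encodes the NULL-state convention: when the core initializes the ACTIVE configuration there is no state to pass, so the whence value is derived from the pointer itself. Reduced to a sketch, where my_get_format() stands in for a driver accessor like the ones shown earlier:

static int my_init_cfg(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state)
{
	/* NULL state: the core is initializing the ACTIVE configuration */
	enum v4l2_subdev_format_whence which =
		sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	struct v4l2_mbus_framefmt *fmt =
		my_get_format(sd, sd_state, which, 0 /* sink pad */);

	fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
	fmt->field = V4L2_FIELD_NONE;
	return 0;
}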
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 3a45c1fe4957..edd8edda0647 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -418,7 +418,7 @@ struct ipu3_uapi_af_config_s {
IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
/**
- * struct ipu3_uapi_awb_fr_meta_data - AWB filter response meta data
+ * struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response meta data
*
* @meta_data: Statistics output on the grid after convolving with 1D filter.
*/
@@ -1506,7 +1506,7 @@ struct ipu3_uapi_sharp_cfg {
} __packed;
/**
- * struct struct ipu3_uapi_far_w - Sharpening config for far sub-group
+ * struct ipu3_uapi_far_w - Sharpening config for far sub-group
*
* @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
* @reserved0: reserved
@@ -1526,7 +1526,7 @@ struct ipu3_uapi_far_w {
} __packed;
/**
- * struct struct ipu3_uapi_unsharp_cfg - Unsharp config
+ * struct ipu3_uapi_unsharp_cfg - Unsharp config
*
* @unsharp_weight: Unsharp mask blending weight.
* u1.6, range [0, 64], default 16.
@@ -1772,7 +1772,7 @@ struct ipu3_uapi_vss_lut_y {
} __packed;
/**
- * struct ipu3_uapi_yuvp1_iefd_vssnlm_cf - IEFd Vssnlm Lookup table
+ * struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm Lookup table
*
* @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description
* @vss_lut_y: vss lookup table. See &ipu3_uapi_vss_lut_y description
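The intel-ipu3.h hunks are pure kernel-doc repairs: the name on the /** line must match the definition that follows, so "struct struct" duplication and stale names trip scripts/kernel-doc warnings. The corrected shape, taken from one of the hunks above:

/**
 * struct ipu3_uapi_far_w - Sharpening config for far sub-group
 *
 * @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
 * @reserved0: reserved
 */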
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 103f84466f6f..53dea2bedc17 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -36,7 +36,7 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
/* Initialize try_fmt */
for (i = 0; i < IMGU_NODE_NUM; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->pad, i);
+ v4l2_subdev_get_try_format(sd, fh->state, i);
try_fmt->width = try_crop.width;
try_fmt->height = try_crop.height;
@@ -44,8 +44,8 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
try_fmt->field = V4L2_FIELD_NONE;
}
- *v4l2_subdev_get_try_crop(sd, fh->pad, IMGU_NODE_IN) = try_crop;
- *v4l2_subdev_get_try_compose(sd, fh->pad, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_crop(sd, fh->state, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_compose(sd, fh->state, IMGU_NODE_IN) = try_crop;
return 0;
}
@@ -120,7 +120,7 @@ static int imgu_subdev_s_stream(struct v4l2_subdev *sd, int enable)
}
static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
@@ -136,7 +136,7 @@ static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
fmt->format = imgu_pipe->nodes[pad].pad_fmt;
} else {
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
fmt->format = *mf;
}
@@ -144,7 +144,7 @@ static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
}
static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imgu_media_pipe *imgu_pipe;
@@ -161,7 +161,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
else
mf = &imgu_pipe->nodes[pad].pad_fmt;
@@ -189,7 +189,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
}
static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct v4l2_rect *try_sel, *r;
@@ -202,11 +202,11 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
r = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
r = &imgu_sd->rect.bds;
break;
default:
@@ -222,7 +222,7 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
}
static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct imgu_device *imgu = v4l2_get_subdevdata(sd);
@@ -241,11 +241,11 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
rect = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
rect = &imgu_sd->rect.bds;
break;
default:
@@ -485,7 +485,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
pipe = node->pipe;
imgu_pipe = &imgu->imgu_pipe[pipe];
- r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline);
+ r = media_pipeline_start(node->vdev.entity.pads, &imgu_pipe->pipeline);
if (r < 0)
goto fail_return_bufs;
@@ -510,7 +510,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
fail_stop_pipeline:
- media_pipeline_stop(&node->vdev.entity);
+ media_pipeline_stop(node->vdev.entity.pads);
fail_return_bufs:
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
@@ -550,7 +550,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
mutex_unlock(&imgu->streaming_lock);
- media_pipeline_stop(&node->vdev.entity);
+ media_pipeline_stop(node->vdev.entity.pads);
}
/******************** v4l2_ioctl_ops ********************/
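In the ipu3 hunks the subdev file handle itself now carries the state, so open-time TRY initialization goes through fh->state. A sketch with arbitrary default values:

static int my_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	/* TRY format for pad 0, owned by this file handle's state */
	struct v4l2_mbus_framefmt *try_fmt =
		v4l2_subdev_get_try_format(sd, fh->state, 0);

	try_fmt->width = 1920;		/* placeholder defaults */
	try_fmt->height = 1080;
	try_fmt->field = V4L2_FIELD_NONE;
	return 0;
}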
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 3dac35f68238..f5ea256fb423 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -543,7 +543,7 @@ static int iss_pipeline_is_last(struct media_entity *me)
struct iss_pipeline *pipe;
struct media_pad *pad;
- if (!me->pipe)
+ if (!me->pads->pipe)
return 0;
pipe = to_iss_pipeline(me);
if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index a6dc2d2b1228..124ab2f44fbf 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -825,19 +825,20 @@ static const struct iss_video_operations csi2_issvideo_ops = {
static struct v4l2_mbus_framefmt *
__csi2_get_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
+ pad);
return &csi2->formats[pad];
}
static void
csi2_try_format(struct iss_csi2_device *csi2,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -868,7 +869,8 @@ csi2_try_format(struct iss_csi2_device *csi2,
* compression.
*/
pixelcode = fmt->code;
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK, which);
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
/*
@@ -894,7 +896,7 @@ csi2_try_format(struct iss_csi2_device *csi2,
* return -EINVAL or zero on success
*/
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -907,7 +909,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csi2_input_fmts[code->index];
} else {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SINK,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
code->which);
switch (code->index) {
case 0:
@@ -931,7 +933,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
@@ -943,7 +945,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -953,7 +955,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- csi2_try_format(csi2, cfg, fse->pad, &format, fse->which);
+ csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -968,13 +970,13 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -990,25 +992,26 @@ static int csi2_get_format(struct v4l2_subdev *sd,
* return -EINVAL or zero on success
*/
static int csi2_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __csi2_get_format(csi2, cfg, fmt->pad, fmt->which);
+ format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- csi2_try_format(csi2, cfg, fmt->pad, &fmt->format, fmt->which);
+ csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == CSI2_PAD_SINK) {
- format = __csi2_get_format(csi2, cfg, CSI2_PAD_SOURCE,
+ format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE,
fmt->which);
*format = fmt->format;
- csi2_try_format(csi2, cfg, CSI2_PAD_SOURCE, format, fmt->which);
+ csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format,
+ fmt->which);
}
return 0;
@@ -1050,7 +1053,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- csi2_set_format(sd, fh ? fh->pad : NULL, &format);
+ csi2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
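csi2_init_formats() above shows the matching caller-side idiom: one set_format path serves both subdev registration (no file handle, ACTIVE) and open (TRY), selected by passing fh ? fh->state : NULL. Sketched with a hypothetical my_set_format():

static int my_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format = {
		.pad = 0,
		.which = fh ? V4L2_SUBDEV_FORMAT_TRY
			    : V4L2_SUBDEV_FORMAT_ACTIVE,
		.format = {
			.code = MEDIA_BUS_FMT_SGRBG10_1X10,
			.width = 4096,
			.height = 4096,
		},
	};

	/* a NULL state is tolerated on the ACTIVE path, as in init_cfg */
	return my_set_format(sd, fh ? fh->state : NULL, &format);
}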
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index 26be078b69f3..23f707cb336f 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -21,7 +21,7 @@
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which);
@@ -175,12 +175,13 @@ static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ipipe->subdev, sd_state,
+ pad);
return &ipipe->formats[pad];
}
@@ -194,7 +195,7 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe,
*/
static void
ipipe_try_format(struct iss_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
@@ -222,7 +223,8 @@ ipipe_try_format(struct iss_ipipe_device *ipipe,
break;
case IPIPE_PAD_SOURCE_VP:
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SINK, which);
+ format = __ipipe_get_format(ipipe, sd_state, IPIPE_PAD_SINK,
+ which);
memcpy(fmt, format, sizeof(*fmt));
fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
@@ -243,7 +245,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe,
* return -EINVAL or zero on success
*/
static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
switch (code->pad) {
@@ -270,7 +272,7 @@ static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
@@ -282,7 +284,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
+ ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -292,7 +294,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
+ ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -309,13 +311,13 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipe_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -333,25 +335,26 @@ static int ipipe_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipe_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
+ format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
+ ipipe_try_format(ipipe, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPE_PAD_SINK) {
- format = __ipipe_get_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP,
+ format = __ipipe_get_format(ipipe, sd_state,
+ IPIPE_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipe_try_format(ipipe, cfg, IPIPE_PAD_SOURCE_VP, format,
+ ipipe_try_format(ipipe, sd_state, IPIPE_PAD_SOURCE_VP, format,
fmt->which);
}
@@ -392,7 +395,7 @@ static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipe_set_format(sd, fh ? fh->pad : NULL, &format);
+ ipipe_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index c2978d02e797..5e7f25cd53ac 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -357,11 +357,12 @@ static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&ipipeif->subdev, sd_state,
+ pad);
return &ipipeif->formats[pad];
}
@@ -374,7 +375,7 @@ __ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
*/
static void
ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -403,7 +404,8 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_ISIF_SF:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -418,7 +420,8 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
break;
case IPIPEIF_PAD_SOURCE_VP:
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -442,7 +445,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
* return -EINVAL or zero on success
*/
static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -462,7 +465,8 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- format = __ipipeif_get_format(ipipeif, cfg, IPIPEIF_PAD_SINK,
+ format = __ipipeif_get_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SINK,
code->which);
code->code = format->code;
@@ -476,7 +480,7 @@ static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
}
static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
@@ -488,7 +492,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
+ ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -498,7 +502,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
+ ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -515,13 +519,13 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipeif_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -539,33 +543,36 @@ static int ipipeif_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int ipipeif_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
+ format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
+ ipipeif_try_format(ipipeif, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == IPIPEIF_PAD_SINK) {
- format = __ipipeif_get_format(ipipeif, cfg,
+ format = __ipipeif_get_format(ipipeif, sd_state,
IPIPEIF_PAD_SOURCE_ISIF_SF,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_ISIF_SF,
+ ipipeif_try_format(ipipeif, sd_state,
+ IPIPEIF_PAD_SOURCE_ISIF_SF,
format, fmt->which);
- format = __ipipeif_get_format(ipipeif, cfg,
+ format = __ipipeif_get_format(ipipeif, sd_state,
IPIPEIF_PAD_SOURCE_VP,
fmt->which);
*format = fmt->format;
- ipipeif_try_format(ipipeif, cfg, IPIPEIF_PAD_SOURCE_VP, format,
+ ipipeif_try_format(ipipeif, sd_state, IPIPEIF_PAD_SOURCE_VP,
+ format,
fmt->which);
}
@@ -608,7 +615,7 @@ static int ipipeif_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
- ipipeif_set_format(sd, fh ? fh->pad : NULL, &format);
+ ipipeif_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index 3b6875cbca9b..a5f8f9f1ab16 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -416,11 +416,12 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&resizer->subdev, cfg, pad);
+ return v4l2_subdev_get_try_format(&resizer->subdev, sd_state,
+ pad);
return &resizer->formats[pad];
}
@@ -433,7 +434,7 @@ __resizer_get_format(struct iss_resizer_device *resizer,
*/
static void
resizer_try_format(struct iss_resizer_device *resizer,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ struct v4l2_subdev_state *sd_state, unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
@@ -461,7 +462,8 @@ resizer_try_format(struct iss_resizer_device *resizer,
case RESIZER_PAD_SOURCE_MEM:
pixelcode = fmt->code;
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
+ format = __resizer_get_format(resizer, sd_state,
+ RESIZER_PAD_SINK,
which);
memcpy(fmt, format, sizeof(*fmt));
@@ -492,7 +494,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -507,7 +509,8 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
break;
case RESIZER_PAD_SOURCE_MEM:
- format = __resizer_get_format(resizer, cfg, RESIZER_PAD_SINK,
+ format = __resizer_get_format(resizer, sd_state,
+ RESIZER_PAD_SINK,
code->which);
if (code->index == 0) {
@@ -537,7 +540,7 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
@@ -549,7 +552,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = 1;
format.height = 1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
@@ -559,7 +562,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
format.code = fse->code;
format.width = -1;
format.height = -1;
- resizer_try_format(resizer, cfg, fse->pad, &format, fse->which);
+ resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
@@ -576,13 +579,13 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
* to the format type.
*/
static int resizer_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
@@ -600,26 +603,28 @@ static int resizer_get_format(struct v4l2_subdev *sd,
* to the format type.
*/
static int resizer_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- format = __resizer_get_format(resizer, cfg, fmt->pad, fmt->which);
+ format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
- resizer_try_format(resizer, cfg, fmt->pad, &fmt->format, fmt->which);
+ resizer_try_format(resizer, sd_state, fmt->pad, &fmt->format,
+ fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == RESIZER_PAD_SINK) {
- format = __resizer_get_format(resizer, cfg,
+ format = __resizer_get_format(resizer, sd_state,
RESIZER_PAD_SOURCE_MEM,
fmt->which);
*format = fmt->format;
- resizer_try_format(resizer, cfg, RESIZER_PAD_SOURCE_MEM, format,
+ resizer_try_format(resizer, sd_state, RESIZER_PAD_SOURCE_MEM,
+ format,
fmt->which);
}
@@ -662,7 +667,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
format.format.width = 4096;
format.format.height = 4096;
- resizer_set_format(sd, fh ? fh->pad : NULL, &format);
+ resizer_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 66975a37dc85..4b07d6682f6e 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -206,8 +206,8 @@ static struct iss_video *
iss_video_far_end(struct iss_video *video)
{
struct media_graph graph;
- struct media_entity *entity = &video->video.entity;
- struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_pad *pad = video->video.entity.pads;
+ struct media_device *mdev = video->video.entity.graph_obj.mdev;
struct iss_video *far_end = NULL;
mutex_lock(&mdev->graph_mutex);
@@ -217,16 +217,17 @@ iss_video_far_end(struct iss_video *video)
return NULL;
}
- media_graph_walk_start(&graph, entity);
+ media_graph_walk_start(&graph, pad);
- while ((entity = media_graph_walk_next(&graph))) {
- if (entity == &video->video.entity)
+ while ((pad = media_graph_walk_next(&graph))) {
+ if (pad->entity == &video->video.entity)
continue;
- if (!is_media_entity_v4l2_video_device(entity))
+ if (!is_media_entity_v4l2_video_device(pad->entity))
continue;
- far_end = to_iss_video(media_entity_to_video_device(entity));
+ far_end = to_iss_video(media_entity_to_video_device(
+ pad->entity));
if (far_end->type != video->type)
break;
@@ -853,7 +854,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
struct iss_video_fh *vfh = to_iss_video_fh(fh);
struct iss_video *video = video_drvdata(file);
struct media_graph graph;
- struct media_entity *entity = &video->video.entity;
+ struct media_pad *pad = video->video.entity.pads;
enum iss_pipeline_state state;
struct iss_pipeline *pipe;
struct iss_video *far_end;
@@ -869,30 +870,31 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
* Start streaming on the pipeline. No link touching an entity in the
* pipeline can be activated or deactivated once streaming is started.
*/
- pipe = entity->pipe
- ? to_iss_pipeline(entity) : &video->pipe;
+ pipe = pad->pipe
+ ? to_iss_pipeline(pad->entity) : &video->pipe;
pipe->external = NULL;
pipe->external_rate = 0;
pipe->external_bpp = 0;
- ret = media_entity_enum_init(&pipe->ent_enum, entity->graph_obj.mdev);
+ ret = media_entity_enum_init(&pipe->ent_enum,
+ pad->entity->graph_obj.mdev);
if (ret)
goto err_graph_walk_init;
- ret = media_graph_walk_init(&graph, entity->graph_obj.mdev);
+ ret = media_graph_walk_init(&graph, pad->entity->graph_obj.mdev);
if (ret)
goto err_graph_walk_init;
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, true);
- ret = media_pipeline_start(entity, &pipe->pipe);
+ ret = media_pipeline_start(pad, &pipe->pipe);
if (ret < 0)
goto err_media_pipeline_start;
- media_graph_walk_start(&graph, entity);
- while ((entity = media_graph_walk_next(&graph)))
- media_entity_enum_set(&pipe->ent_enum, entity);
+ media_graph_walk_start(&graph, pad);
+ while ((pad = media_graph_walk_next(&graph)))
+ media_entity_enum_set(&pipe->ent_enum, pad->entity);
/*
* Verify that the currently configured format matches the output of
@@ -975,7 +977,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
err_omap4iss_set_stream:
vb2_streamoff(&vfh->queue, type);
err_iss_video_check_format:
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
err_media_pipeline_start:
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, false);
@@ -1029,7 +1031,7 @@ iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
if (video->iss->pdata->set_constraints)
video->iss->pdata->set_constraints(video->iss, false);
- media_pipeline_stop(&video->video.entity);
+ media_pipeline_stop(video->video.entity.pads);
done:
mutex_unlock(&video->stream_lock);
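The graph-walk conversion above is mechanical but easy to misread: media_graph_walk_start() and media_graph_walk_next() now traverse pads, so loop bodies reach the entity through pad->entity. A condensed sketch of the far-end lookup, assuming graph was already set up with media_graph_walk_init():

static struct video_device *
my_find_far_end(struct video_device *vdev, struct media_graph *graph)
{
	struct media_pad *pad = vdev->entity.pads;

	media_graph_walk_start(graph, pad);
	while ((pad = media_graph_walk_next(graph))) {
		if (pad->entity == &vdev->entity)
			continue;
		if (is_media_entity_v4l2_video_device(pad->entity))
			return media_entity_to_video_device(pad->entity);
	}
	return NULL;
}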
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 8b3dd92021e1..ea7fa108b890 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -92,7 +92,7 @@ struct iss_pipeline {
};
#define to_iss_pipeline(__e) \
- container_of((__e)->pipe, struct iss_pipeline, pipe)
+ container_of((__e)->pads->pipe, struct iss_pipeline, pipe)
static inline int iss_pipeline_ready(struct iss_pipeline *pipe)
{
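With the pipeline pointer moved into struct media_pad, container_of() back-pointers like to_iss_pipeline() hop through the entity's first pad, and "is this entity in a pipeline" checks become pad-based too. Sketched against a hypothetical struct my_pipeline:

#define to_my_pipeline(__e) \
	container_of((__e)->pads->pipe, struct my_pipeline, pipe)

static inline bool my_entity_streaming(struct media_entity *me)
{
	return me->pads->pipe != NULL;	/* was: me->pipe */
}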
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
index 8936f5a81680..a732b159e759 100644
--- a/drivers/staging/media/rkisp1/rkisp1-capture.c
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -921,7 +921,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
mutex_lock(&cap->rkisp1->stream_lock);
rkisp1_stream_stop(cap);
- media_pipeline_stop(&node->vdev.entity);
+ media_pipeline_stop(node->vdev.entity.pads);
ret = rkisp1_pipeline_sink_walk(&node->vdev.entity, NULL,
rkisp1_pipeline_disable_cb);
if (ret)
@@ -1010,7 +1010,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
if (ret)
goto err_stop_stream;
- ret = media_pipeline_start(entity, &cap->rkisp1->pipe);
+ ret = media_pipeline_start(entity->pads, &cap->rkisp1->pipe);
if (ret) {
dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret);
goto err_pipe_disable;
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
index a9715b0b7264..6087d95029d7 100644
--- a/drivers/staging/media/rkisp1/rkisp1-isp.c
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -208,24 +208,30 @@ static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd)
static struct v4l2_mbus_framefmt *
rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = isp->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&isp->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_format(&isp->sd, isp->pad_cfg, pad);
+ return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = isp->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&isp->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_crop(&isp->sd, isp->pad_cfg, pad);
+ return v4l2_subdev_get_try_crop(&isp->sd, &state, pad);
}
/* ----------------------------------------------------------------------------
@@ -561,7 +567,7 @@ static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
*/
static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
unsigned int i, dir;
@@ -601,37 +607,37 @@ static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop, *src_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
- sink_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
- src_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_crop = *sink_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_PARAMS);
- src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_STATS);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -643,7 +649,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
}
static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -651,9 +657,9 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *src_crop;
- src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
src_fmt->code = format->code;
@@ -684,17 +690,17 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
}
static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *sink_crop;
struct v4l2_rect *src_crop;
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO,
which);
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
@@ -707,21 +713,23 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
*r = *src_crop;
/* Propagate to out format */
- src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_fmt(isp, cfg, src_fmt, which);
+ rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt, which);
}
static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_rect *sink_crop, *src_crop;
struct v4l2_mbus_framefmt *sink_fmt;
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_crop->left = ALIGN(r->left, 2);
@@ -733,13 +741,13 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
*r = *sink_crop;
/* Propagate to out crop */
- src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
- rkisp1_isp_set_src_crop(isp, cfg, src_crop, which);
+ rkisp1_isp_set_src_crop(isp, sd_state, src_crop, which);
}
static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -747,7 +755,8 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_fmt->code = format->code;
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
@@ -768,36 +777,40 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
*format = *sink_fmt;
/* Propagate to in crop */
- sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
which);
- rkisp1_isp_set_sink_crop(isp, cfg, sink_crop, which);
+ rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop, which);
}
static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
mutex_lock(&isp->ops_lock);
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
+ fmt->which);
mutex_unlock(&isp->ops_lock);
return 0;
}
static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which);
+ rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format,
+ fmt->which);
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_fmt(isp, cfg, &fmt->format, fmt->which);
+ rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format,
+ fmt->which);
else
- fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad,
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&isp->ops_lock);
@@ -805,7 +818,7 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
}
static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
@@ -821,20 +834,20 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
struct v4l2_mbus_framefmt *fmt;
- fmt = rkisp1_isp_get_pad_fmt(isp, cfg, sel->pad,
+ fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, sel->pad,
sel->which);
sel->r.height = fmt->height;
sel->r.width = fmt->width;
sel->r.left = 0;
sel->r.top = 0;
} else {
- sel->r = *rkisp1_isp_get_pad_crop(isp, cfg,
- RKISP1_ISP_PAD_SINK_VIDEO,
- sel->which);
+ sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ sel->which);
}
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, sel->pad,
+ sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state, sel->pad,
sel->which);
break;
default:
@@ -845,7 +858,7 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
}
static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_device *rkisp1 =
@@ -860,9 +873,9 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&isp->ops_lock);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
- rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which);
+ rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r, sel->which);
else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
- rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which);
+ rkisp1_isp_set_src_crop(isp, sd_state, &sel->r, sel->which);
else
ret = -EINVAL;
@@ -1003,6 +1016,9 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
+ struct v4l2_subdev_state state = {
+ .pads = rkisp1->isp.pad_cfg
+ };
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
struct v4l2_subdev *sd = &isp->sd;
@@ -1035,7 +1051,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
goto err_cleanup_media_entity;
}
- rkisp1_isp_init_config(sd, rkisp1->isp.pad_cfg);
+ rkisp1_isp_init_config(sd, &state);
return 0;
err_cleanup_media_entity:
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
index 76f17dd7670f..0fca37bc8a4b 100644
--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -180,24 +180,30 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
static struct v4l2_mbus_framefmt *
rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&rsz->sd, cfg, pad);
+ return v4l2_subdev_get_try_format(&rsz->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_format(&rsz->sd, rsz->pad_cfg, pad);
+ return v4l2_subdev_get_try_format(&rsz->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&rsz->sd, cfg, pad);
+ return v4l2_subdev_get_try_crop(&rsz->sd, sd_state, pad);
else
- return v4l2_subdev_get_try_crop(&rsz->sd, rsz->pad_cfg, pad);
+ return v4l2_subdev_get_try_crop(&rsz->sd, &state, pad);
}
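The two helpers above capture the core pattern of this conversion: the driver's persistent pad_cfg array (the ACTIVE configuration) is wrapped in a stack-allocated v4l2_subdev_state, so the TRY and ACTIVE cases can be served by the same v4l2_subdev_get_try_*() accessors. A minimal sketch of the idea, with hypothetical names (active_cfg stands in for rsz->pad_cfg):

static struct v4l2_mbus_framefmt *
get_fmt_by_which(struct v4l2_subdev *sd,
		 struct v4l2_subdev_pad_config *active_cfg,
		 struct v4l2_subdev_state *try_state,
		 unsigned int pad, u32 which)
{
	/* Temporary state wrapping the persistent ACTIVE pad configs */
	struct v4l2_subdev_state active_state = {
		.pads = active_cfg,
	};

	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(sd, try_state, pad);

	/*
	 * The returned pointer aims into active_cfg, which outlives this
	 * stack frame, so handing it back to the caller is safe.
	 */
	return v4l2_subdev_get_try_format(sd, &active_state, pad);
}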
/* ----------------------------------------------------------------------------
@@ -451,12 +457,15 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
*/
static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_subdev_pad_config dummy_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &dummy_cfg
+ };
u32 pad = code->pad;
int ret;
@@ -481,7 +490,7 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
/* supported mbus codes on the sink pad are the same as isp src pad */
code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
- &dummy_cfg, code);
+ &pad_state, code);
/* restore pad */
code->pad = pad;
@@ -490,12 +499,13 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
}
static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg)
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SRC);
+ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_RSZ_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
@@ -505,13 +515,15 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
- sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+ RKISP1_RSZ_PAD_SINK);
*src_fmt = *sink_fmt;
/* NOTE: there is no crop in the source pad, only in the sink */
@@ -520,15 +532,17 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
}
static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_isp_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
+ which);
sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
@@ -547,7 +561,7 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r,
unsigned int which)
{
@@ -555,8 +569,10 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
which);
/* Not crop for MP bayer raw data */
@@ -583,7 +599,7 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
}
static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
@@ -591,9 +607,12 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
- src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
- sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
+ which);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
+ which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
which);
if (rsz->id == RKISP1_SELFPATH)
sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
@@ -621,24 +640,25 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
*format = *sink_fmt;
/* Update sink crop */
- rkisp1_rsz_set_sink_crop(rsz, cfg, sink_crop, which);
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
}
static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
mutex_lock(&rsz->ops_lock);
- fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which);
+ fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, sd_state, fmt->pad,
+ fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
@@ -646,16 +666,18 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&rsz->ops_lock);
if (fmt->pad == RKISP1_RSZ_PAD_SINK)
- rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which);
+ rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format,
+ fmt->which);
else
- rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which);
+ rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format,
+ fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
@@ -669,7 +691,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
mutex_lock(&rsz->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ mf_sink = rkisp1_rsz_get_pad_fmt(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
sel->which);
sel->r.height = mf_sink->height;
sel->r.width = mf_sink->width;
@@ -677,7 +700,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
sel->r.top = 0;
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sel->r = *rkisp1_rsz_get_pad_crop(rsz, sd_state,
+ RKISP1_RSZ_PAD_SINK,
sel->which);
break;
default:
@@ -689,7 +713,7 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
}
static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
@@ -702,7 +726,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&rsz->ops_lock);
- rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which);
+ rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r, sel->which);
mutex_unlock(&rsz->ops_lock);
return 0;
@@ -768,6 +792,9 @@ static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
{
+ struct v4l2_subdev_state state = {
+ .pads = rsz->pad_cfg
+ };
const char * const dev_names[] = {RKISP1_RSZ_MP_DEV_NAME,
RKISP1_RSZ_SP_DEV_NAME};
struct media_pad *pads = rsz->pads;
@@ -804,7 +831,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
goto err_cleanup_media_entity;
}
- rkisp1_rsz_init_config(sd, rsz->pad_cfg);
+ rkisp1_rsz_init_config(sd, &state);
return 0;
err_cleanup_media_entity:
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index dc5d432a09e8..cc729b727207 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -64,7 +64,7 @@ static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
* V4L2 Subdevice Pad Operations
*/
static int csi_enum_bus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
@@ -79,7 +79,7 @@ static int csi_enum_bus_code(struct v4l2_subdev *subdev,
}
static int csi_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
@@ -127,7 +127,7 @@ static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
}
static int csi_enum_framesizes(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
unsigned int i;
@@ -154,7 +154,7 @@ static int csi_enum_framesizes(struct v4l2_subdev *subdev,
}
static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
@@ -181,7 +181,7 @@ static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
}
static int csi_set_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
index ac066c030a4f..71541a84d2d8 100644
--- a/drivers/staging/media/tegra-video/tegra210.c
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -459,7 +459,7 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
VI_INCR_SYNCPT_NO_STALL);
/* start the pipeline */
- ret = media_pipeline_start(&chan->video.entity, pipe);
+ ret = media_pipeline_start(chan->video.entity.pads, pipe);
if (ret < 0)
goto error_pipeline_start;
@@ -500,7 +500,7 @@ error_kthread_done:
error_kthread_start:
tegra_channel_set_stream(chan, false);
error_set_stream:
- media_pipeline_stop(&chan->video.entity);
+ media_pipeline_stop(chan->video.entity.pads);
error_pipeline_start:
tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
return ret;
@@ -522,7 +522,7 @@ static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
tegra_channel_set_stream(chan, false);
- media_pipeline_stop(&chan->video.entity);
+ media_pipeline_stop(chan->video.entity.pads);
}
/*
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 560d8b368124..b95dd336e95d 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -490,9 +490,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
struct v4l2_pix_format *pix)
{
const struct tegra_video_format *fmtinfo;
+ static struct lock_class_key key;
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt;
- struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_state *sd_state;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -506,9 +507,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
if (!subdev)
return -ENODEV;
- pad_cfg = v4l2_subdev_alloc_pad_config(subdev);
- if (!pad_cfg)
- return -ENOMEM;
+ sd_state = __v4l2_subdev_state_alloc(subdev, "tegra:state->lock",
+ &key);
+ if (IS_ERR(sd_state))
+ return PTR_ERR(sd_state);
/*
* Retrieve the format information and if requested format isn't
* supported, keep the current format.
@@ -531,26 +533,26 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
* If not available, try to get crop boundary from subdev.
*/
fse.code = fmtinfo->code;
- ret = v4l2_subdev_call(subdev, pad, enum_frame_size, pad_cfg, &fse);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
if (ret) {
ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
if (ret)
return -EINVAL;
- pad_cfg->try_crop.width = sdsel.r.width;
- pad_cfg->try_crop.height = sdsel.r.height;
+ sd_state->pads->try_crop.width = sdsel.r.width;
+ sd_state->pads->try_crop.height = sdsel.r.height;
} else {
- pad_cfg->try_crop.width = fse.max_width;
- pad_cfg->try_crop.height = fse.max_height;
+ sd_state->pads->try_crop.width = fse.max_width;
+ sd_state->pads->try_crop.height = fse.max_height;
}
- ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
if (ret < 0)
return ret;
v4l2_fill_pix_format(pix, &fmt.format);
tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
- v4l2_subdev_free_pad_config(pad_cfg);
+ __v4l2_subdev_state_free(sd_state);
return 0;
}
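Note the replacement of v4l2_subdev_alloc_pad_config() with __v4l2_subdev_state_alloc() above: the new allocator takes a lock name and a lockdep class key, and the key must have static storage duration because lockdep holds on to it after the state is freed. A hedged sketch of the calling convention, assuming only what this hunk shows (the function and lock name are illustrative):

static int demo_try_format(struct v4l2_subdev *subdev)
{
	static struct lock_class_key key;	/* static: lockdep keeps a reference */
	struct v4l2_subdev_state *sd_state;

	sd_state = __v4l2_subdev_state_alloc(subdev, "demo:state->lock", &key);
	if (IS_ERR(sd_state))
		return PTR_ERR(sd_state);

	/* ... negotiate TRY formats through sd_state->pads ... */

	__v4l2_subdev_state_free(sd_state);
	return 0;
}

Note also that the early error returns in the hunk above leave sd_state allocated; a production version would route them through __v4l2_subdev_state_free() as well.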
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index 3f223e5b1872..c4a4d584f46e 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -58,13 +58,6 @@ static inline struct zr_buffer *vb2_to_zr_buffer(struct vb2_buffer *vb)
#define BUZ_MAX_INPUT 16
-#if VIDEO_MAX_FRAME <= 32
-# define V4L_MAX_FRAME 32
-#elif VIDEO_MAX_FRAME <= 64
-# define V4L_MAX_FRAME 64
-#else
-# error "Too many video frame buffers to handle"
-#endif
#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1)
#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? BUZ_MAX_FRAME : VIDEO_MAX_FRAME)
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index b64dd50a6629..6e6f40108e31 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -28,7 +28,7 @@ thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o
# devfreq cooling
thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
-obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o
+obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o k3_j72xx_bandgap.o
# platform thermal drivers
obj-y += broadcom/
obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
new file mode 100644
index 000000000000..6d1d4e965ea1
--- /dev/null
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI Bandgap temperature sensor driver for J72XX SoC Family
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+
+#define K3_VTM_DEVINFO_PWR0_OFFSET 0x4
+#define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0
+#define K3_VTM_TMPSENS0_CTRL_OFFSET 0x300
+#define K3_VTM_MISC_CTRL_OFFSET 0xc
+#define K3_VTM_TMPSENS_STAT_OFFSET 0x8
+#define K3_VTM_ANYMAXT_OUTRG_ALERT_EN 0x1
+#define K3_VTM_MISC_CTRL2_OFFSET 0x10
+#define K3_VTM_TS_STAT_DTEMP_MASK 0x3ff
+#define K3_VTM_MAX_NUM_TS 8
+#define K3_VTM_TMPSENS_CTRL_SOC BIT(5)
+#define K3_VTM_TMPSENS_CTRL_CLRZ BIT(6)
+#define K3_VTM_TMPSENS_CTRL_CLKON_REQ BIT(7)
+#define K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN BIT(11)
+
+#define K3_VTM_CORRECTION_TEMP_CNT 3
+
+#define MINUS40CREF 5
+#define PLUS30CREF 253
+#define PLUS125CREF 730
+#define PLUS150CREF 940
+
+#define TABLE_SIZE 1024
+#define MAX_TEMP 123000
+#define COOL_DOWN_TEMP 105000
+
+#define FACTORS_REDUCTION 13
+static int *derived_table;
+
+static int compute_value(int index, const s64 *factors, int nr_factors,
+ int reduction)
+{
+ s64 value = 0;
+ int i;
+
+ for (i = 0; i < nr_factors; i++)
+ value += factors[i] * int_pow(index, i);
+
+ return (int)div64_s64(value, int_pow(10, reduction));
+}
+
+static void init_table(int factors_size, int *table, const s64 *factors)
+{
+ int i;
+
+ for (i = 0; i < TABLE_SIZE; i++)
+ table[i] = compute_value(i, factors, factors_size,
+ FACTORS_REDUCTION);
+}
+
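compute_value() evaluates a polynomial whose coefficients are pre-scaled by 10^FACTORS_REDUCTION so that the whole sum stays in 64-bit integer arithmetic; the final division undoes the scaling. A standalone userspace illustration with made-up two-term factors (the driver's real golden_factors are defined later in probe()):

#include <stdio.h>

static long long ipow(long long base, unsigned int exp)
{
	long long r = 1;

	while (exp--)
		r *= base;
	return r;
}

int main(void)
{
	/* value(i) = (f0 + f1 * i) / 10^4, in millicelsius */
	const long long factors[] = { -400000000LL, 1000000LL };
	long long value = 0;
	int i = 500, k;

	for (k = 0; k < 2; k++)
		value += factors[k] * ipow(i, k);

	/* (-4e8 + 1e6 * 500) / 1e4 = 10000, i.e. 10.0 degrees C */
	printf("%lld millicelsius\n", value / ipow(10, 4));
	return 0;
}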
+/**
+ * struct err_values - structure containing error/reference values
+ * @refs: reference error values for -40C, 30C, 125C & 150C
+ * @errs: Actual error values for -40C, 30C, 125C & 150C read from the efuse
+ */
+struct err_values {
+ int refs[4];
+ int errs[4];
+};
+
+static void create_table_segments(struct err_values *err_vals, int seg,
+ int *ref_table)
+{
+ int m = 0, c, num, den, i, err, idx1, idx2, err1, err2, ref1, ref2;
+
+ if (seg == 0)
+ idx1 = 0;
+ else
+ idx1 = err_vals->refs[seg];
+
+ idx2 = err_vals->refs[seg + 1];
+ err1 = err_vals->errs[seg];
+ err2 = err_vals->errs[seg + 1];
+ ref1 = err_vals->refs[seg];
+ ref2 = err_vals->refs[seg + 1];
+
+ /*
+ * Calculate the slope with adc values read from the register
+ * as the y-axis param and err in adc value as x-axis param
+ */
+ num = ref2 - ref1;
+ den = err2 - err1;
+ if (den)
+ m = num / den;
+ c = ref2 - m * err2;
+
+ /*
+ * Guard against a division by zero when the error values are
+ * equal, or when the slope works out to zero
+ */
+ if (den != 0 && m != 0) {
+ for (i = idx1; i <= idx2; i++) {
+ err = (i - c) / m;
+ if (((i + err) < 0) || ((i + err) >= TABLE_SIZE))
+ continue;
+ derived_table[i] = ref_table[i + err];
+ }
+ } else { /* Constant error: avoid dividing by zero */
+ for (i = idx1; i <= idx2; i++) {
+ if (((i + err1) < 0) || ((i + err1) >= TABLE_SIZE))
+ continue;
+ derived_table[i] = ref_table[i + err1];
+ }
+ }
+}
+
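A worked example of the interpolation above, with illustrative numbers: suppose segment 1 spans ref1 = 253 (err1 = 2) to ref2 = 730 (err2 = 4). Then num = 477, den = 2, so m = 238 and c = 730 - 238 * 4 = -222. For code i = 500 the estimated error is err = (500 - (-222)) / 238 = 3, and derived_table[500] = ref_table[503]: each raw ADC code is shifted by its interpolated error before the reference table is consulted.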
+static int prep_lookup_table(struct err_values *err_vals, int *ref_table)
+{
+ int inc, i, seg;
+
+ /*
+ * Fill up the lookup table under 3 segments
+ * region -40C to +30C
+ * region +30C to +125C
+ * region +125C to +150C
+ */
+ for (seg = 0; seg < 3; seg++)
+ create_table_segments(err_vals, seg, ref_table);
+
+ /* Get to the first valid temperature */
+ i = 0;
+ while (!derived_table[i])
+ i++;
+
+ /*
+ * Back-fill the leading zero entries from the first valid
+ * temperature, for the sake of continuity
+ */
+ if (i) {
+ /* 300 millicelsius steps */
+ while (--i)
+ derived_table[i] = derived_table[i + 1] - 300;
+ /* case 0 */
+ derived_table[i] = derived_table[i + 1] - 300;
+ }
+
+ /*
+ * Fill the trailing zero entries with 100 millicelsius
+ * increments up to code 1023
+ */
+ i = TABLE_SIZE - 1;
+ while (!derived_table[i])
+ i--;
+
+ i++;
+ inc = 1;
+ while (i < TABLE_SIZE) {
+ derived_table[i] = derived_table[i - 1] + inc * 100;
+ i++;
+ }
+
+ return 0;
+}
+
+struct k3_thermal_data;
+
+struct k3_j72xx_bandgap {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *cfg2_base;
+ void __iomem *fuse_base;
+ struct k3_thermal_data *ts_data[K3_VTM_MAX_NUM_TS];
+};
+
+/* common data structures */
+struct k3_thermal_data {
+ struct k3_j72xx_bandgap *bgp;
+ struct cpufreq_policy *policy;
+ struct thermal_zone_device *ti_thermal;
+ struct thermal_cooling_device *cool_dev;
+ struct work_struct thermal_wq;
+ u32 ctrl_offset;
+ u32 stat_offset;
+ enum thermal_device_mode mode;
+ int prev_temp;
+ int sensor_id;
+};
+
+static void k3_thermal_work(struct work_struct *work)
+{
+ struct k3_thermal_data *data = container_of(work,
+ struct k3_thermal_data, thermal_wq);
+
+ thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);
+
+ dev_info(&data->ti_thermal->device, "updated thermal zone %s\n",
+ data->ti_thermal->type);
+}
+
+static int two_cmp(int tmp, int mask)
+{
+ tmp = ~(tmp);
+ tmp &= mask;
+ tmp += 1;
+
+ /* Return negative value */
+ return (0 - tmp);
+}
+
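two_cmp() sign-extends a masked two's-complement field read from the efuse. A standalone demonstration of the decoding for a 9-bit field (mask 0x1ff):

#include <assert.h>

static int two_cmp(int tmp, int mask)
{
	tmp = ~(tmp);
	tmp &= mask;
	tmp += 1;

	/* Return negative value */
	return (0 - tmp);
}

int main(void)
{
	/* raw 0x1fe decodes to -2, raw 0x1ff to -1 */
	assert(two_cmp(0x1fe, 0x1ff) == -2);
	assert(two_cmp(0x1ff, 0x1ff) == -1);
	return 0;
}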
+static unsigned int vtm_get_best_value(unsigned int s0, unsigned int s1,
+ unsigned int s2)
+{
+ int d01 = abs(s0 - s1);
+ int d02 = abs(s0 - s2);
+ int d12 = abs(s1 - s2);
+
+ if (d01 <= d02 && d01 <= d12)
+ return (s0 + s1) / 2;
+
+ if (d02 <= d01 && d02 <= d12)
+ return (s0 + s2) / 2;
+
+ return (s1 + s2) / 2;
+}
+
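For example, readings of 512, 514 and 600 give pairwise distances d01 = 2, d02 = 88 and d12 = 86; the closest pair is (512, 514), so the function returns 513 and the 600 outlier is discarded.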
+static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata,
+ int *temp)
+{
+ struct k3_j72xx_bandgap *bgp;
+ unsigned int dtemp, s0, s1, s2;
+
+ bgp = devdata->bgp;
+ /*
+ * Errata i2128 applies to AM654 PG 1.0 silicon and J7ES: the ADC
+ * readout can vary on the order of a few degrees centigrade
+ * between samples. Work around that by taking three readings and
+ * averaging the two closest ones every time a temperature is
+ * reported.
+ */
+ s0 = readl(bgp->base + devdata->stat_offset) &
+ K3_VTM_TS_STAT_DTEMP_MASK;
+ s1 = readl(bgp->base + devdata->stat_offset) &
+ K3_VTM_TS_STAT_DTEMP_MASK;
+ s2 = readl(bgp->base + devdata->stat_offset) &
+ K3_VTM_TS_STAT_DTEMP_MASK;
+ dtemp = vtm_get_best_value(s0, s1, s2);
+
+ if (dtemp >= TABLE_SIZE)
+ return -EINVAL;
+
+ *temp = derived_table[dtemp];
+
+ return 0;
+}
+
+/* Get temperature callback function for thermal zone */
+static int k3_thermal_get_temp(void *devdata, int *temp)
+{
+ struct k3_thermal_data *data = devdata;
+
+ return k3_bgp_read_temp(data, temp);
+}
+
+static int k3_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
+{
+ struct k3_thermal_data *data = p;
+ struct k3_j72xx_bandgap *bgp;
+ int temp1, temp2;
+ int id, tr, ret = 0;
+
+ bgp = data->bgp;
+ id = data->sensor_id;
+
+ ret = k3_thermal_get_temp(data, &temp1);
+ if (ret)
+ return ret;
+ temp2 = data->prev_temp;
+
+ tr = temp1 - temp2;
+
+ data->prev_temp = temp1;
+
+ if (tr > 0)
+ *trend = THERMAL_TREND_RAISING;
+ else if (tr < 0)
+ *trend = THERMAL_TREND_DROPPING;
+ else
+ *trend = THERMAL_TREND_STABLE;
+
+ dev_dbg(bgp->dev, "The temperatures are t1 = %d and t2 = %d and trend =%d\n",
+ temp1, temp2, *trend);
+
+ return ret;
+}
+
+static const struct thermal_zone_of_device_ops k3_of_thermal_ops = {
+ .get_temp = k3_thermal_get_temp,
+ .get_trend = k3_thermal_get_trend,
+};
+
+static int k3_j72xx_bandgap_temp_to_adc_code(int temp)
+{
+ int low = 0, high = TABLE_SIZE - 1, mid;
+
+ if (temp > 160000 || temp < -50000)
+ return -EINVAL;
+
+ /* Binary search to find the adc code */
+ while (low < (high - 1)) {
+ mid = (low + high) / 2;
+ if (temp <= derived_table[mid])
+ high = mid;
+ else
+ low = mid;
+ }
+
+ return mid;
+}
+
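The search runs over derived_table, which is monotonically increasing by construction, so it converges in about ten steps for a 1024-entry table. In probe() below, this is used to translate MAX_TEMP (123000 millicelsius) and COOL_DOWN_TEMP (105000 millicelsius) into ADC codes for the TSHUT threshold registers; on the golden table the MAX_TEMP code lands a little below PLUS125CREF (730).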
+static void get_efuse_values(int id, struct k3_thermal_data *data, int *err,
+ struct k3_j72xx_bandgap *bgp)
+{
+ int i, tmp, pow;
+ int ct_offsets[5][K3_VTM_CORRECTION_TEMP_CNT] = {
+ { 0x0, 0x8, 0x4 },
+ { 0x0, 0x8, 0x4 },
+ { 0x0, -1, 0x4 },
+ { 0x0, 0xC, -1 },
+ { 0x0, 0xc, 0x8 }
+ };
+ int ct_bm[5][K3_VTM_CORRECTION_TEMP_CNT] = {
+ { 0x3f, 0x1fe000, 0x1ff },
+ { 0xfc0, 0x1fe000, 0x3fe00 },
+ { 0x3f000, 0x7f800000, 0x7fc0000 },
+ { 0xfc0000, 0x1fe0, 0x1f800000 },
+ { 0x3f000000, 0x1fe000, 0x1ff0 }
+ };
+
+ for (i = 0; i < 3; i++) {
+ /* Extract the offset value using bit-mask */
+ if (ct_offsets[id][i] == -1 && i == 1) {
+ /* 25C offset Case of Sensor 2 split between 2 regs */
+ tmp = (readl(bgp->fuse_base + 0x8) & 0xE0000000) >> (29);
+ tmp |= ((readl(bgp->fuse_base + 0xC) & 0x1F) << 3);
+ pow = tmp & 0x80;
+ } else if (ct_offsets[id][i] == -1 && i == 2) {
+ /* 125C Case of Sensor 3 split between 2 regs */
+ tmp = (readl(bgp->fuse_base + 0x4) & 0xF8000000) >> (27);
+ tmp |= ((readl(bgp->fuse_base + 0x8) & 0xF) << 5);
+ pow = tmp & 0x100;
+ } else {
+ tmp = readl(bgp->fuse_base + ct_offsets[id][i]);
+ tmp &= ct_bm[id][i];
+ tmp = tmp >> __ffs(ct_bm[id][i]);
+
+ /* Obtain the sign bit (pow) */
+ pow = ct_bm[id][i] >> __ffs(ct_bm[id][i]);
+ pow += 1;
+ pow /= 2;
+ }
+
+ /* Check for negative value */
+ if (tmp & pow) {
+ /* 2's complement value */
+ tmp = two_cmp(tmp, ct_bm[id][i] >> __ffs(ct_bm[id][i]));
+ }
+ err[i] = tmp;
+ }
+
+ /* Err value for 150C is set to 0 */
+ err[i] = 0;
+}
+
+static void print_look_up_table(struct device *dev, int *ref_table)
+{
+ int i;
+
+ dev_dbg(dev, "The contents of derived array\n");
+ dev_dbg(dev, "Code Temperature\n");
+ for (i = 0; i < TABLE_SIZE; i++)
+ dev_dbg(dev, "%d %d %d\n", i, derived_table[i], ref_table[i]);
+}
+
+struct k3_j72xx_bandgap_data {
+ unsigned int has_errata_i2128;
+};
+
+int k3_thermal_register_cpu_cooling(struct k3_j72xx_bandgap *bgp, int id)
+{
+ struct k3_thermal_data *data;
+ struct device_node *np = bgp->dev->of_node;
+
+ /*
+ * We are assuming here that if one deploys the zone
+ * using DT, then it must be aware that the cooling device
+ * loading has to happen via cpufreq driver.
+ */
+ if (of_find_property(np, "#thermal-sensor-cells", NULL))
+ return 0;
+
+ data = bgp->ts_data[id];
+ if (!data)
+ return -EINVAL;
+
+ data->policy = cpufreq_cpu_get(0);
+ if (!data->policy) {
+ pr_debug("%s: CPUFreq policy not found\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ /* Register cooling device */
+ data->cool_dev = cpufreq_cooling_register(data->policy);
+ if (IS_ERR(data->cool_dev)) {
+ int ret = PTR_ERR(data->cool_dev);
+
+ dev_err(bgp->dev, "Failed to register cpu cooling device %d\n",
+ ret);
+ cpufreq_cpu_put(data->policy);
+
+ return ret;
+ }
+
+ data->mode = THERMAL_DEVICE_ENABLED;
+
+ INIT_WORK(&data->thermal_wq, k3_thermal_work);
+
+ return 0;
+}
+
+int ti_thermal_unregister_cpu_cooling(struct k3_j72xx_bandgap *bgp, int id)
+{
+ struct k3_thermal_data *data;
+
+ data = bgp->ts_data[id];
+
+ if (!IS_ERR_OR_NULL(data)) {
+ cpufreq_cooling_unregister(data->cool_dev);
+ if (data->policy)
+ cpufreq_cpu_put(data->policy);
+ }
+
+ return 0;
+}
+
+static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
+{
+ int ret = 0, cnt, val, id;
+ int high_max, low_temp;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct k3_j72xx_bandgap *bgp;
+ struct k3_thermal_data *data;
+ int workaround_needed = 0;
+ const struct k3_j72xx_bandgap_data *driver_data;
+ struct thermal_zone_device *ti_thermal;
+ int *ref_table;
+ struct err_values err_vals;
+
+ const s64 golden_factors[] = {
+ -490019999999999936,
+ 3251200000000000,
+ -1705800000000,
+ 603730000,
+ -92627,
+ };
+
+ const s64 pvt_wa_factors[] = {
+ -415230000000000000,
+ 3126600000000000,
+ -1157800000000,
+ };
+
+ bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
+ if (!bgp)
+ return -ENOMEM;
+
+ bgp->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bgp->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bgp->base))
+ return PTR_ERR(bgp->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ bgp->cfg2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bgp->cfg2_base))
+ return PTR_ERR(bgp->cfg2_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ bgp->fuse_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bgp->fuse_base))
+ return PTR_ERR(bgp->fuse_base);
+
+ driver_data = of_device_get_match_data(dev);
+ if (driver_data)
+ workaround_needed = driver_data->has_errata_i2128;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ /* Get the sensor count in the VTM */
+ val = readl(bgp->base + K3_VTM_DEVINFO_PWR0_OFFSET);
+ cnt = val & K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK;
+ cnt >>= __ffs(K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK);
+
+ data = devm_kcalloc(bgp->dev, cnt, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ ref_table = kzalloc(sizeof(*ref_table) * TABLE_SIZE, GFP_KERNEL);
+ if (!ref_table) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ derived_table = devm_kzalloc(bgp->dev, sizeof(*derived_table) * TABLE_SIZE,
+ GFP_KERNEL);
+ if (!derived_table) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Workaround not needed if bit30/bit31 is set even for J721e */
+ if (workaround_needed && (readl(bgp->fuse_base + 0x0) & 0xc0000000) == 0xc0000000)
+ workaround_needed = false;
+
+ dev_dbg(bgp->dev, "Work around %sneeded\n",
+ workaround_needed ? "not " : "");
+
+ if (!workaround_needed)
+ init_table(5, ref_table, golden_factors);
+ else
+ init_table(3, ref_table, pvt_wa_factors);
+
+ /* Register the thermal sensors */
+ for (id = 0; id < cnt; id++) {
+ data[id].bgp = bgp;
+ data[id].sensor_id = id;
+ data[id].ctrl_offset = K3_VTM_TMPSENS0_CTRL_OFFSET + id * 0x20;
+ data[id].stat_offset = data[id].ctrl_offset +
+ K3_VTM_TMPSENS_STAT_OFFSET;
+
+ if (workaround_needed) {
+ /* ref adc values for -40C, 30C & 125C respectively */
+ err_vals.refs[0] = MINUS40CREF;
+ err_vals.refs[1] = PLUS30CREF;
+ err_vals.refs[2] = PLUS125CREF;
+ err_vals.refs[3] = PLUS150CREF;
+ get_efuse_values(id, &data[id], err_vals.errs, bgp);
+ }
+
+ if (id == 0 && workaround_needed)
+ prep_lookup_table(&err_vals, ref_table);
+ else if (id == 0 && !workaround_needed)
+ memcpy(derived_table, ref_table, TABLE_SIZE * sizeof(*derived_table));
+
+ val = readl(data[id].bgp->cfg2_base + data[id].ctrl_offset);
+ val |= (K3_VTM_TMPSENS_CTRL_MAXT_OUTRG_EN |
+ K3_VTM_TMPSENS_CTRL_SOC |
+ K3_VTM_TMPSENS_CTRL_CLRZ | BIT(4));
+ writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset);
+
+ bgp->ts_data[id] = &data[id];
+ if (id == 1) {
+ ret = k3_thermal_register_cpu_cooling(bgp, 1);
+ if (ret)
+ goto err_alloc;
+ }
+
+ ti_thermal =
+ devm_thermal_zone_of_sensor_register(bgp->dev, id,
+ &data[id],
+ &k3_of_thermal_ops);
+ if (IS_ERR(ti_thermal)) {
+ dev_err(bgp->dev, "thermal zone device is NULL\n");
+ ret = PTR_ERR(ti_thermal);
+ goto err_alloc;
+ }
+ }
+
+ /*
+ * Program TSHUT thresholds
+ * Step 1: set the thresholds to ~123C and 105C WKUP_VTM_MISC_CTRL2
+ * Step 2: WKUP_VTM_TMPSENS_CTRL_j set the MAXT_OUTRG_EN bit
+ * This is already taken care as per of init
+ * Step 3: WKUP_VTM_MISC_CTRL set the ANYMAXT_OUTRG_ALERT_EN bit
+ */
+ high_max = k3_j72xx_bandgap_temp_to_adc_code(MAX_TEMP);
+ low_temp = k3_j72xx_bandgap_temp_to_adc_code(COOL_DOWN_TEMP);
+
+ writel((low_temp << 16) | high_max, data[0].bgp->cfg2_base +
+ K3_VTM_MISC_CTRL2_OFFSET);
+ mdelay(100);
+ writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base +
+ K3_VTM_MISC_CTRL_OFFSET);
+
+ platform_set_drvdata(pdev, bgp);
+
+ print_look_up_table(dev, ref_table);
+ /*
+ * Now that the derived_table has the appropriate look up values
+ * Free up the ref_table
+ */
+ kfree(ref_table);
+
+ return 0;
+
+err_alloc:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int k3_j72xx_bandgap_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j721e_data = {
+ .has_errata_i2128 = 1,
+};
+
+const struct k3_j72xx_bandgap_data k3_j72xx_bandgap_j7200_data = {
+ .has_errata_i2128 = 0,
+};
+
+static const struct of_device_id of_k3_j72xx_bandgap_match[] = {
+ {
+ .compatible = "ti,j721e-vtm",
+ .data = &k3_j72xx_bandgap_j721e_data,
+ },
+ {
+ .compatible = "ti,j7200-vtm",
+ .data = &k3_j72xx_bandgap_j7200_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_k3_j72xx_bandgap_match);
+
+static struct platform_driver k3_j72xx_bandgap_sensor_driver = {
+ .probe = k3_j72xx_bandgap_probe,
+ .remove = k3_j72xx_bandgap_remove,
+ .driver = {
+ .name = "k3-j72xx-soc-thermal",
+ .of_match_table = of_k3_j72xx_bandgap_match,
+ },
+};
+
+module_platform_driver(k3_j72xx_bandgap_sensor_driver);
+
+MODULE_DESCRIPTION("K3 bandgap temperature sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 483fff3a95c9..2bc6b982238f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -44,6 +44,7 @@
#define UART_HAS_EFR2 BIT(4)
#define UART_HAS_RHR_IT_DIS BIT(5)
#define UART_RX_TIMEOUT_QUIRK BIT(6)
+#define UART_HAS_NATIVE_RS485 BIT(7)
#define OMAP_UART_FCR_RX_TRIG 6
#define OMAP_UART_FCR_TX_TRIG 4
@@ -101,6 +102,11 @@
#define UART_OMAP_IER2 0x1B
#define UART_OMAP_IER2_RHR_IT_DIS BIT(2)
+/* Mode Definition Register 3 */
+#define UART_OMAP_MDR3 0x20
+#define UART_OMAP_MDR3_DIR_POL BIT(3)
+#define UART_OMAP_MDR3_DIR_EN BIT(4)
+
/* Enhanced features register 2 */
#define UART_OMAP_EFR2 0x23
#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
@@ -112,6 +118,7 @@ struct omap8250_priv {
int line;
u8 habit;
u8 mdr1;
+ u8 mdr3;
u8 efr;
u8 scr;
u8 wer;
@@ -346,7 +353,10 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
__omap8250_set_mctrl(&up->port, up->port.mctrl);
- if (up->port.rs485.flags & SER_RS485_ENABLED)
+ serial_out(up, UART_OMAP_MDR3, priv->mdr3);
+
+ if (up->port.rs485.flags & SER_RS485_ENABLED &&
+ up->port.rs485_config == serial8250_em485_config)
serial8250_em485_stop_tx(up);
}
@@ -808,6 +818,73 @@ static void omap_8250_unthrottle(struct uart_port *port)
pm_runtime_put_autosuspend(port->dev);
}
+static int omap8250_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct omap8250_priv *priv = port->private_data;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u32 fixed_delay_rts_before_send = 0;
+ u32 fixed_delay_rts_after_send = 0;
+ unsigned int baud;
+
+ /*
+ * There is a fixed delay of 3 bit clock cycles after the TX shift
+ * register goes empty, to allow time for the stop bit to propagate
+ * through the transceiver before the direction is changed to receive.
+ *
+ * Additionally there appears to be a 1 bit clock delay between writing
+ * to the THR register and transmission of the start bit, per page 8783
+ * of the AM65 TRM: https://www.ti.com/lit/ug/spruid7e/spruid7e.pdf
+ */
+ if (priv->quot) {
+ if (priv->mdr1 == UART_OMAP_MDR1_16X_MODE)
+ baud = port->uartclk / (16 * priv->quot);
+ else
+ baud = port->uartclk / (13 * priv->quot);
+
+ fixed_delay_rts_after_send = 3 * MSEC_PER_SEC / baud;
+ fixed_delay_rts_before_send = 1 * MSEC_PER_SEC / baud;
+ }
+
+ /*
+ * Fall back to RS485 software emulation if the UART is missing
+ * hardware support, if the device tree specifies an mctrl_gpio
+ * (indicates that RTS is unavailable due to a pinmux conflict)
+ * or if the requested delays exceed the fixed hardware delays.
+ */
+ if (!(priv->habit & UART_HAS_NATIVE_RS485) ||
+ mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) ||
+ rs485->delay_rts_after_send > fixed_delay_rts_after_send ||
+ rs485->delay_rts_before_send > fixed_delay_rts_before_send) {
+ priv->mdr3 &= ~UART_OMAP_MDR3_DIR_EN;
+ serial_out(up, UART_OMAP_MDR3, priv->mdr3);
+
+ port->rs485_config = serial8250_em485_config;
+ return serial8250_em485_config(port, rs485);
+ }
+
+ rs485->delay_rts_after_send = fixed_delay_rts_after_send;
+ rs485->delay_rts_before_send = fixed_delay_rts_before_send;
+
+ if (rs485->flags & SER_RS485_ENABLED)
+ priv->mdr3 |= UART_OMAP_MDR3_DIR_EN;
+ else
+ priv->mdr3 &= ~UART_OMAP_MDR3_DIR_EN;
+
+ /*
+ * Retain same polarity semantics as RS485 software emulation,
+ * i.e. SER_RS485_RTS_ON_SEND means driving RTS low on send.
+ */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ priv->mdr3 &= ~UART_OMAP_MDR3_DIR_POL;
+ else
+ priv->mdr3 |= UART_OMAP_MDR3_DIR_POL;
+
+ serial_out(up, UART_OMAP_MDR3, priv->mdr3);
+
+ return 0;
+}
+
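To put numbers on the delay calculation: with an illustrative 48 MHz uartclk in 16x mode and quot = 26, baud = 48000000 / (16 * 26) ≈ 115385, so the 3-bit-time turnaround is about 26 µs and both millisecond-granularity fixed delays truncate to 0 ms; any nonzero delay requested by userspace then routes the port to software emulation. At 300 baud the same arithmetic gives delay_rts_after_send = 10 ms and delay_rts_before_send = 3 ms.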
#ifdef CONFIG_SERIAL_8250_DMA
static int omap_8250_rx_dma(struct uart_8250_port *p);
@@ -1260,7 +1337,7 @@ static struct omap8250_dma_params am33xx_dma = {
static struct omap8250_platdata am654_platdata = {
.dma_params = &am654_dma,
.habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
- UART_RX_TIMEOUT_QUIRK,
+ UART_RX_TIMEOUT_QUIRK | UART_HAS_NATIVE_RS485,
};
static struct omap8250_platdata am33xx_platdata = {
@@ -1353,7 +1430,8 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.shutdown = omap_8250_shutdown;
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
- up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_config = omap8250_rs485_config;
+ /* same rs485_supported for software emulation and native RS485 */
up.rs485_start_tx = serial8250_em485_start_tx;
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/8250/8250_pruss.c b/drivers/tty/serial/8250/8250_pruss.c
new file mode 100644
index 000000000000..b373714e474c
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pruss.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Serial Port driver for PRUSS UART on TI platforms
+ *
+ * Copyright (C) 2020-2021 by Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Bin Liu <b-liu@ti.com>
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pruss.h>
+#include <linux/remoteproc.h>
+#include "8250.h"
+
+#define DEFAULT_CLK_SPEED 192000000
+
+/* extra registers */
+#define PRUSS_UART_PEREMU_MGMT 12
+#define PRUSS_UART_TX_EN BIT(14)
+#define PRUSS_UART_RX_EN BIT(13)
+#define PRUSS_UART_FREE_RUN BIT(0)
+
+#define PRUSS_UART_MDR 13
+#define PRUSS_UART_MDR_OSM_SEL_MASK BIT(0)
+#define PRUSS_UART_MDR_16X_MODE 0
+#define PRUSS_UART_MDR_13X_MODE 1
+
+struct pruss8250_info {
+ int type;
+ int line;
+};
+
+static inline void uart_writel(struct uart_port *p, u32 offset, int value)
+{
+ writel(value, p->membase + (offset << p->regshift));
+}
+
+static int pruss8250_startup(struct uart_port *port)
+{
+ int ret;
+
+ uart_writel(port, PRUSS_UART_PEREMU_MGMT, 0);
+
+ ret = serial8250_do_startup(port);
+ if (!ret)
+ uart_writel(port, PRUSS_UART_PEREMU_MGMT, PRUSS_UART_TX_EN |
+ PRUSS_UART_RX_EN |
+ PRUSS_UART_FREE_RUN);
+ return ret;
+}
+
+static unsigned int pruss8250_get_divisor(struct uart_port *port,
+ unsigned int baud,
+ unsigned int *frac)
+{
+ unsigned int uartclk = port->uartclk;
+ unsigned int div_13, div_16;
+ unsigned int abs_d13, abs_d16;
+ u16 quot;
+
+ /* Old custom speed handling */
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
+ quot = port->custom_divisor & UART_DIV_MAX;
+ if (port->custom_divisor & (1 << 16))
+ *frac = PRUSS_UART_MDR_13X_MODE;
+ else
+ *frac = PRUSS_UART_MDR_16X_MODE;
+
+ return quot;
+ }
+
+ div_13 = DIV_ROUND_CLOSEST(uartclk, 13 * baud);
+ div_16 = DIV_ROUND_CLOSEST(uartclk, 16 * baud);
+ div_13 = div_13 ? : 1;
+ div_16 = div_16 ? : 1;
+
+ abs_d13 = abs(baud - uartclk / 13 / div_13);
+ abs_d16 = abs(baud - uartclk / 16 / div_16);
+
+ if (abs_d13 >= abs_d16) {
+ *frac = PRUSS_UART_MDR_16X_MODE;
+ quot = div_16;
+ } else {
+ *frac = PRUSS_UART_MDR_13X_MODE;
+ quot = div_13;
+ }
+
+ return quot;
+}
+
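A worked example of the oversampling choice: with the default 192 MHz clock at 115200 baud, div_16 rounds to 104 and div_13 to 128, and since 16 * 104 = 13 * 128 = 1664 both modes yield the same ~115385 line rate, so the tie goes to 16x mode. With a hypothetical 100 MHz clock instead, div_16 = 54 gives ~115741 (error ~541) while div_13 = 67 gives ~114811 (error ~389), so 13x oversampling is selected.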
+static void pruss8250_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ serial8250_do_set_divisor(port, baud, quot, quot_frac);
+ /*
+ * quot_frac holds the MDR over-sampling mode
+ * which is set in pruss8250_get_divisor()
+ */
+ quot_frac &= PRUSS_UART_MDR_OSM_SEL_MASK;
+ serial_port_out(port, PRUSS_UART_MDR, quot_frac);
+}
+
+static int pruss8250_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct uart_8250_port port8250;
+ struct uart_port *up = &port8250.port;
+ struct pruss8250_info *info;
+ struct resource resource;
+ unsigned int port_type;
+ struct clk *clk;
+ int ret;
+
+ port_type = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (port_type == PORT_UNKNOWN)
+ return -EINVAL;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ memset(&port8250, 0, sizeof(port8250));
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_err(&pdev->dev, "invalid address\n");
+ return ret;
+ }
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret > 0)
+ up->line = ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ up->uartclk = DEFAULT_CLK_SPEED;
+ } else {
+ up->uartclk = clk_get_rate(clk);
+ devm_clk_put(&pdev->dev, clk);
+ }
+
+ up->dev = &pdev->dev;
+ up->mapbase = resource.start;
+ up->mapsize = resource_size(&resource);
+ up->type = port_type;
+ up->iotype = UPIO_MEM;
+ up->regshift = 2;
+ up->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
+ UPF_FIXED_TYPE | UPF_IOREMAP;
+ up->irqflags |= IRQF_SHARED;
+ up->startup = pruss8250_startup;
+ up->rs485_config = serial8250_em485_config;
+ up->get_divisor = pruss8250_get_divisor;
+ up->set_divisor = pruss8250_set_divisor;
+
+ ret = of_irq_get(np, 0);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "missing irq\n");
+ return ret;
+ }
+
+ up->irq = ret;
+ spin_lock_init(&port8250.port.lock);
+ port8250.capabilities = UART_CAP_FIFO | UART_CAP_AFE;
+
+ ret = serial8250_register_8250_port(&port8250);
+ if (ret < 0)
+ goto err_dispose;
+
+ info->type = port_type;
+ info->line = ret;
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+err_dispose:
+ irq_dispose_mapping(port8250.port.irq);
+ return ret;
+}
+
+static int pruss8250_remove(struct platform_device *pdev)
+{
+ struct pruss8250_info *info = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(info->line);
+ return 0;
+}
+
+static const struct of_device_id pruss8250_table[] = {
+ { .compatible = "ti,pruss-uart", .data = (void *)PORT_16550A, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, pruss8250_table);
+
+static struct platform_driver pruss8250_driver = {
+ .driver = {
+ .name = "pruss8250",
+ .of_match_table = pruss8250_table,
+ },
+ .probe = pruss8250_probe,
+ .remove = pruss8250_remove,
+};
+
+module_platform_driver(pruss8250_driver);
+
+MODULE_AUTHOR("Bin Liu <b-liu@ti.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Serial Port driver for PRUSS UART on TI platforms");
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 136f2b1460f9..9ab718913832 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -510,6 +510,16 @@ config SERIAL_8250_TEGRA
Select this option if you have machine with an NVIDIA Tegra SoC and
wish to enable 8250 serial driver for the Tegra serial interfaces.
+config SERIAL_8250_PRUSS
+ tristate "TI PRU-ICSS UART support"
+ depends on SERIAL_8250
+ depends on PRU_REMOTEPROC && TI_PRUSS_INTC
+ help
+ This driver supports the UART module in the PRU-ICSS subsystem
+ found on some TI platforms.
+ Say 'Y' here if you wish to use the PRU-ICSS UART.
+ Otherwise, say 'N'.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index a8bfb654d490..ea256e622201 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
+obj-$(CONFIG_SERIAL_8250_PRUSS) += 8250_pruss.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 28f22e58639c..e51141a19f79 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1583,6 +1583,16 @@ config SERIAL_MILBEAUT_USIO_CONSOLE
receives all kernel messages and warnings and which allows logins in
single user mode).
+config SERIAL_PRU_SWUART
+ tristate "TI PRU Software UART support"
+ depends on PRU_REMOTEPROC && !ARCH_K3
+ select SERIAL_CORE
+ help
+ This driver supports a software UART implemented on the PRUSS by
+ PRU firmware.
+ Say 'Y' here if you wish to use the PRU software based UART.
+ Otherwise, say 'N'.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index caf167f0c10a..3f70664ff787 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
+obj-$(CONFIG_SERIAL_PRU_SWUART) += pru_swuart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/pru_swuart.c b/drivers/tty/serial/pru_swuart.c
new file mode 100644
index 000000000000..1060dd9b436e
--- /dev/null
+++ b/drivers/tty/serial/pru_swuart.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UART driver for PRU software based UART (PRU SWUART)
+ *
+ * Copyright (C) 2019-2021 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Bin Liu <b-liu@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pruss.h>
+#include <linux/remoteproc.h>
+#include <linux/serial_core.h>
+#include <linux/tty_flip.h>
+
+#define PSUART_NAME "ttySPRU"
+
+#define PSUART_FW_MAGIC_NUMBER 0x54524155 /* "UART" */
+#define MAX_UART_PORTS 3
+#define DRV_TOTAL_PORTS (MAX_UART_PORTS * 2) /* 2 PRUs */
+#define PORT_MMR_BASE 0x14
+#define PORT_MMR_LEN 0x14
+#define FIFO_BASE 0x100
+#define FIFO_SIZE 256
+#define BPC 2 /* bytes per char */
+
+/* hw flow control threshold */
+#define RX_FIFO_THRES_SHIFT 16
+#define RX_FIFO_THRES ((FIFO_SIZE - 16) << RX_FIFO_THRES_SHIFT)
+#define RX_FIFO_THRES_MASK (0xff << RX_FIFO_THRES_SHIFT)
+
+/* global registers */
+#define PSUART_FW_MAGIC 0x00
+#define PSUART_FW_VERSION 0x08
+#define PSUART_FW_GCFG 0x50
+#define PSUART_FW_INITED BIT(1)
+
+/* uart port registers */
+#define PPORT_ENABLE 0x00
+#define PPORT_STATUS 0x01
+#define PPORT_ENABLED (BIT(1) | BIT(0))
+
+#define PPORT_CFG 0x04
+#define PPORT_FIFO_POS 0x08
+#define PPORT_TXFIFO_POS 0x08 /* 16-bit register */
+#define PPORT_TXFIFO_WRITE 0x09 /* 8-bit register */
+#define PPORT_RXFIFO_POS 0x0a /* 16-bit register */
+#define PPORT_RXFIFO_READ 0x0a /* 8-bit register */
+#define PPORT_TX_CFG 0x0c
+#define PPORT_TX_INTR_CTRL 0x0d /* 8-bit register */
+#define PPORT_RX_CFG 0x10
+#define PPORT_RX_INTR_CTRL 0x11 /* 8-bit register */
+
+/* PPORT_CFG register bits */
+#define PPORT_CFG_HWFLOW_EN BIT(12)
+#define PPORT_CFG_PARADD BIT(10)
+#define PPORT_CFG_PAR_EN BIT(9)
+#define PPORT_CFG_CSTOPB BIT(8)
+
+#define PPORT_CFG_CS6 0x1
+#define PPORT_CFG_CS7 0x2
+#define PPORT_CFG_CS8 0x3
+#define PPORT_CFG_CSSHIFT 4
+
+#define PPORT_CFG_B600 0x1
+#define PPORT_CFG_B1200 0x2
+#define PPORT_CFG_B2400 0x3
+#define PPORT_CFG_B4800 0x4
+#define PPORT_CFG_B9600 0x5
+#define PPORT_CFG_B19200 0x7
+#define PPORT_CFG_B38400 0x9
+#define PPORT_CFG_B57600 0xa
+#define PPORT_CFG_B115200 0xb
+#define PPORT_CFG_BSHIFT 0
+#define PPORT_MAX_BAUD 115200
+
+/* rx character info bits */
+#define PPORT_RX_CHAR_PE BIT(15)
+#define PPORT_RX_CHAR_FE BIT(14)
+
+struct psuart_port {
+ struct uart_port port;
+ void __iomem *mbase;
+ void __iomem *tx_fifo;
+ void __iomem *rx_fifo;
+};
+
+struct pru_swuart {
+ struct device *dev;
+ struct rproc *pru;
+ struct pruss *pruss;
+ enum pruss_pru_id pru_id;
+ struct pruss_mem_region mem;
+};
+
+struct pport_pins {
+ u8 tx;
+ u8 rx;
+ u8 cts;
+ u8 rts;
+};
+
+union fifo_pos {
+ u16 pos;
+ struct {
+ u8 tail; /* read pointer */
+ u8 head; /* write pointer */
+ } s;
+};
+
+static struct psuart_port pports[DRV_TOTAL_PORTS];
+
+static inline struct psuart_port *up_to_pport(struct uart_port *up)
+{
+ return container_of(up, struct psuart_port, port);
+}
+
+static inline u32 psuart_readl(struct pru_swuart *pu, u32 reg)
+{
+ return readl(pu->mem.va + reg);
+}
+
+static inline void psuart_writel(struct pru_swuart *pu, u32 reg, u32 val)
+{
+ writel(val, pu->mem.va + reg);
+}
+
+static inline u8 pport_readb(struct psuart_port *pp, u32 reg)
+{
+ return readb(pp->mbase + reg);
+}
+
+static inline void pport_writeb(struct psuart_port *pp, u32 reg, u8 val)
+{
+ writeb(val, pp->mbase + reg);
+}
+
+static inline u16 pport_readw(struct psuart_port *pp, u32 reg)
+{
+ return readw(pp->mbase + reg);
+}
+
+static inline u32 pport_readl(struct psuart_port *pp, u32 reg)
+{
+ return readl(pp->mbase + reg);
+}
+
+static inline void pport_writel(struct psuart_port *pp, u32 reg, u32 val)
+{
+ writel(val, pp->mbase + reg);
+}
+
+static inline int pport_is_fifo_empty(union fifo_pos *pos)
+{
+ return pos->s.head == pos->s.tail;
+}
+
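The fifo_pos union relies on the firmware packing the read pointer in the low byte and the write pointer in the high byte of a 16-bit register, so a single readw() takes a self-consistent snapshot of both; with FIFO_SIZE = 256 the u8 pointers wrap for free, and the kernel's CIRC_CNT()/CIRC_SPACE() macros give the fill level. A hedged userspace illustration of that snapshot math, assuming the little-endian layout the union implies:

#include <assert.h>

#define FIFO_SIZE	256
/* same definition as CIRC_CNT() in include/linux/circ_buf.h */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	/* a readw() snapshot of 0x2010 means head = 0x20, tail = 0x10 */
	unsigned char head = 0x20, tail = 0x10;

	/* 16 bytes in flight = 8 characters at 2 bytes per char (BPC) */
	assert(CIRC_CNT(head, tail, FIFO_SIZE) == 16);
	return 0;
}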
+static void pport_rx_chars(struct psuart_port *pp)
+{
+ struct uart_port *up = &pp->port;
+ union fifo_pos fifo;
+ u16 ch;
+ int i, total;
+
+ fifo.pos = pport_readw(pp, PPORT_RXFIFO_POS);
+ total = CIRC_CNT(fifo.s.head, fifo.s.tail, FIFO_SIZE) / BPC;
+ if (!total)
+ return;
+
+ for (i = 0; i < total; i++) {
+ ch = readw(pp->rx_fifo + fifo.s.tail);
+ fifo.s.tail += BPC;
+
+ if (ch & PPORT_RX_CHAR_PE)
+ up->icount.parity++;
+ if (ch & PPORT_RX_CHAR_FE)
+ up->icount.frame++;
+
+ uart_insert_char(up, 0, 0, ch, TTY_NORMAL);
+ }
+
+ up->icount.rx += total;
+ pport_writeb(pp, PPORT_RXFIFO_READ, fifo.s.tail);
+ tty_flip_buffer_push(&up->state->port);
+}
+
+static void pport_tx_chars(struct psuart_port *pp)
+{
+ struct uart_port *up = &pp->port;
+ union fifo_pos fifo;
+ struct circ_buf *xmit = &up->state->xmit;
+ int count;
+
+ fifo.pos = pport_readw(pp, PPORT_TXFIFO_POS);
+ count = CIRC_SPACE(fifo.s.head, fifo.s.tail, FIFO_SIZE) / BPC;
+ if (!count)
+ return;
+
+ if (up->x_char) {
+ writew(up->x_char, pp->tx_fifo + fifo.s.head);
+ fifo.s.head += BPC;
+ pport_writeb(pp, PPORT_TXFIFO_WRITE, fifo.s.head);
+ up->icount.tx++;
+ up->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(up))
+ return;
+
+ do {
+ writew(xmit->buf[xmit->tail], pp->tx_fifo + fifo.s.head);
+ fifo.s.head += BPC;
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ pport_writeb(pp, PPORT_TXFIFO_WRITE, fifo.s.head);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(up);
+}
+
+static irqreturn_t pport_handle_irq(int irq, void *pru_port)
+{
+ struct psuart_port *pp = (struct psuart_port *)pru_port;
+ int rx, tx;
+
+ rx = pport_readb(pp, PPORT_RX_INTR_CTRL);
+ if (rx) {
+ pport_writeb(pp, PPORT_RX_INTR_CTRL, 0);
+ pport_rx_chars(pp);
+ }
+
+ tx = pport_readb(pp, PPORT_TX_INTR_CTRL);
+ if (tx) {
+ pport_tx_chars(pp);
+ pport_writeb(pp, PPORT_TX_INTR_CTRL, 1);
+ }
+
+ if (rx)
+ pport_writeb(pp, PPORT_RX_INTR_CTRL, 1);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int pport_tx_empty(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+ union fifo_pos tx;
+
+ tx.pos = pport_readw(pp, PPORT_TXFIFO_POS);
+
+ return pport_is_fifo_empty(&tx) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int pport_get_mctrl(struct uart_port *up)
+{
+ return up->mctrl;
+}
+
+/* the hardware flow control doesn't require any software assistance */
+static void pport_set_mctrl(struct uart_port *up, unsigned int mctrl)
+{
+}
+
+static void pport_stop_tx(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+
+ pport_writeb(pp, PPORT_TX_INTR_CTRL, 0);
+}
+
+static void pport_start_tx(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+
+ pport_writeb(pp, PPORT_TX_INTR_CTRL, 0);
+ pport_tx_chars(pp);
+ pport_writeb(pp, PPORT_TX_INTR_CTRL, 1);
+}
+
+static void pport_stop_rx(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+
+ pport_writeb(pp, PPORT_RX_INTR_CTRL, 0);
+}
+
+static void pport_start_rx(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+
+ pport_writeb(pp, PPORT_RX_INTR_CTRL, 0);
+ pport_rx_chars(pp);
+ pport_writeb(pp, PPORT_RX_INTR_CTRL, 1);
+}
+
+static void pport_throttle(struct uart_port *up)
+{
+ pport_stop_rx(up);
+}
+
+static void pport_unthrottle(struct uart_port *up)
+{
+ pport_start_rx(up);
+}
+
+/* line break is not supported */
+static void pport_break_ctl(struct uart_port *up, int break_state)
+{
+}
+
+/* software flow control currently not supported */
+static void pport_set_termios(struct uart_port *up, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct psuart_port *pp = up_to_pport(up);
+ tcflag_t cflag;
+ unsigned int baud;
+ u32 cfg;
+
+ /* reset all fields except hw flow control settings */
+ cfg = pport_readl(pp, PPORT_CFG);
+ cfg &= RX_FIFO_THRES_MASK;
+
+ cflag = termios->c_cflag;
+ switch (cflag & CSIZE) {
+ case CS5:
+ break;
+ case CS6:
+ cfg |= (PPORT_CFG_CS6 << PPORT_CFG_CSSHIFT);
+ break;
+ case CS7:
+ cfg |= (PPORT_CFG_CS7 << PPORT_CFG_CSSHIFT);
+ break;
+ case CS8:
+ default:
+ cfg |= (PPORT_CFG_CS8 << PPORT_CFG_CSSHIFT);
+ break;
+ }
+
+ if (cflag & PARENB) {
+ cfg |= PPORT_CFG_PAR_EN;
+ cfg |= (cflag & PARODD) ? PPORT_CFG_PARADD : 0;
+ }
+
+ cfg |= (cflag & CSTOPB) ? PPORT_CFG_CSTOPB : 0;
+
+ if (cflag & CRTSCTS) {
+ cfg |= PPORT_CFG_HWFLOW_EN;
+ /*
+ * Set TIOCM_CTS here to prevent the core's uart_change_speed()
+ * from calling ops->stop_tx() when hw flow control is enabled.
+ */
+ up->mctrl |= TIOCM_CTS;
+ } else {
+ up->mctrl &= ~TIOCM_CTS;
+ }
+
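+ /* encode the line speed; unrecognized rates fall back to 9600 */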
+ switch (cflag & CBAUD) {
+ case B300:
+ break;
+ case B600:
+ cfg |= (PPORT_CFG_B600 << PPORT_CFG_BSHIFT);
+ break;
+ case B1200:
+ cfg |= (PPORT_CFG_B1200 << PPORT_CFG_BSHIFT);
+ break;
+ case B2400:
+ cfg |= (PPORT_CFG_B2400 << PPORT_CFG_BSHIFT);
+ break;
+ case B4800:
+ cfg |= (PPORT_CFG_B4800 << PPORT_CFG_BSHIFT);
+ break;
+ case B9600:
+ default:
+ cfg |= (PPORT_CFG_B9600 << PPORT_CFG_BSHIFT);
+ break;
+ case B19200:
+ cfg |= (PPORT_CFG_B19200 << PPORT_CFG_BSHIFT);
+ break;
+ case B38400:
+ cfg |= (PPORT_CFG_B38400 << PPORT_CFG_BSHIFT);
+ break;
+ case B57600:
+ cfg |= (PPORT_CFG_B57600 << PPORT_CFG_BSHIFT);
+ break;
+ case B115200:
+ cfg |= (PPORT_CFG_B115200 << PPORT_CFG_BSHIFT);
+ break;
+ }
+
+ baud = uart_get_baud_rate(up, termios, old, 0, PPORT_MAX_BAUD);
+ uart_update_timeout(up, cflag, baud);
+
+ pport_writeb(pp, PPORT_ENABLE, 0);
+ pport_writel(pp, PPORT_CFG, cfg);
+ pport_writeb(pp, PPORT_ENABLE, 1);
+}
+
+static int pport_startup(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+ int timeout = 100;
+
+ if (up->flags & UPF_HARD_FLOW) {
+ /*
+ * CTS is an input-only pin in the firmware, so AUTOCTS is
+ * not supported.
+ */
+ up->status |= UPSTAT_AUTORTS;
+ pport_writel(pp, PPORT_CFG, RX_FIFO_THRES);
+ }
+
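+ /*
+ * The firmware acknowledges the enable request by setting
+ * PPORT_ENABLED in the status register; poll for it with a
+ * bounded busy-wait.
+ */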
+ pport_writeb(pp, PPORT_ENABLE, 1);
+ while (!(pport_readb(pp, PPORT_STATUS) & PPORT_ENABLED)) {
+ if (--timeout < 0) {
+ dev_err(up->dev, "failed to enable port\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ pport_start_rx(up);
+ return 0;
+}
+
+static void pport_shutdown(struct uart_port *up)
+{
+ struct psuart_port *pp = up_to_pport(up);
+
+ pport_writeb(pp, PPORT_ENABLE, 0);
+}
+
+static void pport_config_port(struct uart_port *up, int flags)
+{
+ up->type = PORT_PSUART;
+ up->flags |= UPF_HARD_FLOW;
+}
+
+/* rs485 is unsupported */
+static int pport_rs485_config(struct uart_port *up, struct serial_rs485 *rs485)
+{
+ return rs485->flags & SER_RS485_ENABLED ? -EOPNOTSUPP : 0;
+}
+
+static const struct uart_ops psuart_port_ops = {
+ .tx_empty = pport_tx_empty,
+ .get_mctrl = pport_get_mctrl,
+ .set_mctrl = pport_set_mctrl,
+ .stop_tx = pport_stop_tx,
+ .start_tx = pport_start_tx,
+ .throttle = pport_throttle,
+ .unthrottle = pport_unthrottle,
+ .stop_rx = pport_stop_rx,
+ .break_ctl = pport_break_ctl,
+ .startup = pport_startup,
+ .shutdown = pport_shutdown,
+ .set_termios = pport_set_termios,
+ .config_port = pport_config_port,
+};
+
+static struct uart_driver psuart_port_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = "PRU-SWUART",
+ .dev_name = PSUART_NAME,
+ .nr = DRV_TOTAL_PORTS,
+};
+
+static int pport_config_port_pins(struct psuart_port *pp,
+ struct device_node *np)
+{
+ struct device *dev = pp->port.dev;
+ struct pport_pins *pins;
+ int nr_pins;
+ int ret;
+ u32 val;
+
+ nr_pins = of_property_count_u8_elems(np, "ti,pru-swuart-pins");
+
+ /* CTS/RTS pins are optional */
+ if (nr_pins != 2 && nr_pins != 4) {
+ dev_err(dev, "unexpected number of pins\n");
+ return -EINVAL;
+ }
+ pp->port.flags = (nr_pins == 4) ? UPF_HARD_FLOW : 0;
+
+ pins = devm_kmalloc(dev, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ /* set non-configured pin value to 0xff */
+ memset(pins, 0xff, sizeof(*pins));
+ ret = of_property_read_u8_array(np, "ti,pru-swuart-pins",
+ (u8 *)pins, nr_pins);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "interrupts", &val);
+ if (ret)
+ return ret;
+
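+ /*
+ * TX_CFG/RX_CFG layout as consumed by the firmware: flow-control pin
+ * (CTS/RTS) in bits 31-24, data pin (TX/RX) in bits 23-16, and the
+ * PRU event number taken from "interrupts" in bits 7-0.
+ */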
+ val = pins->cts << 24 | pins->tx << 16 | (val & 0xff);
+ pport_writel(pp, PPORT_TX_CFG, val);
+
+ val = pins->rts << 24 | pins->rx << 16 | (val & 0xff);
+ pport_writel(pp, PPORT_RX_CFG, val);
+
+ return 0;
+}
+
+static int psuart_init_port(struct pru_swuart *pu, struct device_node *np,
+ int index)
+{
+ struct psuart_port *pp;
+ int port_id;
+ int ret = 0;
+
+ port_id = pu->pru_id * MAX_UART_PORTS + index;
+ pp = &pports[port_id];
+ if (pp->mbase) {
+ dev_err(pu->dev, "Error: port[%d] is already initialized\n",
+ index);
+ return -EEXIST;
+ }
+
+ ret = of_irq_get(np, 0);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(pu->dev, "port[%d]: failed to get irq (%d)\n",
+ index, ret);
+ return ret;
+ }
+ pp->port.irq = ret;
+
+ ret = request_irq(pp->port.irq, pport_handle_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(pu->dev), pp);
+ if (ret) {
+ dev_err(pu->dev, "port[%d]: failed to request irq (%d)\n",
+ index, ret);
+ return ret;
+ }
+
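+ /*
+ * Per-port layout in the PRU data RAM: MMRs from PORT_MMR_BASE, then
+ * a TX FIFO immediately followed by an RX FIFO of FIFO_SIZE bytes.
+ */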
+ pp->mbase = pu->mem.va + PORT_MMR_BASE + PORT_MMR_LEN * index;
+ pp->tx_fifo = pu->mem.va + FIFO_BASE + FIFO_SIZE * 2 * index;
+ pp->rx_fifo = pp->tx_fifo + FIFO_SIZE;
+
+ pp->port.dev = pu->dev;
+ pp->port.type = PORT_PSUART;
+ pp->port.iotype = UPIO_MEM;
+ pp->port.fifosize = FIFO_SIZE / BPC;
+ pp->port.ops = &psuart_port_ops;
+ pp->port.line = port_id;
+ pp->port.rs485_config = pport_rs485_config;
+
+ ret = pport_config_port_pins(pp, np);
+ if (ret) {
+ free_irq(pp->port.irq, pp);
+ return ret;
+ }
+
+ ret = uart_add_one_port(&psuart_port_drv, &pp->port);
+ if (ret) {
+ dev_err(pu->dev, "adding port[%d] failed (%d)\n", index, ret);
+ pp->mbase = NULL;
+ free_irq(pp->port.irq, pp);
+ }
+
+ return ret;
+}
+
+static int psuart_init_pruss(struct device_node *np, struct pru_swuart *pu)
+{
+ u32 reg;
+ int ret = 0;
+
+ pu->pru = pru_rproc_get(np, 0, &pu->pru_id);
+ if (IS_ERR(pu->pru)) {
+ ret = PTR_ERR(pu->pru);
+ if (ret != -EPROBE_DEFER)
+ dev_err(pu->dev, "failed to get pru (%d)\n", ret);
+ return ret;
+ }
+
+ pu->pruss = pruss_get(pu->pru);
+ if (IS_ERR(pu->pruss)) {
+ ret = PTR_ERR(pu->pruss);
+ dev_err(pu->dev, "failed to get pruss handle (%d)\n", ret);
+ goto put_pru;
+ }
+
+ ret = pruss_cfg_ocp_master_ports(pu->pruss, 1);
+ if (ret) {
+ dev_err(pu->dev, "failed to enable ocp master port (%d)\n",
+ ret);
+ goto put_pruss;
+ }
+
+ if (pu->pru_id >= PRUSS_NUM_PRUS) {
+ dev_err(pu->dev, "invalid pru id (%d)\n", pu->pru_id);
+ ret = -EINVAL;
+ goto put_ocp;
+ }
+
+ ret = pruss_request_mem_region(pu->pruss,
+ pu->pru_id ? PRUSS_MEM_DRAM1 : PRUSS_MEM_DRAM0,
+ &pu->mem);
+ if (ret) {
+ dev_err(pu->dev, "failed to get pruss mem region (%d)\n", ret);
+ goto put_ocp;
+ }
+
+ /* clear the mem region before the firmware is started by rproc_boot() */
+ memset_io(pu->mem.va, 0, pu->mem.size);
+
+ ret = rproc_boot(pu->pru);
+ if (ret) {
+ dev_err(pu->dev, "failed to boot pru (%d)\n", ret);
+ goto put_mem;
+ }
+
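+ /* sanity-check the firmware: magic number, supported version, init flag */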
+ reg = psuart_readl(pu, PSUART_FW_MAGIC);
+ if (reg != PSUART_FW_MAGIC_NUMBER) {
+ dev_err(pu->dev, "invalid firmware magic number\n");
+ ret = -EINVAL;
+ goto put_rproc;
+ }
+
+ reg = psuart_readl(pu, PSUART_FW_VERSION);
+ if (reg > 0x01000000) {
+ dev_err(pu->dev, "unsupported firmware version(0x%x)\n",
+ reg);
+ ret = -EINVAL;
+ goto put_rproc;
+ }
+
+ reg = psuart_readl(pu, PSUART_FW_GCFG);
+ if (!(reg & PSUART_FW_INITED)) {
+ dev_err(pu->dev, "failed to initialize firmware\n");
+ ret = -EINVAL;
+ goto put_rproc;
+ }
+
+ return ret;
+
+put_rproc:
+ rproc_shutdown(pu->pru);
+put_mem:
+ pruss_release_mem_region(pu->pruss, &pu->mem);
+put_ocp:
+ pruss_cfg_ocp_master_ports(pu->pruss, 0);
+put_pruss:
+ pruss_put(pu->pruss);
+put_pru:
+ pru_rproc_put(pu->pru);
+
+ return ret;
+}
+
+static void psuart_free_pruss(struct pru_swuart *pu)
+{
+ rproc_shutdown(pu->pru);
+ pruss_release_mem_region(pu->pruss, &pu->mem);
+ pruss_cfg_ocp_master_ports(pu->pruss, 0);
+ pruss_put(pu->pruss);
+ pru_rproc_put(pu->pru);
+}
+
+static const struct of_device_id psuart_dt_ids[] = {
+ { .compatible = "ti,pru-swuart", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, psuart_dt_ids);
+
+static int psuart_probe(struct platform_device *pdev)
+{
+ struct pru_swuart *pu;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 id;
+ int ret;
+
+ if (!np)
+ return -ENODEV; /* we don't support non-DT boot */
+
+ pu = devm_kzalloc(dev, sizeof(*pu), GFP_KERNEL);
+ if (!pu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pu);
+ pu->dev = dev;
+
+ ret = psuart_init_pruss(np, pu);
+ if (ret < 0)
+ return ret;
+
+ for_each_available_child_of_node(np, child) {
+ ret = of_property_read_u32(child, "reg", &id);
+ if (ret || id >= MAX_UART_PORTS)
+ continue;
+
+ ret = psuart_init_port(pu, child, id);
+ if (ret == -EPROBE_DEFER) {
+ /*
+ * -EPROBE_DEFER can only happen on the first port,
+ * so there are no initialized ports to free.
+ */
+ of_node_put(child);
+ psuart_free_pruss(pu);
+ return ret;
+ } else if (ret) {
+ dev_err(pu->dev, "init port[%u] failed (%d)\n", id, ret);
+ }
+ }
+ return 0;
+}
+
+static int psuart_remove(struct platform_device *pdev)
+{
+ struct pru_swuart *pu = platform_get_drvdata(pdev);
+ struct psuart_port *pp;
+ int i;
+
+ for (i = 0; i < MAX_UART_PORTS; i++) {
+ pp = &pports[pu->pru_id * MAX_UART_PORTS + i];
+ if (!pp->mbase)
+ continue;
+
+ uart_remove_one_port(&psuart_port_drv, &pp->port);
+ pp->mbase = NULL;
+ free_irq(pp->port.irq, pp);
+ }
+
+ psuart_free_pruss(pu);
+ return 0;
+}
+
+static struct platform_driver psuart_driver = {
+ .probe = psuart_probe,
+ .remove = psuart_remove,
+ .driver = {
+ .name = "pru_swuart",
+ .of_match_table = psuart_dt_ids,
+ },
+};
+
+static int __init psuart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&psuart_port_drv);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&psuart_driver);
+ if (ret)
+ uart_unregister_driver(&psuart_port_drv);
+
+ return ret;
+}
+module_init(psuart_init);
+
+static void __exit psuart_exit(void)
+{
+ platform_driver_unregister(&psuart_driver);
+ uart_unregister_driver(&psuart_port_drv);
+}
+module_exit(psuart_exit);
+
+MODULE_AUTHOR("Bin Liu <b-liu@ti.com>");
+MODULE_DESCRIPTION("PRU SWUART Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 90e246601537..eccb1c766bba 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -214,6 +214,7 @@ static int cdns_ti_remove(struct platform_device *pdev)
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
+ { .compatible = "ti,am64-usb", },
{},
};
MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 7a2304565a73..766718220e3c 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -139,4 +139,13 @@ config USB_DWC3_QCOM
for peripheral mode support.
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_AM62
+ tristate "Texas Instruments AM62 Platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support TI's AM62 platforms with DesignWare Core USB3 IP.
+ The DesignWare Core USB3 IP is programmed to operate in
+ USB 2.0 mode only.
+ Say 'Y' or 'M' here if you have one such device.
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..c75e15bba769 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -42,6 +42,7 @@ endif
# and allyesconfig builds.
##
+obj-$(CONFIG_USB_DWC3_AM62) += dwc3-am62.o
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
new file mode 100644
index 000000000000..6efcc2b2727f
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "core.h"
+
+/* USB WRAPPER register offsets */
+#define USBSS_PID 0x0
+#define USBSS_OVERCURRENT_CTRL 0x4
+#define USBSS_PHY_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_CORE_STAT 0x14
+#define USBSS_HOST_VBUS_CTRL 0x18
+#define USBSS_MODE_CONTROL 0x1c
+#define USBSS_WAKEUP_CONFIG 0x30
+#define USBSS_WAKEUP_STAT 0x34
+#define USBSS_OVERRIDE_CONFIG 0x38
+#define USBSS_IRQ_MISC_STATUS_RAW 0x430
+#define USBSS_IRQ_MISC_STATUS 0x434
+#define USBSS_IRQ_MISC_ENABLE_SET 0x438
+#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c
+#define USBSS_IRQ_MISC_EOI 0x440
+#define USBSS_INTR_TEST 0x490
+#define USBSS_VBUS_FILTER 0x614
+#define USBSS_VBUS_STAT 0x618
+#define USBSS_DEBUG_CFG 0x708
+#define USBSS_DEBUG_DATA 0x70c
+#define USBSS_HOST_HUB_CTRL 0x714
+
+/* PHY CONFIG register bits */
+#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS_PHY_VBUS_SEL_SHIFT 1
+#define USBSS_PHY_LANE_REVERSE BIT(0)
+
+/* CORE STAT register bits */
+#define USBSS_CORE_OPERATIONAL_MODE_MASK GENMASK(13, 12)
+#define USBSS_CORE_OPERATIONAL_MODE_SHIFT 12
+
+/* MODE CONTROL register bits */
+#define USBSS_MODE_VALID BIT(0)
+
+/* WAKEUP CONFIG register bits */
+#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3)
+#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2)
+#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1)
+#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0)
+
+/* WAKEUP STAT register bits */
+#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4)
+#define USBSS_WAKEUP_STAT_LINESTATE BIT(3)
+#define USBSS_WAKEUP_STAT_SESSVALID BIT(2)
+#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1)
+#define USBSS_WAKEUP_STAT_CLR BIT(0)
+
+/* IRQ_MISC_STATUS_RAW register bits */
+#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20)
+
+/* IRQ_MISC_STATUS register bits */
+#define USBSS_IRQ_MISC_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_SET register bits */
+#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_CLR register bits */
+#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20)
+
+/* IRQ_MISC_EOI register bits */
+#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0)
+
+/* VBUS_STAT register bits */
+#define USBSS_VBUS_STAT_SESSVALID BIT(2)
+#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
+
+/* Mask for PHY PLL REFCLK */
+#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+
+#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+
+struct dwc3_data {
+ struct device *dev;
+ void __iomem *usbss;
+ struct clk *usb2_refclk;
+ int rate_code;
+ struct regmap *syscon;
+ unsigned int offset;
+ unsigned int vbus_divider;
+};
+
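+/*
+ * The table index doubles as the PHY PLL refclk rate code written to the
+ * syscon register, so entries follow the hardware encoding rather than
+ * ascending order.
+ */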
+static const int dwc3_ti_rate_table[] = { /* in kHz */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+{
+ return readl((data->usbss) + offset);
+}
+
+static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+{
+ writel(value, (data->usbss) + offset);
+}
+
+static int phy_syscon_pll_refclk(struct dwc3_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ data->syscon = syscon;
+
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+ 0, &args);
+ if (ret)
+ return ret;
+
+ data->offset = args.args[0];
+
+ ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+ if (ret) {
+ dev_err(dev, "failed to set phy pll reference clock rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_data *data;
+ int i, ret;
+ unsigned long rate;
+ u32 reg;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ /* Calculate the rate code */
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; /* to kHz */
+ for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+ if (dwc3_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu kHz\n", rate);
+ return -EINVAL;
+ }
+
+ data->rate_code = i;
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(data);
+ if (ret)
+ return ret;
+
+ /* VBUS divider select */
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+ if (data->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Don't ignore the children for runtime PM: the glue device must
+ * stay active while the dwc3 core below it is in use.
+ */
+ clk_prepare_enable(data->usb2_refclk);
+ pm_runtime_get_noresume(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create dwc3 core: %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ /* Device has capability to wakeup system from sleep */
+ device_set_wakeup_capable(dev, true);
+
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+err_pm_disable:
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ return ret;
+}
+
+static int dwc3_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+static int dwc3_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dwc3_data *data = platform_get_drvdata(pdev);
+ u32 reg;
+
+ device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+
+ /* Clear mode valid bit */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg &= ~USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ pm_runtime_put_sync(dev);
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dwc3_ti_suspend_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+ u32 reg, current_prtcap_dir;
+
+ if (device_may_wakeup(dev)) {
+ reg = dwc3_ti_readl(data, USBSS_CORE_STAT);
+ current_prtcap_dir = (reg & USBSS_CORE_OPERATIONAL_MODE_MASK)
+ >> USBSS_CORE_OPERATIONAL_MODE_SHIFT;
+ /* Set wakeup config enable bits */
+ reg = dwc3_ti_readl(data, USBSS_WAKEUP_CONFIG);
+ if (current_prtcap_dir == DWC3_GCTL_PRTCAP_HOST) {
+ reg |= USBSS_WAKEUP_CFG_LINESTATE_EN | USBSS_WAKEUP_CFG_OVERCURRENT_EN;
+ } else {
+ reg |= USBSS_WAKEUP_CFG_OVERCURRENT_EN | USBSS_WAKEUP_CFG_LINESTATE_EN |
+ USBSS_WAKEUP_CFG_VBUSVALID_EN;
+ }
+ dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, reg);
+ }
+
+ clk_disable_unprepare(data->usb2_refclk);
+
+ return 0;
+}
+
+static int dwc3_ti_resume_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+ u32 reg;
+
+ clk_prepare_enable(data->usb2_refclk);
+
+ if (device_may_wakeup(dev)) {
+ /* Clear wakeup config enable bits */
+ reg = dwc3_ti_readl(data, USBSS_WAKEUP_CONFIG);
+ reg &= ~(USBSS_WAKEUP_CFG_OVERCURRENT_EN | USBSS_WAKEUP_CFG_LINESTATE_EN |
+ USBSS_WAKEUP_CFG_VBUSVALID_EN);
+ dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, reg);
+ }
+
+ reg = dwc3_ti_readl(data, USBSS_WAKEUP_STAT);
+ /* Clear the wakeup status with wakeup clear bit */
+ reg |= USBSS_WAKEUP_STAT_CLR;
+ dwc3_ti_writel(data, USBSS_WAKEUP_STAT, reg);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
+ dwc3_ti_resume_common, NULL);
+
+#define DEV_PM_OPS (&dwc3_ti_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id dwc3_ti_of_match[] = {
+ { .compatible = "ti,am62-usb"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
+
+static struct platform_driver dwc3_ti_driver = {
+ .probe = dwc3_ti_probe,
+ .remove = dwc3_ti_remove,
+ .driver = {
+ .name = "dwc3-am62",
+ .pm = DEV_PM_OPS,
+ .of_match_table = dwc3_ti_of_match,
+ },
+};
+
+module_platform_driver(dwc3_ti_driver);
+
+MODULE_ALIAS("platform:dwc3-am62");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 4722b7f7a4a2..60be05c820e4 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/usb/typec.h>
#include <linux/usb/role.h>
+#include <linux/workqueue.h>
/* Register offsets */
#define TPS_REG_VID 0x00
@@ -95,6 +96,7 @@ struct tps6598x {
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
struct usb_role_switch *role_sw;
+ struct delayed_work wq_poll;
};
/*
@@ -389,9 +391,8 @@ static const struct typec_operations tps6598x_ops = {
.pr_set = tps6598x_pr_set,
};
-static irqreturn_t tps6598x_interrupt(int irq, void *data)
+static int tps6598x_handle_interrupt_status(struct tps6598x *tps)
{
- struct tps6598x *tps = data;
u64 event1;
u64 event2;
u32 status;
@@ -431,9 +432,31 @@ err_clear_ints:
err_unlock:
mutex_unlock(&tps->lock);
+ return ret;
+}
+
+static irqreturn_t tps6598x_interrupt(int irq, void *data)
+{
+ struct tps6598x *tps = data;
+
+ tps6598x_handle_interrupt_status(tps);
return IRQ_HANDLED;
}
+/* Time interval for Polling */
+#define POLL_INTERVAL 500 /* msecs */
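+/* poll the interrupt status; the work re-queues itself while handling succeeds */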
+static void tps6598x_poll_work(struct work_struct *work)
+{
+ struct tps6598x *tps = container_of(to_delayed_work(work),
+ struct tps6598x, wq_poll);
+ int ret;
+
+ ret = tps6598x_handle_interrupt_status(tps);
+ if (!ret)
+ queue_delayed_work(system_power_efficient_wq,
+ &tps->wq_poll, msecs_to_jiffies(POLL_INTERVAL));
+}
static int tps6598x_check_mode(struct tps6598x *tps)
{
char mode[5] = { };
@@ -571,10 +594,18 @@ static int tps6598x_probe(struct i2c_client *client)
dev_err(&client->dev, "failed to register partner\n");
}
- ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- tps6598x_interrupt,
- IRQF_SHARED | IRQF_ONESHOT,
- dev_name(&client->dev), tps);
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ tps6598x_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&client->dev), tps);
+ } else {
+ dev_warn(tps->dev, "Unable to find the interrupt, switching to polling\n");
+ INIT_DELAYED_WORK(&tps->wq_poll, tps6598x_poll_work);
+ queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
+ msecs_to_jiffies(POLL_INTERVAL));
+ /* don't let a stale ret from earlier in probe trip the error path */
+ ret = 0;
+ }
+
if (ret) {
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
@@ -605,6 +636,39 @@ static int tps6598x_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused tps6598x_suspend(struct device *dev)
+{
+ struct tps6598x *tps = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq) {
+ disable_irq(client->irq);
+ } else {
+ /* Cancel any delayed pending polling work */
+ cancel_delayed_work_sync(&tps->wq_poll);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tps6598x_resume(struct device *dev)
+{
+ struct tps6598x *tps = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq) {
+ enable_irq(client->irq);
+ } else {
+ /* Re-queue work in the polling work queue */
+ queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
+ msecs_to_jiffies(POLL_INTERVAL));
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6598x_dev_pm_ops,
+ tps6598x_suspend, tps6598x_resume);
+
static const struct of_device_id tps6598x_of_match[] = {
{ .compatible = "ti,tps6598x", },
{}
@@ -621,6 +685,7 @@ static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
.of_match_table = tps6598x_of_match,
+ .pm = &tps6598x_dev_pm_ops,
},
.probe_new = tps6598x_probe,
.remove = tps6598x_remove,